Diffstat (limited to 'example/decode_resize_encode.c')
-rw-r--r--  example/decode_resize_encode.c  570
1 files changed, 570 insertions, 0 deletions
diff --git a/example/decode_resize_encode.c b/example/decode_resize_encode.c
new file mode 100644
index 0000000..d436317
--- /dev/null
+++ b/example/decode_resize_encode.c
@@ -0,0 +1,570 @@
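+/*
+ * decode_resize_encode: decode the video stream of an input file, resize it
+ * with libswscale and re-encode it into an output file. With -T everything
+ * runs sequentially in one loop; otherwise the work goes through a libtuberia
+ * pipeline (read -> decode -> resize -> encode -> write).
+ */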
+#include <inttypes.h>
+#include <libavcodec/avcodec.h>
+#include <libavfilter/avfilter.h>
+#include <libavformat/avformat.h>
+#include <libavutil/pixdesc.h>
+#include <libswscale/swscale.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <tuberia.h>
+#include <unistd.h>
+
+const char *optstring = "hi:o:s:T";
+
+static void printhelp(const char *argv0)
+{
+ printf(
+ "%s -i <input> -o <output> -s <size> [-T]\n"
+ "%s -h\n"
+ "\n"
+ " -i input the input file\n"
+ " -o output the output file\n"
+ " -s WIDTHxHEIGHT the size to scale the video\n"
+ " -T don't use libtuberia\n"
+ " -h print this help\n", argv0, argv0);
+}
+
+static void close_codec(AVCodecContext **ctx)
+{
+ avcodec_free_context(ctx);
+}
+
+static void close_input(AVFormatContext **ctx)
+{
+ avformat_close_input(ctx);
+}
+
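+/* Open the input container, locate the first video stream and open a decoder
+ * for it; fills in *avformatin, *avcodecin and *video_idx on success. */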
+static int open_input(AVFormatContext **avformatin, AVCodecContext **avcodecin,
+ int *video_idx, const char *file)
+{
+ AVFormatContext *ctx = NULL;
+ const AVCodec *codec = NULL;
+ int i = 0;
+
+ printf("Opening input file %s\n", file);
+ if (avformat_open_input(&ctx, file, NULL, NULL)) {
+ return -1;
+ }
+ printf("Parsing input file %s\n", file);
+ if (avformat_find_stream_info(ctx, NULL) < 0) {
+ close_input(&ctx);
+ return -1;
+ }
+
+ for (i = 0; i < (int)ctx->nb_streams; i++) {
+ if (ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
+ *video_idx = i;
+ codec = avcodec_find_decoder(ctx->streams[i]->codecpar->codec_id);
+ if (codec == NULL) {
+ fprintf(stderr, "Couldn't find decoder for codec id %d (%s)\n",
+ ctx->streams[i]->codecpar->codec_id,
+ avcodec_get_name(ctx->streams[i]->codecpar->codec_id));
+ close_input(&ctx);
+ return -1;
+ }
+ printf("Opening input codec %s (%d)\n", avcodec_get_name(codec->id),
+ codec->id);
+ *avcodecin = avcodec_alloc_context3(codec);
+ avcodec_parameters_to_context(*avcodecin, ctx->streams[i]->codecpar);
+ (*avcodecin)->time_base = ctx->streams[i]->time_base;
+ if (avcodec_open2(*avcodecin, codec, NULL) < 0) {
+ fprintf(stderr, "Couldn't open codec %d (%s)\n",
+ ctx->streams[i]->codecpar->codec_id,
+ avcodec_get_name(ctx->streams[i]->codecpar->codec_id));
+ close_input(&ctx);
+ close_codec(avcodecin);
+ return -1;
+ }
+ break;
+ }
+ }
+
+ *avformatin = ctx;
+
+ return 0;
+}
+
+static void close_sws(struct SwsContext **ctx)
+{
+ sws_freeContext(*ctx);
+ *ctx = NULL;
+}
+
+static void close_output(AVFormatContext **ctx)
+{
+ /* Skip the trailer if the output file was never opened (error paths). */
+ if ((*ctx)->pb != NULL) {
+  av_write_trailer(*ctx);
+  avformat_flush(*ctx);
+ }
+ avio_closep(&(*ctx)->pb);
+ avformat_free_context(*ctx);
+ *ctx = NULL;
+}
+
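+/* Create the output container with one stream per input stream. For the video
+ * stream, open an encoder at the requested size and a swscale context that
+ * converts decoded frames to that size, then open the file and write the
+ * header. */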
+static int open_output(AVFormatContext **avformatout,
+ AVCodecContext **avcodecout, struct SwsContext **sws, const char *file,
+ const AVFormatContext *input, int width, int height)
+{
+ AVFormatContext *ctx = NULL;
+ int i = 0;
+ const AVCodec *codec = NULL;
+ const AVCodecParameters *codecparin = NULL;
+ AVStream *stream = NULL;
+
+ printf("Opening output file %s\n", file);
+ if (avformat_alloc_output_context2(&ctx, NULL, NULL, file) < 0
+ || ctx == NULL) {
+ return -1;
+ }
+
+ for (i = 0; i < (int)input->nb_streams; i++) {
+ codecparin = input->streams[i]->codecpar;
+ codec = avcodec_find_encoder(codecparin->codec_id);
+ if (codec == NULL) {
+ fprintf(stderr, "Couldn't find encoder for codec id %d (%s)\n",
+ codecparin->codec_id, avcodec_get_name(codecparin->codec_id));
+ close_output(&ctx);
+ return -1;
+ }
+ printf("Opening output codec %s (%d)\n", avcodec_get_name(codec->id),
+ codec->id);
+ stream = avformat_new_stream(ctx, codec);
+ avcodec_parameters_copy(stream->codecpar, codecparin);
+ stream->time_base = input->streams[i]->time_base;
+ if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
+ stream->codecpar->width = width;
+ stream->codecpar->height = height;
+ *avcodecout = avcodec_alloc_context3(codec);
+ avcodec_parameters_to_context(*avcodecout, stream->codecpar);
+ (*avcodecout)->time_base = stream->time_base;
+ if (avcodec_open2(*avcodecout, codec, NULL) < 0) {
+ fprintf(stderr, "Couldn't open codec %s (%d)\n",
+ avcodec_get_name(codecparin->codec_id),
+ codecparin->codec_id);
+ close_output(&ctx);
+ close_codec(avcodecout);
+ return -1;
+ }
+ printf("Opening resize context %dx%d %s -> %dx%d %s\n",
+ codecparin->width, codecparin->height,
+ av_get_pix_fmt_name((enum AVPixelFormat)codecparin->format),
+ stream->codecpar->width, stream->codecpar->height,
+ av_get_pix_fmt_name((enum AVPixelFormat)stream->codecpar->format));
+ *sws = sws_getContext(codecparin->width, codecparin->height,
+ (enum AVPixelFormat)codecparin->format,
+ stream->codecpar->width, stream->codecpar->height,
+ (enum AVPixelFormat)stream->codecpar->format,
+ SWS_GAUSS, NULL, NULL, NULL);
+ if (*sws == NULL) {
+ fprintf(stderr, "Couldn't open resize context %dx%d %s -> %dx%d %s\n",
+ codecparin->width, codecparin->height,
+ av_get_pix_fmt_name((enum AVPixelFormat)codecparin->format),
+ stream->codecpar->width, stream->codecpar->height,
+ av_get_pix_fmt_name((enum AVPixelFormat)stream->codecpar->format));
+ close_output(&ctx);
+ close_codec(avcodecout);
+ close_sws(sws);
+ return -1;
+ }
+ }
+ }
+
+ if (!(ctx->oformat->flags & AVFMT_NOFILE)) {
+ int ret = avio_open(&ctx->pb, file, AVIO_FLAG_WRITE);
+ if (ret < 0) {
+ fprintf(stderr, "Couldn't create output file %s %s (%d)\n", file,
+ av_err2str(ret), ret);
+ close_output(&ctx);
+ close_codec(avcodecout);
+ close_sws(sws);
+ return -1;
+ }
+ }
+ if (avformat_write_header(ctx, NULL) < 0) {
+ fprintf(stderr, "Couldn't write header to output file %s\n", file);
+ }
+ *avformatout = ctx;
+ return 0;
+}
+
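+/* Feed one packet to the decoder and try to receive one frame. The packet's
+ * stream index is stashed in frame->opaque so the encoder side can restore it
+ * later. The packet is always unreferenced. */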
+static int decode_packet(AVCodecContext *ctx, AVPacket *packet, AVFrame *frame)
+{
+ int ret = 0;
+
+ ret = avcodec_send_packet(ctx, packet);
+ if (!ret) {
+ ret = avcodec_receive_frame(ctx, frame);
+ if (!ret) {
+ frame->opaque = av_malloc(sizeof(int));
+ *(int *)frame->opaque = packet->stream_index;
+ }
+ } else {
+ fprintf(stderr, "Couldn't decode video packet %s (%d)\n",
+ av_err2str(ret), ret);
+ }
+ av_packet_unref(packet);
+
+ return ret;
+}
+
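+/* Resize/convert a decoded frame into `scaled` with swscale, carrying the
+ * stream index stored in frame->opaque over to the scaled frame. */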
+static int scale_frame(struct SwsContext *sws, AVFrame *frame, AVFrame *scaled,
+ int width, int height, enum AVPixelFormat pix_fmt)
+{
+ av_frame_copy_props(scaled, frame);
+ scaled->width = width;
+ scaled->height = height;
+ scaled->format = pix_fmt;
+ av_frame_get_buffer(scaled, 0);
+ sws_scale(sws, (const uint8_t * const *)frame->data, frame->linesize,
+ 0, frame->height, scaled->data, scaled->linesize);
+ scaled->opaque = frame->opaque;
+ frame->opaque = NULL;
+ av_frame_unref(frame);
+
+ return 0;
+}
+
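+/* Feed one frame to the encoder and try to receive one packet, restoring the
+ * stream index stashed in frame->opaque. The frame is always unreferenced. */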
+static int encode_frame(AVCodecContext *ctx, AVFrame *frame, AVPacket *packet)
+{
+ int ret = 0;
+
+ ret = avcodec_send_frame(ctx, frame);
+ if (!ret) {
+ ret = avcodec_receive_packet(ctx, packet);
+ if (!ret && frame->opaque != NULL) {
+ packet->stream_index = *(int *)frame->opaque;
+ }
+ } else {
+ fprintf(stderr, "Couldn't encode video packet %s (%d)\n",
+ av_err2str(ret), ret);
+ }
+ av_freep(&frame->opaque);
+ av_frame_unref(frame);
+
+ return ret;
+}
+
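+/* Drain decoder and encoder at end of stream: flush the remaining frames
+ * through scale/encode and write the resulting packets to the output. */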
+static void flush_video(AVFormatContext *avformatin, AVCodecContext *avcodecin,
+ struct SwsContext *sws, AVCodecContext *avcodecout,
+ AVFormatContext *avformatout, int idx)
+{
+ AVFrame *frame = NULL;
+ AVPacket *packet = NULL;
+ AVFrame *scaled = NULL;
+
+ frame = av_frame_alloc();
+ packet = av_packet_alloc();
+ scaled = av_frame_alloc();
+ avcodec_send_packet(avcodecin, NULL);
+ while (!avcodec_receive_frame(avcodecin, frame)) {
+ scale_frame(sws, frame, scaled, avcodecout->width,
+ avcodecout->height, avcodecout->pix_fmt);
+ if (!encode_frame(avcodecout, scaled, packet)) {
+ av_packet_rescale_ts(packet, avformatin->streams[idx]->time_base,
+ avformatout->streams[idx]->time_base);
+ packet->stream_index = idx;
+ av_interleaved_write_frame(avformatout, packet);
+ }
+ }
+ avcodec_send_frame(avcodecout, NULL);
+ while (!avcodec_receive_packet(avcodecout, packet)) {
+ av_packet_rescale_ts(packet, avformatin->streams[idx]->time_base,
+ avformatout->streams[idx]->time_base);
+ packet->stream_index = idx;
+ av_interleaved_write_frame(avformatout, packet);
+ }
+ av_frame_free(&frame);
+ av_packet_free(&packet);
+ av_frame_free(&scaled);
+}
+
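+/* Element destructors handed to libtuberia; presumably used by the library to
+ * release packets/frames still queued when the pipeline is torn down. */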
+void tube_packet_free(void *element)
+{
+ av_packet_free((AVPacket **)&element);
+}
+
+void tube_frame_free(void *element)
+{
+ av_frame_free((AVFrame **)&element);
+}
+
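+/* Pipeline stage: decode one packet (opaque is the input codec context).
+ * Returns the decoded frame, or NULL if no frame was produced. */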
+static void *tube_decode(void *element, void *opaque)
+{
+ AVPacket *packet = element;
+ AVCodecContext *avcodecin = opaque;
+ AVFrame *frame = av_frame_alloc();
+
+ if (decode_packet(avcodecin, packet, frame)) {
+ av_frame_free(&frame);
+ }
+ av_packet_free(&packet);
+
+ return frame;
+}
+
+struct tube_resize_opaque {
+ struct SwsContext *sws;
+ int width;
+ int height;
+ enum AVPixelFormat pix_fmt;
+};
+
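+/* Pipeline stage: scale a decoded frame to the output size and pixel format. */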
+static void *tube_resize(void *element, void *opaque)
+{
+ AVFrame *frame = element;
+ struct tube_resize_opaque *o = opaque;
+ AVFrame *scaled = av_frame_alloc();
+
+ scale_frame(o->sws, frame, scaled, o->width, o->height, o->pix_fmt);
+ av_frame_free(&frame);
+
+ return scaled;
+}
+
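+/* Pipeline stage: encode a scaled frame (opaque is the output codec context).
+ * Returns the encoded packet, or NULL if no packet was produced. */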
+static void *tube_encode(void *element, void *opaque)
+{
+ AVFrame *frame = element;
+ AVCodecContext *avcodecout = opaque;
+ AVPacket *packet = av_packet_alloc();
+
+ if (encode_frame(avcodecout, frame, packet)) {
+ av_packet_free(&packet);
+ }
+ av_frame_free(&frame);
+
+ return packet;
+}
+
+struct tube_mux_opaque {
+ AVFormatContext *avformatin;
+ AVFormatContext *avformatout;
+};
+
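+/* Pipeline sink: rescale the packet timestamps to the output stream's time
+ * base and write the packet to the output file. */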
+void tube_mux(void *element, void *opaque)
+{
+ AVPacket *packet = element;
+ struct tube_mux_opaque *o = opaque;
+ AVPacket packet2mux;
+ int idx = packet->stream_index;
+
+ av_packet_rescale_ts(packet, o->avformatin->streams[idx]->time_base,
+ o->avformatout->streams[idx]->time_base);
+ av_packet_ref(&packet2mux, packet);
+ av_interleaved_write_frame(o->avformatout, &packet2mux);
+ av_packet_free(&packet);
+}
+
+static void print_tube_stats(tube *ctx)
+{
+ int queued_read = tube_get_queued(ctx, 0);
+ int slots_read = tube_get_slots(ctx, 0);
+ int queued_decode = tube_get_queued(ctx, 1);
+ int slots_decode = tube_get_slots(ctx, 1);
+ int queued_resize = tube_get_queued(ctx, 2);
+ int slots_resize = tube_get_slots(ctx, 2);
+ int queued_encode = tube_get_queued(ctx, 3);
+ int slots_encode = tube_get_slots(ctx, 3);
+
+ printf("read -> [%d/%d] -> deco -> [%d/%d] -> resize -> [%d/%d] -> encode -> [%d/%d] -> write\n",
+ queued_read, slots_read, queued_decode, slots_decode,
+ queued_resize, slots_resize, queued_encode, slots_encode);
+}
+
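+/* Assemble the read -> decode -> resize -> encode pipeline and install
+ * tube_mux() as the sink. The stages are appended to the source, which is
+ * assumed to chain them in append order (see the libtuberia API). */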
+static tube *build_tuberia(AVCodecContext *avcodecin, AVCodecContext *avcodecout,
+ struct tube_resize_opaque *tube_resize_opaque,
+ struct tube_mux_opaque *tube_mux_opaque)
+{
+ tube *ctx = NULL;
+ tube_source *source = NULL;
+ tube_stage *decode = NULL;
+ tube_stage *resize = NULL;
+ tube_stage *encode = NULL;
+
+ source = tube_source_alloc(10, NULL, NULL, tube_packet_free);
+ decode = tube_stage_alloc(10, tube_decode, avcodecin, tube_frame_free);
+ resize = tube_stage_alloc(10, tube_resize, tube_resize_opaque, tube_frame_free);
+ encode = tube_stage_alloc(10, tube_encode, avcodecout, tube_packet_free);
+
+ if (tube_stage_append(source, decode)) {
+ fprintf(stderr, "Couldn't append decode stage to tuberia\n");
+ return NULL;
+ }
+ if (tube_stage_append(source, resize)) {
+ fprintf(stderr, "Couldn't append resize stage to tuberia\n");
+ return NULL;
+ }
+ if (tube_stage_append(source, encode)) {
+ fprintf(stderr, "Couldn't append encode stage to tuberia\n");
+ return NULL;
+ }
+ ctx = tube_alloc(source, tube_mux, tube_mux_opaque);
+ tube_source_and_stages_free(&source);
+
+ return ctx;
+}
+
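+/* Transcode through the libtuberia pipeline: video packets are injected at
+ * the head of the pipeline; packets from other streams are injected at index
+ * 3 with tube_inject_at(), which is assumed to hand them straight to the mux
+ * sink, bypassing decode/resize/encode. */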
+static void do_tuberia(AVFormatContext *avformatin, AVCodecContext *avcodecin,
+ AVFormatContext *avformatout, struct SwsContext *sws,
+ AVCodecContext *avcodecout, int video_idx)
+{
+ tube *ctx = NULL;
+ AVPacket *packet = NULL;
+ int idx = 0;
+ struct tube_resize_opaque tube_resize_opaque = {
+ .sws = sws,
+ .width = avcodecout->width,
+ .height = avcodecout->height,
+ .pix_fmt = avcodecout->pix_fmt
+ };
+ struct tube_mux_opaque tube_mux_opaque = {
+ .avformatin = avformatin,
+ .avformatout = avformatout,
+ };
+
+ ctx = build_tuberia(avcodecin, avcodecout, &tube_resize_opaque,
+ &tube_mux_opaque);
+ if (ctx == NULL) {
+ fprintf(stderr, "Couldn't create tuberia\n");
+ return;
+ }
+
+ printf("Starting to transcode with libtuberia\n");
+ tube_start(ctx);
+ packet = av_packet_alloc();
+ while (!av_read_frame(avformatin, packet)) {
+ idx = packet->stream_index;
+ if (idx == video_idx) {
+ if (tube_inject(ctx, 15000, packet)) {
+ fprintf(stderr, "Couldn't inject packet to tuberia after 15s\n");
+ break;
+ }
+ print_tube_stats(ctx);
+ } else {
+ tube_inject_at(ctx, 3, 15000, packet);
+ }
+ packet = av_packet_alloc();
+ }
+ tube_stop_and_wait_empty(ctx);
+ if (video_idx >= 0) {
+ flush_video(avformatin, avcodecin, sws, avcodecout, avformatout, video_idx);
+ }
+ printf("Transcode with libtuberia finished\n");
+
+ av_packet_free(&packet);
+ tube_free(&ctx);
+}
+
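+/* Reference path without libtuberia: decode, resize, encode and write
+ * sequentially in a single loop; packets from other streams are remuxed
+ * unchanged. */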
+static void do_no_tuberia(AVFormatContext *avformatin, AVCodecContext *avcodecin,
+ AVFormatContext *avformatout, struct SwsContext *sws,
+ AVCodecContext *avcodecout, int video_idx)
+{
+ AVPacket *packet = NULL;
+ AVFrame *frame = NULL;
+ AVFrame *scaled = NULL;
+ int idx = 0;
+
+ packet = av_packet_alloc();
+ frame = av_frame_alloc();
+ scaled = av_frame_alloc();
+
+ printf("Starting to transcode without libtuberia\n");
+ while (!av_read_frame(avformatin, packet)) {
+ idx = packet->stream_index;
+ if (idx == video_idx) {
+ if (decode_packet(avcodecin, packet, frame)) {
+ continue;
+ }
+ scale_frame(sws, frame, scaled, avcodecout->width,
+ avcodecout->height, avcodecout->pix_fmt);
+ if (encode_frame(avcodecout, scaled, packet)) {
+ continue;
+ }
+ }
+ av_packet_rescale_ts(packet, avformatin->streams[idx]->time_base,
+ avformatout->streams[idx]->time_base);
+ av_interleaved_write_frame(avformatout, packet);
+ }
+ if (video_idx >= 0) {
+ flush_video(avformatin, avcodecin, sws, avcodecout, avformatout, video_idx);
+ }
+ printf("Transcode without libtuberia finished\n");
+
+ av_packet_free(&packet);
+ av_frame_free(&frame);
+ av_frame_free(&scaled);
+}
+
+int main(int argc, char **argv)
+{
+ int c = 0;
+ char *input = NULL;
+ char *output = NULL;
+ int width = 720;
+ int height = 404;
+ int notuberia = 0;
+ AVFormatContext *avformatin = NULL;
+ AVFormatContext *avformatout = NULL;
+ AVCodecContext *avcodecin = NULL;
+ AVCodecContext *avcodecout = NULL;
+ struct SwsContext *sws = NULL;
+ int video_idx = -1;
+
+ while ((c = getopt(argc, argv, optstring)) >= 0) {
+ switch (c) {
+ case 'h':
+ printhelp(argv[0]);
+ return 0;
+ break;
+ case 'i':
+ input = optarg;
+ break;
+ case 'o':
+ output = optarg;
+ break;
+ case 's':
+ if (sscanf(optarg, "%dx%d", &width, &height) != 2) {
+ fprintf(stderr, "Size parameter must be WxH, instead it is %s\n",
+ optarg);
+ return -1;
+ }
+ break;
+ case 'T':
+ notuberia = 1;
+ break;
+ case '?':
+ default:
+ fprintf(stderr, "Incorrect arguments. Run %s -h\n", argv[0]);
+ return -1;
+ }
+ }
+ if (input == NULL || !input[0] || output == NULL || !output[0]) {
+ printhelp(argv[0]);
+ return -1;
+ }
+
+ if (open_input(&avformatin, &avcodecin, &video_idx, input) < 0) {
+ fprintf(stderr, "Couldn't open input %s\n", input);
+ return -1;
+ }
+ if (open_output(&avformatout, &avcodecout, &sws, output, avformatin, width,
+ height) < 0) {
+ fprintf(stderr, "Couldn't initiate output %s\n", output);
+ return -1;
+ }
+
+ if (notuberia) {
+ do_no_tuberia(avformatin, avcodecin, avformatout, sws, avcodecout, video_idx);
+ } else {
+ do_tuberia(avformatin, avcodecin, avformatout, sws, avcodecout, video_idx);
+ }
+
+ close_input(&avformatin);
+ close_output(&avformatout);
+ close_codec(&avcodecin);
+ close_codec(&avcodecout);
+ close_sws(&sws);
+
+ return 0;
+}
+