FFmpeg: rotating a yuv420p AVFrame by 180° (an optimal algorithm)

xingyun86 2021-11-10

The examples found online are always missing something and cannot be used as-is, so here is a standard, complete example for reference.

1. Implementing the 180° flip directly on the YUV picture data:

const AVFrame* pAVT = (const AVFrame*)pAVFrame;
if (m_bFlip == true)
{
	// Rotate one tightly packed yuv420p frame by 180°: reverse the Y plane,
	// then reverse the quarter-size U and V planes. Note that this assumes
	// linesize[i] equals the plane width (no row padding).
	auto rotate_v = [](AVFrame* d, const AVFrame* s)
	{
		int hw = s->width >> 1;          // chroma plane width
		int hh = s->height >> 1;         // chroma plane height
		int wxh = s->height * s->width;  // luma plane size
		int hwxhh = wxh >> 2;            // chroma plane size
		// Luma: walk the source rows bottom-up, each row right-to-left.
		for (int i = s->height - 1, n = 0; i >= 0; i--) {
			wxh -= s->width;
			for (int j = s->width - 1; j >= 0; j--, n++) {
				d->data[0][n] = s->data[0][wxh + j];
			}
		}
		// Chroma: same traversal on the U and V planes.
		for (int i = hh - 1, n = 0; i >= 0; i--) {
			hwxhh -= hw;
			for (int j = hw - 1; j >= 0; j--, n++) {
				d->data[1][n] = s->data[1][hwxhh + j];
				d->data[2][n] = s->data[2][hwxhh + j];
			}
		}
		d->pts = s->pts;
		d->width = s->width;
		d->height = s->height;
		d->format = s->format;
		d->pkt_dts = s->pkt_dts;
		d->key_frame = s->key_frame;
		d->linesize[0] = s->linesize[0];
		d->linesize[1] = s->linesize[1];
		d->linesize[2] = s->linesize[2];
	};
	static AVFrame * pAVFrameFlip = NULL;
	if (pAVFrameFlip == NULL)
	{
		pAVFrameFlip = av_frame_alloc();
		if (pAVFrameFlip != NULL)
		{
			// Back the flipped frame with its own packed buffer (width, height in that order).
			pRotateBuffer = (uint8_t*)malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pAVCodecContext->width, pAVCodecContext->height, 1));
			if (pRotateBuffer != NULL)
			{
				av_image_fill_arrays(pAVFrameFlip->data, pAVFrameFlip->linesize, pRotateBuffer, AV_PIX_FMT_YUV420P, pAVCodecContext->width, pAVCodecContext->height, 1);
			}
		}
	}
	rotate_v(pAVFrameFlip, pAVFrame);
	pAVT = pAVFrameFlip;
}
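
The copy above assumes the planes are tightly packed (linesize equal to the plane width), which is often not true for decoder output. Below is a minimal stride-aware sketch of the same 180° flip; rotate_v_strided is a hypothetical helper, and the destination frame is assumed to already have its data/linesize filled in, as pAVFrameFlip is above.

// Sketch: 180° flip of a yuv420p frame that respects each plane's linesize.
static void rotate_v_strided(AVFrame* d, const AVFrame* s)
{
	for (int p = 0; p < 3; p++) {                    // Y, U, V planes
		int w = (p == 0) ? s->width : s->width / 2;
		int h = (p == 0) ? s->height : s->height / 2;
		for (int y = 0; y < h; y++) {
			const uint8_t* src = s->data[p] + (h - 1 - y) * s->linesize[p];
			uint8_t* dst = d->data[p] + y * d->linesize[p];
			for (int x = 0; x < w; x++)
				dst[x] = src[w - 1 - x];             // reverse each row
		}
	}
	d->pts = s->pts;
	d->width = s->width;
	d->height = s->height;
	d->format = s->format;
}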

2. Implementing the 180° rotation with FFmpeg's AVFilter:

// Required headers
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>

// Register all filters (deprecated since FFmpeg 4.0, where this call is no longer needed)
avfilter_register_all();

// Open the video stream / decoder context
...

// Set up the filter graph
char args[512];
int ret;
AVFilter* buffersrc = (AVFilter*)avfilter_get_by_name("buffer");
AVFilter* buffersink = (AVFilter*)avfilter_get_by_name("buffersink"); // must be "buffersink" with newer FFmpeg libraries
AVFilterInOut* outputs = avfilter_inout_alloc();
AVFilterInOut* inputs = avfilter_inout_alloc();
enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
AVBufferSinkParams* buffersink_params;
//const char* filters_descr = "lutyuv='u=128:v=128'";
//const char *filters_descr = "hflip";
const char *filters_descr = "rotate=PI";
//const char *filters_descr = "hue='h=60:s=-3'";
//const char *filters_descr = "crop=2/3*in_w:2/3*in_h";
//const char *filters_descr = "drawbox=x=200:y=200:w=300:h=300:color=pink@0.5";
//const char *filters_descr = "movie=/storage/emulated/0/ws.jpg[wm];[in][wm]overlay=5:5[out]";
//const char *filters_descr="drawgrid=width=100:height=100:thickness=4:color=pink@0.9";
AVFilterContext* buffersink_ctx;
AVFilterContext* buffersrc_ctx;
AVFilterGraph* filter_graph;
filter_graph = avfilter_graph_alloc();
/* buffer video source: the decoded frames from the decoder will be inserted here. */
snprintf(args, sizeof(args),
    "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
    pAVCodecContext->width, pAVCodecContext->height, pAVCodecContext->pix_fmt,
    /*pAVCodecContext->time_base.num*/1, /*pAVCodecContext->time_base.den*/1,
    /*pAVCodecContext->sample_aspect_ratio.num*/25, /*pAVCodecContext->sample_aspect_ratio.den*/1);
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", args, NULL, filter_graph);
if (ret < 0) {
    printf("Cannot create buffer source\n");
    return ret;
}
/* buffer video sink: to terminate the filter chain. */
buffersink_params = av_buffersink_params_alloc();
buffersink_params->pixel_fmts = pix_fmts;
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", NULL, buffersink_params, filter_graph);
av_free(buffersink_params);
if (ret < 0) {
    printf("Cannot create buffer sink\n");
    return ret;
}
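/*
 * Note: AVBufferSinkParams / av_buffersink_params_alloc() are deprecated in
 * newer FFmpeg releases. A common alternative there (a sketch, reusing the
 * same buffersink_ctx and pix_fmts) is to create the sink with NULL params
 * and constrain its output format via libavutil options:
 *
 *   #include <libavutil/opt.h>
 *   ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", NULL, NULL, filter_graph);
 *   ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
 */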
/* Endpoints for the filter graph. */
outputs->name = av_strdup("in");
outputs->filter_ctx = buffersrc_ctx;
outputs->pad_idx = 0;
outputs->next = NULL;
inputs->name = av_strdup("out");
inputs->filter_ctx = buffersink_ctx;
inputs->pad_idx = 0;
inputs->next = NULL;
if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr, &inputs, &outputs, NULL)) < 0) {
    printf("Cannot avfilter_graph_parse_ptr\n");
    return ret;
}
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0) {
    printf("Cannot avfilter_graph_config\n");
    return ret;
}
// The endpoint lists are no longer needed once the graph is configured.
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);
// Scratch input/output frames (not actually used below, where the decoder's
// pAVFrame is pushed into and pulled out of the graph directly).
AVFrame* frame_in = av_frame_alloc();
unsigned char* frame_buffer_in = (unsigned char*)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P,
    pAVCodecContext->width, pAVCodecContext->height, 1));
av_image_fill_arrays(frame_in->data, frame_in->linesize, frame_buffer_in,
    AV_PIX_FMT_YUV420P, pAVCodecContext->width, pAVCodecContext->height, 1);
AVFrame* frame_out = av_frame_alloc();
unsigned char* frame_buffer_out = (unsigned char*)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P,
    pAVCodecContext->width, pAVCodecContext->height, 1));
av_image_fill_arrays(frame_out->data, frame_out->linesize, frame_buffer_out,
    AV_PIX_FMT_YUV420P, pAVCodecContext->width, pAVCodecContext->height, 1);
frame_in->width = pAVCodecContext->width;
frame_in->height = pAVCodecContext->height;
frame_in->format = AV_PIX_FMT_YUV420P;

// After decoding a video frame into pAVFrame
if(bFlip == true)
{
    // push the decoded frame into the filter graph
    if (av_buffersrc_add_frame(buffersrc_ctx, pAVFrame) < 0)
    {
        printf("av_buffersrc_add_frame() failed!\n");
        break;
    }
    av_frame_unref(pAVFrame);
    // pull the filtered (rotated) frame back out
    ret = av_buffersink_get_frame(buffersink_ctx, pAVFrame);
    if (ret < 0)
    {
        printf("av_buffersink_get_frame() failed!\n");
        break;
    }
    pAVT = pAVFrame;
}

// Free the filter graph on exit
if(filter_graph != NULL)
{
    avfilter_graph_free(&filter_graph); 
}
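
The scratch frames and buffers allocated for the filter path should also be released on exit; a minimal sketch, assuming the same names as above:

av_frame_free(&frame_in);
av_frame_free(&frame_out);
av_free(frame_buffer_in);
av_free(frame_buffer_out);
// The buffer from approach 1 came from malloc(), so release it with free().
if (pRotateBuffer != NULL)
{
    free(pRotateBuffer);
    pRotateBuffer = NULL;
}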

