
FFmpeg filters: adding text with drawtext in C++

If you hit the error "No such filter: 'drawtext'", it usually means FFmpeg was not built with the library the drawtext filter depends on. Make sure the FFmpeg you are using was compiled with libfreetype support.

If FFmpeg was configured without the --enable-libfreetype option, the drawtext filter is not available.

You can check whether your FFmpeg build supports the drawtext filter with:

ffmpeg -filters | grep drawtext
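
Once drawtext shows up in that list, it is worth sanity-checking the filter from the command line before writing any C++. A minimal example that reuses the same drawtext options as the code below (the font path is an assumption and may differ on your system):

ffmpeg -i input.mp4 -vf "drawtext=fontfile=/usr/share/fonts/truetype/freefont/FreeSans.ttf:text='Hello, World!':x=10:y=10:fontsize=24:fontcolor=white" output.mp4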

When running ./configure --enable-libfreetype, you may see the error below, which means pkg-config could not find freetype2. This usually happens because pkg-config or the FreeType development package is not installed.

ERROR: freetype2 not found using pkg-config

Fix: install pkg-config and the FreeType development package (Ubuntu):

sudo apt-get update
sudo apt-get install pkg-config libfreetype6-dev
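
After the packages are installed, re-run configure with libfreetype enabled and rebuild. A minimal sketch, assuming the FFmpeg source tree lives in ./ffmpeg and that you add whatever other configure options you normally use:

cd ffmpeg
./configure --enable-libfreetype
make -j$(nproc)
sudo make install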

The C++ implementation is shown below. It opens the input file, decodes the video stream, pushes every frame through a buffer → drawtext → buffersink filter graph, re-encodes the filtered frames, and muxes them into the output file:

#include <iostream>
#include <fstream>
#include <string>
extern "C"{
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
}

void add_text_to_video(const char* input_filename, const char* output_filename, const char* text) {
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(58, 9, 100)
    av_register_all();        // only needed before FFmpeg 4.0; the call was removed in FFmpeg 5+
#endif
#if LIBAVFILTER_VERSION_INT < AV_VERSION_INT(7, 14, 100)
    avfilter_register_all();  // likewise unnecessary and removed on modern FFmpeg
#endif

    AVFormatContext* formatContext = nullptr;
    if (avformat_open_input(&formatContext, input_filename, nullptr, nullptr) < 0) {
        std::cerr << "Could not open input file" << std::endl;
        return;
    }

    if (avformat_find_stream_info(formatContext, nullptr) < 0) {
        std::cerr << "Could not find stream info" << std::endl;
        return;
    }

    const AVCodec* decoder = nullptr;  // on FFmpeg 4.x and older, declare this as AVCodec* instead
    int videoStreamIndex = av_find_best_stream(formatContext, AVMEDIA_TYPE_VIDEO, -1, -1, &decoder, 0);
    if (videoStreamIndex < 0) {
        std::cerr << "Could not find video stream" << std::endl;
        return;
    }

    AVCodecContext* codecContext = avcodec_alloc_context3(decoder);
    avcodec_parameters_to_context(codecContext, formatContext->streams[videoStreamIndex]->codecpar);
    if (avcodec_open2(codecContext, decoder, nullptr) < 0) {
        std::cerr << "Could not open decoder" << std::endl;
        return;
    }

    AVFilterGraph* filterGraph = avfilter_graph_alloc();
    AVFilterContext* buffersrcContext = nullptr;
    AVFilterContext* buffersinkContext = nullptr;

    const AVFilter* buffersrc = avfilter_get_by_name("buffer");
    const AVFilter* buffersink = avfilter_get_by_name("buffersink");

    char args[512];
    snprintf(args, sizeof(args),
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
             codecContext->width, codecContext->height, codecContext->pix_fmt,
             formatContext->streams[videoStreamIndex]->time_base.num, formatContext->streams[videoStreamIndex]->time_base.den,
             codecContext->sample_aspect_ratio.num, codecContext->sample_aspect_ratio.den);

    avfilter_graph_create_filter(&buffersrcContext, buffersrc, "in", args, nullptr, filterGraph);
    avfilter_graph_create_filter(&buffersinkContext, buffersink, "out", nullptr, nullptr, filterGraph);

    // Note the direction: "outputs" describes the pads already produced by existing
    // filters (the buffer source), while "inputs" describes the pads still waiting to
    // be fed (the buffer sink). Swapping the two is a common cause of
    // avfilter_graph_parse_ptr() failures.
    AVFilterInOut* outputs = avfilter_inout_alloc();
    AVFilterInOut* inputs = avfilter_inout_alloc();

    outputs->name = av_strdup("in");        // buffer source feeds the chain's default "in" label
    outputs->filter_ctx = buffersrcContext;
    outputs->pad_idx = 0;
    outputs->next = nullptr;

    inputs->name = av_strdup("out");        // buffer sink consumes the chain's default "out" label
    inputs->filter_ctx = buffersinkContext;
    inputs->pad_idx = 0;
    inputs->next = nullptr;

    std::string drawtextArgs = "drawtext=fontfile=/usr/share/fonts/truetype/freefont/FreeSans.ttf:text='" + std::string(text) + "':x=10:y=10:fontsize=24:fontcolor=white";
    avfilter_graph_parse_ptr(filterGraph, drawtextArgs.c_str(), &inputs, &outputs, nullptr);
    avfilter_graph_config(filterGraph, nullptr);
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);

    // Filtered frames have to be re-encoded before they can be muxed;
    // here we simply reuse the codec of the input stream.
    const AVCodec* encoder = avcodec_find_encoder(codecContext->codec_id);
    if (!encoder) {
        std::cerr << "No encoder available for the input codec" << std::endl;
        return;
    }
    AVCodecContext* encContext = avcodec_alloc_context3(encoder);
    encContext->width = codecContext->width;
    encContext->height = codecContext->height;
    encContext->sample_aspect_ratio = codecContext->sample_aspect_ratio;
    encContext->pix_fmt = (AVPixelFormat)av_buffersink_get_format(buffersinkContext);
    encContext->time_base = av_buffersink_get_time_base(buffersinkContext);

    AVFormatContext* outputFormatContext = nullptr;
    avformat_alloc_output_context2(&outputFormatContext, nullptr, nullptr, output_filename);
    if (outputFormatContext->oformat->flags & AVFMT_GLOBALHEADER)
        encContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    if (avcodec_open2(encContext, encoder, nullptr) < 0) {
        std::cerr << "Could not open encoder" << std::endl;
        return;
    }

    AVStream* out_stream = avformat_new_stream(outputFormatContext, nullptr);
    avcodec_parameters_from_context(out_stream->codecpar, encContext);
    out_stream->time_base = encContext->time_base;

    if (avio_open(&outputFormatContext->pb, output_filename, AVIO_FLAG_WRITE) < 0) {
        std::cerr << "Could not open output file" << std::endl;
        return;
    }

    avformat_write_header(outputFormatContext, nullptr);

    AVPacket* packet = av_packet_alloc();
    AVPacket* encPacket = av_packet_alloc();
    AVFrame* frame = av_frame_alloc();
    AVFrame* filteredFrame = av_frame_alloc();

    while (av_read_frame(formatContext, packet) >= 0) {
        if (packet->stream_index == videoStreamIndex) {
            avcodec_send_packet(codecContext, packet);
            while (avcodec_receive_frame(codecContext, frame) >= 0) {
                frame->pts = frame->best_effort_timestamp;
                av_buffersrc_add_frame(buffersrcContext, frame);
                while (av_buffersink_get_frame(buffersinkContext, filteredFrame) >= 0) {
                    // Encode the filtered frame and write the resulting packets.
                    avcodec_send_frame(encContext, filteredFrame);
                    while (avcodec_receive_packet(encContext, encPacket) >= 0) {
                        encPacket->stream_index = out_stream->index;
                        av_packet_rescale_ts(encPacket, encContext->time_base, out_stream->time_base);
                        av_interleaved_write_frame(outputFormatContext, encPacket);
                        av_packet_unref(encPacket);
                    }
                    av_frame_unref(filteredFrame);
                }
                av_frame_unref(frame);
            }
        }
        av_packet_unref(packet);
    }

    // Flush the encoder. A complete implementation would also flush the decoder
    // and the filter graph to avoid dropping the last few frames.
    avcodec_send_frame(encContext, nullptr);
    while (avcodec_receive_packet(encContext, encPacket) >= 0) {
        encPacket->stream_index = out_stream->index;
        av_packet_rescale_ts(encPacket, encContext->time_base, out_stream->time_base);
        av_interleaved_write_frame(outputFormatContext, encPacket);
        av_packet_unref(encPacket);
    }

    av_write_trailer(outputFormatContext);

    avfilter_graph_free(&filterGraph);
    avcodec_free_context(&codecContext);
    avcodec_free_context(&encContext);
    avformat_close_input(&formatContext);
    avio_closep(&outputFormatContext->pb);
    avformat_free_context(outputFormatContext);
    av_frame_free(&frame);
    av_frame_free(&filteredFrame);
    av_packet_free(&packet);
    av_packet_free(&encPacket);
}

int main() {
    const char* input_filename = "input.mp4";
    const char* output_filename = "output.mp4";
    const char* text = "Hello, World!";

    add_text_to_video(input_filename, output_filename, text);

    return 0;
}
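
To build the example, link against the FFmpeg libraries. A typical command, assuming the source is saved as add_text.cpp (the file name is an assumption) and the FFmpeg development files are visible to pkg-config:

g++ -std=c++11 add_text.cpp -o add_text $(pkg-config --cflags --libs libavformat libavcodec libavfilter libavutil)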


Original article: https://blog.csdn.net/melonbo/article/details/140158965
