我正在尝试使用h264编解码器将AVFrames矢量编码为MP4文件。
代码运行没有错误,但当我尝试用 Windows Media Player 和 Adobe Media Encoder 打开保存的视频文件时,它们提示这是不支持的格式。
我用调试器调试了一下,一切似乎都很正常。
这是我用来保存视频的函数:
void SaveVideo(std::string& output_filename, std::vector<AVFrame> video)
{
// Initialize FFmpeg
avformat_network_init();
// Open the output file context
AVFormatContext* format_ctx = nullptr;
int ret = avformat_alloc_output_context2(&format_ctx, nullptr, nullptr, output_filename.c_str());
if (ret < 0) {
wxMessageBox("Error creating output context: ");
wxMessageBox(av_err2str(ret));
return;
}
// Open the output file
ret = avio_open(&format_ctx->pb, output_filename.c_str(), AVIO_FLAG_WRITE);
if (ret < 0) {
std::cerr << "Error opening output file: " << av_err2str(ret) << std::endl;
avformat_free_context(format_ctx);
return;
}
// Create the video stream
const AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (!codec) {
std::cerr << "Error finding H.264 encoder" << std::endl;
avformat_free_context(format_ctx);
return;
}
AVStream* stream = avformat_new_stream(format_ctx, codec);
if (!stream) {
std::cerr << "Error creating output stream" << std::endl;
avformat_free_context(format_ctx);
return;
}
// Set the stream parameters
stream->codecpar->codec_id = AV_CODEC_ID_H264;
stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
stream->codecpar->width =video.front().width;
stream->codecpar->height = video.front().height;
stream->codecpar->format = AV_PIX_FMT_YUV420P;
stream->codecpar->bit_rate = 400000;
AVRational framerate = { 1, 30};
stream->time_base = av_inv_q(framerate);
// Open the codec context
AVCodecContext* codec_ctx = avcodec_alloc_context3(codec);
codec_ctx->codec_tag = 0;
codec_ctx->time_base = stream->time_base;
codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
if (!codec_ctx) {
std::cout << "Error allocating codec context" << std::endl;
avformat_free_context(format_ctx);
return;
}
ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
if (ret < 0) {
std::cout << "Error setting codec context parameters: " << av_err2str(ret) << std::endl;
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
AVDictionary* opt = NULL;
ret = avcodec_open2(codec_ctx, codec, &opt);
if (ret < 0) {
wxMessageBox("Error opening codec: ");
wxMessageBox(av_err2str(ret));
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
// Allocate a buffer for the frame data
AVFrame* frame = av_frame_alloc();
if (!frame) {
std::cerr << "Error allocating frame" << std::endl;
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
frame->format = codec_ctx->pix_fmt;
frame->width = codec_ctx->width;
frame->height = codec_ctx->height;
ret = av_frame_get_buffer(frame, 0);
if (ret < 0) {
std::cerr << "Error allocating frame buffer: " << av_err2str(ret) << std::endl;
av_frame_free(&frame);
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
// Allocate a buffer for the converted frame data
AVFrame* converted_frame = av_frame_alloc();
if (!converted_frame) {
std::cerr << "Error allocating converted frame" << std::endl;
av_frame_free(&frame);
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
converted_frame->format = AV_PIX_FMT_YUV420P;
converted_frame->width = codec_ctx->width;
converted_frame->height = codec_ctx->height;
ret = av_frame_get_buffer(converted_frame, 0);
if (ret < 0) {
std::cerr << "Error allocating converted frame buffer: " << av_err2str(ret) << std::endl;
av_frame_free(&frame);
av_frame_free(&converted_frame);
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
// Initialize the converter
SwsContext* converter = sws_getContext(
codec_ctx->width, codec_ctx->height, codec_ctx->pix_fmt,
codec_ctx->width, codec_ctx->height, AV_PIX_FMT_YUV420P,
SWS_BICUBIC, nullptr, nullptr, nullptr
);
if (!converter) {
std::cerr << "Error initializing converter" << std::endl;
av_frame_free(&frame);
av_frame_free(&converted_frame);
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
// Write the header to the output file
ret = avformat_write_header(format_ctx, nullptr);
if (ret < 0) {
std::cerr << "Error writing header to output file: " << av_err2str(ret) << std::endl;
av_frame_free(&frame);
av_frame_free(&converted_frame);
sws_freeContext(converter);
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
// Iterate over the frames and write them to the output file
int frame_count = 0;
for (auto& frame: video) {
{
// Convert the frame to the output format
sws_scale(converter,
srcFrame.data, srcFrame.linesize, 0, srcFrame.height,
converted_frame->data, converted_frame->linesize
);
// Set the frame properties
converted_frame->pts = av_rescale_q(frame_count, stream->time_base, codec_ctx->time_base);
frame_count++;
//converted_frame->time_base.den = codec_ctx->time_base.den;
//converted_frame->time_base.num = codec_ctx->time_base.num;
// Encode the frame and write it to the output
ret = avcodec_send_frame(codec_ctx, converted_frame);
if (ret < 0) {
std::cerr << "Error sending frame for encoding: " << av_err2str(ret) << std::endl;
av_frame_free(&frame);
av_frame_free(&converted_frame);
sws_freeContext(converter);
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
AVPacket* pkt = av_packet_alloc();
if (!pkt) {
std::cerr << "Error allocating packet" << std::endl;
return;
}
while (ret >= 0) {
ret = avcodec_receive_packet(codec_ctx, pkt);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
std::string a = av_err2str(ret);
break;
}
else if (ret < 0) {
wxMessageBox("Error during encoding");
wxMessageBox(av_err2str(ret));
av_packet_unref(pkt);
av_frame_free(&frame);
av_frame_free(&converted_frame);
sws_freeContext(converter);
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
// Write the packet to the output file
av_packet_rescale_ts(pkt, codec_ctx->time_base, stream->time_base);
pkt->stream_index = stream->index;
ret = av_interleaved_write_frame(format_ctx, pkt);
av_packet_unref(pkt);
if (ret < 0) {
std::cerr << "Error writing packet to output file: " << av_err2str(ret) << std::endl;
av_frame_free(&frame);
av_frame_free(&converted_frame);
sws_freeContext(converter);
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
}
}
}
// Flush the encoder
ret = avcodec_send_frame(codec_ctx, nullptr);
if (ret < 0) {
std::cerr << "Error flushing encoder: " << av_err2str(ret) << std::endl;
av_frame_free(&frame);
av_frame_free(&converted_frame);
sws_freeContext(converter);
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
while (ret >= 0) {
AVPacket* pkt = av_packet_alloc();
if (!pkt) {
std::cerr << "Error allocating packet" << std::endl;
return;
}
ret = avcodec_receive_packet(codec_ctx, pkt);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
wxMessageBox("Error recieving packet");
wxMessageBox(av_err2str(ret));
break;
}
else if (ret < 0) {
std::cerr << "Error during encoding: " << av_err2str(ret) << std::endl;
av_packet_unref(pkt);
av_frame_free(&frame);
av_frame_free(&converted_frame);
sws_freeContext(converter);
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
// Write the packet to the output file
av_packet_rescale_ts(pkt, codec_ctx->time_base, stream->time_base);
pkt->stream_index = stream->index;
ret = av_interleaved_write_frame(format_ctx, pkt);
av_packet_unref(pkt);
if (ret < 0) {
std::cerr << "Error writing packet to output file: " << av_err2str(ret) << std::endl;
av_frame_free(&frame);
av_frame_free(&converted_frame);
sws_freeContext(converter);
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
}
// Write the trailer to the output file
ret = av_write_trailer(format_ctx);
if (ret < 0) {
std::cerr << "Error writing trailer to output file: " << av_err2str(ret) << std::endl;
}
// Free all resources
av_frame_free(&frame);
av_frame_free(&converted_frame);
sws_freeContext(converter);
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
}
我知道这不是最漂亮的写法,我只是想先做出类似的效果。
这是函数的修改版本,因为原来的函数在 class 中。我做了改动以便可以直接编译,但如果我遗漏了什么,可能还有一些错误。
如有任何帮助,不胜感激。
有多个问题:

- 编码时应先初始化 codec_ctx,再使用 avcodec_parameters_from_context(stream->codecpar, codec_ctx) 将 codec_ctx 的编解码器参数拷贝到 stream->codecpar;avcodec_parameters_to_context 通常用于视频解码。这两种用法都可以在官方的转码(transcoding)示例中看到。
- 对于 30fps,应使用 framerate = { 30, 1 } 而不是 framerate = { 1, 30}(后者表示 1/30 fps)。
- sws_getContext 的初始化可能不正确。源像素格式最好从输入的 AVFrame 获取:sws_getContext(codec_ctx->width, codec_ctx->height, (AVPixelFormat)video.front().format, ...)。执行 sws_scale 之后,最好验证其是否成功:if (ret != frame.height) { /* 错误处理 */ }。
- av_rescale_q 的参数顺序错误,应该是:converted_frame->pts = av_rescale_q(frame_count, codec_ctx->time_base, stream->time_base);。
- 必须设置 pkt->duration:pkt->duration = av_rescale_q(1, codec_ctx->time_base, stream->time_base);。
- 冲洗(flush)编码器时,不要把 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) 当作错误来处理。
为了测试,我们可以使用FFmpeg CLI准备100帧rgb24
像素格式的原始输入视频:
ffmpeg -y -f lavfi -i testsrc=size=192x108:rate=1:duration=100 -f rawvideo -pix_fmt rgb24 input.rgb24
然后在执行SaveVideo
之前将帧读入AVFrame
的向量。
代码示例:
#define __STDC_CONSTANT_MACROS
#include <cassert>
#include <cstdio>
#include <iostream>
#include <string>
#include <vector>
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
}
void SaveVideo(std::string& output_filename, std::vector<AVFrame> video)
{
// Initialize FFmpeg
//avformat_network_init(); //Not required.
// Open the output file context
AVFormatContext* format_ctx = nullptr;
int ret = avformat_alloc_output_context2(&format_ctx, nullptr, nullptr, output_filename.c_str());
if (ret < 0) {
std::cerr << "Error creating output context: " << std::to_string(ret) << std::endl;
return;
}
// Open the output file
ret = avio_open(&format_ctx->pb, output_filename.c_str(), AVIO_FLAG_WRITE);
if (ret < 0) {
std::cerr << "Error opening output file: " << std::to_string(ret) << std::endl;
avformat_free_context(format_ctx);
return;
}
// Create the video stream
const AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (!codec) {
std::cerr << "Error finding H.264 encoder" << std::endl;
avformat_free_context(format_ctx);
return;
}
AVStream* stream = avformat_new_stream(format_ctx, codec);
if (!stream) {
std::cerr << "Error creating output stream" << std::endl;
avformat_free_context(format_ctx);
return;
}
// Set the stream parameters
//stream->codecpar->codec_id = AV_CODEC_ID_H264;
//stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
//stream->codecpar->width =video.front().width;
//stream->codecpar->height = video.front().height;
//stream->codecpar->format = AV_PIX_FMT_YUV420P;
//stream->codecpar->bit_rate = 400000;
////AVRational framerate = { 1, 30}; //<--- the framerate is 0.0333fps instead of 30fps
//AVRational framerate = { 30, 1}; //30 fps
//stream->time_base = av_inv_q(framerate);
// Open the codec context
AVCodecContext* codec_ctx = avcodec_alloc_context3(codec);
//codec_ctx->codec_tag = 0;
//codec_ctx->time_base = stream->time_base;
//codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
//if (!codec_ctx) {
// std::cout << "Error allocating codec context" << std::endl;
// avformat_free_context(format_ctx);
// return;
//}
//I think to don't suppose to use avcodec_parameters_to_context when encoding - use avcodec_parameters_from_context instead
//ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
//if (ret < 0) {
// std::cout << "Error setting codec context parameters: " << std::to_string(ret) << std::endl;
// avcodec_free_context(&codec_ctx);
// avformat_free_context(format_ctx);
// return;
//}
//Don't use avcodec_parameters_to_context when encoding - initialize codec_ctx and use avcodec_parameters_from_context to copy from codec_ctx to stream->codecpar.
//1. Initialize codec_ctx
//2. Open codec_ctx
//3. Copy from codec_ctx to stream->codecpar using avcodec_parameters_from_context
////////////////////////////////////////////////////////////////////////////
codec_ctx->codec_id = AV_CODEC_ID_H264;
codec_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
codec_ctx->width =video.front().width;
codec_ctx->height = video.front().height;
codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
codec_ctx->bit_rate = 400000;
//AVRational framerate = { 1, 30}; //<--- the framerate is 0.0333fps instead of 30fps
AVRational framerate = { 30, 1}; //30 fps
codec_ctx->time_base = av_inv_q(framerate);
//<--- Place it after avcodec_parameters_to_context
codec_ctx->codec_tag = 0;
//codec_ctx->time_base = stream->time_base;
codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
stream->time_base = codec_ctx->time_base;
AVDictionary* opt = NULL;
ret = avcodec_open2(codec_ctx, codec, &opt);
if (ret < 0) {
std::cout << "Error opening codec: " << std::to_string(ret) << std::endl;
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
avcodec_parameters_from_context(stream->codecpar, codec_ctx);
////////////////////////////////////////////////////////////////////////////
// Allocate a buffer for the frame data
AVFrame* frame = av_frame_alloc();
if (!frame) {
std::cerr << "Error allocating frame" << std::endl;
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
frame->format = codec_ctx->pix_fmt;
frame->width = codec_ctx->width;
frame->height = codec_ctx->height;
ret = av_frame_get_buffer(frame, 0);
if (ret < 0) {
std::cerr << "Error allocating frame buffer: " << std::to_string(ret) << std::endl;
av_frame_free(&frame);
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
// Allocate a buffer for the converted frame data
AVFrame* converted_frame = av_frame_alloc();
if (!converted_frame) {
std::cerr << "Error allocating converted frame" << std::endl;
av_frame_free(&frame);
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
converted_frame->format = AV_PIX_FMT_YUV420P;
converted_frame->width = codec_ctx->width;
converted_frame->height = codec_ctx->height;
ret = av_frame_get_buffer(converted_frame, 0);
if (ret < 0) {
std::cerr << "Error allocating converted frame buffer: " << std::to_string(ret) << std::endl;
av_frame_free(&frame);
av_frame_free(&converted_frame);
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
// Initialize the converter
SwsContext* converter = sws_getContext(
codec_ctx->width, codec_ctx->height, (AVPixelFormat)video.front().format, //codec_ctx->pix_fmt, <--- The source format comes from the input AVFrame
codec_ctx->width, codec_ctx->height, AV_PIX_FMT_YUV420P,
SWS_BICUBIC, nullptr, nullptr, nullptr
);
if (!converter) {
std::cerr << "Error initializing converter" << std::endl;
av_frame_free(&frame);
av_frame_free(&converted_frame);
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
// Write the header to the output file
ret = avformat_write_header(format_ctx, nullptr);
if (ret < 0) {
std::cerr << "Error writing header to output file: " << std::to_string(ret) << std::endl;
av_frame_free(&frame);
av_frame_free(&converted_frame);
sws_freeContext(converter);
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
// Iterate over the frames and write them to the output file
int frame_count = 0;
for (AVFrame& frame: video) {
AVFrame *pFrame = &frame;
{
// Convert the frame to the output format
ret = sws_scale(converter,
frame.data, frame.linesize, 0, frame.height,
converted_frame->data, converted_frame->linesize
);
if (ret != frame.height) //<--- Check status of sws_scale
{
std::cerr << "sws_scale error: " << std::to_string(ret) << std::endl;
}
// Set the frame properties
//converted_frame->pts = av_rescale_q(frame_count, stream->time_base, codec_ctx->time_base);
converted_frame->pts = av_rescale_q(frame_count, codec_ctx->time_base, stream->time_base); //<------- codec_ctx->time_base should come first.
frame_count++;
//converted_frame->time_base.den = codec_ctx->time_base.den;
//converted_frame->time_base.num = codec_ctx->time_base.num;
// Encode the frame and write it to the output
ret = avcodec_send_frame(codec_ctx, converted_frame);
if (ret < 0) {
std::cerr << "Error sending frame for encoding: " << std::to_string(ret) << std::endl;
av_frame_free(&pFrame);
av_frame_free(&converted_frame);
sws_freeContext(converter);
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
AVPacket* pkt = av_packet_alloc();
if (!pkt) {
std::cerr << "Error allocating packet" << std::endl;
return;
}
while (ret >= 0) {
ret = avcodec_receive_packet(codec_ctx, pkt);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
std::string a = std::to_string(ret);
break;
}
else if (ret < 0) {
std::cerr << "Error during encoding" << std::to_string(ret) << std::endl;
av_packet_unref(pkt);
av_frame_free(&pFrame);
av_frame_free(&converted_frame);
sws_freeContext(converter);
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
// Write the packet to the output file
//av_packet_rescale_ts(pkt, codec_ctx->time_base, stream->time_base); <--- Do we need it???
pkt->stream_index = stream->index;
pkt->duration = av_rescale_q(1, codec_ctx->time_base, stream->time_base); // <---- Set packet duration
ret = av_interleaved_write_frame(format_ctx, pkt);
av_packet_unref(pkt);
if (ret < 0) {
std::cerr << "Error writing packet to output file: " << std::to_string(ret) << std::endl;
av_frame_free(&pFrame);
av_frame_free(&converted_frame);
sws_freeContext(converter);
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
}
}
}
// Flush the encoder
ret = avcodec_send_frame(codec_ctx, nullptr);
if (ret < 0) {
std::cerr << "Error flushing encoder: " << std::to_string(ret) << std::endl;
av_frame_free(&frame);
av_frame_free(&converted_frame);
sws_freeContext(converter);
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
while (ret >= 0) {
AVPacket* pkt = av_packet_alloc();
if (!pkt) {
std::cerr << "Error allocating packet" << std::endl;
return;
}
ret = avcodec_receive_packet(codec_ctx, pkt);
//if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) { <--- This part should not be in the encoder flushing loop
// std::cerr << "Error receiving packet: " << std::to_string(ret) << std::endl;
// break;
//}
//else if (ret < 0) {
// std::cerr << "Error during encoding: " << std::to_string(ret) << std::endl;
// av_packet_unref(pkt);
// av_frame_free(&frame);
// av_frame_free(&converted_frame);
// sws_freeContext(converter);
// avcodec_free_context(&codec_ctx);
// avformat_free_context(format_ctx);
// return;
//}
// Write the packet to the output file
//av_packet_rescale_ts(pkt, codec_ctx->time_base, stream->time_base); <--- Do we need it???
if (ret == 0) //<--- Write packet if ret == 0
{
pkt->stream_index = stream->index;
pkt->duration = av_rescale_q(1, codec_ctx->time_base, stream->time_base); // <---- Set packet duration
ret = av_interleaved_write_frame(format_ctx, pkt);
av_packet_unref(pkt);
if (ret < 0) {
std::cerr << "Error writing packet to output file: " << std::to_string(ret) << std::endl;
av_frame_free(&frame);
av_frame_free(&converted_frame);
sws_freeContext(converter);
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
return;
}
}
}
// Write the trailer to the output file
ret = av_write_trailer(format_ctx);
if (ret < 0) {
std::cerr << "Error writing trailer to output file: " << std::to_string(ret) << std::endl;
}
// Free all resources
av_frame_free(&frame);
av_frame_free(&converted_frame);
sws_freeContext(converter);
avcodec_free_context(&codec_ctx);
avformat_free_context(format_ctx);
}
//Building input:
//ffmpeg -y -f lavfi -i testsrc=size=192x108:rate=1:duration=100 -f rawvideo -pix_fmt rgb24 input.rgb24
int main()
{
std::string output_filename = "output_filename.mp4";
std::vector<AVFrame> video;
std::vector<AVFrame*> av_Frames;
FILE *f = fopen("input.rgb24", "rb");
for (int i = 0; i < 100; i++)
{
AVFrame* pRGBFrame = av_frame_alloc();
pRGBFrame->format = AV_PIX_FMT_RGB24;
pRGBFrame->width = 192;
pRGBFrame->height = 108;
int sts = av_frame_get_buffer(pRGBFrame, 0);
assert(sts == 0);
assert((pRGBFrame->linesize[0] == 192*3)); //Make sure buffers are continuous in memory.
fread(pRGBFrame->data[0], 1, 192*108*3, f); //Read RGB
video.push_back(*pRGBFrame);
av_Frames.push_back(pRGBFrame);
}
fclose(f);
SaveVideo(output_filename, video);
for (int i = 0; i < 10; i++)
{
av_frame_free(&av_Frames[i]);
}
return 0;
}