FFMPEG error: One scaler algorithm must be chosen



I am currently working on an FFMPEG project. I am trying to convert an RGB image to a YUV image using this code (I found it on the Internet last night):

 void Decode::video_encode_example(const char *filename, int codec_id)
    {
    AVCodec *codec;
    AVCodecContext *c= NULL;
    int i, ret, x, y, got_output;
    FILE *f;
    AVFrame *frame;
    AVPacket pkt;
    uint8_t endcode[] = { 0, 0, 1, 0xb7 };
    printf("Encode video file %sn", filename);
    /* find the mpeg1 video encoder */
    codec = avcodec_find_encoder((enum AVCodecID)codec_id);
    if (!codec) {
        fprintf(stderr, "Codec not foundn");
        exit(1);
    }
    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec contextn");
        exit(2);
    }
    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 352;
    c->height = 288;
    /* frames per second */
    c->time_base = (AVRational){1,25};
    /* emit one intra frame every ten frames
     * check frame pict_type before passing frame
     * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
     * then gop_size is ignored and the output of encoder
     * will always be I frame irrespective to gop_size
     */
    c->gop_size = 10;
    c->max_b_frames = 1;
    c->pix_fmt = AV_PIX_FMT_YUV420P;
    if (codec_id == AV_CODEC_ID_H264)
        av_opt_set(c->priv_data, "preset", "slow", 0);
    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codecn");
        exit(3);
    }
    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "Could not open %sn", filename);
        exit(4);
    }
    frame = avcodec_alloc_frame(); // In newer versions this is av_frame_alloc()
    if (!frame) {
        fprintf(stderr, "Could not allocate video framen");
        exit(5);
    }
    frame->format = c->pix_fmt;
    frame->width  = c->width;
    frame->height = c->height;
    /* the image can be allocated by any means and av_image_alloc() is
     * just the most convenient way if av_malloc() is to be used */
    ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
                         c->pix_fmt, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate raw picture buffern");
        exit(6);
    }
    //
    // RGB to YUV:
    //    http://stackoverflow.com/questions/16667687/how-to-convert-rgb-from-yuv420p-for-ffmpeg-encoder
    //
    // Create some dummy RGB "frame"
    uint8_t *rgba32Data = new uint8_t[4*c->width*c->height];
    SwsContext * ctx = sws_getContext(c->width, c->height,
                                      AV_PIX_FMT_RGBA, c->width, c->height,
                                      AV_PIX_FMT_YUV420P, 0, 0, 0, 0);

    /* encode 1 second of video */
    for (i = 0; i < 25; i++) {
        av_init_packet(&pkt);
        pkt.data = NULL;    // packet data will be allocated by the encoder
        pkt.size = 0;

        fflush(stdout);
        /* prepare a dummy image */
        /* Y */
        //        for (y = 0; y < c->height; y++) {
        //            for (x = 0; x < c->width; x++) {
        //                frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
        //            }
        //        }
        //
        //        /* Cb and Cr */
        //        for (y = 0; y < c->height/2; y++) {
        //            for (x = 0; x < c->width/2; x++) {
        //                frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
        //                frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
        //            }
        //        }
        uint8_t *pos = rgba32Data;
        for (y = 0; y < c->height; y++)
        {
            for (x = 0; x < c->width; x++)
            {
                pos[0] = i / (float)25 * 255;
                pos[1] = 0;
                pos[2] = x / (float)(c->width) * 255;
                pos[3] = 255;
                pos += 4;
            }
        }
        uint8_t * inData[1] = { rgba32Data }; // RGBA32 has one plane
        //
        // NOTE: In a more general setting, the rows of your input image may
        //       be padded; that is, the bytes per row may not be 4 * width.
        //       In such cases, inLineSize should be set to that padded width.
        //
        int inLinesize[1] = { 4*c->width }; // RGBA stride
        sws_scale(ctx, inData, inLinesize, 0, c->height, frame->data, frame->linesize);
        frame->pts = i;
        /* encode the image */
        ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding framen");
            exit(7);
        }
        if (got_output) {
            printf("Write frame %3d (size=%5d)n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }
    /* get the delayed frames */
    for (got_output = 1; got_output; i++) {
        fflush(stdout);
        ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding framen");
            exit(8);
        }
        if (got_output) {
            printf("Write frame %3d (size=%5d)n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }
    /* add sequence end code to have a real mpeg file */
    fwrite(endcode, 1, sizeof(endcode), f);
    fclose(f);
    avcodec_close(c);
    av_free(c);
    av_freep(&frame->data[0]);
    avcodec_free_frame(&frame);// Dans une version plus récente c'est av_frame_alloc
    printf("n");
    }
    int main()
    {
        Decode d;
        avcodec_register_all();
        d.video_encode_example("/home/Dave/Desktop/test.mpg", AV_CODEC_ID_MPEG2VIDEO);
    }

When I run this application, my Linux terminal displays the following error:

[swscaler @ 0x1e1dc60] One scaler algorithm must be chosen
Segmentation fault (core dumped)

I don't know exactly what is going on. Could you help me?

SwsContext * ctx = sws_getContext(c->width, c->height,
                                  AV_PIX_FMT_RGBA, c->width, c->height,
                                  AV_PIX_FMT_YUV420P, 0, 0, 0, 0);

你的"flags"字段(参见文档-你的4个零列表中的第一个)需要是非零的。有效值为本页顶部列表中的SWS_FAST_BILINEAR - SWS_ERROR_DIFFUSION。一个好的默认值是只设置缩放算法,并使用例如SWS_BICUBIC作为双三次插值的值。高端缩放算法(如用于样条插值的SPS_SPLINE)将需要更多的计算量,而低端缩放算法(如用于最近邻插值的SWS_POINT)往往看起来更差,因此您在这里选择的确切值取决于您愿意花费多少CPU以及您对最终结果的视觉质量的关心程度。
