如何使用ffmpeg-gpu编码将视频中的帧保存到内存中



我正在尝试从视频中提取帧并将其保存到内存(RAM)中。使用CPU解码,我没有任何问题:

ffmpeg -i input -s 224x224 -pix_fmt bgr24 -vcodec rawvideo -an -sn -f image2pipe -

但当我尝试使用一些NVIDIA GPU编码时,我总是会得到嘈杂的图像。我试着在Windows和Ubuntu上使用不同的命令,但结果总是一样的。

ffmpeg -hwaccel cuda -i 12.mp4 -s 224x224 -f image2pipe - -vcodec rawvideo

把JPG保存在磁盘上,我没有任何问题。

ffmpeg -hwaccel cuvid -c:v h264_cuvid -resize 224x224 -i {input_video} 
-vf thumbnail_cuda=2,hwdownload,format=nv12 {output_dir}/%d.jpg

我的python代码用于测试这些命令:

import cv2
import subprocess as sp
import numpy

# Frame geometry requested from FFmpeg ('-s 224x224' below must match).
IMG_W = 224
IMG_H = 224
input = '12.mp4'  # NOTE(review): shadows the builtin `input`; kept for consistency with the post.

# CPU decoding: scale to 224x224 and stream raw BGR24 frames to stdout.
ffmpeg_cmd = ['ffmpeg', '-i', input, '-s', '224x224', '-pix_fmt', 'bgr24',
              '-vcodec', 'rawvideo', '-an', '-sn', '-f', 'image2pipe', '-']

# GPU decoding attempt (produces noisy images -- see question):
#ffmpeg_cmd = ['ffmpeg', '-hwaccel', 'cuda', '-i', '12.mp4', '-s', '224x224', '-f', 'image2pipe', '-', '-vcodec', 'rawvideo']

pipe = sp.Popen(ffmpeg_cmd, stdout=sp.PIPE, bufsize=10)
images = []
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 95]
cnt = 0
while True:
    cnt += 1
    # One frame = W*H*3 bytes of BGR24 data.
    raw_image = pipe.stdout.read(IMG_W * IMG_H * 3)
    # numpy.fromstring is deprecated for binary input; frombuffer is the documented replacement.
    image = numpy.frombuffer(raw_image, dtype='uint8')
    if image.shape[0] == 0:
        # Empty read: the stream has ended.
        del images
        break
    else:
        image = image.reshape((IMG_H, IMG_W, 3))
        cv2.imshow('test', image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    pipe.stdout.flush()
cv2.destroyAllWindows()

为了加速H.264解码,最好选择-c:v h264_cuvid——它在GPU中使用专用视频硬件。

使用GPU-Z监控软件进行测试,看起来-hwaccel cuda也使用了专用加速器(与-c:v h264_cuvid相同),但我不确定。

注:

  • NVIDIA CUVID视频解码加速器不支持所有尺寸和像素格式

问题:

  • bufsize=10太小,与其设置bufsize=10,不如不设置bufsize参数。

  • 使用'-f', 'rawvideo'而不是'-f', 'image2pipe'(我们从管道中读取原始视频帧,而不是图像[如JPEG或PNG])
    使用'-f', 'rawvideo'时,我们可以删除'-vcodec', 'rawvideo'

  • 我们不需要参数'-s', '224x224',因为输出大小是从输入视频中已知的。

更新的FFmpeg命令:

ffmpeg_cmd = ['ffmpeg', '-hwaccel', 'cuda', '-c:v', 'h264_cuvid', '-i', input, '-pix_fmt', 'bgr24', '-f', 'rawvideo', '-']

为了创建可复制的代码样本,我首先创建一个合成视频文件'test.mp4',该文件将用作输入:

# Build synthetic video file for testing.
################################################################################
sp.run(['ffmpeg', '-y', '-f', 'lavfi', '-i', f'testsrc=size={IMG_W}x{IMG_H}:rate=1',
'-f', 'lavfi', '-i', 'sine=frequency=300', '-c:v', 'libx264', '-pix_fmt', 'nv12', 
'-c:a', 'aac', '-ar', '22050', '-t', '50', input])
################################################################################

这里有一个完整的(可执行的)代码示例:

import cv2
import subprocess as sp
import numpy

IMG_W = 224
IMG_H = 224
input = 'test.mp4'  # NOTE(review): shadows the builtin `input`; kept for consistency with the post.

# Build synthetic video file for testing.
################################################################################
sp.run(['ffmpeg', '-y', '-f', 'lavfi', '-i', f'testsrc=size={IMG_W}x{IMG_H}:rate=1',
        '-f', 'lavfi', '-i', 'sine=frequency=300', '-c:v', 'libx264', '-pix_fmt', 'nv12',
        '-c:a', 'aac', '-ar', '22050', '-t', '50', input])
################################################################################

# There is no damage using both '-hwaccel cuda' and '-c:v h264_cuvid'.
# '-f rawvideo' streams raw BGR24 frames; output size equals the input size.
ffmpeg_cmd = ['ffmpeg', '-hwaccel', 'cuda', '-c:v', 'h264_cuvid', '-i', input,
              '-pix_fmt', 'bgr24', '-f', 'rawvideo', '-']

pipe = sp.Popen(ffmpeg_cmd, stdout=sp.PIPE)
cnt = 0
while True:
    cnt += 1
    # One frame = W*H*3 bytes of BGR24 data.
    raw_image = pipe.stdout.read(IMG_W * IMG_H * 3)
    # numpy.fromstring is deprecated for binary input; frombuffer is the documented replacement.
    image = numpy.frombuffer(raw_image, dtype='uint8')
    if image.shape[0] == 0:
        # Empty read: the stream has ended.
        break
    else:
        image = image.reshape((IMG_H, IMG_W, 3))
        cv2.imshow('test', image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

pipe.stdout.close()
pipe.wait()
cv2.destroyAllWindows()

更新:

生成JPEG而不是原始帧:

我发现的用于在内存中建立JPEG图像列表的解决方案采用"手动"解析输出流的方式。

FFmpeg命令(选择YUV420像素格式):

ffmpeg_cmd = ['ffmpeg', '-hwaccel', 'cuda', '-c:v', 'h264_cuvid', '-i', input, '-c:v', 'mjpeg', '-pix_fmt', 'yuvj420p', '-f', 'image2pipe', '-']

JPEG文件格式在SOS有效载荷的标头中没有长度
找到SOS负载的末尾需要字节扫描,而Python实现的速度非常慢。

以下解决方案与大多数用户无关
我决定发布它,因为它可能与某人有关。

这里有一个代码示例(第一部分构建用于测试的合成视频文件):

import cv2
import subprocess as sp
import numpy as np
import struct

# Frame geometry of the synthetic test video.
IMG_W = 224
IMG_H = 224
input = 'test.mp4'  # NOTE(review): shadows the builtin `input`; kept for consistency with the post.
# Build synthetic video file for testing.
# testsrc pattern at 1 fps, H.264 video + 300 Hz sine audio, 50 seconds long.
################################################################################
sp.run(['ffmpeg', '-y', '-f', 'lavfi', '-i', f'testsrc=size={IMG_W}x{IMG_H}:rate=1',
'-f', 'lavfi', '-i', 'sine=frequency=300', '-c:v', 'libx264', '-pix_fmt', 'nv12',
'-c:a', 'aac', '-ar', '22050', '-t', '50', input])
################################################################################
def read_from_pipe(p_stdout, n_bytes):
    """Read up to n_bytes bytes from the p_stdout pipe and return them.

    Loops until n_bytes bytes were accumulated or the pipe reaches EOF.
    On EOF the (possibly shorter) data read so far is returned -- the
    original version spun forever here because read() keeps returning
    b'' once the pipe is closed.
    """
    data = p_stdout.read(n_bytes)
    while len(data) < n_bytes:
        chunk = p_stdout.read(n_bytes - len(data))
        if not chunk:  # EOF: avoid an infinite busy-loop.
            break
        data += chunk
    return data

# Decode on the GPU, re-encode each frame as MJPEG and stream the JPEGs to stdout.
ffmpeg_cmd = ['ffmpeg', '-hwaccel', 'cuda', '-c:v', 'h264_cuvid', '-i', input, '-c:v', 'mjpeg', '-pix_fmt', 'yuvj420p', '-f', 'image2pipe', '-']
pipe = sp.Popen(ffmpeg_cmd, stdout=sp.PIPE)
jpg_list = []  # One NumPy uint8 array per JPEG image.
cnt = 0
while True:
    if pipe.poll() is not None:  # FFmpeg exited -- stop reading.
        break
    # https://en.wikipedia.org/wiki/JPEG_File_Interchange_Format
    jpeg_parts = []
    # SOI
    soi = read_from_pipe(pipe.stdout, 2)  # Read Start of Image (FF D8)
    assert soi == b'\xff\xd8', 'Error: first two bytes are not FF D8'
    jpeg_parts.append(soi)
    # JFIF APP0 marker segment
    marker = read_from_pipe(pipe.stdout, 2)  # APP0 marker (FF E0)
    assert marker == b'\xff\xe0', 'Error: APP0 marker is not FF E0'
    jpeg_parts.append(marker)
    xx = 0
    # Keep reading markers and segments until marker is EOI (0xFFD9)
    while xx != 0xD9:  # marker != b'\xff\xd9'
        # Length of segment excluding the marker itself (big-endian uint16).
        length_of_segment = read_from_pipe(pipe.stdout, 2)
        jpeg_parts.append(length_of_segment)
        length_of_segment = struct.unpack('>H', length_of_segment)[0]  # Unpack to uint16 (big endian)
        segment = read_from_pipe(pipe.stdout, length_of_segment - 2)  # Minus 2 because the length field includes its own 2 bytes.
        jpeg_parts.append(segment)
        marker = read_from_pipe(pipe.stdout, 2)  # JFXX-APP0 marker (FF E0) or SOF or DHT or COM or SOS or EOI
        jpeg_parts.append(marker)
        if marker == b'\xff\xda':  # SOS marker (0xFFDA)
            # https://stackoverflow.com/questions/26715684/parsing-jpeg-sos-marker
            # The SOS payload carries no length, so the end must be found by scanning:
            #
            # 1. Skip first 3 bytes after SOS marker (2 bytes header size + 1 byte number of image components in scan).
            # 2. Search for next FFxx marker (skip every FF00 and range FFD0-FFD7 because they are part of the scan).
            # Based on Table B.1 of https://www.w3.org/Graphics/JPEG/itu-t81.pdf; values FF01
            # and FF02 through FFBF are presumably also scan data, so they are skipped too.
            first3bytes = read_from_pipe(pipe.stdout, 3)
            jpeg_parts.append(first3bytes)
            xx = 0
            # Search for next FFxx marker, skipping FF00, FFD0-FFD7 and FF01-FFBF.
            while (xx < 0xBF) or ((xx >= 0xD0) and (xx <= 0xD7)):
                # Scan forward to the next FF byte.
                b = 0
                while b != 0xFF:
                    b = read_from_pipe(pipe.stdout, 1)
                    jpeg_parts.append(b)
                    b = b[0]
                xx = read_from_pipe(pipe.stdout, 1)  # Read next byte after FF
                jpeg_parts.append(xx)
                xx = xx[0]
    # Join the collected parts into one bytes object and store it as a NumPy array.
    jpg_list.append(np.frombuffer(b''.join(jpeg_parts), np.uint8))
    cnt += 1

pipe.stdout.close()
pipe.wait()

# Decode and show images for testing
for im in jpg_list:
    image = cv2.imdecode(im, cv2.IMREAD_UNCHANGED)
    cv2.imshow('test', image)
    if cv2.waitKey(100) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()

最新更新