Sending live video frames over the network in Python OpenCV



I am trying to send the live video frames I capture with my camera to a server and process them there. I am using OpenCV for the image processing and Python as the language. Here is my code

client_cv.py

import cv2
import numpy as np
import socket
import sys
import pickle
cap=cv2.VideoCapture(0)
clientsocket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
clientsocket.connect(('localhost',8089))
while True:
    ret,frame=cap.read()
    print sys.getsizeof(frame)
    print frame
    clientsocket.send(pickle.dumps(frame))

server_cv.py

import socket
import sys
import cv2
import pickle
import numpy as np
HOST=''
PORT=8089
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
print 'Socket created'
s.bind((HOST,PORT))
print 'Socket bind complete'
s.listen(10)
print 'Socket now listening'
conn,addr=s.accept()
while True:
    data=conn.recv(80)
    print sys.getsizeof(data)
    frame=pickle.loads(data)
    print frame
    cv2.imshow('frame',frame)

This code gives me an end-of-file error, which makes sense because data keeps arriving at the server and pickle has no way of knowing where a frame ends. My searching on the internet pointed me to pickle, but so far it hasn't worked.

Note: I set conn.recv to 80 because that is the number I get when I print sys.getsizeof(frame).

A few things:

  • Use sendall instead of send, since you are not guaranteed that everything will be sent in one go.
  • pickle is fine for data serialization, but you have to define a protocol of your own for the messages exchanged between the client and the server, so that you know in advance how much data to read before unpickling (see below).
  • For recv you get better performance if you receive big chunks, so replace 80 with 4096 or even more.
  • Beware of sys.getsizeof: it returns the size of the object in memory, which is not the same as the size (length) of the bytes to send over the network; for a Python string the two values are not the same at all (a short snippet illustrating this follows the list).
  • Be mindful of the size of the frames you are sending. The code below supports frames up to 65535 bytes; change "H" to "L" if you have larger frames.
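
To make the sys.getsizeof point concrete, here is a small illustrative snippet (not part of the original answer) comparing it with len(), which is the number the receiver actually needs:

import pickle
import sys
import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for a captured frame
data = pickle.dumps(frame)

# len(data) is exactly how many bytes must travel over the socket.
# sys.getsizeof() measures the in-memory Python object instead, so it differs
# from len() and, depending on the NumPy version, may not even count the
# pixel buffer of the raw ndarray -- it is the wrong number to base recv() on.
print(len(data))             # size of the pickled payload to send
print(sys.getsizeof(data))   # in-memory size of the bytes object (slightly larger)
print(sys.getsizeof(frame))  # in-memory size of the ndarray object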

Example protocol:

client_cv.py

import cv2
import numpy as np
import socket
import sys
import pickle
import struct ### new code
cap=cv2.VideoCapture(0)
clientsocket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
clientsocket.connect(('localhost',8089))
while True:
    ret,frame=cap.read()
    data = pickle.dumps(frame) ### new code
    clientsocket.sendall(struct.pack("H", len(data))+data) ### new code

server_cv.py

import socket
import sys
import cv2
import pickle
import numpy as np
import struct ## new
HOST=''
PORT=8089
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
print('Socket created')
s.bind((HOST,PORT))
print('Socket bind complete')
s.listen(10)
print('Socket now listening')
conn,addr=s.accept()
### new
data = ""
payload_size = struct.calcsize("H") 
while True:
    while len(data) < payload_size:
        data += conn.recv(4096)
    packed_msg_size = data[:payload_size]
    data = data[payload_size:]
    msg_size = struct.unpack("H", packed_msg_size)[0]
    while len(data) < msg_size:
        data += conn.recv(4096)
    frame_data = data[:msg_size]
    data = data[msg_size:]
    ###
    frame=pickle.loads(frame_data)
    print frame
    cv2.imshow('frame',frame)
    cv2.waitKey(1) ### needed so the imshow window actually refreshes

You can probably optimize all of this a lot (less copying, using the buffer interface, etc.), but at least you get the idea.
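
For example, one way to reduce the copying is to read straight into a pre-allocated buffer with the socket's recv_into. This is only a sketch of that idea under the same length-prefix protocol, not code from the answer (recv_exact is a made-up helper name):

def recv_exact(conn, n):
    """Read exactly n bytes from conn into a pre-allocated buffer, without per-chunk concatenation."""
    buf = bytearray(n)
    view = memoryview(buf)
    got = 0
    while got < n:
        nread = conn.recv_into(view[got:], n - got)
        if nread == 0:
            raise EOFError("socket closed before the full message arrived")
        got += nread
    return bytes(buf)

# Usage inside the server loop would then look like:
#   packed_msg_size = recv_exact(conn, payload_size)
#   msg_size = struct.unpack("H", packed_msg_size)[0]
#   frame = pickle.loads(recv_exact(conn, msg_size))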

After months of searching the internet, this is what I came up with. I have neatly packaged it into classes, with unit tests and documentation, as SmoothStream; it was the only simple, working version of streaming I could find anywhere.

I used this code and wrapped mine around it.

Viewer.py

import cv2
import zmq
import base64
import numpy as np
context = zmq.Context()
footage_socket = context.socket(zmq.SUB)
footage_socket.bind('tcp://*:5555')
footage_socket.setsockopt_string(zmq.SUBSCRIBE, np.unicode(''))
while True:
    try:
        frame = footage_socket.recv_string()
        img = base64.b64decode(frame)
        npimg = np.fromstring(img, dtype=np.uint8)
        source = cv2.imdecode(npimg, 1)
        cv2.imshow("Stream", source)
        cv2.waitKey(1)
    except KeyboardInterrupt:
        cv2.destroyAllWindows()
        break

Streamer.py

import base64
import cv2
import zmq
context = zmq.Context()
footage_socket = context.socket(zmq.PUB)
footage_socket.connect('tcp://localhost:5555')
camera = cv2.VideoCapture(0)  # init the camera
while True:
    try:
        grabbed, frame = camera.read()  # grab the current frame
        frame = cv2.resize(frame, (640, 480))  # resize the frame
        encoded, buffer = cv2.imencode('.jpg', frame)
        jpg_as_text = base64.b64encode(buffer)
        footage_socket.send(jpg_as_text)
    except KeyboardInterrupt:
        camera.release()
        cv2.destroyAllWindows()
        break

I changed the code from @mguijarr to work with Python 3. Changes made to the code:

  • data is now a bytes literal rather than a string literal
  • Changed "H" to "L" so larger frames can be sent. According to the documentation, we can now send frames up to 2^32 bytes in size rather than just 2^16 (a brief illustration follows this list).
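
As a brief illustration (not part of this answer) of what the "H" versus "L" format characters change, here is how the length prefix bounds the frame size:

import struct

payload = b"x" * 100000                   # pretend this is a pickled frame of ~100 KB

# struct.pack("H", n) only holds values up to 65535, so a payload this large
# would raise struct.error; "L" holds at least 32 bits (values up to 4294967295).
header = struct.pack("L", len(payload))   # fixed-size length prefix sent before the data
msg_size = struct.unpack("L", header)[0]  # the receiver recovers how many bytes follow
assert msg_size == len(payload)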

Server.py

import pickle
import socket
import struct
import cv2
HOST = ''
PORT = 8089
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('Socket created')
s.bind((HOST, PORT))
print('Socket bind complete')
s.listen(10)
print('Socket now listening')
conn, addr = s.accept()
data = b'' ### CHANGED
payload_size = struct.calcsize("L") ### CHANGED
while True:
    # Retrieve message size
    while len(data) < payload_size:
        data += conn.recv(4096)
    packed_msg_size = data[:payload_size]
    data = data[payload_size:]
    msg_size = struct.unpack("L", packed_msg_size)[0] ### CHANGED
    # Retrieve all data based on message size
    while len(data) < msg_size:
        data += conn.recv(4096)
    frame_data = data[:msg_size]
    data = data[msg_size:]
    # Extract frame
    frame = pickle.loads(frame_data)
    # Display
    cv2.imshow('frame', frame)
    cv2.waitKey(1)

Client.py

import cv2
import numpy as np
import socket
import sys
import pickle
import struct
cap=cv2.VideoCapture(0)
clientsocket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
clientsocket.connect(('localhost',8089))
while True:
    ret,frame=cap.read()
    # Serialize frame
    data = pickle.dumps(frame)
    # Send message length first
    message_size = struct.pack("L", len(data)) ### CHANGED
    # Then data
    clientsocket.sendall(message_size + data)

I'm a little late to the party, but my powerful, threaded VidGear video-processing Python library now provides the NetGear API, which is designed exclusively for transferring video frames synchronously between interconnected systems over the network in real time. Here is an example:

A. Server side: (bare-minimum example)

Open your favorite terminal and execute the following Python code:

Note: You can end streaming at any time on both the server and the client by pressing [Ctrl+C] on your keyboard.

# import libraries
from vidgear.gears import VideoGear
from vidgear.gears import NetGear
stream = VideoGear(source='test.mp4').start() #Open any video stream
server = NetGear() #Define netgear server with default settings
# infinite loop until [Ctrl+C] is pressed
while True:
    try: 
        frame = stream.read()
        # read frames
        # check if frame is None
        if frame is None:
            #if True break the infinite loop
            break
        # do something with frame here
        # send frame to server
        server.send(frame)
    
    except KeyboardInterrupt:
        #break the infinite loop
        break
# safely close video stream
stream.stop()
# safely close server
server.close()

B. Client side: (bare-minimum example)

Then open another terminal on the same system, execute the following Python code, and watch the output:

# import libraries
from vidgear.gears import NetGear
import cv2
#define netgear client with `receive_mode = True` and default settings
client = NetGear(receive_mode = True)
# infinite loop
while True:
    # receive frames from network
    frame = client.recv()
    # check if frame is None
    if frame is None:
        #if True break the infinite loop
        break
    # do something with frame here
    # Show output window
    cv2.imshow("Output Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # check for 'q' key-press
    if key == ord("q"):
        #if 'q' key-pressed break out
        break
# close output window
cv2.destroyAllWindows()
# safely close client
client.close()

More advanced usage and related documentation can be found here: https://github.com/abhiTronix/vidgear/wiki/NetGear

As @Rohan-Sawant said, I used the zmq library without base64 encoding. Here is the new code

Streamer.py

import base64
import cv2
import zmq
import numpy as np
import time
context = zmq.Context()
footage_socket = context.socket(zmq.PUB)
footage_socket.connect('tcp://192.168.1.3:5555')
camera = cv2.VideoCapture(0)  # init the camera
while True:
        try:
                grabbed, frame = camera.read()  # grab the current frame
                frame = cv2.resize(frame, (640, 480))  # resize the frame
                encoded, buffer = cv2.imencode('.jpg', frame)
                footage_socket.send(buffer)

        except KeyboardInterrupt:
                camera.release()
                cv2.destroyAllWindows()
                break

Viewer.py

import cv2
import zmq
import base64
import numpy as np
context = zmq.Context()
footage_socket = context.socket(zmq.SUB)
footage_socket.bind('tcp://*:5555')
footage_socket.setsockopt_string(zmq.SUBSCRIBE, np.unicode(''))
while True:
    try:
        frame = footage_socket.recv()
        npimg = np.frombuffer(frame, dtype=np.uint8)
        #npimg = npimg.reshape(480,640,3)
        source = cv2.imdecode(npimg, 1)
        cv2.imshow("Stream", source)
        cv2.waitKey(1)
    except KeyboardInterrupt:
        cv2.destroyAllWindows()
        break

I recently published the imagiz package for fast, non-blocking, real-time video streaming over the network with OpenCV and ZMQ.

https://pypi.org/project/imagiz/

Client:

import imagiz
import cv2

client=imagiz.Client("cc1",server_ip="localhost")
vid=cv2.VideoCapture(0)
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
while True:
    r,frame=vid.read()
    if r:
        r, image = cv2.imencode('.jpg', frame, encode_param)
        client.send(image)
    else:
        break

Server:

import imagiz
import cv2
server=imagiz.Server()
while True:
    message=server.recive()
    frame=cv2.imdecode(message.image,1)
    cv2.imshow("",frame)
    cv2.waitKey(1)

I got this working on my macOS.

I used the code from @mguijarr and changed struct.pack from "H" to "L".

# Server.py:
import socket
import sys
import cv2
import pickle
import numpy as np
import struct ## new

HOST=''
PORT=8089
    
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
print 'Socket created'
    
s.bind((HOST,PORT))
print 'Socket bind complete'
s.listen(10)
print 'Socket now listening'
    
conn,addr=s.accept()
    
# new
data = ""
payload_size = struct.calcsize("L") 
while True:
    while len(data) < payload_size:
        data += conn.recv(4096)
    packed_msg_size = data[:payload_size]
    data = data[payload_size:]
    msg_size = struct.unpack("L", packed_msg_size)[0]
    while len(data) < msg_size:
        data += conn.recv(4096)
    frame_data = data[:msg_size]
    data = data[msg_size:]
    
    
    frame=pickle.loads(frame_data)
    print frame
    cv2.imshow('frame',frame)
        
    key = cv2.waitKey(10)
    if (key == 27) or (key == 113):
        break
    
cv2.destroyAllWindows()
# Client.py
import cv2
import numpy as np
import socket
import sys
import pickle
import struct ### new code

cap=cv2.VideoCapture(0)
clientsocket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
clientsocket.connect(('localhost',8089))
while True:
    ret,frame=cap.read()
    data = pickle.dumps(frame) ### new code
    clientsocket.sendall(struct.pack("L", len(data))+data) ### new code
