Python [Errno 32] Broken pipe



There are hardly any questions about this error, and those that exist did not solve my problem. I have been trying to build a vehicle-counting system using deep learning. I did not upload the hw.mp4 video, because the same error occurs with any other video. I have uploaded the json file, the EfficientDet files, model, utils, and object_tracking so that the same error can be reproduced with minimal code.

import json
import numpy as np
from model import efficientdet
from utils import preprocess_image, postprocess_boxes
from object_tracking.centroidtracker import CentroidTracker
from object_tracking.trackableobject import TrackableObject
from imutils.video import FPS
import multiprocessing
import dlib
import cv2
import os
import imutils
import time


def start_tracker(box, label, rgb, inputQueue, outputQueue):
    # construct a dlib rectangle object from the bounding box
    # coordinates and then start the correlation tracker
    t = dlib.correlation_tracker()
    rect = dlib.rectangle(int(box[0]), int(box[1]), int(box[2]), int(box[3]))
    t.start_track(rgb, rect)
    # loop indefinitely -- this function will be called as a daemon
    # process so we don't need to worry about joining it
    while True:
        # attempt to grab the next frame from the input queue
        rgb = inputQueue.get()
        # if there was an entry in our queue, process it
        if rgb is not None:
            # update the tracker and grab the position of the tracked
            # object
            t.update(rgb)
            pos = t.get_position()
            # unpack the position object
            startX = int(pos.left())
            startY = int(pos.top())
            endX = int(pos.right())
            endY = int(pos.bottom())
            # add the label + bounding box coordinates to the output
            # queue
            outputQueue.put((label, (startX, startY, endX, endY)))


def run():
    # initialize our list of queues -- both input queue and output queue
    # for *every* object that we will be tracking
    inputQueues = []
    outputQueues = []
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # if there were a discrete GPU this would change from 0 to 1
    phi = 0  # resolution index; must match the resolution the model was trained with
    weighted_bifpn = True
    model_path = 'efficientdet-d0.h5'  # path to the model file
    image_sizes = (512, 640, 768, 896, 1024, 1280, 1408)
    image_size = image_sizes[phi]
    # classes in the COCO dataset (90 different object categories)
    classes = {value['id'] - 1: value['name'] for value in json.load(open('coco_90.json', 'r')).values()}
    num_classes = 90  # total number of classes
    score_threshold = 0.1  # threshold applied to the scores produced by the model
    _, model = efficientdet(phi=phi,
                            weighted_bifpn=weighted_bifpn,
                            num_classes=num_classes,
                            score_threshold=score_threshold)
    model.load_weights(model_path, by_name=True)  # load the pretrained weights into the model
    vs = cv2.VideoCapture('i.mp4')  # video file
    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)  # initialize the centroid tracker
    trackers = []  # list that stores each dlib correlation tracker
    trackableObjects = {}  # dictionary holding a unique ID for every vehicle
    totalFrames = 0  # number of processed frames, kept for the FPS calculation
    totalDown = 0
    totalUp = 0
    # start the FPS measurement
    # everything above takes roughly 50 ms
    fps = FPS().start()
    # walk through the video frame by frame
    while True:
        (grabbed, frame) = vs.read()
        if frame is None:
            break
        else:
            frame = imutils.resize(frame, width=600)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            if len(inputQueues) == 0:
                (H, W) = frame.shape[:2]
                rgb, scale = preprocess_image(rgb, image_size=image_size)
                # scale = 1.6
                # cv2.imshow("rgb2", rgb)
                # print(scale)
                boxes, scores, labels = model.predict_on_batch([np.expand_dims(rgb, axis=0)])  # run the detector
                boxes, scores, labels = np.squeeze(boxes), np.squeeze(scores), np.squeeze(labels)
                boxes.setflags(write=1)
                boxes = postprocess_boxes(boxes=boxes, scale=scale, height=H, width=W)
                # keep detections above the score threshold whose label is a
                # vehicle class (COCO ids 2, 3, 5, 7: car, motorcycle, bus, truck)
                indices = np.where((scores > score_threshold) & np.isin(labels, [2, 3, 5, 7]))[0]
                for i in np.arange(0, boxes[indices].shape[0]):
                    # cv2.waitKey(0)
                    box = np.array([W, H, W, H]) * boxes[indices][i]  # watch this step
                    # take the bounding box coordinates
                    (startX, startY, endX, endY) = box.astype("int")
                    bb = (startX, startY, endX, endY)
                    iq = multiprocessing.Queue()
                    oq = multiprocessing.Queue()
                    inputQueues.append(iq)
                    outputQueues.append(oq)
                    # spawn a daemon process for a new object tracker
                    p = multiprocessing.Process(
                        target=start_tracker,
                        args=(bb, labels[indices][i], frame, iq, oq))
                    p.daemon = True
                    p.start()
                    # grab the corresponding class label for the detection
                    # and draw the bounding box
                    cv2.rectangle(frame, (startX, startY), (endX, endY),
                                  (0, 255, 0), 2)
                    # cv2.putText(frame, labels, (startX, startY - 15),
                    #             cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)
            else:
                # loop over each of our input queues and add the input RGB
                # frame to it, enabling us to update each of the respective
                # object trackers running in separate processes
                for iq in inputQueues:
                    iq.put(rgb)
                # loop over each of the output queues
                for oq in outputQueues:
                    # grab the updated bounding box coordinates for the
                    # object -- the .get method is a blocking operation so
                    # this will pause our execution until the respective
                    # process finishes the tracking update
                    (label, (startX, startY, endX, endY)) = oq.get()
                    # draw the bounding box from the correlation object
                    # tracker
                    # cv2.rectangle(frame, (startX, startY), (endX, endY),
                    #               (0, 255, 0), 2)
                    # cv2.putText(frame, label, (startX, startY - 15),
                    #             cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)
        # show the output frame
        cv2.imshow("Frame", rgb)
        key = cv2.waitKey(1) & 0xFF
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
        fps.update()
    # stop the timer and display FPS information
    fps.stop()
    print(totalUp)
    print(totalDown)
    print('vehicles detected')
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    cv2.destroyAllWindows()
    vs.release()


if __name__ == '__main__':
    run()

You should add

if __name__ == "__main__":

after start_tracker.
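
The reason this matters: with the spawn start method (the default on Windows, and on macOS since Python 3.8), every child process re-imports the main module, so any unguarded top-level code runs again in each child and the queue pipes can end up broken. Here is a minimal sketch of the pattern; the worker below is a stand-in for illustration, not the question's actual start_tracker:

import multiprocessing

def worker(inputQueue, outputQueue):
    # read items from the input queue until a None sentinel arrives
    while True:
        item = inputQueue.get()
        if item is None:
            break
        outputQueue.put(item)

if __name__ == '__main__':
    # everything that creates processes and queues must sit under this
    # guard; otherwise the spawned child re-executes it on import
    iq, oq = multiprocessing.Queue(), multiprocessing.Queue()
    p = multiprocessing.Process(target=worker, args=(iq, oq))
    p.start()
    iq.put("frame")
    print(oq.get())   # -> frame
    iq.put(None)      # stop the worker before the queues are torn down
    p.join()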

I think this is a problem with the inter-process communication. I got the same BrokenPipeError: [Errno 32] Broken pipe when I did not close the video stream properly before releasing the video capture. Also check whether the pipe breaks during a subprocess stdin.write.
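
Another common way to hit [Errno 32] is tearing the queues down (or letting the parent exit) while the daemon children are still blocked on inputQueue.get(). A hedged sketch of an orderly teardown for the end of run() in the question; it assumes start_tracker is also changed to break out of its loop when it receives the None sentinel:

# sketch: orderly teardown at the end of run(); names follow the question's code
for iq in inputQueues:
    iq.put(None)        # sentinel -- assumes start_tracker exits on None
for q in inputQueues + outputQueues:
    q.close()           # no more data will be put on this queue
    q.join_thread()     # wait for the feeder thread to flush the pipe
vs.release()            # release the capture before destroying the windows
cv2.destroyAllWindows()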
