我最近一直在使用python学习计算机视觉,在制作手部检测器项目时,我遇到了这个错误:-
Traceback (most recent call last):
File "c:UsersidhantOneDrive - 007lakshyaIdhantProgrammingProjectsMY MACHINE
LEARNING PROJECTSHand Tracking Module.py", line 64, in <module>
main()
File "c:UsersidhantOneDrive - 007lakshyaIdhantProgrammingProjectsMY MACHINE
LEARNING PROJECTSHand Tracking Module.py", line 41, in main
detector = handDetector()
File "c:UsersidhantOneDrive - 007lakshyaIdhantProgrammingProjectsMY MACHINE
LEARNING PROJECTSHand Tracking Module.py", line 13, in __init__
self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.detectionCon,
self.trackCon)
File "C:UsersidhantAppDataRoamingPythonPython39site-
packagesmediapipepythonsolutionshands.py", line 114, in __init__
super().__init__(
File "C:UsersidhantAppDataRoamingPythonPython39site-
packagesmediapipepythonsolution_base.py", line 258, in __init__
self._input_side_packets = {
File "C:UsersidhantAppDataRoamingPythonPython39site-
packagesmediapipepythonsolution_base.py", line 259, in <dictcomp>
name: self._make_packet(self._side_input_type_info[name], data)
File "C:UsersidhantAppDataRoamingPythonPython39site-
packagesmediapipepythonsolution_base.py", line 513, in _make_packet
return getattr(packet_creator, 'create_' + packet_data_type.value)(data)
TypeError: create_int(): incompatible function arguments. The following argument types
are supported:
1. (arg0: int) -> mediapipe.python._framework_bindings.packet.Packet
Invoked with: 0.5
[ WARN:0] global D:aopencv-pythonopencv-
pythonopencvmodulesvideoiosrccap_msmf.cpp (438) `anonymous-
namespace'::SourceReaderCB::~SourceReaderCB terminating async callback
我尝试了很多调试,但没有成功:(,所以请帮助我,这是我写的代码:-
import cv2
import mediapipe as mp
import time
class handDetector():
def __init__(self, mode=False, maxHands = 2, detectionCon=0.5, trackCon = 0.5):
self.mode = mode
self.maxHands = maxHands
self.detectionCon = detectionCon
self.trackCon = trackCon
self.mpHands = mp.solutions.hands
self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.detectionCon,
self.trackCon)
self.mpDraw = mp.solutions.drawing_utils
def findHands(self, img, draw=True):
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
results = self.hands.process(imgRGB)
# print(results.multi_hand_landmarks)
if results.multi_hand_landmarks:
for handLms in results.multi_hand_landmarks:
if draw:
self.mpDraw.draw_landmarks(img, handLms, self.mpHands.HAND_CONNECTIONS)
return img
# for id, lm in enumerate(handLms.landmark):
# # print(id, lm)
# h, w, c = img.shape
# cx, cy = int(lm.x*w), int(lm.y*h)
# print(id, cx, cy)
# # if id == 4:
# cv2.circle(img, (cx, cy), 15, (255,0,255), cv2.FILLED)
def main():
pTime = 0
cTime = 0
cap = cv2.VideoCapture(0)
detector = handDetector()
while True:
success, img = cap.read()
img = detector.findHands(img)
cTime = time.time()
fps = 1/(cTime-pTime)
pTime = cTime
cv2.putText(img, str(int(fps)),(10, 70), cv2.FONT_HERSHEY_COMPLEX, 3, (255,0,255),3)
cv2.imshow("Image", img)
cv2.waitKey(1)
if __name__ == "__main__":
main()
我已经尝试制作一个类的手检测器,它做同样的事情来检测手,但我们也可以在我们的其他文件中使用它,这就是为什么我写了这段代码,并遇到了这个问题!
在def __init__()
中,代码为:
self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.detectionCon, self.trackCon)
尝试为Hands()
中的第三个参数添加模型复杂度,如下所示:
self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.modelComplex, self.detectionCon, self.trackCon)
所以self.mpHands.Hands()
中总共有5个参数
这是我的完整代码,为我工作:
class handDetector():
def __init__(self, mode=False, maxHands=1, modelComplexity=1, detectionCon=0.5, trackCon=0.5):
self.mode = mode
self.maxHands = maxHands
self.modelComplex = modelComplexity
self.detectionCon = detectionCon
self.trackCon = trackCon
self.mpHands = mp.solutions.hands
self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.modelComplex,
self.detectionCon, self.trackCon)
我面临同样的问题,只需将model_complexity
添加到您的init
功能,您就可以了:
def __init__(self, mode=False, model_complexity=1, upBody=False, smooth=True, detectionCon=0.5, trackCon=0.5):
您需要在handDetector()
类的__init__
方法中再分配一个参数。
完整的代码可能看起来像:
import cv2
import mediapipe as mp
import time
# class creation
class handDetector():
def __init__(self, mode=False, maxHands=2, detectionCon=0.5,modelComplexity=1,trackCon=0.5):
self.mode = mode
self.maxHands = maxHands
self.detectionCon = detectionCon
self.modelComplex = modelComplexity
self.trackCon = trackCon
self.mpHands = mp.solutions.hands
self.hands = self.mpHands.Hands(self.mode, self.maxHands,self.modelComplex,
self.detectionCon, self.trackCon)
self.mpDraw = mp.solutions.drawing_utils # it gives small dots onhands total 20 landmark points
def findHands(self,img,draw=True):
# Send rgb image to hands
imgRGB = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
self.results = self.hands.process(imgRGB) # process the frame
# print(results.multi_hand_landmarks)
if self.results.multi_hand_landmarks:
for handLms in self.results.multi_hand_landmarks:
if draw:
#Draw dots and connect them
self.mpDraw.draw_landmarks(img,handLms,
self.mpHands.HAND_CONNECTIONS)
return img
def findPosition(self,img, handNo=0, draw=True):
"""Lists the position/type of landmarks
we give in the list and in the list ww have stored
type and position of the landmarks.
List has all the lm position"""
lmlist = []
# check wether any landmark was detected
if self.results.multi_hand_landmarks:
#Which hand are we talking about
myHand = self.results.multi_hand_landmarks[handNo]
# Get id number and landmark information
for id, lm in enumerate(myHand.landmark):
# id will give id of landmark in exact index number
# height width and channel
h,w,c = img.shape
#find the position
cx,cy = int(lm.x*w), int(lm.y*h) #center
# print(id,cx,cy)
lmlist.append([id,cx,cy])
# Draw circle for 0th landmark
if draw:
cv2.circle(img,(cx,cy), 15 , (255,0,255), cv2.FILLED)
return lmlist
def main():
#Frame rates
pTime = 0
cTime = 0
cap = cv2.VideoCapture(0)
detector = handDetector()
while True:
success,img = cap.read()
img = detector.findHands(img)
lmList = detector.findPosition(img)
if len(lmList) != 0:
print(lmList[4])
cTime = time.time()
fps = 1/(cTime-pTime)
pTime = cTime
cv2.putText(img,str(int(fps)),(10,70), cv2.FONT_HERSHEY_PLAIN,3,(255,0,255),3)
cv2.imshow("Video",img)
if cv2.waitKey(1) == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
我不知道它现在是否有帮助,或者可能是将来参考。这是mediapipe最新版本的已知问题。恢复到0.8.8版本将解决这个问题。
这是一个缩进问题,因为findHands()需要成为类handDetector()的一部分。输入tab键findHands()就可以了
解决方案:
def __init__(self, mode=False, maxHands = 2, detectionCon=0.5, trackCon = 0.5):
问题出在代码的第6行。库更新后,必须将整数值初始化为detectionconquot;。为获得最佳效果,初始化detectionCon = 1和trackCon = 0.5。
如果这解决了你的问题,请投票,让其他人可以很容易地看到。