重新格式化

This commit is contained in:
leafiber 2022-05-28 21:38:34 +08:00
parent 1fa9906377
commit 84e8c23fe3

107
demo.py
View File

@ -1,13 +1,10 @@
# -*- coding:utf-8 -*- # -*- coding:utf-8 -*-
""" """
信号设计课程小组设计
CODE >>> SINCE IN CAIXYPROMISE. @ by: Leaf
STRIVE FOR EXCELLENT. @ date: 2022-05-28
CONSTANTLY STRIVING FOR SELF-IMPROVEMENT.
@ by: caixy
@ date: 2021-10-1
""" """
import cv2 import cv2
@ -20,37 +17,38 @@ class HandDetector:
如查找方式许多手指向上或两个手指之间的距离而且提供找到的手的边界框信息 如查找方式许多手指向上或两个手指之间的距离而且提供找到的手的边界框信息
""" """
def __init__(self, mode=False, maxHands=2, detectionCon=0.5, minTrackCon=0.5): def __init__(self, mode=False, max_hands=2, detection_con=0.5, min_track_con=0.5):
""" """
:param mode: 在静态模式下对每个图像进行检测 :param mode: 在静态模式下对每个图像进行检测
:param maxHands: 要检测的最大手数 :param max_hands: 要检测的最大手数
:param detectionCon: 最小检测置信度 :param detection_con: 最小检测置信度
:param minTrackCon: 最小跟踪置信度 :param min_track_con: 最小跟踪置信度
""" """
self.results = None
self.mode = mode self.mode = mode
self.maxHands = maxHands self.max_hands = max_hands
self.modelComplex = False self.modelComplex = False
self.detectionCon = detectionCon self.detection_con = detection_con
self.minTrackCon = minTrackCon self.min_track_con = min_track_con
# 初始化手部识别模型 # 初始化手部识别模型
self.mpHands = mp.solutions.hands self.mpHands = mp.solutions.hands
self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.modelComplex, self.hands = self.mpHands.Hands(self.mode, self.max_hands, self.modelComplex,
self.detectionCon, self.minTrackCon) self.detection_con, self.min_track_con)
self.mpDraw = mp.solutions.drawing_utils # 初始化绘图器 self.mpDraw = mp.solutions.drawing_utils # 初始化绘图器
self.tipIds = [4, 8, 12, 16, 20] # 指尖列表 self.tipIds = [4, 8, 12, 16, 20] # 指尖列表
self.fingers = [] self.fingers = []
self.lmList = [] self.lmList = []
def findHands(self, img, draw=True): def find_hands(self, img, draw=True):
""" """
从图像(BRG)中找到手部 从图像(BRG)中找到手部
:param img: 用于查找手的图像 :param img: 用于查找手的图像
:param draw: 在图像上绘制输出的标志 :param draw: 在图像上绘制输出的标志
:return: 带或不带图形的图像 :return: 带或不带图形的图像
""" """
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # 将传入的图像由BGR模式转标准的Opencv模式——RGB模式 img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # 将传入的图像由BGR模式转标准的Opencv模式——RGB模式
self.results = self.hands.process(imgRGB) self.results = self.hands.process(img_rgb)
if self.results.multi_hand_landmarks: if self.results.multi_hand_landmarks:
for handLms in self.results.multi_hand_landmarks: for handLms in self.results.multi_hand_landmarks:
@ -59,55 +57,53 @@ class HandDetector:
self.mpHands.HAND_CONNECTIONS) self.mpHands.HAND_CONNECTIONS)
return img return img
def findPosition(self, img, handNo=0, draw=True): def find_position(self, img, hand_no=0, draw=True):
""" """
查找单手的地标并将其放入列表中像素格式还可以返回手部周围的边界框 查找单手的地标并将其放入列表中像素格式还可以返回手部周围的边界框
:param img: 要查找的主图像 :param img: 要查找的主图像
:param handNo: 如果检测到多只手则为手部id :param hand_no: 如果检测到多只手则为手部id
:param draw: 在图像上绘制输出的标志(默认绘制矩形框) :param draw: 在图像上绘制输出的标志(默认绘制矩形框)
:return: 像素格式的手部关节位置列表手部边界框 :return: 像素格式的手部关节位置列表手部边界框
""" """
xList = [] x_list = []
yList = [] y_list = []
bbox = [] bbox_info = []
bboxInfo = []
self.lmList = [] self.lmList = []
if self.results.multi_hand_landmarks: if self.results.multi_hand_landmarks:
myHand = self.results.multi_hand_landmarks[handNo] my_hand = self.results.multi_hand_landmarks[hand_no]
for id, lm in enumerate(myHand.landmark): for _, lm in enumerate(my_hand.landmark):
h, w, c = img.shape h, w, c = img.shape
px, py = int(lm.x * w), int(lm.y * h) px, py = int(lm.x * w), int(lm.y * h)
xList.append(px) x_list.append(px)
yList.append(py) y_list.append(py)
self.lmList.append([px, py]) self.lmList.append([px, py])
if draw: if draw:
cv2.circle(img, (px, py), 5, (255, 0, 255), cv2.FILLED) cv2.circle(img, (px, py), 5, (255, 0, 255), cv2.FILLED)
xmin, xmax = min(xList), max(xList) x_min, x_max = min(x_list), max(x_list)
ymin, ymax = min(yList), max(yList) y_min, y_max = min(y_list), max(y_list)
boxW, boxH = xmax - xmin, ymax - ymin box_w, box_h = x_max - x_min, y_max - y_min
bbox = xmin, ymin, boxW, boxH bbox = x_min, y_min, box_w, box_h
cx, cy = bbox[0] + (bbox[2] // 2), \ cx, cy = bbox[0] + (bbox[2] // 2), bbox[1] + (bbox[3] // 2)
bbox[1] + (bbox[3] // 2) bbox_info = {"id": hand_no, "bbox": bbox, "center": (cx, cy)}
bboxInfo = {"id": id, "bbox": bbox, "center": (cx, cy)}
if draw: if draw:
cv2.rectangle(img, (bbox[0] - 20, bbox[1] - 20), cv2.rectangle(img, (bbox[0] - 20, bbox[1] - 20),
(bbox[0] + bbox[2] + 20, bbox[1] + bbox[3] + 20), (bbox[0] + bbox[2] + 20, bbox[1] + bbox[3] + 20),
(0, 255, 0), 2) (0, 255, 0), 2)
return self.lmList, bboxInfo return self.lmList, bbox_info
def fingersUp(self): def fingers_up(self):
""" """
查找列表中打开并返回的手指数会分别考虑左手和右手 查找列表中打开并返回的手指数会分别考虑左手和右手
return竖起手指的列表 :return: 竖起手指的列表
""" """
fingers = []
if self.results.multi_hand_landmarks: if self.results.multi_hand_landmarks:
myHandType = self.handType() my_hand_type = self.hand_type()
fingers = []
# Thumb # Thumb
if myHandType == "Right": if my_hand_type == "Right":
if self.lmList[self.tipIds[0]][0] > self.lmList[self.tipIds[0] - 1][0]: if self.lmList[self.tipIds[0]][0] > self.lmList[self.tipIds[0] - 1][0]:
fingers.append(1) fingers.append(1)
else: else:
@ -118,17 +114,17 @@ class HandDetector:
else: else:
fingers.append(0) fingers.append(0)
# 4 Fingers # 4 Fingers
for id in range(1, 5): for i in range(1, 5):
if self.lmList[self.tipIds[id]][1] < self.lmList[self.tipIds[id] - 2][1]: if self.lmList[self.tipIds[i]][1] < self.lmList[self.tipIds[i] - 2][1]:
fingers.append(1) fingers.append(1)
else: else:
fingers.append(0) fingers.append(0)
return fingers return fingers
def handType(self): def hand_type(self):
""" """
检查传入的手部是左还是右 检查传入的手部是左还是右
return: "Right" "Left" :return: "Right" "Left"
""" """
if self.results.multi_hand_landmarks: if self.results.multi_hand_landmarks:
if self.lmList[17][0] < self.lmList[5][0]: if self.lmList[17][0] < self.lmList[5][0]:
@ -139,21 +135,21 @@ class HandDetector:
class Main: class Main:
def __init__(self): def __init__(self):
self.detector = None
self.camera = cv2.VideoCapture(0, cv2.CAP_DSHOW) self.camera = cv2.VideoCapture(0, cv2.CAP_DSHOW)
self.camera.set(3, 1280) self.camera.set(3, 1280)
self.camera.set(4, 720) self.camera.set(4, 720)
def Gesture_recognition(self): def gesture_recognition(self):
fps = cv2.CAP_PROP_FPS
self.detector = HandDetector() self.detector = HandDetector()
while True: while True:
frame, img = self.camera.read() frame, img = self.camera.read()
img = self.detector.findHands(img) img = self.detector.find_hands(img)
lmList, bbox = self.detector.findPosition(img) lm_list, bbox = self.detector.find_position(img)
if lmList: if lm_list:
x_1, y_1 = bbox["bbox"][0], bbox["bbox"][1] x_1, y_1 = bbox["bbox"][0], bbox["bbox"][1]
x1, x2, x3, x4, x5 = self.detector.fingersUp() x1, x2, x3, x4, x5 = self.detector.fingers_up()
if (x2 == 1 and x3 == 1) and (x4 == 0 and x5 == 0 and x1 == 0): if (x2 == 1 and x3 == 1) and (x4 == 0 and x5 == 0 and x1 == 0):
cv2.putText(img, "2_TWO", (x_1, y_1), cv2.FONT_HERSHEY_PLAIN, 3, cv2.putText(img, "2_TWO", (x_1, y_1), cv2.FONT_HERSHEY_PLAIN, 3,
@ -179,8 +175,8 @@ class Main:
elif x1 and (x2 == 0, x3 == 0, x4 == 0, x5 == 0): elif x1 and (x2 == 0, x3 == 0, x4 == 0, x5 == 0):
cv2.putText(img, "GOOD!", (x_1, y_1), cv2.FONT_HERSHEY_PLAIN, 3, cv2.putText(img, "GOOD!", (x_1, y_1), cv2.FONT_HERSHEY_PLAIN, 3,
(0, 0, 255), 3) (0, 0, 255), 3)
cv2.imshow("camera", img)
cv2.imshow("camera", img)
key = cv2.waitKey(1) key = cv2.waitKey(1)
if cv2.getWindowProperty('camera', cv2.WND_PROP_VISIBLE) < 1: if cv2.getWindowProperty('camera', cv2.WND_PROP_VISIBLE) < 1:
break break
@ -188,7 +184,6 @@ class Main:
break break
if __name__ == '__main__':
    # Entry point: open the camera and run the gesture-recognition loop.
    solution = Main()
    solution.gesture_recognition()