# Soundboard frame handler: highlight the pad under the index fingertip and
# play that pad's sample when the finger "presses" (pinch + proximity).
# NOTE(review): reconstructed from collapsed source — imshow/waitKey are
# assumed to sit at loop level, outside the button loop; confirm.
lmList = hand1["lmList"]
for btn in buttonList:
    x, y = btn.pos
    w, h = btn.size
    # Index-tip (8) to thumb-tip (4) separation — the "pinch" measure.
    delta_x = lmList[8][0] - lmList[4][0]
    delta_y = lmList[8][1] - lmList[4][1]
    pinch = np.sqrt(delta_x * delta_x + delta_y * delta_y)
    hovering = x < lmList[8][0] < x + w and y < lmList[8][1] < y + h
    if hovering and pinch < 100:
        # Hover feedback: slightly enlarged filled rectangle behind the pad.
        cv2.rectangle(img, (x - 5, y - 5), (x + w + 5, y + h + 5),
                      (175, 0, 175), cv2.FILLED)
        cv2.putText(img, btn.text, (x + 20, y + 65),
                    cv2.FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 4)
        # Distance from the pad's top-left corner to the index fingertip.
        corner_dist, _, _ = detector.findDistance(
            (x, y), (lmList[8][0], lmList[8][1]), img)
        print(corner_dist)
        if corner_dist < 60:
            # Trigger: play the pad's sample (capped at 1s) and flash green.
            pygame.mixer.Sound(
                "audios/" + btn.text.lower() + ".wav").play(maxtime=1000)
            cv2.rectangle(img, btn.pos, (x + w, y + h),
                          (0, 255, 0), cv2.FILLED)
            cv2.putText(img, btn.text, (x + 20, y + 65),
                        cv2.FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 4)
            sleep(0.2)  # crude debounce so one press fires once
cv2.imshow("Image", img)
cv2.waitKey(1)
# Gesture volume control: map the thumb-index pinch distance onto the system
# master volume and render an on-screen volume bar with a percentage label.
hands, img = detector.findHands(img)
if hands:
    lmList = hands[0]['lmList']
    x1, y1 = lmList[4][0], lmList[4][1]      # thumb tip
    x2, y2 = lmList[8][0], lmList[8][1]      # index tip
    cx, cy = (x1 + x2) // 2, (y1 + y2) // 2  # midpoint of the pinch line
    for px, py in ((x1, y1), (x2, y2), (cx, cy)):
        cv2.circle(img, (px, py), 10, (0, 255, 0), cv2.FILLED)
    cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 3)
    length, info = detector.findDistance((x1, y1), (x2, y2))
    # Map pinch length [10..130] px onto the volume / bar-height / percent
    # ranges; np.interp clamps outside the input range.
    vol = np.interp(length, [10, 130], [minVol, maxVol])
    volBar = np.interp(length, [10, 130], [300, 100])
    volPer = np.interp(length, [10, 130], [0, 100])
    volume.SetMasterVolumeLevel(vol, None)
    if length < 25:
        # Fully pinched: recolor the midpoint as visual feedback.
        cv2.circle(img, (cx, cy), 10, (255, 0, 255), cv2.FILLED)
    # NOTE(review): reconstructed from collapsed source — the bar drawing is
    # assumed to sit at this indent (after the pinch check), not inside it.
    cv2.rectangle(img, (100, 100), (80, 300), (0, 255, 0), 3)
    cv2.rectangle(img, (100, int(volBar)), (80, 300), (0, 255, 0), cv2.FILLED)
    cv2.putText(img, f'{int(volPer)} %', (70, 350),
                cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 3)
# Virtual-keyboard main loop: draw the keys, detect an index-finger hover,
# and register a key press when the index (8) and middle (12) tips pinch.
while True:
    success, img = cap.read()
    img = detector.findHands(img)
    lmList, bboxInfo = detector.findPosition(img)
    img = drawAll(img, buttonList)
    if lmList:
        for btn in buttonList:
            x, y = btn.pos
            w, h = btn.size
            inside = x < lmList[8][0] < x + w and y < lmList[8][1] < y + h
            if not inside:
                continue
            # Hover highlight: enlarged filled rectangle behind the key.
            cv2.rectangle(img, (x - 5, y - 5), (x + w + 5, y + h + 5),
                          (175, 0, 175), cv2.FILLED)
            cv2.putText(img, btn.text, (x + 20, y + 65),
                        cv2.FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 4)
            click_dist, _, _ = detector.findDistance(8, 12, img, draw=False)
            print(click_dist)
            # A tight pinch counts as a click.
            if click_dist < 30:
                keyboard.press(btn.text)
                cv2.rectangle(img, btn.pos, (x + w, y + h),
                              (0, 255, 0), cv2.FILLED)
                cv2.putText(img, btn.text, (x + 20, y + 65),
                            cv2.FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 4)
                finalText += btn.text
                sleep(0.15)  # debounce so one pinch types one character
    # Typed-text display strip.
    cv2.rectangle(img, (50, 350), (700, 450), (175, 0, 175), cv2.FILLED)
    cv2.putText(img, finalText, (60, 430),
                cv2.FONT_HERSHEY_PLAIN, 5, (255, 255, 255), 5)
# NOTE(review): collapsed fragment — it starts mid-call (the putTextRect for
# mcq.choice3) and ends mid-`if`, so it is left byte-identical rather than
# reformatted. MCQ quiz logic: the four answer choices are drawn with
# cvzone.putTextRect; a <15px index-middle pinch selects whichever choice
# bbox the index fingertip (cursor) is over via mcq.update; once an answer
# is recorded the code sleeps 0.6s and advances qNo. The `else:` branch
# (quiz finished) begins tallying the score across mcqlist.
mcq.choice3, [60, 300], 1, 1, offset=20, border=3) img, bbox4 = cvzone.putTextRect(img, mcq.choice4, [230, 300], 1, 1, offset=20, border=3) if hands: lmList = hands[0]['lmList'] cursor = lmList[8] length, info = detector.findDistance(lmList[8], lmList[12]) #print((length)) if length < 15: mcq.update(cursor, [bbox1, bbox2, bbox3, bbox4]) print(mcq.userAnswer) if mcq.userAnswer is not None: time.sleep(0.6) qNo += 1 else: score = 0 for mcq in mcqlist: if mcq.answer == mcq.userAnswer:
# Gesture-calculator frame handler: draw the display strip and buttons,
# detect an index-middle pinch "click" over a button, and append the
# button's value to the equation (or evaluate it on '=').
img = cv2.flip(img, 1)
hands, img = detector.findHands(img)

# Draw the equation display strip (filled background + border) and buttons.
cv2.rectangle(img, (800, 70), (800 + 400, 70 + 100), (225, 225, 225), cv2.FILLED)
cv2.rectangle(img, (800, 70), (800 + 400, 70 + 100), (50, 50, 50), 3)
for button in buttonList:
    button.draw(img)

if hands:
    # Pinch distance between index (8) and middle (12) tips acts as a click.
    lmList = hands[0]['lmList']
    length, _, img = detector.findDistance(lmList[8], lmList[12], img)
    print(length)
    x, y = lmList[8]
    if length < 50 and delayCounter == 0:
        for i, button in enumerate(buttonList):
            if button.checkClick(x, y):
                # Column-major grid lookup (4 rows); i % 4 / i // 4 replace
                # the original int(i % 4) / int(i / 4) — identical for ints.
                myValue = buttonListValues[i % 4][i // 4]
                if myValue == '=':
                    # FIX: a bare eval() crashed the whole app on malformed
                    # input (e.g. "5*+2", "") or division by zero; fail soft
                    # by clearing the equation instead. eval is acceptable
                    # here only because the string is built exclusively from
                    # the on-screen calculator buttons, never free text.
                    try:
                        myEquation = str(eval(myEquation))
                    except (SyntaxError, ZeroDivisionError):
                        myEquation = ''
                else:
                    myEquation += myValue
                delayCounter = 1  # start click-debounce (reset elsewhere)
# Hand-tracking demo loop: for a single detected hand, overlay the raised
# finger count, the index-middle fingertip distance, and the hand side.
detector = HandDetector(detectionCon=0.5, maxHands=1)
while True:
    # Grab a frame and run landmark detection on it.
    success, img = cap.read()
    img = detector.findHands(img)
    lmList, bboxInfo = detector.findPosition(img)
    if lmList:
        bbox = bboxInfo['bbox']
        # How many fingers are raised (fingersUp yields 0/1 flags).
        finger_flags = detector.fingersUp()
        raised = finger_flags.count(1)
        cv2.putText(img, f'Fingers:{raised}',
                    (bbox[0] + 200, bbox[1] - 30),
                    cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
        # Distance between the index (8) and middle (12) fingertips.
        tip_gap, img, line_info = detector.findDistance(8, 12, img)
        cv2.putText(img, f'Dist:{int(tip_gap)}',
                    (bbox[0] + 400, bbox[1] - 30),
                    cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
        # Which hand this is (left/right).
        hand_side = detector.handType()
        cv2.putText(img, f'Hand:{hand_side}', (bbox[0], bbox[1] - 30),
                    cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
    # Show the annotated frame.
    cv2.imshow("Image", img)
    cv2.waitKey(1)
# Gesture mouse: the index fingertip drives the OS cursor; pinching index
# and middle tips left-clicks, and a combined tight pinch of index, middle
# and ring tips double-clicks.
import cvzone
from cvzone.HandTrackingModule import HandDetector
import cv2
import pyautogui

cap = cv2.VideoCapture(0)
cap.set(3, 1500)  # capture width
cap.set(4, 900)   # capture height
detector = HandDetector(detectionCon=0.6, maxHands=1, minTrackCon=0.4)

while True:
    success, img = cap.read()
    img = cv2.flip(img, 1)  # mirror so cursor motion matches the user
    hands, img = detector.findHands(img)
    if hands:
        lmList1 = hands[0]["lmList"]
        # Move the OS cursor to the index fingertip (landmark 8).
        tip_x = lmList1[8][0]
        tip_y = lmList1[8][1]
        pyautogui.moveTo(tip_x, tip_y)
        # Pinch distances: index-middle and middle-ring fingertips.
        length_single_click, info, img = detector.findDistance(
            lmList1[8], lmList1[12], img)
        length_double_click, info, img = detector.findDistance(
            lmList1[12], lmList1[16], img)
        if length_single_click < 40:
            pyautogui.click(button='left')
        # NOTE(review): this condition can fire in the same frame as the
        # single click above when both pinches are tight — confirm intended.
        if length_double_click + length_single_click < 90:
            pyautogui.click(clicks=2, interval=0, button=pyautogui.PRIMARY)
    cv2.imshow("Image", img)
    cv2.waitKey(1)
# NOTE(review): collapsed fragment, truncated at the end (the reset branch
# is cut after `length2 = 0` — presumably `length3 = 0` follows), so it is
# left byte-identical rather than reformatted. RGB color-mixer: while the
# cursor (index tip, landmark 8) hovers one of the first three images, the
# index-thumb pinch distance sets that channel's level (length1/2/3, each
# scaled to roughly 0-235); hovering the fourth image resets the levels.
# The embedded Turkish comments translate to "getting the images' origin
# coordinates" and "checking whether the cursor entered this range".
if hands: lmList = hands[0]['lmList'] cursor = lmList[8] w, h = listImg[0].size # resimlerin yerlerini kordinatlarını alıyoruz xr, yr = listImg[0].posOrigin xg, yg = listImg[1].posOrigin xb, yb = listImg[2].posOrigin xrs, yrs = listImg[3].posOrigin #curcor bu aralığa girip girmediğini kontrol ediyoruz if xr < cursor[0] < xr + w and yr < cursor[1] < yr + h: length, info, img = detector.findDistance(lmList[8], lmList[4], img, 5, (0, 0, 255)) length1 = int((length / 1000) * 500) - 15 if xg < cursor[0] < xg + w and yg < cursor[1] < yg + h: length, info, img = detector.findDistance(lmList[8], lmList[4], img, 5, (0, 255, 0)) length2 = int((length / 1000) * 500) - 15 if xb < cursor[0] < xb + w and yb < cursor[1] < yb + h: length, info, img = detector.findDistance(lmList[8], lmList[4], img, 5, (255, 0, 0)) length3 = int((length / 1000) * 500) - 15 if xrs < cursor[0] < xrs + w and yrs < cursor[1] < yrs + h: length1 = 0 length2 = 0
# NOTE(review): collapsed fragment — line breaks/indentation were lost and
# the trailing `try:` block is cut off before its `except` clause, so it is
# left byte-identical rather than reformatted. Two-hand zoom gesture: when
# both hands show the [1, 1, 0, 0, 0] (thumb+index up) pattern, the distance
# between the hand centers, relative to the distance captured when the
# gesture started (startDist), produces a `scale`; the try block then
# resizes cvarduino.jpg by that scale (rounded to even dimensions). When
# the gesture breaks, startDist is reset to None.
success, img = cap.read() hands, img = detector.findHands(img) img1 = cv2.imread("cvarduino.jpg") if len(hands) == 2: # print(detector.fingersUp(hands[0]), detector.fingersUp(hands[1])) if detector.fingersUp(hands[0]) == [1, 1, 0, 0, 0] and \ detector.fingersUp(hands[1]) == [1, 1, 0, 0, 0]: # print("Zoom Gesture") lmList1 = hands[0]["lmList"] lmList2 = hands[1]["lmList"] # point 8 is the tip of the index finger if startDist is None: #length, info, img = detector.findDistance(lmList1[8], lmList2[8], img) length, info, img = detector.findDistance(hands[0]["center"], hands[1]["center"], img) startDist = length #length, info, img = detector.findDistance(lmList1[8], lmList2[8], img) length, info, img = detector.findDistance(hands[0]["center"], hands[1]["center"], img) scale = int((length - startDist) // 2) cx, cy = info[4:] print(scale) else: startDist = None try: h1, w1, _= img1.shape newH, newW = ((h1+scale)//2)*2, ((w1+scale)//2)*2 img1 = cv2.resize(img1, (newW,newH))