Example no. 1
import cv2
from cvzone.HandTrackingModule import HandDetector
import cvzone
import numpy as np

cap = cv2.VideoCapture(2)
cap.set(3, 1280)
cap.set(4, 720)
detector = HandDetector(detectionCon=0.8)
colorR = (255, 0, 255)

cx, cy, w, h = 100, 100, 200, 200


class DragRect():
    def __init__(self, posCenter, size=[200, 200]):
        self.posCenter = posCenter
        self.size = size

    def update(self, cursor):
        cx, cy = self.posCenter
        w, h = self.size

        # If the index finger tip is in the rectangle region
        if cx - w // 2 < cursor[0] < cx + w // 2 and \
                cy - h // 2 < cursor[1] < cy + h // 2:
            self.posCenter = cursor


rectList = []
for x in range(5):
    # gap filled from context: one draggable rectangle per iteration
    rectList.append(DragRect([x * 250 + 150, 150]))
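# A hedged sketch of the main loop this example presumably continues with,
# assuming cvzone 1.5+ (findHands returns a list of hand dicts); the drawing
# style is an assumption, not the original author's code.
while True:
    success, img = cap.read()
    img = cv2.flip(img, 1)
    hands, img = detector.findHands(img)
    if hands:
        lmList = hands[0]['lmList']
        # a small index-middle fingertip distance acts as a "pinch" grab
        length, info = detector.findDistance(lmList[8][0:2], lmList[12][0:2])
        if length < 60:
            cursor = lmList[8]  # index fingertip
            for rect in rectList:
                rect.update(cursor)
    # draw the rectangles on a semi-transparent overlay
    imgNew = np.zeros_like(img, np.uint8)
    for rect in rectList:
        cx, cy = rect.posCenter
        w, h = rect.size
        cv2.rectangle(imgNew, (cx - w // 2, cy - h // 2),
                      (cx + w // 2, cy + h // 2), colorR, cv2.FILLED)
    out = cv2.addWeighted(img, 0.5, imgNew, 0.5, 0)
    cv2.imshow("Image", out)
    cv2.waitKey(1)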
Example no. 2
# for right only

import cv2
import numpy as np
import mediapipe as mp
import time
from cvzone.HandTrackingModule import HandDetector

detector = HandDetector(detectionCon=0.8)  # the first positional argument is `mode`, so pass detectionCon by name
prevTime = 0

cap = cv2.VideoCapture(0)
tipIds = [8, 12, 16]

while True:
    success, img = cap.read()
    hands, img = detector.findHands(img)

    if hands:
        lmList = hands[0]['lmList']
        fingers = []

        q1 = lmList[20][1]
        q2 = lmList[18][1]
        if q1 < q2:
            fingers.append(1)
        else:
            fingers.append(0)

        s1 = lmList[4][0]
        s2 = lmList[3][0]
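        # A hedged sketch of the continuation: thumb check (the x comparison
        # above), the three fingertips in tipIds, then an on-screen count.
        if s1 > s2:  # thumb direction for a right hand -- an assumption
            fingers.append(1)
        else:
            fingers.append(0)
        for tipId in tipIds:
            # a finger counts as "up" when its tip is above its middle joint
            if lmList[tipId][1] < lmList[tipId - 2][1]:
                fingers.append(1)
            else:
                fingers.append(0)
        cv2.putText(img, f'Fingers: {fingers.count(1)}', (50, 100),
                    cv2.FONT_HERSHEY_PLAIN, 3, (0, 255, 0), 3)

    cv2.imshow("Image", img)
    cv2.waitKey(1)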
Example no. 3
import cv2
import os
import numpy as np
from cvzone.HandTrackingModule import HandDetector  # used below but missing from this excerpt

# Parameters
width, height = 1280, 720
gestureThreshold = 300
folderPath = "Presentation"

# Camera Setup
cap = cv2.VideoCapture(0)
cap.set(3, width)
cap.set(4, height)

# Hand Detector
detectorHand = HandDetector(detectionCon=0.8, maxHands=1)

# Variables
imgList = []
delay = 30
buttonPressed = False
counter = 0
drawMode = False
imgNumber = 0
delayCounter = 0
annotations = [[]]
annotationNumber = -1
annotationStart = False
hs, ws = int(120 * 1), int(213 * 1)  # height and width of the small webcam overlay

# Get list of presentation images
pathImages = sorted(os.listdir(folderPath), key=len)  # gap filled from the comment above
Example no. 4
# pip install cvzone
# pip install opencv-contrib-python

from cvzone.HandTrackingModule import HandDetector
import cv2
cap = cv2.VideoCapture(1)
detector = HandDetector(detectionCon=0.5, maxHands=2)
while True:
    success, img = cap.read()
    img = cv2.flip(img, 1)
    img = detector.findHands(img)  # old cvzone (<1.5) API: findHands returns only the image
    lmList, bbox = detector.findPosition(img, draw=False)
    if lmList:
        myHandType = detector.handType()
        cv2.putText(img, myHandType, (50, 50), cv2.FONT_HERSHEY_PLAIN, 2,
                    (0, 255, 0), 2)
    cv2.imshow("Image", img)
    cv2.waitKey(1)
Example no. 5
import cv2
import numpy as np
from cvzone.HandTrackingModule import HandDetector
import cvzone

cap = cv2.VideoCapture(0)

detector = HandDetector(detectionCon=0.8, maxHands=1)
# function
x = [300, 245, 200, 170, 100, 257, 80, 112, 145, 130, 103, 93, 80, 75, 70, 60]
y = [20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95]

coff = np.polyfit(x, y, 2)

while True:
    success, img = cap.read()
    # print(img.shape)

    hands = detector.findHands(img, draw=False)

    if hands:
        lmList = hands[0]['lmList']

        # lmList points are [x, y, z] in cvzone 1.5+, so take x and y explicitly
        x1, y1 = lmList[5][0], lmList[5][1]
        x2, y2 = lmList[17][0], lmList[17][1]
        x, y, w, h = hands[0]['bbox']
        distance = np.sqrt(((y2 - y1) ** 2 + (x2 - x1) ** 2))
        A, B, C = coff
        distanceCm = A * (distance ** 2) + B * distance + C
        # print(distanceCm)
        cvzone.putTextRect(img, f'{int(distanceCm)} cm', (x + 5, y - 50))
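    # Likely ending, following the display pattern of the other examples:
    cv2.imshow("Image", img)
    cv2.waitKey(1)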
Example no. 6
import cv2
import numpy as np
import time
import mediapipe as mp
from cvzone.HandTrackingModule import HandDetector
from ctypes import cast, POINTER
from comtypes import CLSCTX_ALL
from pycaw.pycaw import AudioUtilities, IAudioEndpointVolume


cap = cv2.VideoCapture(0)
detector = HandDetector(detectionCon=0.8)

devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(
    IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
volume = cast(interface, POINTER(IAudioEndpointVolume))
#volume.GetMute()
#volume.GetMasterVolumeLevel()
volRange = volume.GetVolumeRange()
vol = 0
volBar = 300

#print(volume.GetVolumeRange())
minVol = volRange[0]
maxVol = volRange[1]

prevTime = 0
volPer = 0
while True:
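    # A minimal sketch of the loop body, assuming the usual cvzone volume
    # gesture demo: thumb-index distance mapped onto the pycaw volume range.
    success, img = cap.read()
    hands, img = detector.findHands(img)
    if hands:
        lmList = hands[0]['lmList']
        length, info = detector.findDistance(lmList[4][0:2], lmList[8][0:2])
        # map the pinch distance (~30-250 px) onto the device volume range
        vol = np.interp(length, [30, 250], [minVol, maxVol])
        volBar = np.interp(length, [30, 250], [400, 150])
        volPer = np.interp(length, [30, 250], [0, 100])
        volume.SetMasterVolumeLevel(vol, None)
    cv2.rectangle(img, (50, 150), (85, 400), (0, 255, 0), 3)
    cv2.rectangle(img, (50, int(volBar)), (85, 400), (0, 255, 0), cv2.FILLED)
    cv2.putText(img, f'{int(volPer)} %', (40, 450), cv2.FONT_HERSHEY_PLAIN,
                2, (0, 255, 0), 2)
    cv2.imshow("Image", img)
    cv2.waitKey(1)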
Example no. 7

import cv2
from cvzone.HandTrackingModule import HandDetector
import cvzone
import os
import serial

sericom = serial.Serial("COM10", 9600)
sericom.timeout = 1
cap = cv2.VideoCapture(0)  # camera ID
cap.set(3, 1280)  # property ID and frame size
cap.set(4, 720)

detector = HandDetector(
    detectionCon=0.8,
    maxHands=1)  # detection confidence and the maximum number of hands to detect


class DragImg():
    def __init__(self, path, posOrigin, imgType):

        self.posOrigin = posOrigin
        self.imgType = imgType
        self.path = path

        if self.imgType == 'png':  # distinguish png from jpg
            self.img = cv2.imread(self.path, cv2.IMREAD_UNCHANGED)
        else:
            self.img = cv2.imread(self.path)
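        # Hedged continuation sketch (not the original author's code): record
        # the image size, then let update() drag the image under a pinch cursor.
        self.size = self.img.shape[:2]

    def update(self, cursor):
        ox, oy = self.posOrigin
        h, w = self.size
        # re-anchor the image when the fingertip cursor is inside it
        if ox < cursor[0] < ox + w and oy < cursor[1] < oy + h:
            self.posOrigin = cursor[0] - w // 2, cursor[1] - h // 2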
Example no. 8
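# The Button class is missing from this excerpt; below is a minimal sketch that
# fits the calls that follow (Button((x, y), width, height, value), button.draw(img)):
import cv2
from cvzone.HandTrackingModule import HandDetector


class Button:
    def __init__(self, pos, width, height, value):
        self.pos = pos
        self.width = width
        self.height = height
        self.value = value

    def draw(self, img):
        # light grey key with a dark border and the key's value as its label
        cv2.rectangle(img, self.pos,
                      (self.pos[0] + self.width, self.pos[1] + self.height),
                      (225, 225, 225), cv2.FILLED)
        cv2.rectangle(img, self.pos,
                      (self.pos[0] + self.width, self.pos[1] + self.height),
                      (50, 50, 50), 3)
        cv2.putText(img, self.value, (self.pos[0] + 30, self.pos[1] + 70),
                    cv2.FONT_HERSHEY_PLAIN, 2, (50, 50, 50), 2)
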
buttonListValues = [['7', '8', '9', '*'], ['4', '5', '6', '-'],
                    ['1', '2', '3', '+'], ['0', '/', '.', '=']]  # first rows gap-filled from the 4x4 indexing below
buttonList = []
for x in range(4):
    for y in range(4):
        xpos = x * 100 + 800
        ypos = y * 100 + 150

        buttonList.append(
            Button((xpos, ypos), 100, 100, buttonListValues[y][x]))

# Variables
myEquation = ''
delayCounter = 0
# Webcam
cap = cv2.VideoCapture(0)
detector = HandDetector(detectionCon=0.8, maxHands=1)

while True:
    # Get image frame
    success, img = cap.read()
    img = cv2.flip(img, 1)
    hands, img = detector.findHands(img)

    # Draw All
    cv2.rectangle(img, (800, 70), (800 + 400, 70 + 100), (225, 225, 225),
                  cv2.FILLED)

    cv2.rectangle(img, (800, 70), (800 + 400, 70 + 100), (50, 50, 50), 3)
    for button in buttonList:
        button.draw(img)
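    # A hedged sketch of the rest of the loop: a pinch over a key appends its
    # value, '=' evaluates the equation, and a counter debounces the input.
    if hands:
        lmList = hands[0]['lmList']
        length, _, img = detector.findDistance(lmList[8][0:2], lmList[12][0:2], img)
        x, y = lmList[8][0:2]
        if length < 50 and delayCounter == 0:
            for button in buttonList:
                bx, by = button.pos
                if bx < x < bx + button.width and by < y < by + button.height:
                    if button.value == '=':
                        try:
                            myEquation = str(eval(myEquation))
                        except Exception:
                            myEquation = ''
                    else:
                        myEquation += button.value
                    delayCounter = 1
    if delayCounter != 0:  # debounce so one pinch types one character
        delayCounter += 1
        if delayCounter > 10:
            delayCounter = 0
    cv2.putText(img, myEquation, (810, 130), cv2.FONT_HERSHEY_PLAIN, 3, (50, 50, 50), 3)
    cv2.imshow("Image", img)
    cv2.waitKey(1)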
Example no. 9
from cvzone.HandTrackingModule import HandDetector
import cv2
import socket

cap = cv2.VideoCapture(0)
cap.set(3, 1280)
cap.set(4, 720)
success, img = cap.read()  # read one frame up front to get the frame dimensions
h, w, _ = img.shape
detector = HandDetector(detectionCon=0.8, maxHands=2)

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
serverAddressPort = ("127.0.0.1", 5052)

while True:
    # Get image frame
    success, img = cap.read()
    # Find the hand and its landmarks
    hands, img = detector.findHands(img)  # with draw
    # hands = detector.findHands(img, draw=False)  # without draw
    data = []

    if hands:
        # Hand 1
        hand = hands[0]
        lmList = hand["lmList"]  # List of 21 Landmark points
        for lm in lmList:
            data.extend([lm[0], h - lm[1], lm[2]])  # flip y: image coordinates grow downward

        sock.sendto(str.encode(str(data)), serverAddressPort)
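    # The usual display ending, presumably cut off in this excerpt:
    cv2.imshow("Image", img)
    cv2.waitKey(1)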
Example no. 10
"""
Eli modül yardımı ile algılma
cvzone modülü 1.5 sürümü ve üstünde findPosition kaldırıldı
"""
import cv2
from cvzone.HandTrackingModule import HandDetector

cap = cv2.VideoCapture(0)  # camera ID number
detector = HandDetector(detectionCon=0.5,
                        maxHands=2)  # max number of hands and detection confidence
I = []  # holds the finger landmark positions

while True:
    _, img = cap.read()  # read from the camera
    img = cv2.flip(img, 1)  # mirror the image
    img = cv2.resize(img, (1280, 640))  # set the display size
    img = detector.findHands(img)  # detect the hands
    I, box = detector.findPosition(img)  # detect the fingers (21 landmark points)

    if I:
        # get the x and y positions: the first index is the landmark point,
        # the second selects x or y
        f = detector.fingersUp()
        x1 = I[4][0]
        y1 = I[4][1]
        x2 = I[8][0]
        y2 = I[8][1]

        # circle the given positions and draw a line between them
        cv2.circle(img, (x1, y1), 7, (0, 255, 255), 2)
        cv2.circle(img, (x2, y2), 7, (0, 255, 255), 2)
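        # A hedged continuation: the comment above also promises a line between
        # the two fingertips, so something like this presumably follows.
        cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), 3)

    cv2.imshow("Image", img)
    cv2.waitKey(1)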
Example no. 11
from cvzone.HandTrackingModule import HandDetector
import cv2

cap = cv2.VideoCapture(0)
cap.set(3, 1280)
cap.set(4, 720)
detector = HandDetector(detectionCon=0.5, maxHands=1)

while True:
    # Get image frame
    success, img = cap.read()

    # Find the hand and its landmarks
    img = detector.findHands(img)
    lmList, bboxInfo = detector.findPosition(img)
    if lmList:
        bbox = bboxInfo['bbox']
        # Find how many fingers are up
        fingers = detector.fingersUp()
        totalFingers = fingers.count(1)
        cv2.putText(img, f'Fingers:{totalFingers}', (bbox[0] + 200, bbox[1] - 30),
                    cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)

        # Find Distance Between Two Fingers
        distance, img, info = detector.findDistance(8, 12, img)
        cv2.putText(img, f'Dist:{int(distance)}', (bbox[0] + 400, bbox[1] - 30),
                    cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)

        # Find Hand Type
        myHandType = detector.handType()
        cv2.putText(img, f'Hand:{myHandType}', (bbox[0], bbox[1] - 30),
                    cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
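    # Likely ending, filled from the pattern in the other examples:
    cv2.imshow("Image", img)
    cv2.waitKey(1)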
Example no. 12
import cvzone
from cvzone.HandTrackingModule import HandDetector
import cv2
import pyautogui
cap = cv2.VideoCapture(0)
cap.set(3, 1500)
cap.set(4, 900)
detector = HandDetector(detectionCon=0.6, maxHands=1, minTrackCon=0.4)

while True:
    success, img = cap.read()
    img = cv2.flip(img, 1)
    hands, img = detector.findHands(img)
    if hands:
        hand = hands[0]
        lmList1 = hand["lmList"]
        #print(lmList1)
        x = lmList1[8][0]
        y = lmList1[8][1]
        pyautogui.moveTo(x, y)
        # lmList points are [x, y, z] in cvzone 1.5+; findDistance expects (x, y)
        length_single_click, info, img = detector.findDistance(
            lmList1[8][0:2], lmList1[12][0:2], img)
        length_double_click, info, img = detector.findDistance(
            lmList1[12][0:2], lmList1[16][0:2], img)
        if length_single_click < 40:
            pyautogui.click(button='left')
        if length_double_click + length_single_click < 90:
            pyautogui.click(clicks=2, interval=0, button=pyautogui.PRIMARY)

    cv2.imshow("Image", img)
    cv2.waitKey(1)
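# A possible refinement (an assumption, not part of the original): the raw
# camera coordinates above only span the webcam frame, so mapping them onto
# the screen resolution would give full-screen mouse control, e.g.:
#   wScr, hScr = pyautogui.size()
#   x = np.interp(lmList1[8][0], (0, 1500), (0, wScr))  # needs numpy
#   y = np.interp(lmList1[8][1], (0, 900), (0, hScr))
#   pyautogui.moveTo(x, y)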
Example no. 13
import math
import random
import cvzone
import cv2
import numpy as np
from cvzone.HandTrackingModule import HandDetector

cap = cv2.VideoCapture(2)
cap.set(3, 1280)
cap.set(4, 720)

detector = HandDetector(detectionCon=0.8, maxHands=1)


class SnakeGameClass:
    def __init__(self, pathFood):
        self.points = []  # all points of the snake
        self.lengths = []  # distance between each point
        self.currentLength = 0  # total length of the snake
        self.allowedLength = 150  # total allowed Length
        self.previousHead = 0, 0  # previous head point

        self.imgFood = cv2.imread(pathFood, cv2.IMREAD_UNCHANGED)
        self.hFood, self.wFood, _ = self.imgFood.shape
        self.foodPoint = 0, 0
        self.randomFoodLocation()

        self.score = 0
        self.gameOver = False

    def randomFoodLocation(self):
        # gap filled from context: place the food at a random on-screen point
        self.foodPoint = random.randint(100, 1000), random.randint(100, 600)
Example no. 14
# pip install cvzone
# pip install opencv-contrib-python

from cvzone.HandTrackingModule import HandDetector
import cv2
cap = cv2.VideoCapture(1)
detector = HandDetector(detectionCon=0.5, maxHands=2)
while True:
    success, img = cap.read()
    img = cv2.flip(img, 1)
    img = detector.findHands(img)  # old cvzone (<1.5) API: findHands returns only the image
    cv2.imshow("Image", img)
    cv2.waitKey(1)
Example no. 15
import cv2
import csv
from cvzone.HandTrackingModule import HandDetector
import cvzone
import time

widthFrame = 1280
heightFrame = 720
cap = cv2.VideoCapture(0)
cap.set(3, widthFrame)
cap.set(4, heightFrame)
detector = HandDetector(detectionCon=0.8)


class MCQ():
    def __init__(self, data):
        self.question = data[0]
        self.choice1 = data[1]
        self.choice2 = data[2]
        self.choice3 = data[3]
        self.choice4 = data[4]
        self.answer = int(data[5])

        self.userAnswer = None

    def update(self, cursor, bboxs):

        for x, bbox in enumerate(bboxs):

            x1, y1, x2, y2 = bbox
            if x1 < cursor[0] < x2 and y1 < cursor[1] < y2:
                self.userAnswer = x + 1  # gap filled from context: record the selected choice (1-4)
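# A hedged sketch of how the csv import above is presumably used: load the
# questions into MCQ objects (the file name "Mcqs.csv" is an assumption).
pathCSV = "Mcqs.csv"
with open(pathCSV, newline='\n') as f:
    dataAll = list(csv.reader(f))[1:]  # skip the header row
mcqList = [MCQ(q) for q in dataAll]
qNo = 0
qTotal = len(mcqList)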
Example no. 16
import cvzone
from cvzone.HandTrackingModule import HandDetector
import cv2

cap = cv2.VideoCapture(0)
detector = HandDetector()

while True:
    # Get image frame
    success, img = cap.read()

    # Find the hand and its landmarks
    img = detector.findHands(img, draw=False)
    lmList, bbox = detector.findPosition(img, draw=False)
    if bbox:
        # Draw  Corner Rectangle
        cvzone.cornerRect(img, bbox)

    # Display
    cv2.imshow("Image", img)
    cv2.waitKey(1)
Example no. 17
import asyncio

import cv2
from cvzone.HandTrackingModule import HandDetector
from time import sleep
import numpy as np
import cvzone
import pygame

pygame.mixer.init()

cap = cv2.VideoCapture(0)
cap.set(3, 1280)
cap.set(4, 720)

detector = HandDetector(detectionCon=0.8)
keys = [["C", "D", "E", "F", "G", "A", "B"]]


def drawAll(img, buttonList):
    for button in buttonList:
        x, y = button.pos
        w, h = button.size
        cvzone.cornerRect(
            img,
            (button.pos[0], button.pos[1], button.size[0], button.size[1]),
            20,
            rt=0)
        cv2.rectangle(img, button.pos, (x + w, y + h), (255, 0, 255),
                      cv2.FILLED)
        cv2.putText(img, button.text, (x + 20, y + 65), cv2.FONT_HERSHEY_PLAIN,
                    4, (255, 255, 255), 4)  # completed from the identical drawAll in Example no. 18
    return img
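# A hedged sketch of how the keys could trigger sounds with the pygame.mixer
# initialised above (the .wav file names are assumptions, not from the original):
sounds = {k: pygame.mixer.Sound(f"{k}.wav") for k in keys[0]}
# ...then, inside the main loop, after detecting a fingertip press on a key:
# sounds[button.text].play()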
Example no. 18
import cv2
from cvzone.HandTrackingModule import HandDetector
from time import sleep
import numpy as np
import cvzone
from pynput.keyboard import Controller

cap = cv2.VideoCapture(0)
cap.set(3, 1280)
cap.set(4, 720)

detector = HandDetector(detectionCon=0.8)
keys = [["Q", "W", "E", "R", "T", "Y", "U", "I", "O", "P"],
        ["A", "S", "D", "F", "G", "H", "J", "K", "L", ";"],
        ["Z", "X", "C", "V", "B", "N", "M", ",", ".", "/"]]
finalText = ""

keyboard = Controller()


def drawAll(img, buttonList):
    for button in buttonList:
        x, y = button.pos
        w, h = button.size
        cvzone.cornerRect(img, (button.pos[0], button.pos[1], button.size[0], button.size[1]),
                          20, rt=0)
        cv2.rectangle(img, button.pos, (x + w, y + h), (255, 0, 255), cv2.FILLED)
        cv2.putText(img, button.text, (x + 20, y + 65),
                    cv2.FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 4)
    return img
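# A hedged sketch of the continuation: a minimal stand-in for the Button class
# missing from this excerpt, the key grid, and a pinch-to-type main loop.
class Button:
    def __init__(self, pos, text, size=[85, 85]):
        self.pos = pos
        self.size = size
        self.text = text


buttonList = []
for i in range(len(keys)):
    for j, key in enumerate(keys[i]):
        buttonList.append(Button([100 * j + 50, 100 * i + 50], key))

while True:
    success, img = cap.read()
    hands, img = detector.findHands(img)  # cvzone 1.5+ returns (hands, img)
    img = drawAll(img, buttonList)
    if hands:
        lmList = hands[0]['lmList']
        for button in buttonList:
            x, y = button.pos
            w, h = button.size
            # index fingertip hovering over a key
            if x < lmList[8][0] < x + w and y < lmList[8][1] < y + h:
                length, _ = detector.findDistance(lmList[8][0:2], lmList[12][0:2])
                if length < 40:  # pinch = key press
                    keyboard.press(button.text)
                    finalText += button.text
                    sleep(0.3)
    cv2.putText(img, finalText, (50, 650), cv2.FONT_HERSHEY_PLAIN, 5, (255, 0, 255), 5)
    cv2.imshow("Image", img)
    cv2.waitKey(1)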
Example no. 19
# Missing imports, implied by the calls below (cv2_imshow is Colab's display patch):
import math
import cv2
from google.colab.patches import cv2_imshow
from cvzone.HandTrackingModule import HandDetector

dH, dW = 480, 380


def resizeimg(image):
    h, w = image.shape[:2]
    if h < w:
        img = cv2.resize(image,
                         (dW, math.floor(h / (w / dW))))  # math.floor drops any decimal part (rounds down)
    else:
        img = cv2.resize(image, (math.floor(w / (h / dH)), dH))
    return img


img = resizeimg(cv2.imread('/content/drive/MyDrive/lcc/fcu/hand1.jpg'))
detector = HandDetector(mode=False, maxHands=2)  # mode=True re-detects on every image instead of tracking
img1 = detector.findHands(img)
cv2_imshow(img)

myHandType = detector.handType()  # note the call: handType is a method
print(myHandType)

lmList, bboxInfo = detector.findPosition(img)
print(lmList)
print(bboxInfo)

from cvzone.PoseModule import PoseDetector

img = resizeimg(cv2.imread('/content/drive/MyDrive/lcc/fcu/pose1.jpg'))
pose = PoseDetector()
img = pose.findPose(img)
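# A likely continuation, mirroring the hand section above (this cvzone version's
# PoseDetector also exposes findPosition):
lmList, bboxInfo = pose.findPosition(img)
cv2_imshow(img)
print(lmList)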
Example no. 20
import cv2
from cvzone.HandTrackingModule import HandDetector

cap = cv2.VideoCapture(1)
cap.set(3, 1280)
cap.set(4, 720)

detector = HandDetector(detectionCon=0.7)
startDist = None
scale = 0
cx, cy = 500, 500
while True:
    success, img = cap.read()
    hands, img = detector.findHands(img)
    img1 = cv2.imread("cvarduino.jpg")

    if len(hands) == 2:
        # print(detector.fingersUp(hands[0]), detector.fingersUp(hands[1]))
        if detector.fingersUp(hands[0]) == [1, 1, 0, 0, 0] and \
                detector.fingersUp(hands[1]) == [1, 1, 0, 0, 0]:
            # print("Zoom Gesture")
            lmList1 = hands[0]["lmList"]
            lmList2 = hands[1]["lmList"]
            # point 8 is the tip of the index finger
            if startDist is None:
                # length, info, img = detector.findDistance(lmList1[8], lmList2[8], img)
                length, info, img = detector.findDistance(hands[0]["center"], hands[1]["center"], img)

                startDist = length

            # length, info, img = detector.findDistance(lmList1[8], lmList2[8], img)
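            # A hedged sketch of the continuation (the classic cvzone zoom demo):
            length, info, img = detector.findDistance(hands[0]["center"], hands[1]["center"], img)
            scale = int((length - startDist) // 2)
            cx, cy = info[4:]
    else:
        startDist = None

    try:
        h1, w1, _ = img1.shape
        newH, newW = ((h1 + scale) // 2) * 2, ((w1 + scale) // 2) * 2
        img1 = cv2.resize(img1, (newW, newH))
        # overlay the resized image centred on the midpoint of the two hands
        img[cy - newH // 2:cy + newH // 2, cx - newW // 2:cx + newW // 2] = img1
    except Exception:
        pass

    cv2.imshow("Image", img)
    cv2.waitKey(1)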