Esempio n. 1
0
dH, dW = 480, 380  # target display height / width in pixels used by resizeimg()


def resizeimg(image, target_w=dW, target_h=dH):
    """Shrink *image* to fit the display budget, preserving aspect ratio.

    Landscape frames (h < w) are scaled so their width becomes
    ``target_w``; portrait/square frames so their height becomes
    ``target_h``.  Note that ``cv2.resize`` takes its size argument as
    (width, height), not (height, width).

    Args:
        image: image as a numpy array; only ``shape[:2]`` (h, w) is read.
        target_w: desired width for landscape images (defaults to module dW).
        target_h: desired height for portrait images (defaults to module dH).

    Returns:
        The resized image.
    """
    h, w = image.shape[:2]
    if h < w:
        # math.floor drops the fractional part (it rounds toward -inf,
        # which only matters for negatives -- sizes here are positive).
        return cv2.resize(image, (target_w, math.floor(h / (w / target_w))))
    return cv2.resize(image, (math.floor(w / (h / target_h)), target_h))


# Load a sample image, shrink it to the display budget, then run single-frame
# hand detection with cvzone's HandDetector.
img = resizeimg(cv2.imread('/content/drive/MyDrive/lcc/fcu/hand1.jpg'))
detector = HandDetector(mode=False, maxHands=2)  # mode=True enables dynamic tracking
img1 = detector.findHands(img)
cv2_imshow(img)

# Left/right classification of the detected hand(s)
# (presumably set by findHands -- TODO confirm against the installed cvzone version).
myHandType = detector.handType
print(myHandType)

# Landmark list and bounding-box info for the detected hand.
ImList, bboxInfo = detector.findPosition(img)
print(ImList)
print(bboxInfo)

from cvzone.PoseModule import PoseDetector

# Same resize-then-detect flow, but for body-pose estimation.
img = resizeimg(cv2.imread('/content/drive/MyDrive/lcc/fcu/pose1.jpg'))
pose = PoseDetector()
img = pose.findPose(img)  # draws the detected pose landmarks onto img
cv2_imshow(img)
Esempio n. 2
0
vol = 0       # current raw volume value
volBar = 300  # on-screen volume-bar coordinate (presumably pixels -- TODO confirm)

#print(volume.GetVolumeRange())
# volRange is defined outside this chunk (looks like pycaw's
# GetVolumeRange() result -- verify against the full script).
minVol = volRange[0]
maxVol = volRange[1]

prevTime = 0  # previous-frame timestamp (presumably for FPS display -- TODO confirm)
volPer = 0    # volume as a percentage
# Main capture loop: map the thumb-to-index-fingertip distance to a volume
# level (remainder of the loop body is not shown in this chunk).
while True:
    success, img = cap.read()




    hands, img = detector.findHands(img)
    if hands:
        # Landmark list of the first detected hand.
        lmList = hands[0]['lmList']

        # Thumb tip (landmark 4) ...
        x1, y1 = lmList[4][0], lmList[4][1]
        # ... and index-finger tip (landmark 8).
        x2, y2 = lmList[8][0], lmList[8][1]
        cx, cy = (x1+x2)//2 , (y1+y2)//2  # midpoint between the fingertips

        # Mark both fingertips and their midpoint.
        cv2.circle(img,(x1,y1),10,(0,255,0),cv2.FILLED)
        cv2.circle(img, (x2, y2), 10, (0, 255, 0), cv2.FILLED)
        cv2.circle(img, (cx, cy), 10, (0, 255, 0), cv2.FILLED)

        cv2.line(img, (x1,y1),(x2,y2),(0,255,0),3)
        # Fingertip separation drives the volume level.
        length, info = detector.findDistance((x1,y1),(x2,y2))
        #print(length)
Esempio n. 3
0
    dataAll = list(reader)[1:]  # skip the CSV header row (enclosing 'with' starts above this chunk)
#print(len(dataAll))
#Create Object for each mcq
mcqlist = []
for q in dataAll:
    mcqlist.append(MCQ(q))

#print(len(mcqlist))

qNo = 0                # index of the question currently shown
qTotal = len(dataAll)  # total number of questions

# Quiz loop: show one multiple-choice question per iteration
# (the loop body is truncated in this chunk).
while True:
    success, img = cap.read()
    img = cv2.flip(img, 1)  # mirror the frame so on-screen movement matches the user
    hands, img = detector.findHands(img, flipType=False)  # frame already flipped above

    if qNo < qTotal:

        mcq = mcqlist[qNo]  # current question object

        # Render the question text and its answer choices as text rectangles.
        img, bbox = cvzone.putTextRect(img,
                                       mcq.question, [60, 100],
                                       1,
                                       1,
                                       offset=20,
                                       border=3)
        img, bbox1 = cvzone.putTextRect(img,
                                        mcq.choice1, [60, 200],
                                        1,
                                        1,
Esempio n. 4
0
annotationStart = False  # whether a freehand annotation stroke is in progress
hs, ws = int(120 * 1), int(213 * 1)  # width and height of small image (webcam overlay)

# Get list of presentation images.
# NOTE(review): key=len sorts by filename length only, so "2.png" < "10.png";
# files with equal-length names keep os.listdir order -- confirm naming scheme.
pathImages = sorted(os.listdir(folderPath), key=len)
print(pathImages)

# Presentation loop: overlay hand-gesture control onto the current slide
# (the loop body is truncated in this chunk).
while True:
    # Get image frame
    success, img = cap.read()
    img = cv2.flip(img, 1)
    pathFullImage = os.path.join(folderPath, pathImages[imgNumber])
    imgCurrent = cv2.imread(pathFullImage)

    # Find the hand and its landmarks
    hands, img = detectorHand.findHands(img)  # with draw
    # Draw Gesture Threshold line
    cv2.line(img, (0, gestureThreshold), (width, gestureThreshold),
             (0, 255, 0), 10)

    if hands and buttonPressed is False:  # If hand is detected

        hand = hands[0]
        cx, cy = hand["center"]
        lmList = hand["lmList"]  # List of 21 Landmark points
        fingers = detectorHand.fingersUp(hand)  # List of which fingers are up

        # Constrain values for easier drawing:
        # map the index fingertip from a comfortable sub-region of the
        # camera frame onto the full slide area.
        xVal = int(np.interp(lmList[8][0], [width // 2, width], [0, width]))
        yVal = int(np.interp(lmList[8][1], [150, height - 150], [0, height]))
        indexFinger = xVal, yVal
Esempio n. 5
0
import cvzone

cap = cv2.VideoCapture(0)  # default webcam

detector = HandDetector(detectionCon=0.8, maxHands=1)
# Calibration samples: x = raw pixel distance between two hand landmarks,
# y = the corresponding physical distance (presumably cm -- TODO confirm units).
x = [300, 245, 200, 170, 100, 257, 80, 112, 145, 130, 103, 93, 80, 75, 70, 60]
y = [20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95]

# Fit a 2nd-degree polynomial mapping pixel distance -> physical distance.
coff = np.polyfit(x, y, 2)

# Estimate hand-to-camera distance from the landmark 5 -> 17 span
# (the loop body is truncated in this chunk).
while True:
    success, img = cap.read()
    # print(img.shape)

    hands = detector.findHands(img, draw=False)

    if hands:
        lmList = hands[0]['lmList']

        # Index-finger MCP (5) and pinky MCP (17): their pixel span shrinks
        # roughly quadratically as the hand moves away from the camera.
        x1, y1 = lmList[5]
        x2, y2 = lmList[17]
        x, y, w, h = hands[0]['bbox']
        distance = np.sqrt(((y2 - y1) ** 2 + (x2 - x1) ** 2))
        # Evaluate the fitted calibration polynomial.
        A, B, C = coff
        distanceCm = A * (distance ** 2) + B * distance + C
        # print(distanceCm)
        cvzone.putTextRect(img, f'{int(distanceCm)} cm', (x + 5, y - 50))
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 255), 3)
        if distanceCm <= 70:
            cv2.putText(img, "Hand Detected", (50, 80), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 5)
0
Eli modül yardımı ile algılama
cvzone modülü 1.5 sürümü ve üstünde findPosition kaldırıldı
"""
import cv2
from cvzone.HandTrackingModule import HandDetector

cap = cv2.VideoCapture(0)  # camera ID number
detector = HandDetector(detectionCon=0.5,
                        maxHands=2)  # max number of hands and detection confidence
I = []  # holds the finger landmark positions

# Tracking loop: detect the hand, read two fingertip positions and draw
# markers between them (the loop may continue past this chunk).
while True:
    _, img = cap.read()  # read a camera frame
    img = cv2.flip(img, 1)  # mirror the image
    img = cv2.resize(img, (1280, 640))  # set the display size
    img = detector.findHands(img)  # detect hands
    I, box = detector.findPosition(
        img)  # detect fingers (20-point detection, per the original note)

    if I:
        # Read x/y positions: first index is the landmark, second is x or y.
        f = detector.fingersUp()
        x1 = I[4][0]  # thumb tip
        y1 = I[4][1]
        x2 = I[8][0]  # index fingertip
        y2 = I[8][1]

        # Circle the chosen landmarks and draw a line between them.
        cv2.circle(img, (x1, y1), 7, (0, 255, 255), 2)
        cv2.circle(img, (x2, y2), 7, (0, 255, 255), 2)
        cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), 2)