Example #1
import os
import subprocess

import cv2

from yolo import YOLO


def init_yolo():
    global yolo
    config = "models/cross-hands.cfg"
    weights = "models/cross-hands.weights"
    # download the model files if they are not available locally
    if not (os.path.isfile(config) and os.path.isfile(weights)):
        subprocess.call("./models/download-some-models.sh", shell=True)
    yolo = YOLO(config, weights, ["hand"])
    yolo.size = 256
    yolo.confidence = 0.3

while True:
    timecounter = 0
    starter = 0
    ix = 0
    iy = 0
    ixx = 0
    iyy = 0
    t = 0
    arrows = cv2.imread('images/sofa.jpeg')
    sofa = cv2.imread('images/sofa.jpeg')

    # `args` comes from an argparse parser defined elsewhere in the original script
    yolo.size = int(args.size)
    yolo.confidence = float(args.confidence)

    print("starting webcam...")
    cv2.namedWindow("preview")
    # cv2.setWindowProperty ("preview", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

    try:
        vc = cv2.VideoCapture(0)
        if vc.isOpened():  # try to get the first frame
            rval, frame = vc.read()

        else:
            rval = False

        while rval:
            while frame.shape[0] < arrows.shape[0] or frame.shape[1] < arrows.shape[1]:
                break  # the remainder of this example is not shown in the source
Example #3
import cv2
import pyautogui

from yolo import YOLO


def scrollV(cap, network, device, size, confidence):
    if network == "normal":
        print("loading yolo...")
        yolo = YOLO("models/cross-hands.cfg", "models/cross-hands.weights",
                    ["hand"])
    elif network == "prn":
        print("loading yolo-tiny-prn...")
        yolo = YOLO("models/cross-hands-tiny-prn.cfg",
                    "models/cross-hands-tiny-prn.weights", ["hand"])
    else:
        print("loading yolo-tiny...")
        yolo = YOLO("models/cross-hands-tiny.cfg",
                    "models/cross-hands-tiny.weights", ["hand"])

    yolo.size = size
    yolo.confidence = confidence

    cnt = 0   # frames in which exactly one hand has been detected so far
    curr = 0  # most recent sampled y position of the hand centre
    prev = 0  # previously sampled y position of the hand centre
    exit = 0  # consecutive frames without exactly one detected hand

    rval, frame = cap.read()

    while True:
        width, height, inference_time, results = yolo.inference(frame)

        if len(results) == 1:
            exit = 0
            cnt += 1

            id, name, confidence, x, y, w, h = results[0]
            cx = x + (w // 2)
            cy = y + (h // 2)

            if cnt <= 5:
                curr = cy

            color = (0, 255, 255)
            cv2.circle(frame, (cx, cy), 10, color, -1)
            #print("Cy: ", cy)

            if cnt % 10 == 0 and cnt > 5:
                prev = curr
                curr = cy
                #print("Prev: ",prev)
                #print("Curr: ", curr)
                clicks = prev - curr
                #print(clicks)
                #if clicks>30 and clicks<170:
                clicks = clicks // 2

                if abs(clicks) > 10:
                    pyautogui.scroll(clicks)

        else:
            exit += 1
            if exit > 50:
                print(exit)
                break

        cv2.imshow("preview", frame)
        rval, frame = cap.read()

        key = cv2.waitKey(1)
        if key == 27:  # exit on ESC
            break

    cv2.destroyWindow("preview")
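
For reference, a minimal sketch of how scrollV might be driven, assuming the function above is in scope; the webcam index and the size/confidence values below are assumptions, not part of the original example.

import cv2

# hypothetical driver for scrollV(); device 0 and the parameter values are assumptions
cap = cv2.VideoCapture(0)
try:
    scrollV(cap, network="normal", device=0, size=416, confidence=0.2)
finally:
    cap.release()
    cv2.destroyAllWindows()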
Example #4
import argparse
import cv2

from yolo import YOLO

yolo = YOLO("models/cross-hands.cfg", "models/cross-hands.weights", ["hand"])
yolo.size = 416
yolo.confidence = 0.2

print("Iniciando webcam...")
cv2.namedWindow("Camera")
vc = cv2.VideoCapture(0)

if vc.isOpened():
    rval, frame = vc.read()
else:
    rval = False

while rval:
    width, height, inference_time, results = yolo.inference(frame)
    for detection in results:
        id, name, confidence, x, y, w, h = detection
        cx = x + (w / 2)
        cy = y + (h / 2)

        color = (0, 255, 255)
        cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
        text = "%s (%s)" % (name, round(confidence, 2))
        cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                    0.5, color, 2)

    # show the annotated frame, grab the next one, and exit on ESC
    cv2.imshow("Camera", frame)
    rval, frame = vc.read()

    key = cv2.waitKey(20)
    if key == 27:  # exit on ESC
        break

cv2.destroyWindow("Camera")
Example #5
if network == "normal":
    print("loading yolo...")
    yolo = YOLO("models/cross-hands.cfg", "models/cross-hands.weights", ["hand"])
elif network == "prn":
    print("loading yolo-tiny-prn...")
    yolo = YOLO("models/cross-hands-tiny-prn.cfg", "models/cross-hands-tiny-prn.weights", ["hand"])
elif network == "v4-tiny":
    print("loading yolov4-tiny-prn...")
    yolo = YOLO("models/cross-hands-yolov4-tiny.cfg", "models/cross-hands-yolov4-tiny.weights", ["hand"])
else:
    print("loading yolo-tiny...")
    yolo = YOLO("models/cross-hands-tiny.cfg", "models/cross-hands-tiny.weights", ["hand"])

yolo.size = int(size)
yolo.confidence = float(confidence)


@app.route("/hand_detection")
def hand_detection():
    image_path = request.args.get("image_path")
    print(image_path)
    mat = cv2.imread(image_path)

    width, height, inference_time, results = yolo.inference(mat)

    print("%s seconds: %s classes found!" % (round(inference_time, 2), len(results)))

    if len(results) < 1:
        return json.dumps({"nums_of_hand": 0})
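
For illustration only, a hedged sketch of how the route above might be exercised with Flask's built-in test client; the image path is made up, and app is the Flask application assumed at the top of this example. Note that the fragment shown only returns a body when no hands are detected.

# hypothetical check of the /hand_detection route; "images/test.jpg" is a made-up path
with app.test_client() as client:
    resp = client.get("/hand_detection",
                      query_string={"image_path": "images/test.jpg"})
    print(resp.get_data(as_text=True))  # e.g. {"nums_of_hand": 0}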
Example #6
# Khatna Bold
# Adapted from: https://github.com/cansik/yolo-hand-detection/tree/master/
import argparse
import cv2
import numpy as np
from yolo import YOLO
from PIL import Image
import utils

yolo = YOLO("models/cross-hands-tiny-prn.cfg",
            "models/cross-hands-tiny-prn.weights", ["hand"])
yolo.size = 512
yolo.confidence = 0.2

# main Loop
print("starting webcam...")
vc = cv2.VideoCapture(0)
detecting = True

code = ''
text = '---'
correct_count = 0
while True:
    _, frame = vc.read()
    frame = np.flip(frame, axis=1)
    H, W, _ = frame.shape
    ref = int(min(H, W) / 3)

    window = None
    if detecting:
        width, height, inference_time, results = yolo.inference(frame)
Example #7
import argparse

import cv2

from yolo import YOLO


def reading_video(filename):
    ap = argparse.ArgumentParser()
    ap.add_argument('-n',
                    '--network',
                    default="normal",
                    help='Network Type: normal / tiny / prn / v4-tiny')
    ap.add_argument('-d', '--device', default=0, help='Device to use')
    ap.add_argument('-v',
                    '--videos',
                    default="videos",
                    help='Path to videos or video file')
    ap.add_argument('-s', '--size', default=416, help='Size for yolo')
    ap.add_argument('-c',
                    '--confidence',
                    default=0.2,
                    help='Confidence for yolo')
    ap.add_argument("-f",
                    "--fff",
                    help="a dummy argument to fool ipython",
                    default="1")
    args = ap.parse_args()
    if args.network == "normal":
        print("loading yolo...")
        yolo = YOLO("models/cross-hands.cfg", "models/cross-hands.weights",
                    ["hand"])
    elif args.network == "prn":
        print("loading yolo-tiny-prn...")
        yolo = YOLO("models/cross-hands-tiny-prn.cfg",
                    "models/cross-hands-tiny-prn.weights", ["hand"])
    elif args.network == "v4-tiny":
        print("loading yolov4-tiny-prn...")
        yolo = YOLO("models/cross-hands-yolov4-tiny.cfg",
                    "models/cross-hands-yolov4-tiny.weights", ["hand"])
    else:
        print("loading yolo-tiny...")
        yolo = YOLO("models/cross-hands-tiny.cfg",
                    "models/cross-hands-tiny.weights", ["hand"])

    yolo.size = int(args.size)
    yolo.confidence = float(args.confidence)

    # opening a window called preview
    cv2.namedWindow("preview")
    # to open and capture frames from video
    vc = cv2.VideoCapture(filename)
    if vc.isOpened():  # try to get the first frame
        rval, frame = vc.read()
        # to get the first frame

    else:
        # some error causes the video to not open
        rval = False
    while vc.isOpened() and rval:
        # applying YOLO on the current frame
        width, height, inference_time, results = yolo.inference(frame)
        for detection in results:
            id, name, confidence, x, y, w, h = detection
            cx = x + (w / 2)
            cy = y + (h / 2)
            # draw a bounding box rectangle and label on the image
            color = (0, 255, 255)
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            text = "%s (%s)" % (name, round(confidence, 2))
            # put a label on the detected hand with the confidence ratio
            cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        color, 2)

        # show the annotated frame and read the next one
        cv2.imshow("preview", frame)
        rval, frame = vc.read()

        # pressing ESC closes the window
        key = cv2.waitKey(20)
        if key == 27:  # exit on ESC
            break

    cv2.destroyWindow("preview")
    vc.release()
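
A possible entry point for the function above; the video file name is hypothetical.

if __name__ == "__main__":
    # hypothetical path; any file readable by cv2.VideoCapture works
    reading_video("videos/sample.mp4")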