import argparse
import random

import cv2
import pyautogui
from PIL import Image
from keras.models import load_model  # Keras/TF < 2.6, where predict_classes still exists

from yolo import YOLO  # YOLO wrapper shipped with the yolo-hand-detection repo

if vc.isOpened():  # try to get the first frame
    rval, frame = vc.read()
else:
    rval = False

while rval:
    # shrink the overlay images until they fit inside the frame
    while frame.shape[0] < arrows.shape[0] or frame.shape[1] < arrows.shape[1]:
        scale_percent = 30  # percent of original size
        width = int(arrows.shape[1] * scale_percent / 100)
        height = int(arrows.shape[0] * scale_percent / 100)
        dim = (width, height)
        arrows = cv2.resize(arrows, dim, interpolation=cv2.INTER_AREA)
        sofa = cv2.resize(sofa, dim, interpolation=cv2.INTER_AREA)

    width, height, inference_time, results = yolo.inference(frame)
    for detection in results:
        id, name, confidence, x, y, w, h = detection
        cx = x + (w / 2)
        cy = y + (h / 2)

        # draw a bounding box rectangle on the detected hand
        color = (0, 255, 255)
        cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)

        # clamp the overlay position so it stays inside the frame
        x_offset = x
        y_offset = y
        if x_offset < 0:
            x_offset = 0
        elif x_offset + arrows.shape[1] > frame.shape[1]:
            x_offset = frame.shape[1] - arrows.shape[1]
        if y_offset < 0:
            y_offset = 0
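# A minimal sketch of the overlay step this fragment builds toward: clamp the
# top-left corner so the overlay stays inside the frame, then paste it in with
# NumPy slicing. The helper name `paste_overlay` and its signature are
# illustrative assumptions, not part of the original code; it assumes the
# overlay already fits inside the frame, as the resize loop above guarantees.
def paste_overlay(frame, overlay, x, y):
    x = max(0, min(x, frame.shape[1] - overlay.shape[1]))
    y = max(0, min(y, frame.shape[0] - overlay.shape[0]))
    frame[y:y + overlay.shape[0], x:x + overlay.shape[1]] = overlay
    return frame

# usage sketch: frame = paste_overlay(frame, arrows, x_offset, y_offset)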
def press():
    # classifier trained on 28x28 grayscale hand crops; one class per letter A-Y
    new_model = load_model('Letter_Model_V3_999_1')

    alph = 'ABCDEFGHIJKLMNOPQRSTUVWXY'
    alph_dict = {}
    for i, n in enumerate(alph):
        alph_dict.update({i: n})

    camera = cv2.VideoCapture(0)
    img_counter = 0

    List_alph = [i for i in alph]
    letter = random.choice(List_alph)

    while True:
        ret, frame = camera.read()
        if not ret:
            print("failed to grab frame")
            break

        color = (0, 255, 255)
        cv2.putText(frame, 'Please try to sign the letter: ' + letter, (20, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
        cv2.imshow("Gesture Detector", frame)

        k = cv2.waitKey(1)
        if k % 256 == 27:  # ESC pressed
            print("Escape hit, closing...")
            break
        elif k % 256 == 32:  # SPACE pressed: grab the current frame and classify it
            img_name = "opencv_frame_{}.png".format(img_counter)
            print("{} written!".format(img_name))
            x = frame
            img_counter += 1

            yolo = YOLO(
                "/Users/Denny/Desktop/Hack_The_Northeast/yolo-hand-detection/models/cross-hands.cfg",
                "/Users/Denny/Desktop/Hack_The_Northeast/yolo-hand-detection/models/cross-hands.weights",
                ["hand"])
            width, height, inference_time, results = yolo.inference(x)
            frame = x

            for detection in results:
                id, name, confidence, x, y, w, h = detection
                cx = x + (w / 2)
                cy = y + (h / 2)

                # crop the detected hand with a 50 px margin
                # (assumes the hand is not right at the frame border)
                crop_img = frame[y - 50:y + h + 50, x - 50:x + w + 50]
                im = Image.fromarray(crop_img)
                im.save("your_file.png")

                # reload the crop in grayscale and resize to the model's 28x28 input
                im = cv2.imread('your_file.png', 0)
                new_img = cv2.resize(im, (28, 28))
                cv2.imshow("preview", new_img)
                cv2.waitKey(0)

                # predict_classes is only available on Sequential models in Keras/TF < 2.6
                the_class = new_model.predict_classes(new_img.reshape(1, 28, 28, 1))
                Answer = alph_dict[the_class[0]]

                # draw a bounding box and feedback label on the image
                if Answer == letter:
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                    text = 'Correct Answer for: ' + Answer
                    cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                                0.5, (0, 255, 0), 2)
                else:
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
                    text = 'Try again!'
                    cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                                0.5, (0, 0, 255), 2)

                cv2.imshow("preview", frame)

    camera.release()
    cv2.destroyAllWindows()
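# press() reads like the callback of a GUI button. Below is a minimal sketch of
# how it could be wired to a Tkinter button; the window title, button label,
# and helper name `launch_trainer_gui` are assumptions for illustration only,
# not part of the original code.
import tkinter as tk

def launch_trainer_gui():
    root = tk.Tk()
    root.title("Sign Language Trainer")
    tk.Button(root, text="Start gesture check", command=press).pack(padx=20, pady=20)
    root.mainloop()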
def scrollV(cap, network, device, size, confidence):
    if network == "normal":
        print("loading yolo...")
        yolo = YOLO("models/cross-hands.cfg", "models/cross-hands.weights", ["hand"])
    elif network == "prn":
        print("loading yolo-tiny-prn...")
        yolo = YOLO("models/cross-hands-tiny-prn.cfg",
                    "models/cross-hands-tiny-prn.weights", ["hand"])
    else:
        print("loading yolo-tiny...")
        yolo = YOLO("models/cross-hands-tiny.cfg",
                    "models/cross-hands-tiny.weights", ["hand"])

    yolo.size = size
    yolo.confidence = confidence

    cnt = 0   # frames in which exactly one hand was detected
    curr = 0  # current vertical hand position
    prev = 0  # hand position at the previous sample
    exit = 0  # consecutive frames without a single-hand detection

    rval, frame = cap.read()
    while True:
        width, height, inference_time, results = yolo.inference(frame)

        if len(results) == 1:
            exit = 0
            cnt += 1
            id, name, confidence, x, y, w, h = results[0]
            cx = x + (w // 2)
            cy = y + (h // 2)

            if cnt <= 5:
                curr = cy

            color = (0, 255, 255)
            cv2.circle(frame, (cx, cy), 10, color, -1)

            # every 10 frames, scroll by half the vertical movement of the hand
            if cnt % 10 == 0 and cnt > 5:
                prev = curr
                curr = cy
                clicks = (prev - curr) // 2
                if abs(clicks) > 10:
                    pyautogui.scroll(clicks)
        else:
            # stop after 50 consecutive frames with no (or multiple) hands
            exit += 1
            if exit > 50:
                print(exit)
                break

        cv2.imshow("preview", frame)
        rval, frame = cap.read()

        key = cv2.waitKey(1)
        if key == 27:  # exit on ESC
            break

    cv2.destroyWindow("preview")
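# A usage sketch for scrollV(): open the default webcam and drive page
# scrolling with vertical hand movement. The parameter values and the helper
# name `run_scroll_demo` are illustrative assumptions, not prescribed by the
# original code.
def run_scroll_demo():
    cap = cv2.VideoCapture(0)
    try:
        scrollV(cap, network="tiny", device=0, size=416, confidence=0.2)
    finally:
        cap.release()
        cv2.destroyAllWindows()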
def reading_video(filename):
    ap = argparse.ArgumentParser()
    ap.add_argument('-n', '--network', default="normal",
                    help='Network Type: normal / tiny / prn / v4-tiny')
    ap.add_argument('-d', '--device', default=0, help='Device to use')
    ap.add_argument('-v', '--videos', default="videos",
                    help='Path to videos or video file')
    ap.add_argument('-s', '--size', default=416, help='Size for yolo')
    ap.add_argument('-c', '--confidence', default=0.2, help='Confidence for yolo')
    ap.add_argument("-f", "--fff", default="1",
                    help="a dummy argument to fool ipython")
    args = ap.parse_args()

    if args.network == "normal":
        print("loading yolo...")
        yolo = YOLO("models/cross-hands.cfg", "models/cross-hands.weights", ["hand"])
    elif args.network == "prn":
        print("loading yolo-tiny-prn...")
        yolo = YOLO("models/cross-hands-tiny-prn.cfg",
                    "models/cross-hands-tiny-prn.weights", ["hand"])
    elif args.network == "v4-tiny":
        print("loading yolov4-tiny...")
        yolo = YOLO("models/cross-hands-yolov4-tiny.cfg",
                    "models/cross-hands-yolov4-tiny.weights", ["hand"])
    else:
        print("loading yolo-tiny...")
        yolo = YOLO("models/cross-hands-tiny.cfg",
                    "models/cross-hands-tiny.weights", ["hand"])

    yolo.size = int(args.size)
    yolo.confidence = float(args.confidence)

    # open a window called "preview"
    cv2.namedWindow("preview")

    # open the video file and capture frames from it
    vc = cv2.VideoCapture(filename)
    if vc.isOpened():
        rval, frame = vc.read()  # try to get the first frame
    else:
        rval = False  # the video could not be opened

    while vc.isOpened() and rval:
        # apply YOLO to the current frame
        width, height, inference_time, results = yolo.inference(frame)

        for detection in results:
            id, name, confidence, x, y, w, h = detection
            cx = x + (w / 2)
            cy = y + (h / 2)

            # draw a bounding box rectangle and label on the image
            color = (0, 255, 255)
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)

            # label the detected hand with its confidence
            text = "%s (%s)" % (name, round(confidence, 2))
            cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, color, 2)

        cv2.imshow("preview", frame)
        rval, frame = vc.read()

        key = cv2.waitKey(20)
        if key == 27:  # exit on ESC
            break

    cv2.destroyWindow("preview")
    vc.release()
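# A usage sketch: run hand detection on a recorded clip. The file name below
# is a placeholder. reading_video() also parses command-line flags, so the
# network, size, and confidence can be set when launching the script, e.g.
# `python <script>.py -n tiny -s 416 -c 0.2`.
if __name__ == "__main__":
    reading_video("videos/sample_hands.mp4")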