# Hand-color calibration / background-model loop (script fragment: `camera`,
# `utils`, `calibrated` are defined before this excerpt).
if not grabbed:
    raise ValueError("Camera read failed!")

# Background model kept in float32 so cv2.accumulateWeighted can update it.
bg = utils.image_resize(frame, height=600).astype(np.float32)

while True:
    grabbed, frame = camera.read()
    if not grabbed:
        # FIX: Python 2 `print` statement -> print() (works on 2 and 3).
        print("Camera read failed")
        break
    frame = utils.image_resize(frame, height=600)
    height, width, channels = frame.shape

    if not calibrated:
        # Sample hand color: user covers a fixed rectangle with their hand,
        # then we record the min/max HSV values inside it.
        utils.add_text(frame, "Press space after covering rectangle with hand. Hit SPACE when ready")
        # FIX: integer division -- cv2.rectangle requires int coordinates
        # (under Python 3, `/` yields floats and the call raises).
        x, y, w, h = width // 4, height // 2, 50, 50
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.imshow("Calibration", frame)
        if cv2.waitKey(2) & 0xFF == ord(' '):
            roi = frame[y:y + h, x:x + w, :]
            roi_hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
            # Per-channel HSV bounds of the sampled hand patch.
            min_value = np.amin(roi_hsv, (0, 1))
            max_value = np.amax(roi_hsv, (0, 1))
            cv2.destroyWindow("Calibration")
            calibrated = True
    else:
        # Running-average background update (1% learning rate).
        cv2.accumulateWeighted(frame, bg, 0.01)
        # NOTE(review): XOR-ing the frame with the background looks suspicious;
        # background subtraction is normally cv2.absdiff -- confirm intent.
        frame ^= cv2.convertScaleAbs(bg)
if prev_frame is None or len(p0) <= 3: info = "Detecting..." face = utils.detect_face(face_cascade, frame) if face is not None: prev_frame = curr_frame_gray x,y,w,h = face roi = np.zeros(prev_frame.shape, dtype=np.uint8) roi[y:y+h, x:x+w] = 255 p0 = cv2.goodFeaturesToTrack(prev_frame, mask=roi, **feature_params) else: p1,st,err = cv2.calcOpticalFlowPyrLK(prev_frame, curr_frame_gray, p0, None, **lk_params) # Update points being tracked to new good set p0 = p1[st==1].reshape(-1,1,2) info = "Tracking: %d" % len(p0) for pt in p1: a,b = pt.ravel() cv2.circle(frame, (a,b), 3, (0,255,0), -1) prev_frame utils.add_text(frame, info) cv2.imshow("Output", frame) key = cv2.waitKey(1) & 0xFF if key== ord('q'): break elif key == ord('r'): p0 = [] camera.release() cv2.destroyAllWindows()
# Hand-color calibration loop (script fragment: the guard `if` for this
# raise, and `camera`/`utils`/`calibrated`, are above this excerpt).
raise ValueError("Camera read failed!")

# Background model kept in float32 so cv2.accumulateWeighted can update it.
bg = utils.image_resize(frame, height=600).astype(np.float32)

while True:
    grabbed, frame = camera.read()
    if not grabbed:
        # FIX: Python 2 `print` statement -> print() (works on 2 and 3).
        print("Camera read failed")
        break
    frame = utils.image_resize(frame, height=600)
    height, width, channels = frame.shape

    if not calibrated:
        # Sample hand color: user covers a fixed rectangle with their hand,
        # then we record the min/max HSV values inside it.
        utils.add_text(
            frame,
            "Press space after covering rectangle with hand. Hit SPACE when ready"
        )
        # FIX: integer division -- cv2.rectangle requires int coordinates
        # (under Python 3, `/` yields floats and the call raises).
        x, y, w, h = width // 4, height // 2, 50, 50
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.imshow("Calibration", frame)
        if cv2.waitKey(2) & 0xFF == ord(' '):
            roi = frame[y:y + h, x:x + w, :]
            roi_hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
            # Per-channel HSV bounds of the sampled hand patch.
            min_value = np.amin(roi_hsv, (0, 1))
            max_value = np.amax(roi_hsv, (0, 1))
            cv2.destroyWindow("Calibration")
            calibrated = True
    else:
        # Running-average background update (1% learning rate).
        cv2.accumulateWeighted(frame, bg, 0.01)
# Z-axis calibration (script fragment): record the face-box size at the
# closest, neutral, and farthest head positions, keyed by z-axis index.
calibration_rects = {}

while True:
    face_box = None
    grabbed, frame = camera.read()
    if not grabbed:
        # FIX(robustness): a failed read left `frame` as None and crashed
        # in image_resize; bail out of the loop instead.
        break
    frame = utils.image_resize(frame, height=600)
    face_box = utils.detect_face(face_cascade, frame)
    if face_box is None:
        continue
    # Draw the detected face bounding box (blue).
    cv2.rectangle(frame, (face_box[0], face_box[1]),
                  (face_box[0] + face_box[2], face_box[1] + face_box[3]),
                  (255, 0, 0), 2)
    if calibrate:
        utils.add_text(frame, "Press: W - closest, S - farthest, C - neutral, Q - Done")
        # FIX: integer division -- this value is used as a dict key, and
        # under Python 3 `/` would produce a float key.
        no_points_either_side = z_axis_length // 2
        cv2.imshow("Calibrating...", frame)
        key = cv2.waitKey(1) & 0xFF
        # FIX: Python 2 `print` statements -> print() calls throughout.
        if key == ord('w'):
            # Closest position -> index 0.
            calibration_rects[0] = (face_box[2], face_box[3])
            print(calibration_rects[0])
        elif key == ord('c'):
            # Neutral position -> middle index.
            calibration_rects[no_points_either_side] = (face_box[2], face_box[3])
            print(calibration_rects[no_points_either_side])
        elif key == ord('s'):
            # Farthest position -> last index.
            calibration_rects[z_axis_length - 1] = (face_box[2], face_box[3])
            print(calibration_rects[z_axis_length - 1])
# Track maintenance (script fragment): `d` is the forward-backward optical
# flow error computed above this excerpt; keep only points with error < 1 px.
good = d < 1
new_tracks = []
for tr, (x, y), good_flag in zip(tracks, p1.reshape(-1, 2), good):
    if not good_flag:
        continue
    tr.append((x, y))
    # Bound each track's history length.
    if len(tr) > track_length:
        del tr[0]
    new_tracks.append(tr)
tracks = new_tracks
# print color
cv2.polylines(output, [np.int32(tr) for tr in tracks], False, [200, 200, 200], 2)
utils.add_text(output, ("tracking %d" % len(tracks)))

track_interval += 1
if track_interval % 100 == 0:
    # Periodically detect fresh features away from points already tracked.
    # FIX: the mask must be 8-bit single channel for goodFeaturesToTrack
    # (np.ones defaults to float64), and it was built but never passed
    # (the call used mask=None), making it dead code.
    mask = np.full(curr_gray_frame.shape, 255, dtype=np.uint8)
    for x, y in [np.int32(tr[-1]) for tr in tracks]:
        cv2.circle(mask, (int(x), int(y)), 5, 0, -1)
    p = cv2.goodFeaturesToTrack(prev_gray_frame, mask=mask, **feature_params)
    # FIX: keep the surviving tracks and append the new detections (the
    # original reset `tracks = []`, which together with the mask would have
    # discarded every masked-out point; this is the standard lk_track recipe).
    if p is not None:
        for x, y in np.float32(p).reshape(-1, 2):
            tracks.append([(x, y)])

prev_gray_frame = curr_gray_frame
# Eye-center detection overlay (fragment: the opening of the gray[...] slice
# below, and the `if` matching the `else:` branch, lie outside this excerpt;
# indentation is reconstructed). The next line closes a
# tracker.find_eye_center(gray[...]) call for the left-eye ROI started above.
    roi_x:roi_x + roi_w])
    # print row, col
    # Mark the detected left-eye center (green), offset back to frame coords.
    cv2.circle(frame, (roi_x + col, roi_y + row), 5, (0, 255, 0), -1)
    # Right-eye ROI: mirrored horizontal offset from the face box's right edge.
    roi_x = x + w - roi_w - int(w * 0.13)
    cv2.rectangle(frame, (roi_x, roi_y), (roi_x + roi_w, roi_y + roi_h), (255, 255, 0), 2)
    row, col = tracker.find_eye_center(gray[roi_y:roi_y + roi_h, roi_x:roi_x + roi_w])
    # print row, col
    # Mark the detected right-eye center (red).
    cv2.circle(frame, (roi_x + col, roi_y + row), 5, (0, 0, 255), -1)
else:
    utils.add_text(frame, "Face not found!")

# Commented-out debug path: search each half of the frame for an eye center.
# row, col = tracker.find_eye_center(gray[:, 0:frame.shape[1] / 2])
# print row, col
#
# cv2.circle(frame, (col, row), 10, (255, 255, 0), -1)
#
# row, col = tracker.find_eye_center(gray[:, frame.shape[1] / 2:])
# print row, col
#
# cv2.circle(frame, (frame.shape[1] / 2 + col, row), 10, (255, 0, 0), -1)
cv2.imshow("Output", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
    break  # leaves the enclosing capture loop (header outside this excerpt)
# Track maintenance (script fragment): forward-backward optical-flow check --
# `p0` are the previous points and `p0r` the points flowed forward then back;
# keep only points whose round-trip error is below 1 px.
d = abs(p0 - p0r).reshape(-1, 2).max(-1)
good = d < 1
new_tracks = []
for tr, (x, y), good_flag in zip(tracks, p1.reshape(-1, 2), good):
    if not good_flag:
        continue
    tr.append((x, y))
    # Bound each track's history length.
    if len(tr) > track_length:
        del tr[0]
    new_tracks.append(tr)
tracks = new_tracks
# print color
cv2.polylines(output, [np.int32(tr) for tr in tracks], False, [200, 200, 200], 2)
utils.add_text(output, ("tracking %d" % len(tracks)))

track_interval += 1
if track_interval % 10 == 0:
    # Periodically detect fresh features away from points already tracked.
    # FIX: the mask must be 8-bit single channel for goodFeaturesToTrack
    # (np.ones defaults to float64), and it was built but never passed
    # (the call used mask=None), making it dead code.
    mask = np.full(curr_gray_frame.shape, 255, dtype=np.uint8)
    for x, y in [np.int32(tr[-1]) for tr in tracks]:
        cv2.circle(mask, (int(x), int(y)), 5, 0, -1)
    p = cv2.goodFeaturesToTrack(prev_gray_frame, mask=mask, **feature_params)
    # FIX: keep the surviving tracks and append the new detections (the
    # original reset `tracks = []`, which together with the mask would have
    # discarded every masked-out point; this is the standard lk_track recipe).
    if p is not None:
        for x, y in np.float32(p).reshape(-1, 2):
            tracks.append([(x, y)])

prev_gray_frame = curr_gray_frame
cv2.imshow("Output", output)
# Z-axis calibration loop (script fragment): record the face-box size at the
# closest, neutral, and farthest head positions, keyed by z-axis index.
# `calibration_rects`, `camera`, `calibrate`, `z_axis_length` are defined
# above this excerpt.
while True:
    face_box = None
    grabbed, frame = camera.read()
    if not grabbed:
        # FIX(robustness): a failed read left `frame` as None and crashed
        # in image_resize; bail out of the loop instead.
        break
    frame = utils.image_resize(frame, height=600)
    face_box = utils.detect_face(face_cascade, frame)
    if face_box is None:
        continue
    # Draw the detected face bounding box (blue).
    cv2.rectangle(frame, (face_box[0], face_box[1]),
                  (face_box[0] + face_box[2], face_box[1] + face_box[3]),
                  (255, 0, 0), 2)
    if calibrate:
        utils.add_text(
            frame, "Press: W - closest, S - farthest, C - neutral, Q - Done")
        # FIX: integer division -- this value is used as a dict key, and
        # under Python 3 `/` would produce a float key.
        no_points_either_side = z_axis_length // 2
        cv2.imshow("Calibrating...", frame)
        key = cv2.waitKey(1) & 0xFF
        # FIX: Python 2 `print` statements -> print() calls throughout.
        if key == ord('w'):
            # Closest position -> index 0.
            calibration_rects[0] = (face_box[2], face_box[3])
            print(calibration_rects[0])
        elif key == ord('c'):
            # Neutral position -> middle index.
            calibration_rects[no_points_either_side] = (face_box[2], face_box[3])
            print(calibration_rects[no_points_either_side])
        elif key == ord('s'):
            # Farthest position -> last index.
            calibration_rects[z_axis_length - 1] = (face_box[2], face_box[3])
# Eye-center detection overlay (fragment: the `if` matching the `else:` below
# is above this excerpt; roi_x/roi_y/roi_w/roi_h, x/w, `gray`, and `tracker`
# come from there; indentation is reconstructed).
    # Left-eye ROI outline (blue).
    cv2.rectangle(frame, (roi_x, roi_y), (roi_x + roi_w, roi_y + roi_h), (255, 0, 0), 2)
    row, col = tracker.find_eye_center(gray[roi_y:roi_y + roi_h, roi_x:roi_x + roi_w])
    # print row, col
    # Mark the detected left-eye center (green), offset back to frame coords.
    cv2.circle(frame, (roi_x + col, roi_y + row), 5, (0, 255, 0), -1)
    # Right-eye ROI: mirrored horizontal offset from the face box's right edge.
    roi_x = x + w - roi_w - int(w * 0.13)
    cv2.rectangle(frame, (roi_x, roi_y), (roi_x + roi_w, roi_y + roi_h), (255, 255, 0), 2)
    row, col = tracker.find_eye_center(gray[roi_y:roi_y + roi_h, roi_x:roi_x + roi_w])
    # print row, col
    # Mark the detected right-eye center (red).
    cv2.circle(frame, (roi_x + col, roi_y + row), 5, (0, 0, 255), -1)
else:
    utils.add_text(frame, "Face not found!")

# Commented-out debug path: search each half of the frame for an eye center.
# row, col = tracker.find_eye_center(gray[:, 0:frame.shape[1] / 2])
# print row, col
#
# cv2.circle(frame, (col, row), 10, (255, 255, 0), -1)
#
# row, col = tracker.find_eye_center(gray[:, frame.shape[1] / 2:])
# print row, col
#
# cv2.circle(frame, (frame.shape[1] / 2 + col, row), 10, (255, 0, 0), -1)
cv2.imshow("Output", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
    break  # leaves the enclosing capture loop (header outside this excerpt)