def main():
    """Run a webcam loop that detects faces and compares them to known faces.

    Reads frames from a VideoStream, locates faces with the HOG model,
    encodes them, and prints a match matrix against the encodings loaded
    from ./faces/known_faces. Draws bounding boxes and shows the frame
    until 'q' is pressed.
    """
    vs = VideoStream()
    vs.start()
    names, known_encodings = load_known_faces('./faces/known_faces')
    print(len(known_encodings))
    while vs.isOpened():
        image = vs.read()
        # NOTE: the original also computed a grayscale copy here
        # (cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)) but never used it;
        # the dead per-frame conversion has been removed.
        face_locations = fr.face_locations(image, model='hog')
        img_face_encodings = fr.face_encodings(image, face_locations)
        # One row per detected face: booleans against each known encoding.
        match_matrix = [
            fr.compare_faces(known_encodings, f, tolerance=0.6)
            for f in img_face_encodings
        ]
        print(match_matrix)
        img_with_faces = draw_bbox_on_img(image, face_locations)
        cv2.imshow('frame', img_with_faces)
        # waitKey is required for imshow to refresh; 'q' quits the loop.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    vs.close()
    cv2.destroyAllWindows()
def main():
    """Drive the MMNT two-camera rig: track humans with pose estimation and
    fall back to steering toward detected speech from the ReSpeaker mic array.

    The "master" camera actively tracks a person; the "slave" camera covers
    the opposite direction. Motor commands are only sent when a target angle
    actually changed (update* flags). Runs until KeyboardInterrupt.
    """
    logging.basicConfig()
    logger = logging.getLogger("MMNT")
    logger.setLevel(logging.INFO)
    logger.info("Initializing")
    masterSampleTime = time.time()
    slaveSampleTime = time.time()

    logger.debug("Initializing motor control")
    mc = MotorControl()
    mc.resetMotors()

    logger.debug("Initializing microphone")
    # ReSpeaker Mic Array v2.0 USB IDs.
    dev = usb.core.find(idVendor=0x2886, idProduct=0x0018)
    if not dev:
        sys.exit("Could not find ReSpeaker Mic Array through USB")
    mic = Tuning(dev)
    mic.write("NONSTATNOISEONOFF", 1)
    mic.write("STATNOISEONOFF", 1)

    logger.debug("Initializing models")
    # ht_model is not referenced below; presumably get_model() has
    # load-time side effects needed by ht.is_hands_above_head — TODO confirm.
    ht_model = ht.get_model()
    tfPose = TfPoseEstimator(get_graph_path(TF_MODEL),
                             target_size=(VideoStream.DEFAULT_WIDTH, VideoStream.DEFAULT_HEIGHT))

    logger.debug("Initializing video streams")
    topCamStream = VideoStream(1)
    botCamStream = VideoStream(2)
    topCamStream.start()
    botCamStream.start()

    # Initial role assignment; roles are referenced via master*/slave* below.
    masterCamID = TOP_CAM_ID
    masterStream = topCamStream
    slaveCamID = BOT_CAM_ID
    slaveStream = botCamStream

    masterTargetAngle = 0
    slaveTargetAngle = 180
    updateMasterAngle = False
    updateSlaveAngle = False
    masterTracking = False
    logger.info("Initialization complete")

    while True:
        try:
            # ---------------- MASTER ----------------
            masterFrame = masterStream.read()
            if time.time() - masterSampleTime > MASTER_SAMPLE_FREQ:
                humans = tfPose.inference(masterFrame, resize_to_default=True,
                                          upsample_size=RESIZE_RATIO)
                if len(humans):
                    logger.debug("Master tracking")
                    masterTracking = True
                    if DISPLAY_VIDEO:
                        TfPoseEstimator.draw_humans(masterFrame, humans, imgcopy=False)
                    human = humans[0]
                    if (ht.is_hands_above_head(human)):
                        logger.debug("HANDS ABOVE HEAD!!!")
                    # Find the horizontal pixel position of the first visible
                    # head part; -1 means no head part detected.
                    midX = -1
                    for part in headParts:
                        if part in human.body_parts:
                            midX = human.body_parts[part].x * VideoStream.DEFAULT_WIDTH
                            break
                    if midX != -1:
                        centerDiff = abs(midX - VideoStream.DEFAULT_WIDTH/2)
                        # Only steer when the head is off-center by more than
                        # the dead-band threshold.
                        if centerDiff > FACE_THRESHOLD:
                            if midX < VideoStream.DEFAULT_WIDTH/2:
                                # rotate CCW
                                masterTargetAngle += centerDiff * degreePerPixel
                            elif midX > VideoStream.DEFAULT_WIDTH/2:
                                # rotate CW
                                masterTargetAngle -= centerDiff * degreePerPixel
                            masterTargetAngle = masterTargetAngle % 360
                            updateMasterAngle = True
                            masterSampleTime = time.time()
                else:
                    logger.debug("Master stopped tracking")
                    masterTracking = False
                # If master is not tracking a human, move towards speech.
                if not masterTracking:
                    speechDetected, micDOA = mic.speech_detected(), mic.direction
                    # BUGFIX: these debug calls previously passed extra
                    # positional args with no %-placeholders, which makes the
                    # logging module raise a formatting error instead of
                    # printing the values. Use lazy %s formatting.
                    logger.debug("master speech detected: %s diff: %s",
                                 speechDetected, abs(micDOA - masterTargetAngle))
                    if speechDetected and abs(micDOA - masterTargetAngle) > ANGLE_THRESHOLD:
                        masterTargetAngle = micDOA
                        logger.debug("Update master angle: %s", masterTargetAngle)
                        masterSampleTime = time.time()
                        updateMasterAngle = True

            # ---------------- SLAVE ----------------
            slaveFrame = slaveStream.read()
            if time.time() - slaveSampleTime > SLAVE_SAMPLE_FREQ:
                # If master is not tracking a human and the slave sees one,
                # point master at that human and move slave opposite.
                if not masterTracking and time.time() - masterSampleTime > MASTER_SAMPLE_FREQ:
                    humans = tfPose.inference(slaveFrame, resize_to_default=True,
                                              upsample_size=RESIZE_RATIO)
                    if len(humans):
                        logger.debug("slave found mans")
                        if DISPLAY_VIDEO:
                            TfPoseEstimator.draw_humans(slaveFrame, humans, imgcopy=False)
                        human = humans[0]
                        if (ht.is_hands_above_head(human)):
                            logger.debug("HANDS ABOVE HEAD!!!")
                        midX = -1
                        for part in headParts:
                            if part in human.body_parts:
                                midX = human.body_parts[part].x * VideoStream.DEFAULT_WIDTH
                                break
                        if midX != -1:
                            centerDiff = abs(midX - VideoStream.DEFAULT_WIDTH/2)
                            # No FACE_THRESHOLD dead-band here (deliberately
                            # disabled in the original source).
                            if midX < VideoStream.DEFAULT_WIDTH/2:
                                # rotate CCW
                                masterTargetAngle = slaveTargetAngle + centerDiff * degreePerPixel
                            elif midX > VideoStream.DEFAULT_WIDTH/2:
                                # rotate CW
                                masterTargetAngle = slaveTargetAngle - centerDiff * degreePerPixel
                            masterTargetAngle = masterTargetAngle % 360
                            updateMasterAngle = True
                            masterSampleTime = time.time()
                            # Keep the slave pointed opposite the master.
                            slaveTargetAngle = (masterTargetAngle + 180) % 360
                            updateSlaveAngle = True
                            logger.debug("Moving master to slave: %s", masterTargetAngle)
                # Slave also follows speech, but only when it is far enough
                # from the master's heading to avoid both cams converging.
                speechDetected, micDOA = mic.speech_detected(), mic.direction
                speechMasterDiff = abs(micDOA - masterTargetAngle)
                if speechDetected and speechMasterDiff > SLAVE_MASTER_THRESHOLD and abs(micDOA - slaveTargetAngle) > ANGLE_THRESHOLD:
                    slaveTargetAngle = micDOA
                    logger.debug("Update slave angle: %s", slaveTargetAngle)
                    slaveSampleTime = time.time()
                    updateSlaveAngle = True

            # ---------------- Send serial commands ----------------
            if updateSlaveAngle and updateMasterAngle:
                logger.debug("Slave Angle: %s", slaveTargetAngle)
                logger.debug("Master Angle: %s", masterTargetAngle)
                updateSlaveAngle = False
                updateMasterAngle = False
                # runMotors takes (bottom, top) by position — map roles to
                # physical motors based on which cam is currently slave.
                if slaveCamID == BOT_CAM_ID:
                    mc.runMotors(masterTargetAngle, slaveTargetAngle)
                else:
                    mc.runMotors(slaveTargetAngle, masterTargetAngle)
            elif updateSlaveAngle:
                mc.runMotor(slaveCamID, slaveTargetAngle)
                logger.debug("Slave Angle: %s", slaveTargetAngle)
                updateSlaveAngle = False
            elif updateMasterAngle:
                mc.runMotor(masterCamID, masterTargetAngle)
                logger.debug("Master Angle: %s", masterTargetAngle)
                updateMasterAngle = False

            if DISPLAY_VIDEO:
                cv.imshow('Master Camera', masterFrame)
                cv.imshow('Slave Camera', slaveFrame)
                # NOTE(review): ESC (27) is checked but does nothing (pass),
                # preserved as-is — shutdown is via KeyboardInterrupt only.
                # waitKey is still required for imshow to refresh.
                if cv.waitKey(1) == 27:
                    pass
        except KeyboardInterrupt:
            # Graceful shutdown: stop motors and streams, release the mic.
            logger.debug("Keyboard interrupt! Terminating.")
            mc.stopMotors()
            slaveStream.stop()
            masterStream.stop()
            mic.close()
            time.sleep(2)
            break
    cv.destroyAllWindows()