def __init__(self, source_path=None):
        self.source_path = source_path
        if source_path is None:
            self.source_path = SOURCE_PATH

        self._detector = FaceDetector(self.source_path)
        self._embed = Embeddings(self.source_path)
 def __init__(self,
              min_dx=20,
              min_da=20,
              init_num=10,
              debug=False,
              socket_host='172.18.22.12',
              socket_port=1919):
     # Init face detection on camera
     self._face_detector = FaceDetector(debug=debug)
     self._min_dx = min_dx
     self._min_da = min_da
     self._init_num = init_num
     self._debug = debug
     self._x = 0
     self._a = 0
     self.init_face()
     # Init socket connection to car
     self._socket_host = socket_host
     self._socket_port = socket_port
     self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
     try:
         self._socket.connect((self._socket_host, self._socket_port))
         print('Socket connection established')
     except OSError as err:
         # connect() raises ConnectionRefusedError (and socket.timeout only if
         # a timeout was set); catching OSError covers both cases
         print('Socket connection failed: {}'.format(err))
         exit(1)
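# The car-side endpoint isn't shown in this listing; a minimal sketch of a TCP
# listener that would accept the connection above (host/port are the values the
# controller assumes; adjust for your network):
import socket

def car_listener(host='0.0.0.0', port=1919):
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((host, port))
    server.listen(1)
    # Block until the face-tracking controller connects
    conn, addr = server.accept()
    print('Controller connected from', addr)
    return conn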
    def __init__(self, args):
        used_devices = set([args.d_fd, args.d_lm, args.d_hp, args.d_reid])
        self.context = InferenceContext()
        context = self.context
        context.load_plugins(used_devices, args.cpu_lib, args.gpu_lib)
        for d in used_devices:
            context.get_plugin(d).set_config(
                {"PERF_COUNT": "YES" if args.perf_stats else "NO"})

        log.info("Loading models")
        face_detector_net = self.load_model(args.m_fd)
        landmarks_net = self.load_model(args.m_lm)
        head_pose_net = self.load_model(args.m_hp)
        # face_reid_net = self.load_model(args.m_reid)

        self.face_detector = FaceDetector(face_detector_net,
                                          confidence_threshold=args.t_fd,
                                          roi_scale_factor=args.exp_r_fd)

        self.landmarks_detector = LandmarksDetector(landmarks_net)
        self.head_pose_detector = HeadPoseDetector(head_pose_net)
        self.face_detector.deploy(args.d_fd, context)
        self.landmarks_detector.deploy(args.d_lm,
                                       context,
                                       queue_size=self.QUEUE_SIZE)
        self.head_pose_detector.deploy(args.d_hp,
                                       context,
                                       queue_size=self.QUEUE_SIZE)

        log.info("Models are loaded")
    def __init__(self, args):
        self.gpu_ext = args.gpu_lib
        self.allow_grow = args.allow_grow and not args.no_show

        log.info('OpenVINO Inference Engine')
        log.info('\tbuild: {}'.format(get_version()))
        core = Core()
        if args.cpu_lib and 'CPU' in {args.d_fd, args.d_lm, args.d_reid}:
            core.add_extension(args.cpu_lib, 'CPU')

        self.face_detector = FaceDetector(core,
                                          args.m_fd,
                                          args.fd_input_size,
                                          confidence_threshold=args.t_fd,
                                          roi_scale_factor=args.exp_r_fd)
        self.landmarks_detector = LandmarksDetector(core, args.m_lm)
        self.face_identifier = FaceIdentifier(core,
                                              args.m_reid,
                                              match_threshold=args.t_id,
                                              match_algo=args.match_algo)

        self.face_detector.deploy(args.d_fd, self.get_config(args.d_fd))
        self.landmarks_detector.deploy(args.d_lm, self.get_config(args.d_lm),
                                       self.QUEUE_SIZE)
        self.face_identifier.deploy(args.d_reid, self.get_config(args.d_reid),
                                    self.QUEUE_SIZE)

        log.debug('Building faces database using images from {}'.format(
            args.fg))
        self.faces_database = FacesDatabase(
            args.fg, self.face_identifier, self.landmarks_detector,
            self.face_detector if args.run_detector else None, args.no_show)
        self.face_identifier.set_faces_database(self.faces_database)
        log.info('Database is built, registered {} identities'.format(
            len(self.faces_database)))
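    # get_config isn't shown in this excerpt; a minimal sketch consistent with
    # how it's used above (the GPU-extension handling mirrors self.gpu_ext from
    # __init__ and is an assumption, not the demo's verbatim code):
    def get_config(self, device):
        config = {}
        if device == 'GPU' and self.gpu_ext:
            # Path to a custom-kernel config for GPU, if one was supplied
            config['CONFIG_FILE'] = self.gpu_ext
        return config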
 def __init__(self):
     self.face_detector = FaceDetector()
     # https://github.com/davisking/dlib-models
     self.sp = dlib.shape_predictor(
         'data/shape_predictor_5_face_landmarks.dat')
     self.facerec = dlib.face_recognition_model_v1(
         'data/dlib_face_recognition_resnet_model_v1.dat')
def main():
    rospy.init_node("legacy_measurement", anonymous=False, log_level=rospy.DEBUG)

    # Get ROS topic from launch parameter
    input_topic = rospy.get_param("~input_topic", "/webcam/image_raw")
    rospy.loginfo("[LegacyMeasurement] Listening on topic '" + input_topic + "'")

    video_file = rospy.get_param("~video_file", None)
    rospy.loginfo("[LegacyMeasurement] Video file input: '" + str(video_file) + "'")

    bdf_file = rospy.get_param("~bdf_file", "")
    rospy.loginfo("[LegacyMeasurement] Bdf file: '" + str(bdf_file) + "'")

    cascade_file = rospy.get_param("~cascade_file", "")
    rospy.loginfo("[LegacyMeasurement] Cascade file: '" + str(cascade_file) + "'")

    show_image_frame = rospy.get_param("~show_image_frame", False)
    rospy.loginfo("[LegacyMeasurement] Show image frame: '" + str(show_image_frame) + "'")

    # Start heart rate measurement
    is_video = bool(video_file)  # False for both None (param unset) and ""
    pulse_measurement = LegacyMeasurement(is_video)

    face_detector = FaceDetector(input_topic, cascade_file)
    face_detector.bottom_face_callback = pulse_measurement.on_image_frame
    face_detector.run(video_file, bdf_file, show_image_frame)

    rospy.spin()
    rospy.loginfo("[LegacyMeasurement] Shutting down")
Example #7
 def __init__(self):
     # age model
     # model structure: https://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/static/age.prototxt
     # pre-trained weights: https://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/static/dex_chalearn_iccv2015.caffemodel
     self.age_model = cv2.dnn.readNetFromCaffe(
         "data/age.prototxt", "data/dex_chalearn_iccv2015.caffemodel")
     self.fd = FaceDetector()
Example #8
def init():
    # Init all module-level globals
    global fs, fd, today_dir, index, img_path_map

    fs = FaceSearcher(**cfg.search_config)
    fd = FaceDetector(**cfg.detect_config)
    # Make folder
    today_dir = init_folder(cfg.data_path, cfg.wards)
    # If data exists, add it to the search graph
    w_paths = [os.path.join(today_dir, w, 'true') for w in cfg.wards]
    features = []
    for w_path in w_paths:
        for image_name in os.listdir(w_path):
            image_path = os.path.join(w_path, image_name)
            # Add path to path map
            img_path_map.append(image_path)
            # convert image to pytorch tensor
            image = pil_loader(image_path)
            image = ToTensor()(image)

            # Extract the face feature tensor for this image
            tensor = fd.extract_feature(image)
            # print(tensor.shape)

            features.append(tensor)
            index += 1
    # Add the collected features to the search graph
    print('Collected {} images'.format(index))
    if index > 0:
        features = np.array(features)
        # print(features.shape)
        fs.add_faces(features, np.arange(index))
Example #9
def run(args):
    model = build_model()

    (clf, class_names) = read_classifier(
        os.path.join(args.data_path, 'classifier.pickle'))
    # If no classifier was trained, there is only one known face
    if clf is None:
        verified_embedding, only_class = read_only_embedding(args.data_path)

    cap = cv2.VideoCapture(0)

    if not cap.isOpened():
        print("Error opening video stream or file")

    face_detector = FaceDetector()

    while cap.isOpened():
        # Capture frame-by-frame
        ret, frame = cap.read()
        if ret:

            # Detect image and write it
            faces = face_detector.detect_faces(frame)
            for face in faces:
                x, y, w, h = face
                cropped = frame[y:y + h, x:x + w]
                cropped = cv2.resize(cropped, (96, 96))
                cropped = np.around(convert_image(cropped), decimals=12)
                embedding = model.predict(np.array([cropped]))

                if clf is None:
                    dist = np.linalg.norm(verified_embedding - embedding)
                    match = dist < 0.7
                    label = only_class if match else "Unknown"
                    if args.debug:
                        label += ' (d: {})'.format(round(dist, 2))
                else:
                    predictions = clf.predict_proba(embedding)
                    pred_class = np.argmax(predictions, axis=1)[0]
                    score = round(np.max(predictions) * 100, 2)
                    match = score > 70
                    name = class_names[pred_class]
                    label = '{} ({}%)'.format(name, score)

                color = (0, 255, 0) if match else (0, 0, 255)

                draw_bbox(frame, x, y, x + w, y + h, label=label, color=color)

            cv2.imshow('Frame', frame)

            # Press Q on keyboard to exit
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break

        else:
            break

    cap.release()
    cv2.destroyAllWindows()
Example #10
    def __init__(self, args):
        used_devices = set([args.d_fd, args.d_lm, args.d_reid])
        self.context = InferenceContext()
        context = self.context
        context.load_plugins(used_devices, args.cpu_lib, args.gpu_lib)
        for d in used_devices:
            context.get_plugin(d).set_config(
                {"PERF_COUNT": "YES" if args.perf_stats else "NO"})

        log.info("Loading models")
        face_detector_net = self.load_model(args.m_fd)
        landmarks_net = self.load_model(args.m_lm)
        face_reid_net = self.load_model(args.m_reid)

        self.face_detector = FaceDetector(
            face_detector_net,
            confidence_threshold=args.t_fd,
            roi_scale_factor=args.exp_r_fd,
        )
        self.landmarks_detector = LandmarksDetector(landmarks_net)
        self.face_identifier = FaceIdentifier(face_reid_net,
                                              match_threshold=args.t_id)

        self.face_detector.deploy(args.d_fd, context)
        self.landmarks_detector.deploy(args.d_lm,
                                       context,
                                       queue_size=self.QUEUE_SIZE)
        self.face_identifier.deploy(args.d_reid,
                                    context,
                                    queue_size=self.QUEUE_SIZE)
        log.info("Models are loaded")

        if args.fc in "LOAD":
            self.faces_database = pickle.loads(open(args.fpl, "rb").read())
            log.info("Face database loaded from {}.".format(args.fpl))

        else:
            log.info("Building faces database using images from '%s'" %
                     (args.fg))
            self.faces_database = FacesDatabase(
                args.fg,
                self.face_identifier,
                self.landmarks_detector,
                self.face_detector if args.run_detector else None,
                args.no_show,
            )
            if args.fc in "SAVE":
                with open(args.fps, "wb") as f:
                    f.write(pickle.dumps(self.faces_database))
                    f.close()
                    log.info("Face database {} saved".format(args.fps))

        self.face_identifier.set_faces_database(self.faces_database)
        log.info("Database is built, registered %s identities" %
                 (len(self.faces_database)))

        self.allow_grow = args.allow_grow and not args.no_show
Example #11
    def __init__(self):
        self.fd = FaceDetector()

        # pb_path = os.path.join(MODELS_DIR, 'gaze_opt_b1.m.pb')
        # pb_path = os.path.join(MODELS_DIR, 'gaze_opt_b2.m.pb')  # 108, 180
        # pb_path = os.path.join(MODELS_DIR, 'gaze_opt_b2_small.pb')
        pb_path = os.path.join(MODELS_DIR, 'gaze_opt_b2_small.m.pb')  # 36, 60

        self.sess = self.get_model_sess(pb_path)
Example #12
def capture(named_path, data_path, count):
    cap = cv2.VideoCapture(0)

    # Check if camera opened successfully
    if not cap.isOpened():
        print("Error opening video stream or file")

    captured_counter = 0
    face_detector = FaceDetector()
    model = build_model()

    while cap.isOpened() and captured_counter < count:
        # Capture frame-by-frame
        ret, frame = cap.read()
        if ret:

            # Show progress bar
            draw_progressbar(frame, (captured_counter / count))

            # Detect image and write it
            faces = face_detector.detect_faces(frame)
            if len(faces) > 0:

                # Per person path
                file_path = os.path.join(named_path,
                                         str(captured_counter + 1) + '.jpg')
                print('Writing capture: ' + file_path)

                face = faces[0]  # Assume it's the only face
                x, y, w, h = face
                cropped = frame[y:y + h, x:x + w]
                cropped = cv2.resize(cropped, (96, 96))
                cv2.imwrite(file_path, cropped)
                captured_counter += 1
                draw_bbox(frame, x, y, x + w, y + h, label="Face detected")

            cv2.imshow('Frame', frame)

            # Press Q on keyboard to exit
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break

        # Break the loop
        else:
            break

    # When everything done, release the video capture object
    cap.release()
    cv2.destroyAllWindows()

    # Build and Write the embedding file for this person
    build_embedding(model, named_path)

    # Rebuild the classifier
    build_classifier(data_path)

    print('Done!')
Example #13
def predict():
    image_np = data_uri_to_cv2_img(request.values['image'])    
    # Passing the frame to the predictor
    with graph.as_default():
        faces = FaceDetector('./haarcascade_frontalface_default.xml').detect_faces(image_np)
        emotion = model.predict_from_ndarray(image_np)
        result = {'emotion': emotion, 'faces': json.dumps(faces.tolist())} \
            if len(faces) > 0 else {'emotion': 'no face detected', 'faces': json.dumps([])}
    return jsonify(result)
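# data_uri_to_cv2_img isn't defined in this snippet; a minimal sketch of such a
# helper, assuming the client posts a standard base64 data URI
# ("data:image/jpeg;base64,..."):
import base64
import cv2
import numpy as np

def data_uri_to_cv2_img(uri):
    # Strip the "data:image/...;base64," prefix and decode the payload
    encoded = uri.split(',', 1)[1]
    data = base64.b64decode(encoded)
    # Decode the raw bytes into a BGR image for OpenCV
    return cv2.imdecode(np.frombuffer(data, np.uint8), cv2.IMREAD_COLOR)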
Example #14
def _main(cap_src):

    cap = cv2.VideoCapture(cap_src)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    face_d = FaceDetector()

    sess = onnxruntime.InferenceSession(
        f'{root_path}/pretrained/fsanet-1x1-iter-688590.onnx')

    sess2 = onnxruntime.InferenceSession(
        f'{root_path}/pretrained/fsanet-var-iter-688590.onnx')

    print('Processing frames, press q to exit application...')
    while True:
        ret, frame = cap.read()
        if not ret:
            print('Could not capture a valid frame from video source, '
                  'check your cam/video value...')
            break
        #get face bounding boxes from frame
        face_bb = face_d.get(frame)
        for (x1, y1, x2, y2) in face_bb:
            face_roi = frame[y1:y2 + 1, x1:x2 + 1]

            #preprocess headpose model input
            face_roi = cv2.resize(face_roi, (64, 64))
            face_roi = face_roi.transpose((2, 0, 1))
            face_roi = np.expand_dims(face_roi, axis=0)
            face_roi = (face_roi - 127.5) / 128
            face_roi = face_roi.astype(np.float32)

            #get headpose
            res1 = sess.run(["output"], {"input": face_roi})[0]
            res2 = sess2.run(["output"], {"input": face_roi})[0]

            yaw, pitch, roll = np.mean(np.vstack((res1, res2)), axis=0)

            draw_axis(frame,
                      yaw,
                      pitch,
                      roll,
                      tdx=(x2 - x1) // 2 + x1,
                      tdy=(y2 - y1) // 2 + y1,
                      size=50)

            #draw face bb
            # cv2.rectangle(frame,(x1,y1),(x2,y2),(0,255,0),2)

        cv2.imshow('Frame', frame)

        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
Example #15
 def __init__(self, base_dir):
     self.publisher = Publisher()
     self.publisher.declare_queue('hello')
     self.base_dir = base_dir
     self.csv_filename = 'screenshot_list.csv'
     self.face_detector = FaceDetector()
     # self.cap0 = cv2.VideoCapture()
     # self.cap0.open(0)
     self.cap = cv2.VideoCapture(1)  # default is 0
     self.eyegaze_process = None
    def __init__(self, df):
        self.video_paths = df['video_path']
        self.filenames = df.index
        self.face_dr = FaceDetector(frames_per_video=FRAMES_PER_VIDEO)

        # Standard ImageNet channel statistics for normalization
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
        self.normalize_transform = Normalize(mean, std)

        self.video_reader = VideoReader()
        self.video_read_fn = lambda x: self.video_reader.read_frames(
            x, num_frames=FRAMES_PER_VIDEO)
def main():
    # original frame size is (720, 960)
    W = 320
    H = 240
    image_cx = W // 2
    image_cy = H // 2

    num_skip_frames = 300

    drone = tellopy.Tello()
    controller = Controller(drone, image_cx, image_cy)
    face_detector = FaceDetector()
    renderer = Renderer()
    display = PygameDisplay(W, H)

    try:
        drone.connect()
        drone.wait_for_connection(60.0)

        drone.subscribe(drone.EVENT_FLIGHT_DATA, flight_data_handler)
        container = av.open(drone.get_video_stream())

        drone.takeoff()

        while True:
            for frame in container.decode(video=0):
                if num_skip_frames > 0:
                    num_skip_frames = num_skip_frames - 1
                    continue
                start_time = time.time()
                image = np.array(frame.to_image())
                image = cv2.resize(image, (W, H))

                face = face_detector.detect(image)
                controller.control(face)
                renderer.render(image, drone_state, face)
                display.paint(image)

                # Skip as many upcoming frames as our processing time consumed,
                # so the decode loop stays close to real time
                time_base = max(1.0 / 60, frame.time_base)
                processing_time = time.time() - start_time
                num_skip_frames = int(processing_time / time_base)
                #print('Video stream %d FPS, frame time base=%f' % (1/frame.time_base, frame.time_base))
                #print('Processing FPS=%d, time=%f ms, skip frames=%d' % (1/processing_time, 1000 * processing_time, num_skip_frames))

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)

    finally:
        drone.land()
        drone.quit()
        display.dispose()
Example #18
def main(model):
    webcam = cv2.VideoCapture(0)
    face_detector = FaceDetector(model)

    while True:
        _, frame = webcam.read()
        face_detector.refresh(frame)
        frame = face_detector.annotate_frame()
        cv2.imshow("FaceDetector", frame)
        # cv2.waitKey is required here: OpenCV's HighGUI only draws and
        # processes window events while waitKey is running, so the frame
        # will not show without it
        cv2.waitKey(1)
Example #19
    def __init__(self, camera):
        super().__init__()
        self.head_deg, self.body_deg = 80, 90
        self.__oc = camera

        self.__robot = RobotControl()
        self.__robot.connect()
        self.__robot.activate_command_control()
        self.__robot.move()
        time.sleep(1)

        self.__fd = FaceDetector()
def get_img_and_lms5():
    """
    测试获取图像和人脸关键点
    """
    img_path = os.path.join(IMGS_DIR, 'eyes_up.jpg')
    img_bgr = cv2.imread(img_path)

    fd = FaceDetector()
    main_box, face_landmarks = fd.get_main_faces_dwo(img_bgr)

    corner_list = [[2, 3], [1, 0]]  # Landmarks: left eye, right eye

    return img_bgr, face_landmarks, corner_list
def main():
    W = 432
    H = 240
    image_cx = W // 2
    image_cy = H // 2

    face_detector = FaceDetector()
    renderer = Renderer()
    #display = Cv2Display2D()
    display = PygameDisplay(W, H)

    try:
        container = av.open('video/ball_tracking_example.mp4')
        num_skip_frames = 0
        while True:
            for frame in container.decode(video=0):
                if num_skip_frames > 0:
                    num_skip_frames = num_skip_frames - 1
                    continue
                start_time = time.monotonic()

                image = np.array(frame.to_image())
                image = cv2.resize(image, (W, H))

                face = face_detector.detect(image)
                renderer.render(image, drone_state, face)

                if face is not None:
                    offset_x = face.cx - image_cx
                    offset_y = face.cy - image_cy
                    print(offset_x, offset_y)

                display.paint(image)

                time_base = max(1 / 60, frame.time_base)
                processing_time = time.monotonic() - start_time
                num_skip_frames = int(processing_time / time_base)
                print('Video stream %d FPS, frame time base=%f' %
                      (1 / frame.time_base, frame.time_base))
                print('Processing FPS=%d, time=%f ms, skip frames=%d' %
                      (1 / processing_time, 1000 * processing_time,
                       num_skip_frames))

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)

    finally:
        display.dispose()
    def __init__(self):
        if platform.system().lower() == 'darwin':
            self.camera = Camera()
            self.camera.init(cameraNumber=0, win=(640, 480))
        elif platform.system().lower() == 'linux':
            self.camera = Camera(cam='pi')
            self.camera.init(win=(640, 480))
        else:
            print('Sorry, platform not supported')
            exit()

        self.balltracker = BallTracker()

        self.face = FaceDetector()
Example #23
    def __init__(self, varsd):
        used_devices = set([varsd["d_fd"], varsd["d_lm"], varsd["d_reid"]])
        self.context = InferenceContext(used_devices, varsd["cpu_lib"],
                                        varsd["gpu_lib"], varsd["perf_stats"])
        context = self.context

        log.info("Loading models")
        face_detector_net = self.load_model(varsd["m_fd"])

        assert (varsd["fd_input_height"] and varsd["fd_input_width"]) or \
               (varsd["fd_input_height"]==0 and varsd["fd_input_width"]==0), \
            "Both -fd_iw and -fd_ih parameters should be specified for reshape"

        if varsd["fd_input_height"] and varsd["fd_input_width"]:
            face_detector_net.reshape({
                "data":
                [1, 3, varsd["fd_input_height"], varsd["fd_input_width"]]
            })
        landmarks_net = self.load_model(varsd["m_lm"])
        face_reid_net = self.load_model(varsd["m_reid"])

        self.face_detector = FaceDetector(face_detector_net,
                                          confidence_threshold=varsd["t_fd"],
                                          roi_scale_factor=varsd["exp_r_fd"])

        self.landmarks_detector = LandmarksDetector(landmarks_net)
        self.face_identifier = FaceIdentifier(face_reid_net,
                                              match_threshold=varsd["t_id"],
                                              match_algo=varsd["match_algo"])

        self.face_detector.deploy(varsd["d_fd"], context)
        self.landmarks_detector.deploy(varsd["d_lm"],
                                       context,
                                       queue_size=self.QUEUE_SIZE)
        self.face_identifier.deploy(varsd["d_reid"],
                                    context,
                                    queue_size=self.QUEUE_SIZE)
        log.info("Models are loaded")

        log.info("Building faces database using images from '%s'" %
                 (varsd["fg"]))
        self.faces_database = FacesDatabase(
            varsd["fg"], self.face_identifier, self.landmarks_detector,
            self.face_detector if varsd["run_detector"] else None,
            varsd["no_show"])
        self.face_identifier.set_faces_database(self.faces_database)
        log.info("Database is built, registered %s identities" % \
            (len(self.faces_database)))

        self.allow_grow = varsd["allow_grow"] and not varsd["no_show"]
    def __init__(self, args):
        used_devices = set([args.d_fd, args.d_lm, args.d_reid])
        self.context = InferenceContext()
        context = self.context
        context.load_plugins(used_devices, args.cpu_lib, args.gpu_lib)
        for d in used_devices:
            context.get_plugin(d).set_config(
                {"PERF_COUNT": "YES" if args.perf_stats else "NO"})

        log.info("Loading models")
        face_detector_net = self.load_model(args.m_fd)

        assert (args.fd_input_height and args.fd_input_width) or \
               (args.fd_input_height==0 and args.fd_input_width==0), \
            "Both -fd_iw and -fd_ih parameters should be specified for reshape"

        if args.fd_input_height and args.fd_input_width:
            face_detector_net.reshape(
                {"data": [1, 3, args.fd_input_height, args.fd_input_width]})
        landmarks_net = self.load_model(args.m_lm)
        face_reid_net = self.load_model(args.m_reid)

        self.face_detector = FaceDetector(face_detector_net,
                                          confidence_threshold=args.t_fd,
                                          roi_scale_factor=args.exp_r_fd)

        self.landmarks_detector = LandmarksDetector(landmarks_net)
        self.face_identifier = FaceIdentifier(face_reid_net,
                                              match_threshold=args.t_id,
                                              match_algo=args.match_algo)

        self.face_detector.deploy(args.d_fd, context)
        self.landmarks_detector.deploy(args.d_lm,
                                       context,
                                       queue_size=self.QUEUE_SIZE)
        self.face_identifier.deploy(args.d_reid,
                                    context,
                                    queue_size=self.QUEUE_SIZE)
        log.info("Models are loaded")

        log.info("Building faces database using images from '%s'" % (args.fg))
        self.faces_database = FacesDatabase(
            args.fg, self.face_identifier, self.landmarks_detector,
            self.face_detector if args.run_detector else None, args.no_show)
        self.face_identifier.set_faces_database(self.faces_database)
        log.info("Database is built, registered %s identities" % \
            (len(self.faces_database)))

        self.allow_grow = args.allow_grow and not args.no_show
Example #25
    def __init__(self, parent):
        super().__init__(parent)
        self._is_join_requested = False
        self._is_joined = False

        self._frame_source = None
        self._mask = None
        self._mask_next = None

        self._face_detector = FaceDetector()
        self._shape_predictor = ShapePredictor(
            model_path=SHAPE_PREDICTOR_MODEL_PATH)

        self.mask = FaceMaskPassthrough()
        self.frame_source = CameraFrameSource(0)
    def setup_detector(self):
        # Instantiate the object
        self.detector = FaceDetector(self.process_each_n, self.scale_factor)

        # Get media train folder
        py_path = os.path.abspath(__file__)
        py_dir = os.path.abspath(os.path.join(py_path, os.pardir, os.pardir, 'media/train'))

        # Add people to detect
        self.detector.add_to_database("BRUNO LIMA", os.path.join(py_dir, "bruno_lima/bruno_05.png"), (255, 0, 0))
        self.detector.add_to_database("JOAO PAULO", os.path.join(py_dir, "joao_paulo/joao_01.jpg"), (0, 255, 0))
        self.detector.add_to_database("TIAGO VIEIRA", os.path.join(py_dir, "tiago_vieira/tiago_02.jpg"), (205, 207, 109))

        # Init service that reports which face names are currently seen
        service = rospy.Service('~get_seen_faces_names', Trigger, self.who_I_see)
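    # who_I_see isn't shown in this excerpt; a minimal Trigger-handler sketch,
    # assuming the detector tracks the names it currently recognizes (the
    # `seen_names` attribute below is hypothetical):
    def who_I_see(self, request):
        from std_srvs.srv import TriggerResponse
        names = getattr(self.detector, 'seen_names', [])
        return TriggerResponse(success=bool(names), message=', '.join(names))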
Example #27
class AgePredictor:
    def __init__(self):
        # age model
        # model structure: https://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/static/age.prototxt
        # pre-trained weights: https://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/static/dex_chalearn_iccv2015.caffemodel
        self.age_model = cv2.dnn.readNetFromCaffe(
            "data/age.prototxt", "data/dex_chalearn_iccv2015.caffemodel")
        self.fd = FaceDetector()

    def predict_age(self, img):
        """Given an image, extract the face ROI and predict its age.

        Returns a 3-tuple:
          apparent_age -- the model-predicted age of the face (-1 if no face)
          roi          -- the region of the image that contains the face
          angle        -- the rotation angle (CCW) for the roi
        """
        # extract roi and resize it to the desired dimensions for the age model
        roi, angle = self.fd.detect_face(img)
        if roi is None:
            return -1, None, 0
        roi_resized = cv2.resize(roi, (224, 224))
        img_blob = cv2.dnn.blobFromImage(roi_resized)
        # run it through the model and return predicted age
        self.age_model.setInput(img_blob)
        age_dist = self.age_model.forward()[0]
        output_indexes = np.arange(0, 101)  # ages 0 through 100
        apparent_age = round(np.sum(age_dist * output_indexes), 2)
        return apparent_age, roi, angle
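# A minimal usage sketch for the class above, continuing the same module (cv2
# is already imported there; the image path is illustrative):
if __name__ == '__main__':
    predictor = AgePredictor()
    img = cv2.imread('sample_face.jpg')
    apparent_age, roi, angle = predictor.predict_age(img)
    if roi is None:
        print('No face found')
    else:
        print('Predicted age: {} (roi rotated {} deg CCW)'.format(apparent_age, angle))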
Example #28
class Master:
    def __init__(self, camera):
        super().__init__()
        self.head_deg, self.body_deg = 80, 90
        self.__oc = camera

        self.__robot = RobotControl()
        self.__robot.connect()
        self.__robot.activate_command_control()
        self.__robot.move()
        time.sleep(1)

        self.__fd = FaceDetector()

    def get_face(self):
        while True:
            beg = time.time()
            img = self.__oc.getFrame(CAM_ROTATION_DEG)
            tl, br, name = self.__fd.get_face(img)
            cur = time.time()
            yield tl, br, name, 1 / (cur - beg)

    def map_val(self, x, in_min=0, in_max=1000, out_min=0, out_max=100):
        return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min

    def send_to_robot(self, tl, br, name):
        if name != "Unknown":
            area = abs((tl[0] - br[0]) * (br[1] - tl[1]))
            area = min(100000, area)
            self.body_deg = abs(int(self.map_val(area, 1000, 100000, 80, 130)))
            self.head_deg = int(160 - self.map_val(tl[1], 0, 480, 60, 100))
            self.__robot.set_robot_deg(self.head_deg, self.body_deg)

        self.__robot.move()
        time.sleep(0.05)
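# A minimal driving loop for the Master class above (assumes, as a sketch,
# that get_face yields tl=None when no face is detected):
def track_face(camera):
    master = Master(camera)
    for tl, br, name, fps in master.get_face():
        if tl is not None:
            master.send_to_robot(tl, br, name)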
Example #29
 def __init__(self,
              config_path=os.path.join(
                  path.rsplit(os.path.sep, 1)[0], 'config.yml')):
     config = get_config(config_path)
     self.detector = FaceDetector(config)
     self.extractor = FeatureExtractor()
     self.embeddings = np.load(
         os.path.join(
             path.rsplit(os.path.sep, 1)[0], config['embeddings_path']))
     self.df = pd.read_csv(
         os.path.join(
             path.rsplit(os.path.sep, 1)[0], config['classes_path']))
     self.index = faiss.IndexFlatL2(128)
     self.index.add(self.embeddings)
     self.knn = config['k_neighbors']
     self.max_distance = config['max_distance']
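 # A nearest-neighbor query against the index above might look like this
 # sketch; `query` is assumed to be a (1, 128) float32 embedding produced by
 # the extractor:
 def nearest_classes(self, query):
     # faiss returns squared-L2 distances and row positions into the index
     distances, indices = self.index.search(
         np.ascontiguousarray(query, dtype=np.float32), self.knn)
     # Keep only neighbors within the configured distance budget
     hits = [int(i) for d, i in zip(distances[0], indices[0])
             if d <= self.max_distance]
     return self.df.iloc[hits]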
Example #30
def profile_face_clf(img_lst, cascade_file, scale_factor, min_neighbors,
                     min_size):
    """Classifies face based on profile image

    :param img_lst: list of image objects with profile image
    :param cascade_file, scale_factor, min_neighbors, min_size: tuning parameters
    :return pred_lst: list of predictions (face or not)
    """
    pred_lst = []

    # The detector parameters don't change per image, so build it once
    face_detector = FaceDetector(cascade_file, scale_factor, min_neighbors,
                                 min_size)
    face_detector_processor = FaceDetectorProcessor()
    face_detector_processor.detector = face_detector

    for i, img_obj in enumerate(img_lst, start=1):
        image = img_obj['face_img']

        cropped_face, face_in_square = face_detector_processor.process_image(
            image)

        print("Inserting prediction result", i)
        pred_lst.append(face_in_square)

    return pred_lst
 def __init__(self):
   gopigo.set_speed(50)
   gopigo.stop()
   self._face_detector = FaceDetector('/home/pi/opencv/data/haarcascades/haarcascade_frontalface_default.xml')
   self.sizes_calculated = False
   # calculate_sizes() fills these in from the first frame
   self.height = None
   self.width = None
   self.segment_detector = None
class ImageProcessor:
  def __init__(self):
    gopigo.set_speed(50)
    gopigo.stop()
    self._face_detector = FaceDetector('/home/pi/opencv/data/haarcascades/haarcascade_frontalface_default.xml')
    self.sizes_calculated = False
    # calculate_sizes() fills these in from the first frame
    self.height = None
    self.width = None
    self.segment_detector = None

  def faces(self, image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return self._face_detector.detect(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))

  def calculate_sizes(self, image):
    (self.height, self.width) = image.shape[:2]
    self.segment_detector = SegmentDetector(self.width)
    self.sizes_calculated = True

  def process(self, stream):
    start_time = time.time()
    image = cv2.imdecode(np.frombuffer(stream.getvalue(), dtype=np.uint8), 1)

    if not self.sizes_calculated:
      self.calculate_sizes(image)

    faceRects = self.faces(image)
    for (x, y, w, h) in faceRects:
      cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

    if len(faceRects) > 0:
      (x, y, w, h) = faceRects[0]
      self.move(x + w // 2)  # integer center of the face box
    else:
      print("no faces found")
      gopigo.stop()

    print(time.time() - start_time)
    #pdb.set_trace()

    cv2.line(image, (self.segment_detector.left_cutoff, 0), (self.segment_detector.left_cutoff, self.height), (255, 0, 0), 1)
    cv2.line(image, (self.segment_detector.right_cutoff, 0), (self.segment_detector.right_cutoff, self.height), (255, 0, 0), 1)

    return cv2.imencode('.jpg', image)[1].tobytes()

  def move(self, horiz_x):
    segment = self.segment_detector.segment(horiz_x)
    print(segment)
    if segment == 'left':
      gopigo.set_speed(10)
      gopigo.right_rot()
    elif segment == 'right':
      gopigo.set_speed(10)
      gopigo.left_rot()
    elif segment == 'centre':
      gopigo.set_speed(50)
      gopigo.fwd()
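# SegmentDetector isn't included in this listing; a minimal sketch consistent
# with how it's used above (left/right cutoffs splitting the frame into
# thirds, and segment(x) returning 'left', 'centre' or 'right'):
class SegmentDetector:
  def __init__(self, width):
    self.left_cutoff = width // 3
    self.right_cutoff = 2 * width // 3

  def segment(self, x):
    if x < self.left_cutoff:
      return 'left'
    if x > self.right_cutoff:
      return 'right'
    return 'centre'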
Example #33
class FaceDetectorProcessor(ImageProcessor):
	"""FaceDetector processing class extending the Image processing
	base class, attempts to determine if there is a face in the image

	Parameters:
	-----------
	cascade_file: path to the xml cascade classifier parameters

	"""

	def __init__(self, cascade_file='haarcascade_frontalface_default.xml'):
		self.detector = FaceDetector(cascade_file)
		self.preprocessor = GrayscaleProcessor()


	def process_image(self, image):
		"""Process the image by determining if there is a face

		:param image: image as numpy array
		:return: cropped face image, and boolean indicating whether 
		there is a face
		"""
		gray = self.preprocessor.process_image(image)

		face = self.detector.detect_face(gray)

		if len(face) == 0:
			return image, False

		x, y, w, h = face

		cropped_face = image[y:y+h, x:x+w] 

		return cropped_face, True


	def save_image(self, image, user_id, photo_id):
		"""Saves the image to a temporary directory in the current working 
		folder with a concatenation of user_id and photo_id as the filename

		:param image: image as a numpy array
		:param user_id: user_id of the face image
		:param photo_id: photo_id of the original Instagram image (includes the .jpg postfix)
		:return: file path name
		"""
		path = os.path.dirname(__file__)
		path = os.path.join(path, 'tmp')
		if not os.path.exists(path):
			os.mkdir(path)
		fname = os.path.join(path, str(user_id) + str(photo_id))
		cv2.imwrite(fname, image)
		return fname
Example #34
	def __init__(self):
		if platform.system().lower() == 'darwin':
			self.camera = Camera()
			self.camera.init(cameraNumber=0, win=(640, 480))
		elif platform.system().lower() == 'linux':
			self.camera = Camera(cam='pi')
			self.camera.init(win=(640, 480))
		else:
			print('Sorry, platform not supported')
			exit()

		self.balltracker = BallTracker()

		self.face = FaceDetector()
Example #35
from face_detector import FaceDetector
import argparse
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-f", "--face", required = True, help = "Face cascade pathname")
ap.add_argument("-i", "--image", required = True, help = "Image pathname")
args = vars(ap.parse_args())
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

fd = FaceDetector(args["face"])
faceRects = fd.detect(gray, scaleFactor=1.2)
print("I found %d face(s)" % len(faceRects))

for (x, y, w, h) in faceRects:
	cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

cv2.imshow("Faces", image)
cv2.waitKey(0)
Example #36
        person_name = input("Please enter the person's name: ")  # raw_input under Python 2

    frame_class = TrainingMode(person_name, None, new_model_name)

if args["update_model"]:
    model_path = args["update_model"]
    # TODO: Check if file exists
    frame_class = TrainingMode(model_path, None)

if args["model"]:
    model_path = args["model"]  # Model file
    # TODO: Check if file exists
    frame_class = ModelMode(model_path)

# Face detection is needed in all modes
face_detector = FaceDetector("haarcascade_frontalface_default.xml")
cap = cv2.VideoCapture(0)

while True:  # Main loop
    ret, frame = cap.read()
    frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)

    ROIs = []  # Array for segmented faces
    ROIs_coordinates = []  # Array for x,y coordinates of segmented faces to draw bounding boxes

    faces = face_detector.detect(frame)
    for (x, y, w, h) in faces:
        ROIs.append(frame[y : y + h, x : x + w])
        ROIs_coordinates.append((x, y))
        # cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255))
Example #37
	def __init__(self, cascade_file='haarcascade_frontalface_default.xml'):
		self.detector = FaceDetector(cascade_file)
		self.preprocessor = GrayscaleProcessor()
from face_detector import FaceDetector
import image_utils
import argparse
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-f", "--face", required=True,
                help="path to where the face cascade resides")
ap.add_argument("-v" "--video",
                help="path to the (optional) video file")
args = vars(ap.parse_args())

# Create a FaceDetector using a serialized classifier
# that's trained specifically for frontal face detection
fd = FaceDetector(args["face"])

# If the user didn't specify a video file,
# then we'll assume that we should try
# to capture video directly from the
# user's computer webcam
if not args.get("video", False):
    camera = cv2.VideoCapture(0)
# If the user did specify a video file,
# then we'll use that as our video source
else:
    camera = cv2.VideoCapture(args["video"])

# Loop over all the frames in the video and
# detect the faces in each individual frame
# until we either run out of frames (as would
# happen with a video file), or the user opts to quit
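# The loop body itself is cut off in this listing; a sketch consistent with
# the detector API used in the image example above:
while True:
    ret, frame = camera.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    for (x, y, w, h) in fd.detect(gray, scaleFactor=1.2):
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow("Faces", frame)
    # Quit on 'q' or when the video runs out of frames
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
camera.release()
cv2.destroyAllWindows()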
Example #39
class CameraServer(object):
	"""
	Streams camera images as fast as possible
	"""
	camera = None

	def __init__(self):
		if platform.system().lower() == 'darwin':
			self.camera = Camera()
			self.camera.init(cameraNumber=0, win=(640, 480))
		elif platform.system().lower() == 'linux':
			self.camera = Camera(cam='pi')
			self.camera.init(win=(640, 480))
		else:
			print('Sorry, platform not supported')
			exit()

		self.balltracker = BallTracker()

		self.face = FaceDetector()

	def __del__(self):
		if self.camera:
			self.camera.close()

	def start(self):
		self.run()

	def join(self):
		pass

	def run(self):
		print('Publishing ball {}:{}'.format('0.0.0.0', '9000'))
		pub_ball = zmq.Pub(('0.0.0.0', 9000))
		print('Publishing image_color {}:{}'.format('0.0.0.0', '9010'))
		pub_image = zmq.Pub(('0.0.0.0', 9010))

		try:
			while True:
				ret, frame = self.camera.read()
				msg = Msg.Image()
				msg.img = frame
				pub_image.pub('image_color', msg)

				height, width = frame.shape[:2]  # shape is (rows, cols) = (height, width)
				center, radius = self.balltracker.find(frame)
				if center and radius > 10:
					x, y = center
					xx = x - width / 2
					yy = y - height / 2

					msg = Msg.Vector()
					msg.set(xx, yy, 0)
					pub_ball.pub('ball', msg)

				faces = self.face.find(frame)
				if len(faces) > 0:
					print('found a face!', faces, type(faces))
					# msg = Msg.Array()
					# msg.array = [[1,2,3,4], [4,5,6,7]]
					# msg.array = list(faces)
					# msg.array = faces
					# print('msg >>', msg, type(msg.array))
					# pub_ball.pub('faces', msg)
					# print(msg)

				# sleep(0.01)

		except KeyboardInterrupt:
			print('Ctrl-C ... exiting')
			return