Example #1
File: model_tuner.py Project: wlau88/IDA
def profile_face_clf(img_lst, cascade_file, scale_factor, min_neighbors,
                     min_size):
    """Classifies face based on profile image

    :param img_lst: list of image objects with profile image
    :param cascade_file, scale_factor, min_neighbors, min_size: tuning parameters
    :return pred_lst: list of predictions (face or not)
    """
    pred_lst = []

    i = 0

    for img_obj in img_lst:

        image = img_obj['face_img']

        face_detector = FaceDetector(cascade_file, scale_factor, min_neighbors,
                                     min_size)
        face_detector_processor = FaceDetectorProcessor()
        face_detector_processor.detector = face_detector
        cropped_face, face_in_square = face_detector_processor.process_image(
            image)

        i += 1
        print "Inserting prediction result ", i

        pred_lst.append(face_in_square)

    return pred_lst
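
A minimal usage sketch for profile_face_clf; the loader below is hypothetical, and the cascade path and tuning values mirror the ones used in Example #27:

img_lst = load_profile_images()  # hypothetical: returns dicts with a 'face_img' key
preds = profile_face_clf(img_lst,
                         cascade_file='haarcascade_frontalface_default.xml',
                         scale_factor=1.1,
                         min_neighbors=1,
                         min_size=(20, 20))
print(sum(preds), 'of', len(preds), 'profile images contain a face')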
Example #2
    def __init__(self, source_path=None):
        self.source_path = source_path
        if source_path is None:
            self.source_path = SOURCE_PATH

        self._detector = FaceDetector(self.source_path)
        self._embed = Embeddings(self.source_path)
Example #3
 def __init__(self,
              min_dx=20,
              min_da=20,
              init_num=10,
              debug=False,
              socket_host='172.18.22.12',
              socket_port=1919):
     # Init face detection on camera
     self._face_detector = FaceDetector(debug=debug)
     self._min_dx = min_dx
     self._min_da = min_da
     self._init_num = init_num
     self._debug = debug
     self._x = int()
     self._a = int()
     self.init_face()
     # Init socket connection to car
     self._socket_host = socket_host
     self._socket_port = socket_port
     self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
     try:
         self._socket.connect((self._socket_host, self._socket_port))
         print('Socket connection accepted')
     except socket.timeout:
         print('Socket connection timeout')
         exit(1)
Example #4
def init():
    # Init all variables
    global fs, fd, today_dir, index, img_path_map

    fs = FaceSearcher(**cfg.search_config)
    fd = FaceDetector(**cfg.detect_config)
    # Make folder
    today_dir = init_folder(cfg.data_path, cfg.wards)
    # If data already exists, add it to the graph
    w_paths = [os.path.join(today_dir, w, 'true') for w in cfg.wards]
    features = []
    for w_path in w_paths:
        for image_name in os.listdir(w_path):
            image_path = os.path.join(w_path, image_name)
            # Add path to path map
            img_path_map.append(image_path)
            # convert image to pytorch tensor
            image = pil_loader(image_path)
            image = ToTensor()(image)

            # extract all
            tensor = fd.extract_feature(image)
            # print(tensor.shape)

            features.append(tensor)
            index += 1
    # Add to graph
    print('Getting {} images'.format(index))
    if index > 0:
        features = np.array(features)
        # print(features.shape)
        fs.add_faces(features, np.arange(index))
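
The image loading used above comes straight from torchvision: pil_loader reads a JPEG as an RGB PIL image, and ToTensor converts it to a float tensor in [0, 1] with shape (C, H, W). A standalone illustration:

from torchvision.datasets.folder import pil_loader
from torchvision.transforms import ToTensor

img = pil_loader('face.jpg')   # PIL.Image, RGB
t = ToTensor()(img)            # torch.FloatTensor, shape (3, H, W), values in [0, 1]
print(t.shape, float(t.min()), float(t.max()))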
Example #5
def main():
    rospy.init_node("legacy_measurement", anonymous=False, log_level=rospy.DEBUG)

    # Get ROS topic from launch parameter
    input_topic = rospy.get_param("~input_topic", "/webcam/image_raw")
    rospy.loginfo("[LegacyMeasurement] Listening on topic '" + input_topic + "'")

    video_file = rospy.get_param("~video_file", "")  # empty string means no video file input
    rospy.loginfo("[LegacyMeasurement] Video file input: '" + str(video_file) + "'")

    bdf_file = rospy.get_param("~bdf_file", "")
    rospy.loginfo("[LegacyMeasurement] Bdf file: '" + str(bdf_file) + "'")

    cascade_file = rospy.get_param("~cascade_file", "")
    rospy.loginfo("[LegacyMeasurement] Cascade file: '" + str(cascade_file) + "'")

    show_image_frame = rospy.get_param("~show_image_frame", False)
    rospy.loginfo("[LegacyMeasurement] Show image frame: '" + str(show_image_frame) + "'")

    # Start heart rate measurement
    is_video = video_file != ""
    pulse_measurement = LegacyMeasurement(is_video)

    face_detector = FaceDetector(input_topic, cascade_file)
    face_detector.bottom_face_callback = pulse_measurement.on_image_frame
    face_detector.run(video_file, bdf_file, show_image_frame)

    rospy.spin()
    rospy.loginfo("[LegacyMeasurement] Shutting down")
Example #6
 def __init__(self):
     # age model
     # model structure: https://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/static/age.prototxt
     # pre-trained weights: https://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/static/dex_chalearn_iccv2015.caffemodel
     self.age_model = cv2.dnn.readNetFromCaffe(
         "data/age.prototxt", "data/dex_chalearn_iccv2015.caffemodel")
     self.fd = FaceDetector()
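
A hedged sketch of running the DEX age model loaded above (face_bgr is a hypothetical BGR face crop; preprocessing details such as mean subtraction depend on the training recipe and are assumptions here). DEX outputs a softmax over 101 age bins, so the expected age is the probability-weighted sum of the bin indices:

import cv2
import numpy as np

def estimate_age(age_model, face_bgr):
    # DEX uses a VGG-16 backbone with 224x224 inputs
    blob = cv2.dnn.blobFromImage(cv2.resize(face_bgr, (224, 224)))
    age_model.setInput(blob)
    probs = age_model.forward()[0]  # shape (101,)
    return float(np.sum(probs * np.arange(101)))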
Example #7
    def __init__(self, args):
        self.gpu_ext = args.gpu_lib
        self.allow_grow = args.allow_grow and not args.no_show

        log.info('OpenVINO Inference Engine')
        log.info('\tbuild: {}'.format(get_version()))
        core = Core()
        if args.cpu_lib and 'CPU' in {args.d_fd, args.d_lm, args.d_reid}:
            core.add_extension(args.cpu_lib, 'CPU')

        self.face_detector = FaceDetector(core,
                                          args.m_fd,
                                          args.fd_input_size,
                                          confidence_threshold=args.t_fd,
                                          roi_scale_factor=args.exp_r_fd)
        self.landmarks_detector = LandmarksDetector(core, args.m_lm)
        self.face_identifier = FaceIdentifier(core,
                                              args.m_reid,
                                              match_threshold=args.t_id,
                                              match_algo=args.match_algo)

        self.face_detector.deploy(args.d_fd, self.get_config(args.d_fd))
        self.landmarks_detector.deploy(args.d_lm, self.get_config(args.d_lm),
                                       self.QUEUE_SIZE)
        self.face_identifier.deploy(args.d_reid, self.get_config(args.d_reid),
                                    self.QUEUE_SIZE)

        log.debug('Building faces database using images from {}'.format(
            args.fg))
        self.faces_database = FacesDatabase(
            args.fg, self.face_identifier, self.landmarks_detector,
            self.face_detector if args.run_detector else None, args.no_show)
        self.face_identifier.set_faces_database(self.faces_database)
        log.info('Database is built, registered {} identities'.format(
            len(self.faces_database)))
Example #8
    def __init__(self, args):
        used_devices = set([args.d_fd, args.d_lm, args.d_hp, args.d_reid])
        self.context = InferenceContext()
        context = self.context
        context.load_plugins(used_devices, args.cpu_lib, args.gpu_lib)
        for d in used_devices:
            context.get_plugin(d).set_config(
                {"PERF_COUNT": "YES" if args.perf_stats else "NO"})

        log.info("Loading models")
        face_detector_net = self.load_model(args.m_fd)
        landmarks_net = self.load_model(args.m_lm)
        head_pose_net = self.load_model(args.m_hp)
        # face_reid_net = self.load_model(args.m_reid)

        self.face_detector = FaceDetector(face_detector_net,
                                          confidence_threshold=args.t_fd,
                                          roi_scale_factor=args.exp_r_fd)

        self.landmarks_detector = LandmarksDetector(landmarks_net)
        self.head_pose_detector = HeadPoseDetector(head_pose_net)
        self.face_detector.deploy(args.d_fd, context)
        self.landmarks_detector.deploy(args.d_lm,
                                       context,
                                       queue_size=self.QUEUE_SIZE)
        self.head_pose_detector.deploy(args.d_hp,
                                       context,
                                       queue_size=self.QUEUE_SIZE)

        log.info("Models are loaded")
 def __init__(self):
     self.face_detector = FaceDetector()
     # https://github.com/davisking/dlib-models
     self.sp = dlib.shape_predictor(
         'data/shape_predictor_5_face_landmarks.dat')
     self.facerec = dlib.face_recognition_model_v1(
         'data/dlib_face_recognition_resnet_model_v1.dat')
Example #10
def run(args):
    model = build_model()

    (clf, class_names) = read_classifier(
        os.path.join(args.data_path, 'classifier.pickle'))
    # If the classifier is None, only one face is registered
    if clf is None:
        verified_embedding, only_class = read_only_embedding(args.data_path)

    cap = cv2.VideoCapture(0)

    if not cap.isOpened():
        print("Error opening video stream or file")

    face_detector = FaceDetector()

    while cap.isOpened():
        # Capture frame-by-frame
        ret, frame = cap.read()
        if ret:

            # Detect image and write it
            faces = face_detector.detect_faces(frame)
            for face in faces:
                x, y, w, h = face
                cropped = frame[y:y + h, x:x + w]
                cropped = cv2.resize(cropped, (96, 96))
                cropped = np.around(convert_image(cropped), decimals=12)
                embedding = model.predict(np.array([cropped]))

                if clf is None:
                    dist = np.linalg.norm(verified_embedding - embedding)
                    match = dist < 0.7
                    label = only_class if match else "Unknown"
                    if args.debug:
                        label += ' (d: {})'.format(round(dist, 2))
                else:
                    predictions = clf.predict_proba(embedding)
                    pred_class = np.argmax(predictions, axis=1)[0]
                    score = round(np.max(predictions) * 100, 2)
                    match = score > 70
                    name = class_names[pred_class]
                    label = '{} ({}%)'.format(name, score)

                color = (0, 255, 0) if match else (0, 0, 255)

                draw_bbox(frame, x, y, x + w, y + h, label=label, color=color)

            cv2.imshow('Frame', frame)

            # Press Q on the keyboard to exit
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break

        else:
            break

    cap.release()
    cv2.destroyAllWindows()
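
The 0.7 cutoff on np.linalg.norm above is a plain L2 distance between embeddings; a toy check (with hypothetical 128-D vectors) of how near-duplicates fall inside it:

import numpy as np

a = np.random.rand(128)
a /= np.linalg.norm(a)                          # hypothetical unit-norm embedding
b = a + np.random.normal(scale=0.01, size=128)  # near-duplicate of the same face
print(np.linalg.norm(a - b) < 0.7)              # True for near-duplicates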
Example #11
    def __init__(self, source_path=None):
        self._source_path = source_path
        if source_path is None:
            self._source_path = SOURCE_PATH

        # self._source_path = "C:\\Users\\sivaram\\Documents\\FDR"
        self._detector = FaceDetector(self._source_path)
        self._embed = Embeddings(self._source_path)
Example #12
def predict():
    image_np = data_uri_to_cv2_img(request.values['image'])    
    # Passing the frame to the predictor
    with graph.as_default():
        faces = FaceDetector('./haarcascade_frontalface_default.xml').detect_faces(image_np)
        emotion = model.predict_from_ndarray(image_np)
        result = {'emotion': emotion, 'faces': json.dumps(faces.tolist())} \
            if len(faces) > 0 else {'emotion': 'no face detected', 'faces': json.dumps([])}
    return jsonify(result)
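
A client-side sketch for exercising this handler, assuming it is routed at /predict on a local Flask server (the route and port are assumptions):

import base64
import requests

with open('photo.jpg', 'rb') as f:
    data_uri = 'data:image/jpeg;base64,' + base64.b64encode(f.read()).decode()

resp = requests.post('http://localhost:5000/predict', data={'image': data_uri})
print(resp.json())  # e.g. {'emotion': ..., 'faces': ...}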
Example #13
    def __init__(self, args):
        used_devices = set([args.d_fd, args.d_lm, args.d_reid])
        self.context = InferenceContext()
        context = self.context
        context.load_plugins(used_devices, args.cpu_lib, args.gpu_lib)
        for d in used_devices:
            context.get_plugin(d).set_config(
                {"PERF_COUNT": "YES" if args.perf_stats else "NO"})

        log.info("Loading models")
        face_detector_net = self.load_model(args.m_fd)
        landmarks_net = self.load_model(args.m_lm)
        face_reid_net = self.load_model(args.m_reid)

        self.face_detector = FaceDetector(
            face_detector_net,
            confidence_threshold=args.t_fd,
            roi_scale_factor=args.exp_r_fd,
        )
        self.landmarks_detector = LandmarksDetector(landmarks_net)
        self.face_identifier = FaceIdentifier(face_reid_net,
                                              match_threshold=args.t_id)

        self.face_detector.deploy(args.d_fd, context)
        self.landmarks_detector.deploy(args.d_lm,
                                       context,
                                       queue_size=self.QUEUE_SIZE)
        self.face_identifier.deploy(args.d_reid,
                                    context,
                                    queue_size=self.QUEUE_SIZE)
        log.info("Models are loaded")

        if args.fc in "LOAD":
            self.faces_database = pickle.loads(open(args.fpl, "rb").read())
            log.info("Face database loaded from {}.".format(args.fpl))

        else:
            log.info("Building faces database using images from '%s'" %
                     (args.fg))
            self.faces_database = FacesDatabase(
                args.fg,
                self.face_identifier,
                self.landmarks_detector,
                self.face_detector if args.run_detector else None,
                args.no_show,
            )
            if args.fc in "SAVE":
                with open(args.fps, "wb") as f:
                    f.write(pickle.dumps(self.faces_database))
                    f.close()
                    log.info("Face database {} saved".format(args.fps))

        self.face_identifier.set_faces_database(self.faces_database)
        log.info("Database is built, registered %s identities" %
                 (len(self.faces_database)))

        self.allow_grow = args.allow_grow and not args.no_show
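
A note on the args.fc mode checks: between two strings, `in` tests substring containment, not equality, so `==` is the right comparison for matching a mode flag:

print("LO" in "LOAD")    # True: substring containment
print("" in "LOAD")      # True: the empty string is in every string
print("LOAD" == "LOAD")  # the comparison a mode check actually intends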
Example #14
def build_model():
    print("Initialisation...")
    face_recognizator = FaceRecognizator()
    face_recognizator.init()
    persons = Persons(face_recognizator)
    print("\n\nDémarrage...")
    face_detector = FaceDetector(persons)

    return face_detector
Example #15
def capture(named_path, data_path, count):
    cap = cv2.VideoCapture(0)

    # Check if camera opened successfully
    if not cap.isOpened():
        print("Error opening video stream or file")

    captured_counter = 0
    face_detector = FaceDetector()
    model = build_model()

    while cap.isOpened() and captured_counter < count:
        # Capture frame-by-frame
        ret, frame = cap.read()
        if ret:

            # Show progress bar
            draw_progressbar(frame, (captured_counter / count))

            # Detect image and write it
            faces = face_detector.detect_faces(frame)
            if len(faces) > 0:

                # Per person path
                file_path = os.path.join(named_path,
                                         str(captured_counter + 1) + '.jpg')
                print('Writing capture: ' + file_path)

                face = faces[0]  # Assume it's the only face
                x, y, w, h = face
                cropped = frame[y:y + h, x:x + w]
                cropped = cv2.resize(cropped, (96, 96))
                cv2.imwrite(file_path, cropped)
                captured_counter += 1
                draw_bbox(frame, x, y, x + w, y + h, label="Face detected")

            cv2.imshow('Frame', frame)

            # Press Q on the keyboard to exit
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break

        # Break the loop
        else:
            break

    # When everything done, release the video capture object
    cap.release()
    cv2.destroyAllWindows()

    # Build and Write the embedding file for this person
    build_embedding(model, named_path)

    # Rebuild the classifier
    build_classifier(data_path)

    print('Done!')
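
A hypothetical invocation of capture: collect 50 face crops for one person, then rebuild that person's embedding file and the shared classifier:

import os

capture(named_path=os.path.join('data', 'alice'), data_path='data', count=50)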
Example #16
    def __init__(self):
        self.fd = FaceDetector()

        # pb_path = os.path.join(MODELS_DIR, 'gaze_opt_b1.m.pb')
        # pb_path = os.path.join(MODELS_DIR, 'gaze_opt_b2.m.pb')  # 108, 180
        # pb_path = os.path.join(MODELS_DIR, 'gaze_opt_b2_small.pb')
        pb_path = os.path.join(MODELS_DIR, 'gaze_opt_b2_small.m.pb')  # 36, 60

        self.sess = self.get_model_sess(pb_path)
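
get_model_sess is not shown in the snippet; a plausible sketch, under the assumption that the .pb files are frozen TensorFlow graphs loaded TF1-style:

import tensorflow as tf

def get_model_sess(pb_path):
    # Parse the serialized GraphDef from the frozen .pb file
    graph_def = tf.compat.v1.GraphDef()
    with tf.io.gfile.GFile(pb_path, 'rb') as f:
        graph_def.ParseFromString(f.read())
    # Import it into a fresh graph and open a session on it
    graph = tf.Graph()
    with graph.as_default():
        tf.import_graph_def(graph_def, name='')
    return tf.compat.v1.Session(graph=graph)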
Example #17
def _main(cap_src):

    cap = cv2.VideoCapture(cap_src)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    face_d = FaceDetector()

    sess = onnxruntime.InferenceSession(
        f'{root_path}/pretrained/fsanet-1x1-iter-688590.onnx')

    sess2 = onnxruntime.InferenceSession(
        f'{root_path}/pretrained/fsanet-var-iter-688590.onnx')

    print('Processing frames, press q to exit application...')
    while True:
        ret, frame = cap.read()
        if not ret:
            print('Could not capture a valid frame from the video source; '
                  'check your cam/video setting...')
            break
        # Get face bounding boxes from the frame
        face_bb = face_d.get(frame)
        for (x1, y1, x2, y2) in face_bb:
            face_roi = frame[y1:y2 + 1, x1:x2 + 1]

            # Preprocess the head-pose model input
            face_roi = cv2.resize(face_roi, (64, 64))
            face_roi = face_roi.transpose((2, 0, 1))
            face_roi = np.expand_dims(face_roi, axis=0)
            face_roi = (face_roi - 127.5) / 128
            face_roi = face_roi.astype(np.float32)

            # Get head pose from both models and average
            res1 = sess.run(["output"], {"input": face_roi})[0]
            res2 = sess2.run(["output"], {"input": face_roi})[0]

            yaw, pitch, roll = np.mean(np.vstack((res1, res2)), axis=0)

            draw_axis(frame,
                      yaw,
                      pitch,
                      roll,
                      tdx=(x2 - x1) // 2 + x1,
                      tdy=(y2 - y1) // 2 + y1,
                      size=50)

            # Draw face bounding box (optional)
            # cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)

        cv2.imshow('Frame', frame)

        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
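
The (face_roi - 127.5) / 128 step above is the usual symmetric normalization: it maps uint8 pixel values into roughly [-1, 1):

import numpy as np

px = np.array([0.0, 127.5, 255.0])
print((px - 127.5) / 128)  # [-0.99609375  0.          0.99609375]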
Example #18
 def __init__(self,
              name,
              detector=None,
              embedder='face_detection_model/openface_nn4.small2.v1.t7'):
     self.name = name
     if detector is None:
         detector = FaceDetector()
     self.detector = detector
     self.embedder = cv2.dnn.readNetFromTorch(embedder)
     self.data = []
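
A hedged sketch of computing an embedding with the OpenFace model loaded above: nn4.small2 expects a 96x96 RGB face scaled to [0, 1] and returns a 128-D vector (face_bgr is a hypothetical BGR crop):

import cv2

def embed_face(embedder, face_bgr):
    blob = cv2.dnn.blobFromImage(face_bgr, scalefactor=1.0 / 255,
                                 size=(96, 96), mean=(0, 0, 0),
                                 swapRB=True, crop=False)
    embedder.setInput(blob)
    return embedder.forward().flatten()  # shape (128,)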
Example #19
 def __init__(self, base_dir):
     self.publisher = Publisher()
     self.publisher.declare_queue('hello')
     self.base_dir = base_dir
     self.csv_filename = 'screenshot_list.csv'
     self.face_detector = FaceDetector()
     # self.cap0 = cv2.VideoCapture()
     # self.cap0.open(0)
     self.cap = cv2.VideoCapture(1)  # default is 0
     self.eyegaze_process = None
Example #20
File: views.py Project: chobeat/blackbox
def _analysis(file):
    face_images = FaceDetector(file).get_faces_with_features()
    faces = [
        generate_report(url, image_id)
        for url, image_id in _save_images(face_images)
    ]

    return render_template("analysis.html",
                           faces=faces,
                           render_suggestion=render_suggestion)
Example #21
    def build(self):
        if platform == "android":
            print("Android detected. Requesting permissions")
            self.request_android_permissions()
        self.manager = AppScreenManager()
        self.manager.add_widget(Capture(name="Capture"))
        self.manager.add_widget(Editor(name="Editor"))
        self.face_detector = FaceDetector(self.user_data_dir)
        self.theme_cls.theme_style = "Dark"

        return self.manager
Example #22
def main():
    # original frame size is (720, 960)
    W = 320
    H = 240
    image_cx = W // 2
    image_cy = H // 2

    num_skip_frames = 300

    drone = tellopy.Tello()
    controller = Controller(drone, image_cx, image_cy)
    face_detector = FaceDetector()
    renderer = Renderer()
    display = PygameDisplay(W, H)

    try:
        drone.connect()
        drone.wait_for_connection(60.0)

        drone.subscribe(drone.EVENT_FLIGHT_DATA, flight_data_handler)
        container = av.open(drone.get_video_stream())

        drone.takeoff()

        while True:
            for frame in container.decode(video=0):
                if num_skip_frames > 0:
                    num_skip_frames = num_skip_frames - 1
                    continue
                start_time = time.time()
                image = np.array(frame.to_image())
                image = cv2.resize(image, (W, H))

                face = face_detector.detect(image)
                controller.control(face)
                renderer.render(image, drone_state, face)
                display.paint(image)

                time_base = max(1.0 / 60, frame.time_base)
                processing_time = time.time() - start_time
                num_skip_frames = int(processing_time / time_base)
                # print('Video stream %d FPS, frame time base=%f' % (1 / frame.time_base, frame.time_base))
                # print('Processing FPS=%d, time=%f ms, skip frames=%d' % (1 / processing_time, 1000 * processing_time, num_skip_frames))

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)

    finally:
        drone.land()
        drone.quit()
        display.dispose()
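
Worked example of the frame-skip arithmetic above: at 60 FPS, time_base is about 1/60 s per frame, so a 50 ms processing step lags the stream by 0.05 / (1/60) = 3 frames, which are then skipped:

time_base = 1.0 / 60
processing_time = 0.050
print(int(processing_time / time_base))  # 3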
Example #23
    def __init__(self, camera):
        super().__init__()
        self.head_deg, self.body_deg = 80, 90
        self.__oc = camera

        self.__robot = RobotControl()
        self.__robot.connect()
        self.__robot.activate_command_control()
        self.__robot.move()
        time.sleep(1)

        self.__fd = FaceDetector()
Example #24
def main(model):
    webcam = cv2.VideoCapture(0)
    face_detector = FaceDetector(model)

    while True:
        _, frame = webcam.read()
        face_detector.refresh(frame)
        frame = face_detector.annotate_frame()
        cv2.imshow("FaceDetector", frame)
        # cv2.imshow only renders once waitKey pumps the HighGUI event loop
        key = cv2.waitKey(1) & 0xFF
Example #25
    def __init__(self, df):
        self.video_paths = df['video_path']
        self.filenames = df.index
        self.face_dr = FaceDetector(frames_per_video=FRAMES_PER_VIDEO)

        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
        self.normalize_transform = Normalize(mean, std)

        self.video_reader = VideoReader()
        self.video_read_fn = lambda x: self.video_reader.read_frames(
            x, num_frames=FRAMES_PER_VIDEO)
Example #26
def get_img_and_lms5():
    """
    Test fetching an image and its face landmarks
    """
    img_path = os.path.join(IMGS_DIR, 'eyes_up.jpg')
    img_bgr = cv2.imread(img_path)

    fd = FaceDetector()
    main_box, face_landmarks = fd.get_main_faces_dwo(img_bgr)

    corner_list = [[2, 3], [1, 0]]  # Landmarks: left eye, right eye

    return img_bgr, face_landmarks, corner_list
Example #27
    def classify_profile_photo(self, image_metadata):
        """Classifies the face in a profile photo

        Uses the CropImageProcessor to crop the image, passes it on to
        the image_processor then use the clf to classify the image

        :param image_metadata: metadata of the image 
        :return: classification result ([] if 'Content not found')

        """
        url = image_metadata['user']['profile_picture']
        image_file = requests.get(url).content

        if image_file == b'Content not found':
            return []

        # Write the downloaded bytes in binary mode
        with open('temp.jpg', 'wb') as fout:
            fout.write(image_file)

        image = cv2.imread('temp.jpg')
        height, width, channels = image.shape

        photo_id = url[url.rindex('https://') + 9:].replace('/', '@')

        clf_results = []  #list of classification results

        user_id = image_metadata['user']['id']

        #####Optimized parameters after model tuning
        face_detector = FaceDetector('haarcascade_frontalface_default.xml',
                                     1.1, 1, (20, 20))
        face_detector_processor = FaceDetectorProcessor()
        face_detector_processor.detector = face_detector

        self._image_processor = face_detector_processor
        #####

        cropped_face, face_in_square = self._image_processor.process_image(
            image)

        try:
            if face_in_square:
                #stores it in directory
                fname = self._image_processor.save_image(image=cropped_face,
                                                         user_id=user_id,
                                                         photo_id=photo_id)
                input_image = caffe.io.load_image(fname)
                clf_results.append((user_id, photo_id, self._clf(input_image)))
        except Exception as e:
            print(str(e))

        return clf_results
Example #28
    def __init__(self, varsd):
        used_devices = set([varsd["d_fd"], varsd["d_lm"], varsd["d_reid"]])
        self.context = InferenceContext(used_devices, varsd["cpu_lib"],
                                        varsd["gpu_lib"], varsd["perf_stats"])
        context = self.context

        log.info("Loading models")
        face_detector_net = self.load_model(varsd["m_fd"])

        assert (varsd["fd_input_height"] and varsd["fd_input_width"]) or \
               (varsd["fd_input_height"]==0 and varsd["fd_input_width"]==0), \
            "Both -fd_iw and -fd_ih parameters should be specified for reshape"

        if varsd["fd_input_height"] and varsd["fd_input_width"]:
            face_detector_net.reshape({
                "data":
                [1, 3, varsd["fd_input_height"], varsd["fd_input_width"]]
            })
        landmarks_net = self.load_model(varsd["m_lm"])
        face_reid_net = self.load_model(varsd["m_reid"])

        self.face_detector = FaceDetector(face_detector_net,
                                          confidence_threshold=varsd["t_fd"],
                                          roi_scale_factor=varsd["exp_r_fd"])

        self.landmarks_detector = LandmarksDetector(landmarks_net)
        self.face_identifier = FaceIdentifier(face_reid_net,
                                              match_threshold=varsd["t_id"],
                                              match_algo=varsd["match_algo"])

        self.face_detector.deploy(varsd["d_fd"], context)
        self.landmarks_detector.deploy(varsd["d_lm"],
                                       context,
                                       queue_size=self.QUEUE_SIZE)
        self.face_identifier.deploy(varsd["d_reid"],
                                    context,
                                    queue_size=self.QUEUE_SIZE)
        log.info("Models are loaded")

        log.info("Building faces database using images from '%s'" %
                 (varsd["fg"]))
        self.faces_database = FacesDatabase(
            varsd["fg"], self.face_identifier, self.landmarks_detector,
            self.face_detector if varsd["run_detector"] else None,
            varsd["no_show"])
        self.face_identifier.set_faces_database(self.faces_database)
        log.info("Database is built, registered %s identities" % \
            (len(self.faces_database)))

        self.allow_grow = varsd["allow_grow"] and not varsd["no_show"]
Example #29
    def __init__(self):
        if platform.system().lower() == 'darwin':
            self.camera = Camera()
            self.camera.init(cameraNumber=0, win=(640, 480))
        elif platform.system().lower() == 'linux':
            self.camera = Camera(cam='pi')
            self.camera.init(win=(640, 480))
        else:
            print('Sorry, platform not supported')
            exit()

        self.balltracker = BallTracker()

        self.face = FaceDetector()
Example #30
def main():
    W = 432
    H = 240
    image_cx = W // 2
    image_cy = H // 2

    face_detector = FaceDetector()
    renderer = Renderer()
    #display = Cv2Display2D()
    display = PygameDisplay(W, H)

    try:
        container = av.open('video/ball_tracking_example.mp4')
        num_skip_frames = 0
        while True:
            for frame in container.decode(video=0):
                if num_skip_frames > 0:
                    num_skip_frames = num_skip_frames - 1
                    continue
                start_time = time.monotonic()

                image = np.array(frame.to_image())
                image = cv2.resize(image, (W, H))

                face = face_detector.detect(image)
                renderer.render(image, drone_state, face)

                if face is not None:
                    offset_x = face.cx - image_cx
                    offset_y = face.cy - image_cy
                    print(offset_x, offset_y)

                display.paint(image)

                time_base = max(1 / 60, frame.time_base)
                processing_time = time.monotonic() - start_time
                num_skip_frames = int(processing_time / time_base)
                print('Video stream %d FPS, frame time base=%f' %
                      (1 / frame.time_base, frame.time_base))
                print('Processing FPS=%d, time=%f ms, skip frames=%d' %
                      (1 / processing_time, 1000 * processing_time,
                       num_skip_frames))

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)

    finally:
        display.dispose()