Example #1
 def test_end_of_video(self):
     frame_reader = URLFrameReader(
         cam_url='%s//data/video/test-vin.mp4' % ROOT)
     for i in range(76):
         self.assertTrue(frame_reader.has_next())
         frame_reader.next_frame()
     self.assertFalse(frame_reader.has_next())
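Example #1 pins down the reader's iteration contract: has_next() stays true for exactly as many frames as the file contains, then turns false. A minimal sketch of the usual consume-all-frames loop built on that contract (the video path is a placeholder):

from frame_reader import URLFrameReader

frame_reader = URLFrameReader(cam_url='/data/video/test-vin.mp4')  # placeholder path
while frame_reader.has_next():
    frame = frame_reader.next_frame()  # one decoded frame per call
frame_reader.release()  # release() is used the same way in Example #5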
Example #2
def main(cam_url, recording_area):

    rb = RabbitMQ((Config.Rabbit.USERNAME, Config.Rabbit.PASSWORD),
                  (Config.Rabbit.IP_ADDRESS, Config.Rabbit.PORT))
    detector = MTCNNDetector(FaceGraph())
    frame_reader = URLFrameReader(cam_url)
    edit_image = utils.CropperUtils()
    face_angle = utils.FaceAngleUtils()
    feature_extractor = FacenetExtractor(FaceGraph())
    pre_process = Preprocessor(whitening)

    while frame_reader.has_next():

        embedding_images = []
        embedding_vectors = []
        display_images = []
        display_image_bounding_boxes = []

        frame = frame_reader.next_frame()
        bounding_boxes, points = detector.detect_face(frame)

        for index, bounding_box in enumerate(bounding_boxes):

            if face_angle.is_acceptable_angle(points[:, index]):

                embedding_image = edit_image.crop_face(frame, bounding_box)
                embedding_images.append(embedding_image)

                display_image, display_image_bounding_box = edit_image.crop_display_face(
                    frame, bounding_box)
                display_images.append(display_image)
                display_image_bounding_boxes.append(display_image_bounding_box)

                whitened_image = pre_process.process(embedding_image)
                embedding_vector, coeff = feature_extractor.extract_features(
                    whitened_image)

                embedding_vectors.append(embedding_vector)

        if len(embedding_vectors) > 0:

            rb.send_multi_embedding_message(display_images, embedding_vectors,
                                            recording_area, time.time(),
                                            display_image_bounding_boxes,
                                            rb.SEND_QUEUE_WORKER)
        else:
            print("No Face Detected")
Example #3
def occlusion_dection_video(video_path, detector):
    # Relies on cv2, numpy (np) and project helpers (process_result,
    # show_information, display_result_board, NO_OCCLUSION) imported elsewhere.
    frame_reader = URLFrameReader(video_path, scale_factor=1)
    frames_per_state = 4
    state_correct = 0
    current_frame = 0
    # Opening phase
    try:
        for i in range(frames_per_state):
            current_frame += 1
            frame = frame_reader.next_frame()
            detected_result = detector.detect(frame)
            frame_label = process_result(detected_result)
            if frame_label == NO_OCCLUSION:
                state_correct += 1
            # fps = "{0}/{1}".format(current_frame, frames_per_state)
            # put_text_on_image(frame, fps, BLUE, "top")
            # cv2.imshow('frame', frame)

        state_validation = state_correct >= 1
        state_correct = 0
        current_frame = 0

        # Realtime phase
        while frame_reader.has_next():
            result_board = 255 * np.ones((400, 400, 3), dtype=np.uint8)  # white board; uint8 so imshow renders it as-is
            frame = frame_reader.next_frame()
            current_frame += 1
            show_information(frame, current_frame, frames_per_state,
                             state_validation)
            detected_result = detector.detect(frame)
            frame_label = process_result(detected_result)
            if frame_label == NO_OCCLUSION:
                state_correct += 1

            if current_frame >= frames_per_state:
                state_validation = state_correct >= 1
                current_frame = 0
                state_correct = 0
            display_result_board(result_board, detected_result)
            cv2.imshow('frame', frame)
            cv2.imshow('result', result_board)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        frame_reader.release()
        cv2.destroyAllWindows()
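The bookkeeping above implements a sliding validation window: every frames_per_state frames, the state is marked valid if at least one frame in the window was labelled unoccluded. The same rule in isolation, as a minimal sketch (function and argument names are illustrative, not from the original module):

def validate_window(frame_labels, no_occlusion_label):
    # A window passes if at least one of its frames is unoccluded.
    return any(label == no_occlusion_label for label in frame_labels)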
Example #4
from face_extractor import FacenetExtractor
from tf_graph import FaceGraph
from cv_utils import show_frame, CropperUtils
from preprocess import Preprocessor
from matcher import KdTreeMatcher
from frame_reader import URLFrameReader
from face_detector import MTCNNDetector  # assumed import path; the original snippet uses MTCNNDetector without importing it
import time

matcher = KdTreeMatcher()
face_graph = FaceGraph()
face_detector = MTCNNDetector(face_graph)
feature_extractor = FacenetExtractor(face_graph)
preprocessor = Preprocessor()
frame_reader = URLFrameReader(cam_url=0, scale_factor=2)

while frame_reader.has_next():
    frame = frame_reader.next_frame()
    bounding_boxes, landmarks = face_detector.detect_face(frame)
    nrof_faces = len(bounding_boxes)
    start = time.time()  # never read in the original excerpt; see the timing sketch after this example
    for i in range(nrof_faces):
        cropped = CropperUtils.crop_face(frame, bounding_boxes[i])
        display_face, padded_bb_str = CropperUtils.crop_display_face(
            frame, bounding_boxes[i])
        reverse_face = CropperUtils.reverse_display_face(
            display_face, padded_bb_str)
        process_img = preprocessor.process(cropped)
        show_frame(reverse_face, 'Reverse')
        show_frame(cropped, 'Cropped')
        emb, coeff = feature_extractor.extract_features(process_img)
        predict_id, top_match_ids = matcher.match(emb)
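The loop assigns start = time.time() but never reads it. A hedged sketch of the per-frame timing report it presumably intended, placed at the end of the while body (an assumption, not part of the original snippet):

    elapsed = time.time() - start
    if nrof_faces > 0:
        print('processed {} face(s) in {:.3f}s'.format(nrof_faces, elapsed))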
Example #5
def generic_function(cam_url, queue_reader, area, face_extractor_model,
                     re_source):
    '''
    Main entry point: read frames from cam_url and push them through the
    detection and tracking pipeline.
    '''
    print("Generic function")
    print("Cam URL: {}".format(cam_url))
    print("Area: {}".format(area))

    # TODO: init logger, modulize this?

    # Variables for tracking faces
    frame_counter = 0

    if Config.Matcher.CLEAR_SESSION:
        clear_session_folder()

    if Config.Mode.CALC_FPS:
        start_time = time.time()
    if cam_url is not None:
        frame_reader = URLFrameReader(cam_url)
    else:
        print('Empty Image Source')
        return -1

    video_out_fps, video_out_w, video_out_h = frame_reader.get_info()
    print(video_out_fps, video_out_w, video_out_h)

    video_out = None
    if Config.Track.TRACKING_VIDEO_OUT:
        video_out = VideoHandle(time.time(), video_out_fps, int(video_out_w),
                                int(video_out_h))

    db = DashboardDatabase(use_image_id=True)
    rabbit_mq = RabbitMQ((Config.Rabbit.USERNAME, Config.Rabbit.PASSWORD),
                         (Config.Rabbit.IP_ADDRESS, Config.Rabbit.PORT))
    matcher = KdTreeMatcher()
    matcher.build(db)

    # find current track
    import glob
    tracking_dirs = glob.glob(Config.Dir.TRACKING_DIR + '/*')

    if not tracking_dirs:
        number_of_existing_trackers = 0
    else:
        tracker_ids = [
            int(tracking_dir.split('/')[-1]) for tracking_dir in tracking_dirs
        ]
        number_of_existing_trackers = max(tracker_ids) + 1

    mode = 'video'  # video, live
    '''
    # Feature 1: Find Merge Split
    splitMerge = pipe.SplitMergeThread(database=db, rabbit_mq=rabbit_mq, matcher=matcher)
    splitMerge.daemon = True
    splitMerge.start()

    # Feature 2: Find similar
    findSimilarFaceThread = pipe.FindSimilarFaceThread(database=db, rabbit_mq=rabbit_mq)
    findSimilarFaceThread.daemon = True
    findSimilarFaceThread.start()
    '''

    # main program stage
    stageDetectFace = pipe.Stage(pipe.FaceDetectWorker, 1)
    stagePreprocess = pipe.Stage(pipe.PreprocessDetectedFaceWorker, 1)
    stageDistributor = pipe.Stage(pipe.FaceDistributorWorker, 1)
    stageExtract = pipe.Stage(pipe.FaceExtractWorker, 1)
    stageTrack = pipe.Stage(pipe.FullTrackTrackingWorker,
                            1,
                            area=area,
                            database=db,
                            matcher=matcher,
                            init_tracker_id=number_of_existing_trackers)
    stageResultToTCH = pipe.Stage(pipe.SendToDashboardWorker,
                                  1,
                                  database=db,
                                  rabbit_mq=rabbit_mq)
    stageStorage = pipe.Stage(pipe.DashboardStorageWorker, 1)
    stageDatabase = pipe.Stage(pipe.DashboardDatabaseWorker, 1, database=db)

    stageDetectFace.link(stagePreprocess)
    stagePreprocess.link(stageDistributor)
    stageDistributor.link(stageExtract)
    stageExtract.link(stageTrack)
    stageTrack.link(stageResultToTCH)
    stageTrack.link(stageStorage)
    stageTrack.link(stageDatabase)

    if Config.Track.TRACKING_VIDEO_OUT:
        stageVideoOut = pipe.Stage(pipe.VideoWriterWorker,
                                   1,
                                   database=db,
                                   video_out=video_out)
        stageTrack.link(stageVideoOut)

    pipeline = pipe.Pipeline(stageDetectFace)

    print('Begin')
    try:
        while frame_reader.has_next():
            #continue
            frame = frame_reader.next_frame()
            if frame is None:
                if mode == 'video':
                    print("Wait for executor to finish it jobs")
                    pipeline.put(None)
                    break
                if mode == 'live':
                    if re_source:
                        print('Trying to connect the stream again ...')
                        if cam_url is not None:
                            frame_reader = URLFrameReader(cam_url,
                                                          scale_factor=1,
                                                          should_crop=True)
                    continue

            print('Read frame', frame_counter, frame.shape)

            if frame_counter % Config.Frame.FRAME_INTERVAL == 0:
                # timer = Timer(frame_counter)
                task = pipe.Task(pipe.Task.Frame)
                task.package(frame=frame, frame_info=frame_counter)
                pipeline.put(task)
                # pipeline.put((frame, frame_counter, timer))

            frame_counter += 1

        if Config.Mode.CALC_FPS:
            print('Time elapsed: {}'.format(time.time() - start_time))
            print('Avg FPS: {}'.format(
                (frame_counter + 1) / (time.time() - start_time)))
        frame_reader.release()
        '''
        splitMerge.join()
        findSimilarFaceThread.join()
        '''

    except KeyboardInterrupt:
        if Config.Track.TRACKING_VIDEO_OUT:
            video_out.release_tmp()
        pipeline.put(None)
        print('Keyboard Interrupt !!! Release All !!!')
        if Config.Mode.CALC_FPS:
            print('Time elapsed: {}'.format(time.time() - start_time))
            print('Avg FPS: {}'.format(
                (frame_counter + 1) / (time.time() - start_time)))
        frame_reader.release()
        '''
        splitMerge.join()
        findSimilarFaceThread.join()
        '''
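The tracker-id recovery near the top of generic_function (the glob over Config.Dir.TRACKING_DIR) is a self-contained idea worth isolating. A standalone sketch of the same logic, with os.path.basename swapped in for the '/'-split so trailing separators are also handled:

import glob
import os

def next_tracker_id(tracking_dir):
    # Subdirectories are named by integer tracker id; resume after the largest one.
    dirs = glob.glob(os.path.join(tracking_dir, '*'))
    if not dirs:
        return 0
    return max(int(os.path.basename(d)) for d in dirs) + 1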
Example #6
 def test_invalid_url_has_no_frame(self):
     frame_reader = URLFrameReader(cam_url='abc')
     self.assertFalse(frame_reader.has_next())
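Examples #1 and #6 together suggest a cheap validity check before committing to a read loop; a minimal sketch, assuming only the has_next() behaviour demonstrated above:

from frame_reader import URLFrameReader

def open_reader(cam_url):
    # Invalid sources report no frames, so has_next() doubles as a validity probe.
    frame_reader = URLFrameReader(cam_url=cam_url)
    if not frame_reader.has_next():
        print('Empty Image Source')  # mirrors the message generic_function prints for a missing source
        return None
    return frame_reader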