Example #1
def find_two_consistent_faces(video):

    m = MTCNNDetector()

    l_faces0 = m.detect(video[0])

    isSingleFace0 = len(l_faces0) == 1

    if not isSingleFace0:
        print("No single face in frame 0; skipping.")
        return (None, None)

    l_faces1 = m.detect(video[31])

    isSingleFace1 = len(l_faces1) == 1

    if not isSingleFace1:
        print("No single face in frame 31; skipping.")
        return (None, None)

    # TODO: Check size and position differences between the two faces; skip on failure.

    face0 = l_faces0[0]
    face1 = l_faces1[0]

    return (face0, face1)
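
A minimal usage sketch for the function above (read_video, which appears in later examples, and the file name are assumptions):

video = read_video("clip.mp4", 32)  # assumed helper; frames indexed 0..31
face0, face1 = find_two_consistent_faces(video)
if face0 is not None:
    print("Single face found in both frame 0 and frame 31")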
Example #2
def load_and_align_data(face_graph, image_paths, do_display):
    detector = MTCNNDetector(face_graph)
    nrof_samples = len(image_paths)
    img_list = [None] * nrof_samples
    for i in range(nrof_samples):
        img = misc.imread(os.path.expanduser(image_paths[i]))
        bounding_boxes, landmarks = detector.detect_face(img)

        all_face_marks_x = landmarks[0:5, :]
        all_face_marks_y = landmarks[5:10, :]

        face_marks_x = all_face_marks_x[:, 0]
        face_marks_y = all_face_marks_y[:, 0]

        print(face_marks_x)
        print(face_marks_y)

        # draw landmarks on image
        if do_display:
            frame = img.copy()
            for x, y in zip(face_marks_x, face_marks_y):
                cv2.circle(frame, (int(x), int(y)), 2, (0, 0, 255), -1)
            cv2.imshow(image_paths[i], frame)
            cv2.waitKey()
        bounding_box = bounding_boxes[0]
        img_list[i] = CropperUtils.crop_face(img, bounding_box)
    images = np.stack(img_list)
    return images
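
The landmark slicing above relies on MTCNN's 10-row layout: five x-coordinates stacked over five y-coordinates, one column per detected face. A small helper making that pairing explicit (the helper name is illustrative):

def landmarks_to_points(landmarks, face_index=0):
    # Column face_index of the 10xN array: rows 0-4 are x, rows 5-9 are y.
    xs = landmarks[0:5, face_index]
    ys = landmarks[5:10, face_index]
    return list(zip(xs, ys))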
Example #3
def generate_video_sample(cam_url, area):
    '''generating'''
    print('Generating... ')
    print("Cam URL: {}".format(cam_url))
    print("Area: {}".format(area))
    # Variables for tracking faces
    frame_counter = 0

    # Variables holding the correlation trackers and the name per faceid
    frame_sample = {}

    face_rec_graph = FaceGraph()
    face_extractor = FacenetExtractor(face_rec_graph)
    detector = MTCNNDetector(face_rec_graph)
    preprocessor = Preprocessor()
    if cam_url is not None:
        frame_reader = URLFrameReader(cam_url, scale_factor=1)
    else:
        frame_reader = RabbitFrameReader(rabbit_mq)

    try:
        while True:  # frame_reader.has_next():
            frame = frame_reader.next_frame()
            if frame is None:
                print("Waiting for a new frame")
                continue
            frame_sample[frame_counter] = FrameSample()
            frame_sample[frame_counter].read_image = frame

            print("Frame ID: %d" % frame_counter)

            if frame_counter % Config.Frame.FRAME_INTERVAL == 0:
                origin_bbs, points = detector.detect_face(frame)
                frame_sample[frame_counter].origin_bbs = origin_bbs
                frame_sample[frame_counter].points = points
                for origin_bb in origin_bbs:
                    cropped_face = CropperUtils.crop_face(frame, origin_bb)

                    # Calculate embedding
                    preprocessed_image = preprocessor.process(cropped_face)
                    emb_array, coeff = face_extractor.extract_features(
                        preprocessed_image)
                    frame_sample[frame_counter].embs.append(emb_array)

            frame_counter += 1
    except KeyboardInterrupt:
        print('Keyboard Interrupt !!! Release All !!!')
        print('Saved this video sample as ../session/db/sample.pkl')
        PickleUtils.save_pickle('../session/db/sample.pkl', frame_sample)
Example #4
def main(cam_url, recording_area):

    rb = RabbitMQ((Config.Rabbit.USERNAME, Config.Rabbit.PASSWORD),
                  (Config.Rabbit.IP_ADDRESS, Config.Rabbit.PORT))
    detector = MTCNNDetector(FaceGraph())
    frame_reader = URLFrameReader(cam_url)
    edit_image = utils.CropperUtils()
    face_angle = utils.FaceAngleUtils()
    feature_extractor = FacenetExtractor(FaceGraph())
    pre_process = Preprocessor(whitening)

    while frame_reader.has_next():

        embedding_images = []
        embedding_vectors = []
        display_images = []
        display_image_bounding_boxes = []

        frame = frame_reader.next_frame()
        bounding_boxes, points = detector.detect_face(frame)

        for index, bounding_box in enumerate(bounding_boxes):

            if face_angle.is_acceptable_angle(points[:, index]):

                embedding_image = edit_image.crop_face(frame, bounding_box)
                embedding_images.append(embedding_image)

                display_image, display_image_bounding_box = edit_image.crop_display_face(
                    frame, bounding_box)
                display_images.append(display_image)
                display_image_bounding_boxes.append(display_image_bounding_box)

                whitened_image = pre_process.process(embedding_image)
                embedding_vector, coeff = feature_extractor.extract_features(
                    whitened_image)

                embedding_vectors.append(embedding_vector)

        if len(embedding_vectors) > 0:

            rb.send_multi_embedding_message(display_images, embedding_vectors,
                                            recording_area, time.time(),
                                            display_image_bounding_boxes,
                                            rb.SEND_QUEUE_WORKER)
        else:
            print("No Face Detected")
Example #5
def __init__(cls):
    cls.face_rec_graph_face = FaceGraph()
    cls.coeff_graph = FaceGraph()
    cls.face_extractor = FacenetExtractor(
        cls.face_rec_graph_face, model_path=Config.Model.FACENET_DIR)
    cls.coeff_extractor = FacenetExtractor(
        cls.coeff_graph, model_path=Config.Model.COEFF_DIR)
    cls.detector = MTCNNDetector(
        cls.face_rec_graph_face, scale_factor=Config.MTCNN.SCALE_FACTOR)
    cls.preprocessor = Preprocessor()
Example #6
def test_register_function():
    face_rec_graph = FaceGraph()
    face_extractor = FacenetExtractor(
        face_rec_graph, model_path=Config.FACENET_DIR)
    detector = MTCNNDetector(face_rec_graph)
    preprocessor = Preprocessor()
    register_function(
        detector,
        preprocessor,
        face_extractor,
    )
Example #7
def prepare_process(iPart):

    # TODO: Prepare all (original, fake) pairs for all parts; issue tasks for all pairs and multiprocess over those, not over iPart.

    l_d = read_metadata(iPart)
    part_dir = get_part_dir(iPart)  # "part_dir" avoids shadowing the dir() builtin
    output_dir = get_output_dir()

    mtcnn_detector = MTCNNDetector()

    num_originals = len(l_d)

    l_part_task = []

    for idx_key in range(num_originals):

        current = l_d[idx_key]

        original = part_dir / current[0]

        # Pick the first fake. TODO: other fakes could be picked for more data (one set per epoch).
        num_fakes = len(current[1])

        if num_fakes == 0:
            print(
                f"p_{iPart}_{str(original.stem)}: No associated fakes. Skipping."
            )
            continue

        fake = part_dir / current[1][0]

        isPairFound = original.is_file() and fake.is_file()

        if not isPairFound:
            print(f"p_{iPart}: Original and/or fake not found. Skipping.")
            continue

        file_pair_out = output_dir / f"Line_Pair_p_{iPart}_{str(original.stem)}_{str(fake.stem)}.npy"
        file_real_out = output_dir / f"Line_Test_p_{iPart}_{str(original.stem)}_real.npy"
        file_fake_out = output_dir / f"Line_Test_p_{iPart}_{str(fake.stem)}_fake.npy"

        isExisting = (file_pair_out.is_file() and file_real_out.is_file()
                      and file_fake_out.is_file())

        if isExisting:
            continue

        l_part_task.append((iPart, original, fake))

    return l_part_task
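
The TODO at the top of this function suggests multiprocessing over the returned pair tasks. A minimal sketch of that consumption, assuming a hypothetical worker process_pair that handles one (iPart, original, fake) tuple:

from multiprocessing import Pool

tasks = prepare_process(0)
with Pool(processes=4) as pool:
    pool.map(process_pair, tasks)  # process_pair is a hypothetical worker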
Example #8
class MTCNNFaceDetection(object):
    '''
    Use mtcnn model to detect face in frame
    '''
    def __init__(self):
        face_graph = FaceGraph()
        self.detector = MTCNNDetector(face_graph)

    def detect(self, image, **kwargs):
        faces = []
        margin = 0
        img_h, img_w = image.shape[:2]
        origin_bbs, landmarks = self.detector.detect_face(image, **kwargs)

        landmarks = landmarks.reshape(-1, 10).tolist()
        for origin_bb, landmark in zip(origin_bbs, landmarks):
            face = {}
            det = np.squeeze(origin_bb)
            bb = np.zeros(4, dtype=np.int32)
            bb[0] = np.maximum(det[0] - margin / 2, 0)
            bb[1] = np.maximum(det[1] - margin / 2, 0)
            bb[2] = np.minimum(det[2] + margin / 2, img_w)
            bb[3] = np.minimum(det[3] + margin / 2, img_h)
            x = bb[0]
            y = bb[1]
            w = bb[2] - bb[0]
            h = bb[3] - bb[1]
            face['face'] = (x, y, w, h)

            points = []
            # 0: right eye
            # 1: left eye
            # 2: nose
            # 3: right mouth corner
            # 4: left mouth corner
            for ix, iy in zip(landmark[:5], landmark[5:]):
                points.append((ix, iy))
            # Draw right eye
            eye_width = int(w / 3)
            right_eye = draw_eye(image, points[0], eye_width, eye_width)
            left_eye = draw_eye(image, points[1], eye_width, eye_width)
            # Draw mouth
            mouth = draw_mouth(image, points[3], points[4])

            face['right_eye'] = right_eye
            face['left_eye'] = left_eye
            face['mouth'] = mouth
            faces.append(face)
        return faces
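
A minimal usage sketch for the class above; the image path is illustrative:

import cv2

face_detection = MTCNNFaceDetection()
image = cv2.imread('frame.jpg')  # illustrative path
for face in face_detection.detect(image):
    x, y, w, h = face['face']
    cv2.rectangle(image, (int(x), int(y)), (int(x + w), int(y + h)),
                  (0, 255, 0), 2)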
Example #9
def process_part(iCluster):

    print(f"process_part {iCluster} starting...")

    assert get_ready_data_dir().is_dir()

    output_dir = get_ready_data_dir()

    v = VideoManager()

    l_d = v.get_cluster_metadata(iCluster)

    mtcnn_detector = MTCNNDetector()

    for entry in l_d:
        orig_path = entry[0]

        file_base = output_dir / f"c_{iCluster}_{orig_path.stem}"

        filename_df = file_base.with_suffix(".pkl")
        filename_np = file_base.with_suffix(".npy")

        isJobDone = filename_df.is_file() and filename_np.is_file()

        if isJobDone:
            continue


        print(str(orig_path))

        df = sample_video_set(mtcnn_detector, entry)
        print(f"Saving {str(file_base)}...")
        df.to_pickle(filename_df)

        print(f"Preprocessing {str(file_base)}...")

        data = np.stack(df.data.values)
        data = preprocess_input(data)
        np.random.shuffle(data)

        np.save(filename_np, data)

        print(f"Videoset {str(file_base)} done.")


    print(f"Cluster {iCluster} done.")
Example #10
def run_one():
    input_dir = get_part_dir(0)
    mtcnn_detector = MTCNNDetector()

    l_files = sorted(x for x in input_dir.iterdir() if x.suffix == '.mp4')

    video_path = input_dir / "nrdnytturz.mp4"

    assert video_path.is_file()

    #video_path = l_files[126]

    video_size = 32

    W = 256
    H = 1

    video = read_video(video_path, video_size)

    x_max = video.shape[2]
    y_max = video.shape[1]
    z_max = video.shape[0]

    faces = find_two_consistent_faces(mtcnn_detector, video)

    featureset = ['l_mouth', 'r_mouth']

    anSample = sample_feature(video, faces, featureset, W, H, True)

    l_feature0 = np.array((*_get_integer_coords_single_feature(
        x_max, y_max, faces[0], featureset[0]), 0))
    r_feature0 = np.array((*_get_integer_coords_single_feature(
        x_max, y_max, faces[0], featureset[1]), 0))

    vector = r_feature0 - l_feature0

    length_vector = np.sqrt(vector.dot(vector))

    anSampleOut = straighten_sample(anSample, length_vector)

    anSample = anSample.reshape(-1)
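
The square-root-of-dot-product above is the Euclidean norm; np.linalg.norm expresses the same computation directly:

length_vector = np.linalg.norm(r_feature0 - l_feature0)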
Example #11
def main(matcher_path, test_path):
    m_trackers_paths = glob.glob(matcher_path + '/*')
    t_trackers_paths = glob.glob(test_path + '/*')
    tracker_manager = TrackerManager('test')
    matcher = FaissMatcher()
    preprocessor = Preprocessor()
    align_preprocessor = Preprocessor(algs=align_and_crop)
    face_rec_graph_face = FaceGraph()
    face_extractor = FacenetExtractor(face_rec_graph_face,
                                      model_path=Config.FACENET_DIR)
    detector = MTCNNDetector(face_rec_graph_face)

    # create matcher
    print('Creating matcher ...')
    for m_dir in m_trackers_paths:
        print('Processing ' + m_dir)
        face_id = m_dir.split('/')[-1]
        embs, labels = extract_embs(m_dir, preprocessor, face_extractor, None)
        face_id_labels = [face_id for i in range(len(labels))]
        matcher.update(embs, face_id_labels)

    # create tracker
    print('Creating trackers')
    for t_dir in t_trackers_paths:
        print('Processing ' + t_dir)
        embs, _ = extract_embs(t_dir, preprocessor, face_extractor, None)
        track_id = int(t_dir.split('/')[-1])

        first_emb = embs.pop()
        face_info = FaceInfo(None, first_emb, None, None, None, None)
        tracker_manager.current_trackers[track_id] = Tracker(
            track_id, face_info, None)
        for emb in embs:
            face_info = FaceInfo(None, emb, None, None, None, None)
            tracker_manager.current_trackers[track_id].update(face_info, None)
        print('Number of trackers: {}'.format(len(tracker_manager.current_trackers)))

    # test matching
    print('Test matching ...')
    for fid in tracker_manager.current_trackers:
        print('Processing: ' + str(fid))
        tops = tracker_manager.recognize_current_tracker(fid, matcher, None)
        print('Track_id {}, recognize: {}'.format(fid, tops))
Example #12
def process_part(iPart):

    l_d = read_metadata(iPart)

    input_dir = get_part_dir(iPart)

    output_dir = get_output_dir()

    mtcnn_detector = MTCNNDetector()

    for o_set in l_d:

        l_samples = []

        original_path = input_dir / o_set[0]

        #print(f"{iPart}: {original_path.stem}...")

        r_data = sample_video_safe(mtcnn_detector, original_path, False)

        if r_data is None:
            print(f"{original_path.stem}: Bad original. Skipping set.")
            continue

        l_samples.append(r_data)

        for fake_path in o_set[1]:
            f_data = sample_video_safe(mtcnn_detector, input_dir / fake_path,
                                       False)

            if f_data is None:
                continue

            l_samples.append(f_data)

        if len(l_samples) >= 2:
            data = np.concatenate(l_samples)
            filename = f"p_{iPart}_{original_path.stem}.npy"
            output_path = output_dir / filename
            np.save(output_path, data)
        else:
            print(f"{original_path.stem}: No good fakes. Skipping set.")
Example #13
import mask_glasses

from cv_utils import CropperUtils
from frame_process import ROIFrameProcessor
import cv2
from preprocess import Preprocessor, normalization
import numpy as np
import click
from scipy import misc
from config import Config
from cv_utils import create_if_not_exist
import time
import pipe
# Imports needed by the code below (module paths as used elsewhere in these examples)
from frame_reader import URLFrameReader
from face_detector import MTCNNDetector
from tf_graph import FaceGraph

frame_reader = URLFrameReader(0)
face_detector = MTCNNDetector(FaceGraph())
frame_processor = ROIFrameProcessor(scale_factor=2)

mask_classifier = mask_glasses.MaskClassifier()
glasses_classifier = mask_glasses.GlassesClassifier()

preprocessor = Preprocessor(algs=normalization)

MASK_DIR = '%s/data/Mask/' % Config.ROOT
NOMASK_DIR = '%s/data/No_Mask/' % Config.ROOT
GLASSES_DIR = '%s/data/Glasses/' % Config.ROOT
NOGLASSES_DIR = '%s/data/No_Glasses/' % Config.ROOT

create_if_not_exist(MASK_DIR)
create_if_not_exist(NOMASK_DIR)
create_if_not_exist(GLASSES_DIR)
create_if_not_exist(NOGLASSES_DIR)
Example #14
    args = parser.parse_args()

    # Run
    if args.video_out is not None:
        Config.Track.TRACKING_VIDEO_OUT = True
        Config.Track.VIDEO_OUT_PATH = args.video_out
    Config.SEND_QUEUE_TO_DASHBOARD = args.dashboard
    Config.Matcher.CLEAR_SESSION = args.clear_session
    Config.Track.SEND_RECOG_API = args.rethinkdb

    face_rec_graph_face = FaceGraph()
    coeff_graph = FaceGraph()
    face_extractor = FacenetExtractor(
        face_rec_graph_face, model_path=args.face_extractor_model)
    coeff_extractor = FacenetExtractor(coeff_graph, model_path=Config.COEFF_DIR)
    detector = MTCNNDetector(
        face_rec_graph_face, scale_factor=Config.MTCNN.SCALE_FACTOR)
    preprocessor = Preprocessor()
    align_preprocessor = Preprocessor(algs=align_and_crop)
    aligner = AlignCustom()

    if args.test_all:
        videos = [
            os.path.join(args.cam_url, video)
            for video in os.listdir(args.cam_url)
            if not os.path.isdir(os.path.join(args.cam_url, video))
        ]
        for video in videos:
            print(video)
            generic_function(video, args.queue_reader, args.area,
                             args.face_extractor_model, args.re_source,
                             args.multi_thread)
Example #15
from face_detector import MTCNNDetector
from face_extractor import FacenetExtractor
from tf_graph import FaceGraph
from cv_utils import show_frame, CropperUtils
from preprocess import Preprocessor
from matcher import KdTreeMatcher
from frame_reader import URLFrameReader
import time

matcher = KdTreeMatcher()
face_graph = FaceGraph()
face_detector = MTCNNDetector(face_graph)
feature_extractor = FacenetExtractor(face_graph)
preprocessor = Preprocessor()
frame_reader = URLFrameReader(cam_url=0, scale_factor=2)

while frame_reader.has_next():
    frame = frame_reader.next_frame()
    bounding_boxes, landmarks = face_detector.detect_face(frame)
    nrof_faces = len(bounding_boxes)
    start = time.time()
    for i in range(nrof_faces):
        cropped = CropperUtils.crop_face(frame, bounding_boxes[i])
        display_face, padded_bb_str = CropperUtils.crop_display_face(
            frame, bounding_boxes[i])
        reverse_face = CropperUtils.reverse_display_face(
            display_face, padded_bb_str)
        process_img = preprocessor.process(cropped)
        show_frame(reverse_face, 'Reverse')
        show_frame(cropped, 'Cropped')
        emb, coeff = feature_extractor.extract_features(process_img)
Example #16
def process_part(iCluster):

    isDraw = False

    assert get_ready_data_dir().is_dir()

    output_dir = get_ready_data_dir() / f"c2_{iCluster}"

    output_dir.mkdir(exist_ok=True)

    assert output_dir.is_dir()

    v = VideoManager.VideoManager()

    l_d = v.get_cluster_metadata(iCluster)

    outputsize = 128 + 64

    mtcnn_detector = MTCNNDetector()

    # Debug leftovers: these hard-coded test paths are overwritten in the loop below.
    orig_path = Path(
        "C:\\Users\\T149900\\Downloads\\dfdc_train_part_07\\dfdc_train_part_7\\crnbqgwbmt.mp4"
    )
    assert orig_path.is_file()

    test_path = Path(
        "C:\\Users\\T149900\\Downloads\\dfdc_train_part_07\\dfdc_train_part_7\\nwzwoxfcnl.mp4"
    )
    assert test_path.is_file()

    for entry in l_d:

        orig_path = entry[0]

        print(str(orig_path))

        try:
            orig_video = read_video(orig_path, 0)
        except Exception as err:
            print(err)
            continue

        z_max = orig_video.shape[0]
        y_max = orig_video.shape[1]
        x_max = orig_video.shape[2]

        l_all = entry[1]
        l_all.append(orig_path)

        for test_path in l_all:

            print("     " + str(test_path))

            iSample = 0
            filename_base = f"{test_path.stem}"

            try:
                test_video = read_video(test_path, 0)
            except Exception as err:
                print(err)
                continue

            is_identical_format = (test_video.shape[0] == z_max
                                   and test_video.shape[1] == y_max
                                   and test_video.shape[2] == x_max)

            if not is_identical_format:
                print("Not identical formats")
                continue

            d_faces = find_spaced_out_faces_boxes(mtcnn_detector, test_video,
                                                  30)

            for i in range(10):

                z_sample = np.random.choice(range(0, z_max))

                bb_min, bb_max = get_random_face_box_from_z(
                    d_faces, z_sample, x_max, y_max, z_max)

                im_mask, im_real, im_test = cut_frame(bb_min, bb_max,
                                                      orig_video, test_video,
                                                      z_sample, -1, False)

                filename = filename_base + f"_{iSample:03d}"
                im_test.save(output_dir / (filename + "_t.png"))
                im_real.save(output_dir / (filename + "_r.png"))
                im_mask.save(output_dir / (filename + "_m.png"))
                iSample += 1
Example #17
from shutil import copyfile, move, copy, copytree
import os
from cv_utils import create_if_not_exist, FaceAngleUtils, CropperUtils
from tf_graph import FaceGraph
import argparse
from config import Config
from face_detector import MTCNNDetector
import cv2
import time
from pymongo import MongoClient
from frame_reader import URLFrameReader, RabbitFrameReader
import subprocess, re
N = 10

face_rec_graph = FaceGraph()
detector = MTCNNDetector(face_rec_graph, scale_factor=2)

mongodb_client = MongoClient(Config.MongoDB.IP_ADDRESS,
                             Config.MongoDB.PORT,
                             username=Config.MongoDB.USERNAME,
                             password=Config.MongoDB.PASSWORD)

mongodb_db = mongodb_client[Config.MongoDB.DB_NAME]
mongodb_dashinfo = mongodb_db[Config.MongoDB.DASHINFO_COLS_NAME]
mongodb_faceinfo = mongodb_db[Config.MongoDB.FACEINFO_COLS_NAME]


def get_bounding_box(original_path):
    restructured_path = original_path + "_restructured"
    create_if_not_exist(restructured_path)
    face_ids = [
Example #18
def __init__(self):
    face_graph = FaceGraph()
    self.detector = MTCNNDetector(face_graph)
Example #19
from cv_utils import decode_image, clear_session_folder
from preprocess import Preprocessor
from frame_reader import QueueFrameReader

# read config
with open('../config.txt', 'r') as f:
    configs = f.readlines()
configs = [txt_config.strip('\n') for txt_config in configs]
Config.DEMO_FOR = configs[0]
Config.Rabbit.IP_ADDRESS = configs[1]

face_rec_graph = FaceGraph()
face_extractor = FacenetExtractor(face_rec_graph, model_path=Config.FACENET_DIR)
detector = MTCNNDetector(face_rec_graph)
preprocessor = Preprocessor()
matcher = FaissMatcher()
matcher._match_case = 'TCH'
matcher.build(Config.REG_IMAGE_FACE_DICT_FILE)
rb = RabbitMQ()

frame_readers = dict()
register_command = dict()  # {session_id: [[register_name, video_path]]}
removed_sessions = Queue()
sent_msg_queue = Queue()
start_time = time.time()

while True:
    # if time.time() - start_time >= 10.0:
    #     try:
Example #20
def cam_worker_function(cam_url, area):
    '''
    Cam worker function
    '''
    print("Cam URL: {}".format(cam_url))
    print("Area: {}".format(area))

    # Modify Config
    Config.Track.TRACKING_QUEUE_CAM_TO_CENTRAL = True

    rabbit_mq = RabbitMQ((Config.Rabbit.USERNAME, Config.Rabbit.PASSWORD),
                         (Config.Rabbit.IP_ADDRESS, Config.Rabbit.PORT))

    frame_counter = 0

    # Variables holding the correlation trackers and the name per faceid
    list_of_trackers = TrackersList()

    face_rec_graph = FaceGraph()
    face_extractor = FacenetExtractor(face_rec_graph)
    detector = MTCNNDetector(face_rec_graph)
    preprocessor = Preprocessor()
    matcher = KdTreeMatcher()
    if Config.CALC_FPS:
        start_time = time.time()
    if cam_url is not None:
        frame_reader = URLFrameReader(cam_url, scale_factor=1.5)
    else:
        frame_reader = RabbitFrameReader(rabbit_mq)

    try:
        while True:  # frame_reader.has_next():
            frame = frame_reader.next_frame()
            if frame is None:
                print("Waiting for the new image")
                list_of_trackers.check_delete_trackers(matcher,
                                                       rabbit_mq,
                                                       history_mode=False)
                continue

            print("Frame ID: %d" % frame_counter)

            if Config.CALC_FPS:
                fps_counter = time.time()

            list_of_trackers.update_dlib_trackers(frame)

            if frame_counter % Config.Frame.FRAME_INTERVAL == 0:
                origin_bbs, points = detector.detect_face(frame)
                for i, origin_bb in enumerate(origin_bbs):
                    display_face, _ = CropperUtils.crop_display_face(
                        frame, origin_bb)
                    print("Display face shape")
                    print(display_face.shape)
                    if 0 in display_face.shape:
                        continue
                    cropped_face = CropperUtils.crop_face(frame, origin_bb)

                    # Calculate embedding
                    preprocessed_image = preprocessor.process(cropped_face)
                    emb_array, coeff = face_extractor.extract_features(
                        preprocessed_image)

                    # Calculate angle
                    angle = FaceAngleUtils.calc_angle(points[:, i])

                    # TODO: refactor matching_detected_face_with_trackers
                    matched_fid = list_of_trackers.matching_face_with_trackers(
                        frame, origin_bb, emb_array)

                    # Update list_of_trackers
                    list_of_trackers.update_trackers_list(
                        matched_fid, origin_bb, display_face, emb_array, angle,
                        area, frame_counter, matcher, rabbit_mq)

                    if Config.Track.TRACKING_QUEUE_CAM_TO_CENTRAL:
                        track_tuple = (matched_fid, display_face, emb_array,
                                       area, time.time(), origin_bb, angle)
                        rabbit_mq.send_tracking(
                            track_tuple,
                            rabbit_mq.RECEIVE_CAM_WORKER_TRACKING_QUEUE)

            # Check whether current trackers should be deleted
            list_of_trackers.check_delete_trackers(matcher,
                                                   rabbit_mq,
                                                   history_mode=False)

            frame_counter += 1
            if Config.CALC_FPS:
                print("FPS: %f" % (1 / (time.time() - fps_counter)))

    except KeyboardInterrupt:
        print('Keyboard Interrupt !!! Release All !!!')
        if Config.CALC_FPS:
            print('Time elapsed: {}'.format(time.time() - start_time))
            print('Avg FPS: {}'.format(
                (frame_counter + 1) / (time.time() - start_time)))
        frame_reader.release()
Example #21
print(f"Video: {file} Cluster: {iCluster} Original: {original} Part: {part}")


input_dir = get_part_dir(part)

assert (input_dir / file).is_file()
assert (input_dir / original).is_file()

video_real = read_video(input_dir / original, num_frames)
video_fake = read_video(input_dir / file, num_frames)

x_max = video_fake.shape[2]
y_max = video_fake.shape[1]

mtcnn_detector = MTCNNDetector()


l_faces_fake = _get_face_boxes(mtcnn_detector, video_fake, [num_frames//2])

if len(l_faces_fake) == 0:
    # The original had a commented-out "return" here; at module level,
    # exiting is the closest equivalent.
    raise SystemExit("No faces found in the fake video.")

l_faces_fake = l_faces_fake[num_frames // 2]

for face in l_faces_fake:
    bb_min = np.array(face['bb_min'])
    bb_max = np.array(face['bb_max'])


Example #22
IMAGES = {
    'big_face': misc.imread('%s/data/cropper/big_face.jpg' % ROOT),
    'top': misc.imread('%s/data/cropper/top.jpg' % ROOT),
    'bottom': misc.imread('%s/data/cropper/bottom.jpg' % ROOT),
    'left': misc.imread('%s/data/cropper/left.jpg' % ROOT),
    'right': misc.imread('%s/data/cropper/right.jpg' % ROOT),
    'out_range_top': misc.imread('%s/data/cropper/out_range_top.jpg' % ROOT),
    'out_range_bottom': misc.imread('%s/data/cropper/out_range_bottom.jpg' % ROOT),
    'out_range_right': misc.imread('%s/data/cropper/out_range_right.jpg' % ROOT),
    'out_range_left': misc.imread('%s/data/cropper/out_range_left.jpg' % ROOT)
}

DETECTOR = MTCNNDetector(FaceGraph())


class CropperUtilsTest(unittest.TestCase):
    '''
    Run tests on cropping, assuming every face is at an acceptable angle and within range
    '''

    def test_display_face_ratio(self):
        for name, image in IMAGES.items():
            self.display_face_ratio(name, image)

    def test_reversed_face_same_as_cropped_face(self):
        for name, image in IMAGES.items():
            self.reverse_face_same_as_cropped_face(name, image)