Example #1
def __init__(self):
    # Separate TF graphs for the embedding model and the coefficient model
    self.face_rec_graph_face = FaceGraph()
    self.coeff_graph = FaceGraph()
    self.face_extractor = FacenetExtractor(
        self.face_rec_graph_face, model_path=Config.Model.FACENET_DIR)
    self.coeff_extractor = FacenetExtractor(
        self.coeff_graph, model_path=Config.Model.COEFF_DIR)
    self.detector = MTCNNDetector(
        self.face_rec_graph_face, scale_factor=Config.MTCNN.SCALE_FACTOR)
    self.preprocessor = Preprocessor()
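For context, a minimal usage sketch of the pipeline this __init__ wires up. The class name FacePipeline and the input frame are assumptions; detect_face, process, extract_features and CropperUtils.crop_face are the same calls the other examples below use.

pipeline = FacePipeline()  # hypothetical class holding the __init__ above
frame = misc.imread('frame.jpg')  # any RGB frame (placeholder path)
bounding_boxes, points = pipeline.detector.detect_face(frame)
for bb in bounding_boxes:
    cropped = CropperUtils.crop_face(frame, bb)
    preprocessed = pipeline.preprocessor.process(cropped)
    emb, _ = pipeline.face_extractor.extract_features(preprocessed)
    _, coeff = pipeline.coeff_extractor.extract_features(preprocessed)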
Example #2
def main(cam_url, recording_area):

    rb = RabbitMQ((Config.Rabbit.USERNAME, Config.Rabbit.PASSWORD),
                  (Config.Rabbit.IP_ADDRESS, Config.Rabbit.PORT))
    detector = MTCNNDetector(FaceGraph())
    frame_reader = URLFrameReader(cam_url)
    edit_image = utils.CropperUtils()
    face_angle = utils.FaceAngleUtils()
    feature_extractor = FacenetExtractor(FaceGraph())
    pre_process = Preprocessor(whitening)  # whitening: a preprocessing alg, presumably imported from preprocess

    while frame_reader.has_next():

        embedding_images = []
        embedding_vectors = []
        display_images = []
        display_image_bounding_boxes = []

        frame = frame_reader.next_frame()
        bounding_boxes, points = detector.detect_face(frame)

        for index, bounding_box in enumerate(bounding_boxes):

            if face_angle.is_acceptable_angle(points[:, index]):

                embedding_image = edit_image.crop_face(frame, bounding_box)
                embedding_images.append(embedding_image)

                display_image, display_image_bounding_box = edit_image.crop_display_face(
                    frame, bounding_box)
                display_images.append(display_image)
                display_image_bounding_boxes.append(display_image_bounding_box)

                whitened_image = pre_process.process(embedding_image)
                embedding_vector, coeff = feature_extractor.extract_features(
                    whitened_image)

                embedding_vectors.append(embedding_vector)

        if len(embedding_vectors) > 0:

            rb.send_multi_embedding_message(display_images, embedding_vectors,
                                            recording_area, time.time(),
                                            display_image_bounding_boxes,
                                            rb.SEND_QUEUE_WORKER)
        else:
            print("No Face Detected")
Example #3
def test_register_function():
    face_rec_graph = FaceGraph()
    face_extractor = FacenetExtractor(
        face_rec_graph, model_path=Config.FACENET_DIR)
    detector = MTCNNDetector(face_rec_graph)
    preprocessor = Preprocessor()
    register_function(
        detector,
        preprocessor,
        face_extractor,
    )
Example #4
def gen_frames(root_folder):
    dir_list = os.listdir(root_folder)
    frames = {}
    face_rec_graph_face = FaceGraph()
    face_rec_graph_coeff = FaceGraph()
    face_extractor = FacenetExtractor(face_rec_graph_face,
                                      model_path=Config.FACENET_DIR)
    coeff_extractor = FacenetExtractor(face_rec_graph_coeff,
                                       model_path=Config.COEFF_DIR)
    preprocessor = Preprocessor()
    for dir in dir_list:
        for file in glob.glob(os.path.join(root_folder, dir, '*')):
            image = imageio.imread(file)
            identity = os.path.split(file)[-2]
            file_name = os.path.split(file)[-1]
            frame_id = file_name.split('_')[5]
            origin_bbox = [int(i) for i in file_name.split('_')[1:5]]
            if file_name.startswith('BAD-TRACK'):
                padding_bbox = [
                    int(i) for i in file_name.split('.')[0].split('_')[-4:]
                ]
            else:
                padding_bbox = [
                    int(i) for i in file_name.split('.')[1].split('_')[-4:]
                ]
            cropped_face = CropperUtils.crop_face(image, padding_bbox)
            preprocessed_image = preprocessor.process(cropped_face)
            emb_array, _ = face_extractor.extract_features(preprocessed_image)
            _, coeff = coeff_extractor.extract_features(preprocessed_image)
            if frame_id in frames:
                frames[frame_id].append(
                    (file, origin_bbox, padding_bbox, identity, emb_array))
            else:
                frames[frame_id] = [(file, origin_bbox, padding_bbox, identity,
                                     emb_array)]
    return frames
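The parsing above relies on a rigid file-name convention; a hypothetical name (fields inferred from the split indices used above, not taken from the source) makes it concrete:

# <track>_<x1>_<y1>_<x2>_<y2>_<timestamp>.<frac>_<px1>_<py1>_<px2>_<py2>.jpg
file_name = '7_10_20_110_120_1528275839.12_-5_-5_105_105.jpg'
frame_id = file_name.split('_')[5]                           # '1528275839.12'
origin_bbox = [int(i) for i in file_name.split('_')[1:5]]    # [10, 20, 110, 120]
padding_bbox = [int(i)
                for i in file_name.split('.')[1].split('_')[-4:]]  # [-5, -5, 105, 105]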
Example #5
def extract_embs(model_path, data, margin, save_name,
                 nrof_samples=float('Inf')):
    save_name = os.path.expanduser(save_name.rstrip('/'))
    if not os.path.exists(os.path.dirname(save_name)):
        print('please enter a valid save_name')
        return

    data = os.path.expanduser(data.rstrip('/'))
    id_dirs = glob.glob(os.path.join(data, '*'))

    id_dict = {}
    for id_dir in id_dirs:
        id_label = os.path.basename(id_dir)
        image_paths = glob.glob(os.path.join(id_dir, '*.*'))
        use_samples = min(nrof_samples, len(image_paths))
        for path in image_paths[:use_samples]:
            id_dict[path] = id_label

    face_graph = FaceGraph()
    extractor = FacenetExtractor(face_graph, model_path=model_path)
    nrof_imgs = 0
    emb_array = np.zeros((len(id_dict), 128))
    label_list = []
    image_ids = []
    print('reading images')
    for path, label in id_dict.items():
        try:
            img = misc.imread(path)
            coor_list = os.path.splitext(
                os.path.basename(path))[0].split('_')[-4:]
            bbox = np.array(coor_list, dtype=int)
            face_img = CropperUtils.crop_face(img, bbox, margin)
            face_img = default_preprocess(face_img)
            emb, coeff = extractor.extract_features(face_img)
            emb_array[nrof_imgs, :] = emb.squeeze()
        except (ValueError, OSError) as e:
            print(e)
            print('skipping', path)
            continue
        nrof_imgs += 1
        label_list.append(label)
        image_ids.append(os.path.basename(path))
    emb_array = emb_array[:nrof_imgs]
    print('extracted {} images'.format(nrof_imgs))

    save = {'embs': emb_array, 'labels': label_list, 'image_ids': image_ids}

    save_file = save_name
    with open(save_file, 'wb') as f:
        pickle.dump(save, f)
    print('result saved at', save_file)
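The pickle written above is a plain dict; a minimal sketch of loading it back (the path is a placeholder for whatever save_name was used):

import pickle

with open('embs.pkl', 'rb') as f:
    saved = pickle.load(f)
print(saved['embs'].shape)    # (nrof_imgs, 128)
print(len(saved['labels']), len(saved['image_ids']))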
Example #6
def generate_video_sample(cam_url, area):
    '''generating'''
    print('Generating... ')
    print("Cam URL: {}".format(cam_url))
    print("Area: {}".format(area))
    # Variables for tracking faces
    frame_counter = 0

    # Variables holding the correlation trackers and the name per faceid
    frame_sample = {}

    face_rec_graph = FaceGraph()
    face_extractor = FacenetExtractor(face_rec_graph)
    detector = MTCNNDetector(face_rec_graph)
    preprocessor = Preprocessor()
    if cam_url is not None:
        frame_reader = URLFrameReader(cam_url, scale_factor=1)
    else:
        frame_reader = RabbitFrameReader(rabbit_mq)

    try:
        while True:  # frame_reader.has_next():
            frame = frame_reader.next_frame()
            if frame is None:
                print("Waiting for the new image")
                continue
            frame_sample[frame_counter] = FrameSample()
            frame_sample[frame_counter].read_image = frame

            print("Frame ID: %d" % frame_counter)

            if frame_counter % Config.Frame.FRAME_INTERVAL == 0:
                origin_bbs, points = detector.detect_face(frame)
                frame_sample[frame_counter].origin_bbs = origin_bbs
                frame_sample[frame_counter].points = points
                for _, origin_bb in enumerate(origin_bbs):
                    cropped_face = CropperUtils.crop_face(frame, origin_bb)

                    # Calculate embedding
                    preprocessed_image = preprocessor.process(cropped_face)
                    emb_array, coeff = face_extractor.extract_features(
                        preprocessed_image)
                    frame_sample[frame_counter].embs.append(emb_array)

            frame_counter += 1
    except KeyboardInterrupt:
        print('Keyboard Interrupt !!! Release All !!!')
        print('Saved this video sample as ../session/db/sample.pkl')
        PickleUtils.save_pickle('../session/db/sample.pkl', frame_sample)
Example #7
def main(args):
    print(args.image_dir)
    if args.image_files is None:
        image_files = glob.glob(os.path.join(args.image_dir, r'*.*'))
    else:
        image_files = args.image_files

    print(image_files)
    face_graph = FaceGraph()
    images = load_and_align_data(face_graph, image_files, args.display)
    extractor = FacenetExtractor(face_graph)

    emb_list = []
    for image in images:
        _image = default_preprocess(image)
        _emb, coeff = extractor.extract_features(_image)
        emb_list.append(_emb)

    emb = np.vstack(emb_list)
    nrof_images = len(image_files)

    print('Images:')
    for i in range(nrof_images):
        print('%1d: %s' % (i, image_files[i]))
    print('')

    # Print distance matrix
    matching_matrix = []
    print('Distance matrix')
    print('    ', end='')
    for i in range(nrof_images):
        print('    %1d     ' % i, end='')
    print('')
    for i in range(nrof_images):
        print('%1d  ' % i, end='')
        dists = []
        for j in range(nrof_images):
            dist = np.sqrt(np.sum(np.square(np.subtract(emb[i, :],
                                                        emb[j, :]))))
            dists.append(dist)
            print('  %1.4f  ' % dist, end='')
        print('')
        matching_matrix.append(dists)
    np.savetxt('matching_matrix.csv', matching_matrix)
    print('Saved distance matrix as matching_matrix.csv')
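For reference, the nested loop above computes plain Euclidean distances; the whole matrix can also be built in one vectorized NumPy step (a sketch, with emb being the stacked embedding array from above):

def pairwise_distances(emb):
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2*a.b, evaluated for all pairs at once
    sq_norms = np.sum(np.square(emb), axis=1)
    sq_dists = sq_norms[:, None] + sq_norms[None, :] - 2.0 * emb.dot(emb.T)
    return np.sqrt(np.maximum(sq_dists, 0.0))  # clamp tiny negative rounding errors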
Example #8
def main(matcher_path, test_path):
    m_trackers_paths = glob.glob(matcher_path + '/*')
    t_trackers_paths = glob.glob(test_path + '/*')
    tracker_manager = TrackerManager('test')
    matcher = FaissMatcher()
    preprocessor = Preprocessor()
    align_preprocessor = Preprocessor(algs=align_and_crop)
    face_rec_graph_face = FaceGraph()
    face_extractor = FacenetExtractor(face_rec_graph_face,
                                      model_path=Config.FACENET_DIR)
    detector = MTCNNDetector(face_rec_graph_face)

    # create matcher
    print('Creating matcher ...')
    for m_dir in m_trackers_paths:
        print('Processing ' + m_dir)
        face_id = m_dir.split('/')[-1]
        embs, labels = extract_embs(m_dir, preprocessor, face_extractor, None)
        face_id_labels = [face_id for i in range(len(labels))]
        matcher.update(embs, face_id_labels)

    # create tracker
    print('Creating trackers')
    for t_dir in t_trackers_paths:
        print('Processing ' + t_dir)
        embs, _ = extract_embs(t_dir, preprocessor, face_extractor, None)
        track_id = int(t_dir.split('/')[-1])

        first_emb = embs.pop()
        face_info = FaceInfo(None, first_emb, None, None, None, None)
        tracker_manager.current_trackers[track_id] = Tracker(
            track_id, face_info, None)
        for emb in embs:
            face_info = FaceInfo(None, emb, None, None, None, None)
            tracker_manager.current_trackers[track_id].update(face_info, None)

    # test matching
    print('Test matching ...')
    for fid in tracker_manager.current_trackers:
        print('Processing: ' + str(fid))
        tops = tracker_manager.recognize_current_tracker(fid, matcher, None)
        print('Track_id {}, recognize: {}'.format(fid, tops))
Example #9
def main(args):
    print(args.image_dir)
    if args.image_files is None:
        image_files = glob.glob(os.path.join(args.image_dir, r'*.*'))
    else:
        image_files = args.image_files

    print(image_files)
    face_graph = FaceGraph()
    images = load_and_align_data(face_graph, image_files, args.display)
    extractor = FacenetExtractor(face_graph)
    matcher = KdTreeMatcher()

    emb_list = []
    for image in images:
        _image = default_preprocess(image)
        _emb, coeff = extractor.extract_features(_image)
        emb_list.append(_emb)

    emb = np.vstack(emb_list)
    nrof_images = len(image_files)

    print('Images:')
    for i in range(nrof_images):
        print('%1d: %s' % (i, image_files[i]))
    print('')

    # Build the distance matrix via the matcher
    matching_matrix = []
    for i in range(nrof_images):
        _, top_match_ids, dists = matcher.match(emb[i, :],
                                                top_matches=57,
                                                return_min_dist=True)
        with open('index.txt', 'a') as the_file:
            the_file.write(','.join(top_match_ids) + '\n')
        matching_matrix.append(dists)
    np.savetxt('matching_matrix.csv', matching_matrix)
    print('Saved distance matrix as matching_matrix.csv')
Example #10
import glob

from scipy import misc
from rabbitmq import RabbitMQ
from config import Config
from cv_utils import PickleUtils, create_if_not_exist, CropperUtils
from pymongo import MongoClient
from face_extractor import FacenetExtractor
from preprocess import Preprocessor
from tf_graph import FaceGraph

facial_dirs = glob.glob('tch_data_Mar_June/*')
tracking_folder_path = 'tchbk/tracking'
queue_name = 'tchbk-production'
SEND_RBMQ_HTTP = 'http://210.211.119.152:1111/tchbk_images'
face_extractor_model = Config.FACENET_DIR
face_rec_graph_face = FaceGraph()
face_extractor = FacenetExtractor(face_rec_graph_face,
                                  model_path=face_extractor_model)
preprocessor = Preprocessor()

rabbit_mq = RabbitMQ((Config.Rabbit.USERNAME, Config.Rabbit.PASSWORD),
                     (Config.Rabbit.IP_ADDRESS, Config.Rabbit.PORT))

mongodb_client = MongoClient(Config.MongoDB.IP_ADDRESS,
                             Config.MongoDB.PORT,
                             username=Config.MongoDB.USERNAME,
                             password=Config.MongoDB.PASSWORD)
mongodb_db = mongodb_client['tchbk-cv']
mongodb_cols = mongodb_db['trackinginfo']
mongodb_dashinfo = mongodb_db['dashinfo']
mongodb_faceinfo = mongodb_db['faceinfo']
Example #11
    parser.add_argument(
        '-mthread',
        '--multi_thread',
        help='multi_thread mode',
        action='store_true')
    args = parser.parse_args()

    # Run
    if args.video_out is not None:
        Config.Track.TRACKING_VIDEO_OUT = True
        Config.Track.VIDEO_OUT_PATH = args.video_out
    Config.SEND_QUEUE_TO_DASHBOARD = args.dashboard
    Config.Matcher.CLEAR_SESSION = args.clear_session
    Config.Track.SEND_RECOG_API = args.rethinkdb

    face_rec_graph_face = FaceGraph()
    coeff_graph = FaceGraph()
    face_extractor = FacenetExtractor(
        face_rec_graph_face, model_path=args.face_extractor_model)
    coeff_extractor = FacenetExtractor(coeff_graph, model_path=Config.COEFF_DIR)
    detector = MTCNNDetector(
        face_rec_graph_face, scale_factor=Config.MTCNN.SCALE_FACTOR)
    preprocessor = Preprocessor()
    align_preprocessor = Preprocessor(algs=align_and_crop)
    aligner = AlignCustom()

    if args.test_all:
        videos = [
            os.path.join(args.cam_url, video)
            for video in os.listdir(args.cam_url)
            if not os.path.isdir(os.path.join(args.cam_url, video))
        ]
Example #12
import time
from queue import Queue  # assumed location of the Queue used below

from rabbitmq import RabbitMQ
from onetime.multi_client_generic_detection_tracking import generic_function
from cv_utils import decode_image, clear_session_folder
from preprocess import Preprocessor
from frame_reader import QueueFrameReader
from config import Config
from tf_graph import FaceGraph
from face_extractor import FacenetExtractor
from face_detector import MTCNNDetector
from matcher import FaissMatcher

# read config
configs = []
with open('../config.txt', 'r') as f:
    configs = f.readlines()
configs = [txt_config.strip('\n') for txt_config in configs]
Config.DEMO_FOR = configs[0]
Config.Rabbit.IP_ADDRESS = configs[1]

face_rec_graph = FaceGraph()
face_extractor = FacenetExtractor(face_rec_graph, model_path=Config.FACENET_DIR)
detector = MTCNNDetector(face_rec_graph)
preprocessor = Preprocessor()
matcher = FaissMatcher()
matcher._match_case = 'TCH'
matcher.build(Config.REG_IMAGE_FACE_DICT_FILE)
rb = RabbitMQ()

frame_readers = dict()
register_command = dict()  # {session_id: [[register_name, video_path]]}
removed_sessions = Queue()
sent_msg_queue = Queue()
start_time = time.time()

while True:
Example #13
from face_detector import MTCNNDetector
from face_extractor import FacenetExtractor
from tf_graph import FaceGraph
from cv_utils import show_frame, CropperUtils
from preprocess import Preprocessor
from matcher import KdTreeMatcher
from frame_reader import URLFrameReader
import time

matcher = KdTreeMatcher()
face_graph = FaceGraph()
face_detector = MTCNNDetector(face_graph)
feature_extractor = FacenetExtractor(face_graph)
preprocessor = Preprocessor()
frame_reader = URLFrameReader(cam_url=0, scale_factor=2)

while frame_reader.has_next():
    frame = frame_reader.next_frame()
    bounding_boxes, landmarks = face_detector.detect_face(frame)
    nrof_faces = len(bounding_boxes)
    start = time.time()
    for i in range(nrof_faces):
        cropped = CropperUtils.crop_face(frame, bounding_boxes[i])
        display_face, padded_bb_str = CropperUtils.crop_display_face(
            frame, bounding_boxes[i])
        reverse_face = CropperUtils.reverse_display_face(
            display_face, padded_bb_str)
        process_img = preprocessor.process(cropped)
        show_frame(reverse_face, 'Reverse')
        show_frame(cropped, 'Cropped')
        emb, coeff = feature_extractor.extract_features(process_img)
Example #14
def __init__(self):
    face_graph = FaceGraph()
    self.detector = MTCNNDetector(face_graph)
Example #15
def simulate_tracking(root_folder):
    Config.Track.FACE_TRACK_IMAGES_OUT = True
    Config.Track.SEND_FIRST_STEP_RECOG_API = False
    Config.Track.MIN_MATCH_DISTACE_OUT = True
    Config.Track.CURRENT_EXTRACR_TIMER = 5

    # Load Face Extractor
    face_rec_graph = FaceGraph()
    face_rec_graph_coeff = FaceGraph()
    face_extractor = FacenetExtractor(
        face_rec_graph, model_path=Config.FACENET_DIR)
    coeff_extractor = FacenetExtractor(
        face_rec_graph_coeff, model_path=Config.COEFF_DIR)

    # Create empty KDTreeMatcher
    matcher = FaissMatcher()
    matcher._match_case = 'TCH'

    # Preprocessor
    preprocessor = Preprocessor()

    # Fake rabbit mq

    rabbit_mq = FakeMQ()

    # Clean up tracking and session folders
    clear_tracking_folder()
    if Config.Matcher.CLEAR_SESSION:
        clear_session_folder()

    # Setup result list
    list_of_trackers = TrackersList()
    track_results = TrackerResultsDict()
    predict_dict = {}
    confirmed_ids_dict = {}

    sim_detections = gen_images_with_time(root_folder)
    for detection in sim_detections:
        frame = create_fake_frame(detection)
        sleep(0.05)
        trackers_return_dict, predict_trackers_dict = \
            list_of_trackers.check_delete_trackers(matcher, rabbit_mq)
        track_results.update_two_dict(trackers_return_dict)
        predict_dict.update(predict_trackers_dict)
        confirmed_ids_dict = list_of_trackers.trackers_history.confirm_id(
            confirmed_ids_dict)
        list_of_trackers.trackers_history.check_time(matcher)
        list_of_trackers.update_dlib_trackers(frame)
        facial_quality = 1
        # Crop face for features extraction

        origin_bb = detection[2]
        display_face, padded_bbox = CropperUtils.crop_display_face(
            frame, origin_bb)
        cropped_face = CropperUtils.crop_face(frame, origin_bb)

        bbox_str = '_'.join(np.array(origin_bb, dtype=np.unicode).tolist())
        # Calculate embedding
        preprocessed_image = preprocessor.process(cropped_face)
        emb_array, _ = face_extractor.extract_features(preprocessed_image)
        _, coeff = coeff_extractor.extract_features(preprocessed_image)
        if coeff < 0.15:
            img_path = '../data/notenoughcoeff/{}_{}_{}.jpg'.format(
                detection, bbox_str, coeff)
            cv2.imwrite(img_path, cv2.cvtColor(display_face, cv2.COLOR_BGR2RGB))
            facial_quality = -1
        else:
            with open('../data/coeff_log.txt', 'a') as f:
                f.write('{}_{}_{}, coeff: {}\n'.format(bbox_str, detection[0],
                                                       padded_bbox, coeff))

        matched_fid = list_of_trackers.matching_face_with_trackers(
            frame, detection[0], origin_bb, emb_array, facial_quality)
        if facial_quality == -1 or coeff < 0.15:
            continue

        list_of_trackers.update_trackers_list(
            matched_fid, time.time(), origin_bb, display_face, emb_array, 0,
            'VVT', 1, detection[0], padded_bbox, matcher, rabbit_mq)

        list_of_trackers.check_recognize_tracker(matcher, rabbit_mq,
                                                 matched_fid)

    sleep(6)
    list_of_trackers.check_delete_trackers(matcher, rabbit_mq)
Example #16
def cam_worker_function(cam_url, area):
    '''
    Cam worker function
    '''
    print("Cam URL: {}".format(cam_url))
    print("Area: {}".format(area))

    # Modify Config
    Config.Track.TRACKING_QUEUE_CAM_TO_CENTRAL = True

    rabbit_mq = RabbitMQ((Config.Rabbit.USERNAME, Config.Rabbit.PASSWORD),
                         (Config.Rabbit.IP_ADDRESS, Config.Rabbit.PORT))

    frame_counter = 0

    # Variables holding the correlation trackers and the name per faceid
    list_of_trackers = TrackersList()

    face_rec_graph = FaceGraph()
    face_extractor = FacenetExtractor(face_rec_graph)
    detector = MTCNNDetector(face_rec_graph)
    preprocessor = Preprocessor()
    matcher = KdTreeMatcher()
    if Config.CALC_FPS:
        start_time = time.time()
    if cam_url is not None:
        frame_reader = URLFrameReader(cam_url, scale_factor=1.5)
    else:
        frame_reader = RabbitFrameReader(rabbit_mq)

    try:
        while True:  # frame_reader.has_next():
            frame = frame_reader.next_frame()
            if frame is None:
                print("Waiting for the new image")
                list_of_trackers.check_delete_trackers(matcher,
                                                       rabbit_mq,
                                                       history_mode=False)
                continue

            print("Frame ID: %d" % frame_counter)

            if Config.CALC_FPS:
                fps_counter = time.time()

            list_of_trackers.update_dlib_trackers(frame)

            if frame_counter % Config.Frame.FRAME_INTERVAL == 0:
                origin_bbs, points = detector.detect_face(frame)
                for i, origin_bb in enumerate(origin_bbs):
                    display_face, _ = CropperUtils.crop_display_face(
                        frame, origin_bb)
                    print("Display face shape")
                    print(display_face.shape)
                    if 0 in display_face.shape:
                        continue
                    cropped_face = CropperUtils.crop_face(frame, origin_bb)

                    # Calculate embedding
                    preprocessed_image = preprocessor.process(cropped_face)
                    emb_array, coeff = face_extractor.extract_features(
                        preprocessed_image)

                    # Calculate angle
                    angle = FaceAngleUtils.calc_angle(points[:, i])

                    # TODO: refactor matching_detected_face_with_trackers
                    matched_fid = list_of_trackers.matching_face_with_trackers(
                        frame, origin_bb, emb_array)

                    # Update list_of_trackers
                    list_of_trackers.update_trackers_list(
                        matched_fid, origin_bb, display_face, emb_array, angle,
                        area, frame_counter, matcher, rabbit_mq)

                    if Config.Track.TRACKING_QUEUE_CAM_TO_CENTRAL:
                        track_tuple = (matched_fid, display_face, emb_array,
                                       area, time.time(), origin_bb, angle)
                        rabbit_mq.send_tracking(
                            track_tuple,
                            rabbit_mq.RECEIVE_CAM_WORKER_TRACKING_QUEUE)

            # Check whether current trackers should be deleted
            list_of_trackers.check_delete_trackers(matcher,
                                                   rabbit_mq,
                                                   history_mode=False)

            frame_counter += 1
            if Config.CALC_FPS:
                print("FPS: %f" % (1 / (time.time() - fps_counter)))

    except KeyboardInterrupt:
        print('Keyboard Interrupt !!! Release All !!!')
        if Config.CALC_FPS:
            print('Time elapsed: {}'.format(time.time() - start_time))
            print('Avg FPS: {}'.format(
                (frame_counter + 1) / (time.time() - start_time)))
        frame_reader.release()
Example #17
from face_extractor import FacenetExtractor
import os
from cv_utils import create_if_not_exist, PickleUtils, CropperUtils
import cv2
from scipy import misc
from preprocess import Preprocessor
from matcher import FaissMatcher, KdTreeMatcher
from rabbitmq import RabbitMQ
from config import Config
from tf_graph import FaceGraph
from pymongo import MongoClient
import argparse
import glob
import time

extractor_graph = FaceGraph()
face_extractor = FacenetExtractor(extractor_graph)

rabbit_mq = RabbitMQ((Config.Rabbit.USERNAME, Config.Rabbit.PASSWORD),
                     (Config.Rabbit.IP_ADDRESS, Config.Rabbit.PORT))

mongodb_client = MongoClient(Config.MongoDB.IP_ADDRESS,
                             Config.MongoDB.PORT,
                             username=Config.MongoDB.USERNAME,
                             password=Config.MongoDB.PASSWORD)

mongodb_db = mongodb_client[Config.MongoDB.DB_NAME]
mongodb_dashinfo = mongodb_db[Config.MongoDB.DASHINFO_COLS_NAME]
mongodb_faceinfo = mongodb_db[Config.MongoDB.FACEINFO_COLS_NAME]

Example #18
def main(data_path):
    #wipe data
    mongodb_dashinfo.remove({})
    mongodb_faceinfo.remove({})

    tracker_paths = [
        tracker_path for tracker_path in glob.glob(data_path + '/*')
        if os.path.isdir(tracker_path)
    ]
    face_rec_graph = FaceGraph()
    face_extractor = FacenetExtractor(face_rec_graph)
    preprocessor = Preprocessor()
    #get max track id
    existing_tracking_paths = os.listdir(Config.TRACKING_DIR)
    if existing_tracking_paths:
        track_id = max(int(dir) for dir in existing_tracking_paths) + 1
    else:
        track_id = 0
    for tracker_path in tracker_paths:  #assuming that each annotated folder is a tracker
        tracker_save_folder = os.path.join(Config.TRACKING_DIR, str(track_id))
        preprocessed_images = []
        insert_list = []
        #create fake face_id
        face_id = '{}-{}-{}'.format("Office", track_id, time.time())
        display_imgs = [
            os.path.basename(_dir)
            for _dir in glob.glob(tracker_path + '/*.jpg')
        ]

        #iterate through list of img names
        for display_img in display_imgs:
            image_id = display_img.replace(".jpg", "")
            img = misc.imread(os.path.join(tracker_path, display_img))
            #parse image data
            data_split = image_id.split('_')
            data_split[0] = str(track_id)
            image_id = '_'.join(data_split)
            bbox = data_split[1:5]
            bbox = [int(i) for i in bbox]
            padded_bbox = data_split[-4:]
            padded_bbox = '_'.join(padded_bbox)
            time_stamp = float(data_split[5])

            cropped_face = CropperUtils.reverse_display_face(img, padded_bbox)
            preprocessed_image = preprocessor.process(cropped_face)
            emb_array, _ = face_extractor.extract_features(preprocessed_image)

            insert_list.append({
                'track_id': track_id,
                'image_id': image_id,
                'face_id': face_id,
                'time_stamp': time_stamp,
                'bounding_box': bbox,
                'embedding': emb_array.tolist(),
                'padded_bbox': padded_bbox,
                'points': None,
                'is_registered': True
            })

            #save image to TRACKING DIR
            create_if_not_exist(tracker_save_folder)
            misc.imsave(os.path.join(tracker_save_folder, image_id + '.jpg'),
                        img)

            # preprocessed_images.append(preprocessor.process(cropped_face))

            # #extract embeddings all at once for performance
            # embs_array, _ = face_extractor.extract_features_all_at_once(preprocessed_images)

            # #map embedding with its corresponding image id
            # for i in range(len(s_insert_list)):
            #     insert_list[i]['embedding'] = [embs_array[i].tolist()] #embedding is saved as (1,128)

        #insert all images at once for performance
        mongodb_faceinfo.insert(insert_list)

        #add log to dash info
        mongodb_dashinfo.remove({'track_id': track_id})
        mongodb_dashinfo.insert_one({
            'track_id': track_id,
            'represent_image_id': insert_list[0]['image_id'],
            'face_id': face_id,
            'is_registered': True
        })

        #simulate cv <---> web real-time connection
        queue_msg = '|'.join([
            face_id, Config.SEND_RBMQ_HTTP + '/' + str(track_id) + '/',
            insert_list[0]['image_id'],
            str(time.time())
        ])
        rabbit_mq.send(Config.Queues.LIVE_RESULT, queue_msg)

        track_id += 1  #increment track id
Example #19
import mask_glasses

from cv_utils import CropperUtils, create_if_not_exist
from frame_process import ROIFrameProcessor
from frame_reader import URLFrameReader
from face_detector import MTCNNDetector
from tf_graph import FaceGraph
import cv2
from preprocess import Preprocessor, normalization
import numpy as np
import click
from scipy import misc
from config import Config
import time
import pipe

frame_reader = URLFrameReader(0)
face_detector = MTCNNDetector(FaceGraph())
frame_processor = ROIFrameProcessor(scale_factor=2)

mask_classifier = mask_glasses.MaskClassifier()
glasses_classifier = mask_glasses.GlassesClassifier()

preprocessor = Preprocessor(algs=normalization)

MASK_DIR = '%s/data/Mask/' % Config.ROOT
NOMASK_DIR = '%s/data/No_Mask/' % Config.ROOT
GLASSES_DIR = '%s/data/Glasses/' % Config.ROOT
NOGLASSES_DIR = '%s/data/No_Glasses/' % Config.ROOT

create_if_not_exist(MASK_DIR)
create_if_not_exist(NOMASK_DIR)
create_if_not_exist(GLASSES_DIR)
create_if_not_exist(NOGLASSES_DIR)
Example #20
IMAGES = {
    'big_face': misc.imread('%s/data/cropper/big_face.jpg' % ROOT),
    'top': misc.imread('%s/data/cropper/top.jpg' % ROOT),
    'bottom': misc.imread('%s/data/cropper/bottom.jpg' % ROOT),
    'left': misc.imread('%s/data/cropper/left.jpg' % ROOT),
    'right': misc.imread('%s/data/cropper/right.jpg' % ROOT),
    'out_range_top': misc.imread('%s/data/cropper/out_range_top.jpg' % ROOT),
    'out_range_bottom': misc.imread('%s/data/cropper/out_range_bottom.jpg' % ROOT),
    'out_range_right': misc.imread('%s/data/cropper/out_range_right.jpg' % ROOT),
    'out_range_left': misc.imread('%s/data/cropper/out_range_left.jpg' % ROOT)
}

DETECTOR = MTCNNDetector(FaceGraph())


class CropperUtilsTest(unittest.TestCase):
    '''
    Run tests on cropping, assuming every face is at an acceptable angle and within frame range
    '''

    def test_display_face_ratio(self):
        [self.display_face_ratio(name, image) for name, image in IMAGES.items()]

    def test_reversed_face_same_as_cropped_face(self):
        [
            self.reverse_face_same_as_cropped_face(name, image)
            for name, image in IMAGES.items()
        ]