Code example #1
def get_bounding_box(original_path):
    restructured_path = original_path + "_restructured"
    create_if_not_exist(restructured_path)
    face_ids = [
        id for id in os.listdir(original_path)
        if os.path.isdir(os.path.join(original_path, id))
    ]
    for face_id in face_ids:
        id_path = os.path.join(original_path, face_id)
        r_id_path = os.path.join(restructured_path, face_id)
        create_if_not_exist(r_id_path)
        images = [
            os.path.join(id_path, image) for image in os.listdir(id_path)
            if "jpg" in image or "png" in image
        ]
        for image_name in images:
            img = cv2.cvtColor(cv2.imread(image_name), cv2.COLOR_BGR2RGB)
            rects, landmarks = detector.detect_face(img)
            if len(rects) == 1 and FaceAngleUtils.is_acceptable_angle(
                    landmarks[:, 0]):
                origin_bb = rects[0][:4]
                display_face, str_padded_box = CropperUtils.crop_display_face(
                    img, origin_bb)
                bbox_str = '_'.join([str(int(num)) for num in origin_bb])
                image_id = '{}_{}_{}_{}.jpg'.format(face_id, bbox_str,
                                                    time.time(),
                                                    str_padded_box)
                cv2.imwrite(os.path.join(r_id_path, image_id),
                            cv2.cvtColor(display_face, cv2.COLOR_RGB2BGR))
                print(image_id)
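
Every example on this page calls a create_if_not_exist helper that is not shown here. A minimal sketch, assuming it simply wraps os.makedirs, might look like this:

import os


def create_if_not_exist(path):
    # Create the directory (and any missing parents) if it does not exist yet.
    if not os.path.exists(path):
        os.makedirs(path)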
Code example #2
def main(src, dst):
    # his_dic = get_history_in_send_folder()
    his_dic = PickleUtils.read_pickle('/home/manho/data/his_dic.pkl')
    NEW_TRACKING_DIR = dst
    create_if_not_exist(NEW_TRACKING_DIR)
    track_id_dirs = glob.glob(src + '/*')

    for track_id_dir in track_id_dirs:
        print('Processing ' + track_id_dir)
        first_file_name = os.path.basename(glob.glob(track_id_dir + '/*')[0])
        split_file_name = first_file_name.replace('.jpg', '').split('_')
        face_id = split_file_name[0]
        track_id = os.path.basename(track_id_dir)
        print('FACEID: {}, TRACKID: {}'.format(face_id, track_id))

        face_id_dir = os.path.join(NEW_TRACKING_DIR, face_id)
        create_if_not_exist(face_id_dir)

        new_track_id_dir = os.path.join(face_id_dir, track_id)

        subprocess.call(["cp", "-r", track_id_dir, face_id_dir])
        this_mtime = -1
        if face_id in his_dic:
            this_mtime = his_dic[face_id].pop(0)
            if not his_dic[face_id]:
                his_dic.pop(face_id, None)
        else:
            this_mtime = os.stat(track_id_dir).st_mtime
        modify_image_id(new_track_id_dir, track_id, time_stamp=this_mtime)
    PickleUtils.save_pickle('/home/manho/data/his_dic_remain.pkl', his_dic)
    print('Done!')
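
Code example #2 relies on PickleUtils.read_pickle and PickleUtils.save_pickle, project helpers that are not shown on this page. A minimal sketch, assuming they are thin wrappers around the standard pickle module, could be:

import pickle


class PickleUtils:
    """Minimal sketch of the project's pickle helpers (assumed behaviour)."""

    @staticmethod
    def read_pickle(path):
        # Load and return the pickled object stored at `path`.
        with open(path, 'rb') as f:
            return pickle.load(f)

    @staticmethod
    def save_pickle(path, obj):
        # Serialize `obj` to `path`, overwriting any existing file.
        with open(path, 'wb') as f:
            pickle.dump(obj, f)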
Code example #3
def get_trackers_from_db(track_folder):
    save_path = track_folder + "_restructured"
    create_if_not_exist(save_path)
    cursors = mongodb_faceinfo.find({})
    for cursor in cursors:
        face_folder = os.path.join(save_path, cursor["face_id"])
        try:
            copytree(os.path.join(track_folder, str(cursor["track_id"])),
                     os.path.join(face_folder, str(cursor["track_id"])))
            print(cursor["image_id"])
        except Exception:  # skip trackers that cannot be copied (missing source, existing destination)
            continue
Code example #4
def rotate_video(original_path, by_landmark=False):
    restructured_path = original_path + "_restructured"
    create_if_not_exist(restructured_path)
    videos = [os.path.join(original_path, id) for id in os.listdir(original_path)\
            if not os.path.isdir(os.path.join(original_path,id))]

    for video in videos:
        frame_reader = URLFrameReader(video, scale_factor=1)
        video_name = video.split("/")[-1].split(".")[0]
        video_type = video.split("/")[-1].split(".")[1]
        is_rotate = False
        if by_landmark:
            while True:
                frame = frame_reader.next_frame()
                if frame is None:
                    break
                rects, landmarks = detector.detect_face(frame)
                if len(rects) > 0:

                    rotate_angle = FaceAngleUtils.calc_face_rotate_angle(
                        landmarks[:, 0])
                    print("Points: " + str(landmarks[:, 0]) +
                          ", rotate_angle: " + str(rotate_angle))
                    if rotate_angle > 30:
                        video_name += "_rotate"
                    break
        else:
            cmd = 'ffmpeg -i %s' % video

            p = subprocess.Popen(cmd.split(" "),
                                 stderr=subprocess.PIPE,
                                 close_fds=True)
            stdout, stderr = p.communicate()
            reo_rotation = re.compile(rb'rotate\s+:\s(?P<rotation>.*)')
            match_rotation = reo_rotation.search(stderr)
            if (match_rotation is not None
                    and len(match_rotation.groups()) > 0):
                rotation = match_rotation.groups()[0]

                if int(rotation) > 0:
                    video_name += "_rotate_" + str(int(rotation))

        n_video_path = os.path.join(restructured_path,
                                    video_name + "." + video_type)
        copy(video, n_video_path)
        print(video_name)
Code example #5
def train_validate_view_tracker(original_path):
    restructured_path = original_path + "_restructured"
    create_if_not_exist(restructured_path)
    face_ids = [
        id for id in os.listdir(original_path)
        if os.path.isdir(os.path.join(original_path, id))
    ]
    for face_id in face_ids:
        id_path = os.path.join(original_path, face_id)
        trackers = [
            tracker for tracker in os.listdir(id_path)
            if os.path.isdir(os.path.join(id_path, tracker))
        ]
        trackers.sort(key=lambda x: int(x))
        if (len(trackers) > 1):
            r_id_path = os.path.join(restructured_path, face_id)
            create_if_not_exist(r_id_path)
            for i in range(1, len(trackers)):
                move(os.path.join(id_path, trackers[i]), r_id_path)
                print(os.path.join(id_path, trackers[i]))
Code example #6
def train_and_validate(original_path, train_ratio):
    restructured_path = original_path + "_restructured"
    create_if_not_exist(restructured_path)
    face_ids = [
        id for id in os.listdir(original_path)
        if os.path.isdir(os.path.join(original_path, id))
    ]
    for face_id in face_ids:
        id_path = os.path.join(original_path, face_id)
        r_id_path = os.path.join(restructured_path, face_id)
        create_if_not_exist(r_id_path)
        trackers = [
            tracker for tracker in os.listdir(id_path)
            if os.path.isdir(os.path.join(id_path, tracker))
        ]
        trackers.sort(key=lambda x: int(x))
        #create_if_not_exist(os.path.join(r_id_path,tracker))
        num_train_set = int(len(trackers) * train_ratio)
        trackers_to_move = trackers[num_train_set:]
        for t_move in trackers_to_move:
            move(os.path.join(id_path, t_move), r_id_path)
Code example #7
def break_into_trackers(original_path):
    restructured_path = original_path + "_restructured"
    create_if_not_exist(restructured_path)

    face_ids = [
        id for id in os.listdir(original_path)
        if os.path.isdir(os.path.join(original_path, id))
    ]

    for face_id in face_ids:
        id_path = os.path.join(original_path, face_id)
        images = [
            os.path.join(id_path, image) for image in os.listdir(id_path)
            if "jpg" in image or "png" in image
        ]
        images.sort(key=lambda x: float(x.replace(".jpg", "").split("_")[5]))
        #images.sort(key=lambda x:int(x.split("/")[-1].split(".")[0]))
        r_id_path = os.path.join(restructured_path, face_id)
        create_if_not_exist(r_id_path)
        i = 0
        while i < len(images):
            # N is assumed to be a module-level constant: images per tracker folder
            tracker_path = os.path.join(r_id_path, str(i // N))
            create_if_not_exist(tracker_path)
            for c in range(i, i + N):
                if (c < len(images)):
                    image_name = images[c].split("/")[-1]
                    copyfile(images[c], os.path.join(tracker_path, image_name))
                    print(image_name)
            i += N
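
Examples #1, #7, and #9 all assume the same image file-name layout, built in example #1 and parsed field by field in example #9: a face id, four bounding-box values, a timestamp, and four padded-box values, joined with underscores. A hypothetical helper that makes that assumed convention explicit (illustration only, and it presumes the face id itself contains no underscores):

def parse_image_id(image_name):
    # Illustrative parser for the assumed layout:
    # {face_id}_{bbox x4}_{timestamp}_{padded bbox x4}.jpg
    parts = image_name.replace(".jpg", "").split("_")
    return {
        "face_id": parts[0],
        "bounding_box": [int(v) for v in parts[1:5]],
        "time_stamp": float(parts[5]),
        "padded_bbox": "_".join(parts[-4:]),
    }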
Code example #8
    shuffle(image_id_list)

    N = 100
    count = 0
    total = 0
    i = 0
    while i < len(image_id_list):
        count += 1
        start = time.time()
        for c in range(i, min(len(image_id_list), i + N)):
            mongodb_faceinfo.find_one({"_id": image_id_list[c]})
        total += time.time() - start
        print(time.time() - start)
        i += N
    print("average time: " + str(total / count))


web_log = mongodb_db["visithistory"]

track_folder = "../data/tracking"
replicate_folder = track_folder + "_"
create_if_not_exist(replicate_folder)
cursors = web_log.find({})
for cursor in cursors:
    image_info = cursor["image"].split("/")[-2:]
    track_id = image_info[0]
    image_name = image_info[1]
    image_path = os.path.join(track_folder, track_id, image_name)
    create_if_not_exist(os.path.join(replicate_folder, track_id))
    copyfile(image_path, os.path.join(replicate_folder, track_id, image_name))
Code example #9
def main(data_path, dashboard):
    #wipe data
    mongodb_dashinfo.remove({})
    mongodb_faceinfo.remove({})

    tracker_paths = [
        tracker_path for tracker_path in glob.glob(data_path + '/*')
        if os.path.isdir(tracker_path)
    ]
    # face_extractor = FacenetExtractor(face_rec_graph, model_path='../models/am_inception_res_v1_transfer_Vin_5hour_20180701.pb')

    preprocessor = Preprocessor()
    #get max track id
    existing_tracking_paths = [
        dir for dir in os.listdir(Config.TRACKING_DIR)
        if os.path.isdir(os.path.join(Config.TRACKING_DIR, dir))
    ]
    track_id = (max(int(dir) for dir in existing_tracking_paths) + 1
                if existing_tracking_paths else 0)
    for tracker_path in tracker_paths:  #assuming that each annotated folder is a tracker
        tracker_save_folder = os.path.join(Config.TRACKING_DIR, str(track_id))
        preprocessed_images = []
        insert_list = []
        #create fake face_id
        face_id = '{}-{}-{}'.format("Office", track_id, time.time())
        display_imgs = [
            os.path.basename(_dir)
            for _dir in glob.glob(tracker_path + '/*.jpg')
        ]

        #iterate through list of img names
        for display_img in display_imgs:
            image_id = display_img.replace(".jpg", "")
            img = misc.imread(os.path.join(tracker_path, display_img))
            #parse image data
            data_split = image_id.split('_')
            data_split[0] = str(track_id)
            image_id = '_'.join(data_split)
            bbox = data_split[1:5]
            bbox = [int(i) for i in bbox]
            padded_bbox = '_'.join(data_split[-4:])
            time_stamp = float(data_split[5])

            cropped_face = CropperUtils.reverse_display_face(img, padded_bbox)
            preprocessed_image = preprocessor.process(cropped_face)
            emb_array, _ = face_extractor.extract_features(preprocessed_image)

            insert_list.append({
                'track_id': track_id,
                'image_id': image_id,
                'face_id': face_id,
                'time_stamp': time_stamp,
                'bounding_box': bbox,
                'embedding': emb_array.tolist(),
                'padded_bbox': padded_bbox,
                'points': None,
                'is_registered': True
            })

            #save image to TRACKING DIR
            create_if_not_exist(tracker_save_folder)
            misc.imsave(os.path.join(tracker_save_folder, image_id + '.jpg'),
                        img)
            print(image_id)

            # preprocessed_images.append(preprocessor.process(cropped_face))

            # #extract embeddings all at once for performance
            # embs_array, _ = face_extractor.extract_features_all_at_once(preprocessed_images)

            # #map embedding with its corresponding image id
            # for i in range(len(s_insert_list)):
            #     insert_list[i]['embedding'] = [embs_array[i].tolist()] #embedding is saved as (1,128)

        #insert all images at once for performance
        mongodb_faceinfo.insert(insert_list)

        #add log to dash info
        mongodb_dashinfo.remove({'track_id': track_id})
        mongodb_dashinfo.insert_one({
            'track_id': track_id,
            'represent_image_id': insert_list[0]['image_id'],
            'face_id': face_id,
            'is_registered': True
        })

        if dashboard:
            #simulate cv <---> web real-time connection
            queue_msg = '|'.join([
                face_id, Config.SEND_RBMQ_HTTP + '/' + str(track_id) + '/',
                insert_list[0]['image_id'],
                str(time.time())
            ])
            rabbit_mq.send(Config.Queues.LIVE_RESULT, queue_msg)

        track_id += 1  #increment track id
Code example #10
def general_process(lock_id, detector, preprocessor, face_extractor,
                    blynk_locker):
    '''
    INPUT: lock_id
    '''
    # Get locker information
    # lock_id = 'query from mongo'
    locker_info = mongodb_lockersinfo.find({'lock_id': lock_id})[0]
    this_locker = mongodb_lockers.find({'lock_id': lock_id})[0]
    cam_url = locker_info['cam_url']
    status = this_locker['status']

    blynk_locker.processing(status)

    # init face info
    mongodb_faceinfo = mongodb_db[str(lock_id)]

    # Variables for tracking faces
    frame_counter = 0
    start_time = time.time()
    acceptable_spoofing = 0

    # Variables holding the correlation trackers and the name per faceid
    tracking_folder = os.path.join(Config.TRACKING_DIR, str(lock_id))
    create_if_not_exist(tracking_folder)
    tracking_dirs = glob.glob(tracking_folder + '/*')
    if tracking_dirs == []:
        number_of_existing_trackers = 0
    else:
        lof_int_trackid = [
            int(tracking_dir.split('/')[-1]) for tracking_dir in tracking_dirs
        ]
        number_of_existing_trackers = max(lof_int_trackid) + 1
    tracker_manager = TrackerManager(
        'LOCKID' + str(lock_id), current_id=number_of_existing_trackers)
    frame_reader = URLFrameReader(cam_url, scale_factor=1)
    matcher = FaissMatcher()

    if status == 'locked':
        embs = []
        labels = []
        cursors = mongodb_faceinfo.find({
            'face_id': this_locker['lock_face_id']
        })
        for cursor in cursors:
            embs.append(np.array(cursor['embedding']))
            labels.append(cursor['image_id'])
        nof_registered_image_ids = len(labels)
        matcher.fit(embs, labels)

    while True:
        # in case the jerk hits the button
        if time.time() - start_time > 4:
            with open('../data/locker_{}_log.txt'.format(lock_id), 'a') as f:
                f.write('[LOCKER {}] OUT OF TIME! \n\n'.format(lock_id))
            frame_reader.release()
            blynk_locker.stop_processing(status)
            return -1

        frame = frame_reader.next_frame()
        if frame is None:
            print('Invalid Video Source')
            break

        fps_counter = time.time()
        # cv2.imshow('Locker {}'.format(lock_id), frame)
        # cv2.waitKey(1)

        tracker_manager.update_trackers(frame)
        if frame_counter % Config.Frame.FRAME_INTERVAL == 0:
            origin_bbs, points = detector.detect_face(frame)

            for i, in_origin_bb in enumerate(origin_bbs):
                origin_bb = in_origin_bb[:-1]

                display_face, str_padded_bbox = CropperUtils.crop_display_face(
                    frame, origin_bb)
                cropped_face = CropperUtils.crop_face(frame, origin_bb)

                # is_spoofing = spoofing_detector.is_face_spoofing(cropped_face)
                # if is_spoofing:
                #     acceptable_spoofing += 1
                # with open('../data/spoofing_log.txt', 'a') as f:
                #     f.write('Spoofing Detected at Locker {}: {}\n'.format(lock_id, is_spoofing))
                # if acceptable_spoofing > 5:
                #     with open('../data/locker_{}_log.txt'.format(lock_id), 'a') as f:
                #         f.write(
                #             '[LOCKER {}] STOP PROCESSING. '
                #             'SPOOFING DETECTED!\n'.format(lock_id)
                #         )
                #     frame_reader.release()
                #     blynk_locker.stop_processing(status)
                #     return -1

                # Calculate embedding
                preprocessed_image = preprocessor.process(cropped_face)
                # preprocessed_image = align_preprocessor.process(frame, points[:,i], aligner, 160)
                emb_array, _ = face_extractor.extract_features(
                    preprocessed_image)

                face_info = FaceInfo(origin_bb.tolist(), emb_array,
                                     frame_counter, display_face,
                                     str_padded_bbox, points[:, i].tolist())

                is_good_face = handle_filters(points[:, i], coeff_extractor,
                                              face_info, preprocessed_image)

                face_info.is_good = is_good_face
                # TODO: refactor matching_detected_face_with_trackers
                matched_track_id = tracker_manager.track(face_info)

                if not face_info.is_good:
                    print('BAD FACE')
                    continue

                # Update tracker_manager
                tracker_manager.update(matched_track_id, frame, face_info)
                checking_tracker = None
                checking_tracker, top_predicted_face_ids, matching_result_dict = \
                    tracker_manager.check_and_recognize_tracker(
                        matcher,
                        matched_track_id,
                        mongodb_faceinfo,
                        None)
                # handle_results(checking_tracker, matching_result_dict)
                if checking_tracker is not None:
                    dumped_images = checking_tracker.dump_images(
                        mongodb_faceinfo,
                        add_new=True,
                        trackingfolder=tracking_folder)
                    checking_tracker.represent_image_id = dumped_images[0]
                    face_url = os.path.join(Config.SEND_RBMQ_HTTP, str(lock_id),
                                            str(checking_tracker.track_id),
                                            checking_tracker.represent_image_id)
                    face_url += '.jpg'
                    if status == 'available':
                        # Save locker, sign up the face
                        mongodb_lockers.remove({'lock_id': lock_id})
                        msg_dict = {
                            'lock_id': lock_id,
                            'status': 'locked',
                            'lock_face_url': face_url,
                            'lock_face_id': checking_tracker.face_id,
                            'lock_timestamp': time.time(),
                            'unlock_face_url': None,
                            'unlock_face_id': None,
                            'unlock_timestamp': None
                        }
                        mongodb_lockers.insert_one(msg_dict)

                        # update logs
                        msg_dict.update({'log_timestamp': time.time()})
                        mongodb_logs.insert_one(msg_dict)
                        with open('../data/locker_{}_log.txt'.format(lock_id),
                                  'a') as f:
                            f.write(
                                '[LOCKER {}] REGISTERED FACE AS {}. LOCKED\n'.
                                format(lock_id, checking_tracker.face_id))
                        blynk_locker.stop_processing('locked')

                    elif status == 'locked':
                        # Release the locker, face verification
                        # update locker
                        msg_dict = mongodb_lockers.find(
                            {'lock_id': lock_id}, projection={"_id": False})[0]
                        msg_dict.update({
                            'unlock_face_url': face_url,
                            'unlock_timestamp': time.time()
                        })

                        if this_locker['lock_face_id'] == checking_tracker.face_id:
                            print('UNLOCK!')
                            blynk_locker.stop_processing('available')
                            mongodb_lockers.remove({'lock_id': lock_id})
                            mongodb_lockers.insert_one({
                                'lock_id': lock_id,
                                'status': 'available',
                                'lock_face_id': None,
                                'lock_face_url': None,
                                'lock_timestamp': None,
                                'unlock_face_id': None,
                                'unlock_face_url': None,
                                'unlock_timestamp': None
                            })
                            with open(
                                    '../data/locker_{}_log.txt'.format(lock_id),
                                    'a') as f:
                                f.write(
                                    '[LOCKER {}] MATCHED WITH FACE ID {}. '
                                    'UNLOCKED. THIS LOCKER IS AVAILABLE NOW!\n'.
                                    format(lock_id, checking_tracker.face_id))

                        else:
                            print('NOT MATCH')
                            blynk_locker.stop_processing('locked')
                            with open(
                                    '../data/locker_{}_log.txt'.format(lock_id),
                                    'a') as f:
                                f.write('[LOCKER {}] NOT MATCH. '
                                        'PLEASE TRY AGAIN!\n'.format(lock_id))

                        # update logs
                        msg_dict.update({'log_timestamp': time.time()})
                        mongodb_logs.insert_one(msg_dict)
                    frame_reader.release()
                    return 1
            tracker_manager.find_and_process_end_track(mongodb_faceinfo)
        # Count every processed frame, not only the detection frames
        frame_counter += 1
        print("FPS: %f" % (1 / (time.time() - fps_counter)))
Code example #11
frame_reader = URLFrameReader(0)
face_detector = MTCNNDetector(FaceGraph())
frame_processor = ROIFrameProcessor(scale_factor=2)

mask_classifier = mask_glasses.MaskClassifier()
glasses_classifier = mask_glasses.GlassesClassifier()

# 'normalization' is assumed to be defined or imported elsewhere in the original module
preprocessor = Preprocessor(algs=normalization)

MASK_DIR = '%s/data/Mask/' % Config.ROOT
NOMASK_DIR = '%s/data/No_Mask/' % Config.ROOT
GLASSES_DIR = '%s/data/Glasses/' % Config.ROOT
NOGLASSES_DIR = '%s/data/No_Glasses/' % Config.ROOT

create_if_not_exist(MASK_DIR)
create_if_not_exist(NOMASK_DIR)
create_if_not_exist(GLASSES_DIR)
create_if_not_exist(NOGLASSES_DIR)

while frame_reader.has_next():
    frame = frame_reader.next_frame()
    if frame is None:
        break

    bbs, pts = face_detector.detect_face(frame)
    preprocessed = preprocessor.process(frame)

    has_mask = None
    has_glasses = None
Code example #12
import glob

from config import Config
from cv_utils import PickleUtils, create_if_not_exist, CropperUtils
from pymongo import MongoClient
from face_extractor import FacenetExtractor
from preprocess import Preprocessor
from tf_graph import FaceGraph
# RabbitMQ (used below) is assumed to be imported from the project's messaging utilities

facial_dirs = glob.glob('/mnt/production_data/tch_data/tch_data_Mar_June/*')
queue_name = Config.Queues.LIVE_RESULT
face_extractor_model = Config.FACENET_DIR
face_rec_graph_face = FaceGraph()
face_extractor = FacenetExtractor(face_rec_graph_face,
                                  model_path=face_extractor_model)
preprocessor = Preprocessor()
create_if_not_exist(Config.TRACKING_DIR)

rabbit_mq = RabbitMQ((Config.Rabbit.USERNAME, Config.Rabbit.PASSWORD),
                     (Config.Rabbit.IP_ADDRESS, Config.Rabbit.PORT))

mongodb_client = MongoClient(Config.MongoDB.IP_ADDRESS,
                             Config.MongoDB.PORT,
                             username=Config.MongoDB.USERNAME,
                             password=Config.MongoDB.PASSWORD)
mongodb_db = mongodb_client[Config.MongoDB.DB_NAME]
mongodb_dashinfo = mongodb_db[Config.MongoDB.DASHINFO_COLS_NAME]
mongodb_faceinfo = mongodb_db[Config.MongoDB.FACEINFO_COLS_NAME]


def under_line_join(jlist):
    tmp = jlist
Code example #13
    parser.add_argument('-fem',
                        '--face_extractor_model',
                        help='path to model want to use instead of default',
                        default=Config.Model.FACENET_DIR)
    parser.add_argument(
        '-rs',
        '--re_source',
        help='Set the stream source again if stream connection is interrupted',
        action='store_true')
    parser.add_argument(
        '-rb',
        '--rethinkdb',
        help='Use the first 30 face images to recognize and send them to rethinkdb',
        action='store_true')
    args = parser.parse_args()

    # Run
    if args.video_out is not None:
        Config.Track.TRACKING_VIDEO_OUT = True
        Config.Track.VIDEO_OUT_PATH = args.video_out
    Config.Mode.SEND_QUEUE_TO_DASHBOARD = args.dashboard
    Config.Matcher.CLEAR_SESSION = args.clear_session
    Config.Track.SEND_RECOG_API = args.rethinkdb

    create_if_not_exist(Config.Dir.TRACKING_DIR)
    create_if_not_exist(Config.Dir.LOG_DIR)

    generic_function(args.cam_url, args.queue_reader, args.area,
                     args.face_extractor_model, args.re_source)