Code Example #1
    def __init__(self,
                 connector,
                 label=None,
                 path_to_conf='yolo-vibe-nr_live.yaml'):
        self.connector = connector
        self.connector.enable_frame_throw()
        self.name = 'YoloVibeModel'

        # Load config:
        path_to_conf = os.path.join(os.path.dirname(__file__), path_to_conf)
        conf = parse_conf(path_to_conf)
        self.connector.logger.info(
            'Config has been loaded from {}'.format(path_to_conf))

        # Init model:
        self.yolo, self.yolo_args = init_yolo(conf['yolo'])
        self.vibe, self.vibe_args = init_vibe(conf['vibe'])

        # Warmup:
        if 'warmup_img' in conf['input']:
            img = cv2.imread(conf['input']['warmup_img'], 1)
            warmup_holoport_pipeline(img, self.yolo, self.yolo_args, self.vibe,
                                     self.vibe_args)

        # Auxiliary params:
        self.aux_params = generate_aux_params(conf['input'])

        # Make queues, events and threads:
        self.break_event = threading.Event()
        self.frame_q = Queue(maxsize=10000)
        self.yolo_input_q = Queue(maxsize=1000)
        self.yolo_output_q = Queue(maxsize=1000)
        self.vibe_input_q = Queue(maxsize=1000)
        self.vibe_output_q = Queue(maxsize=1000)
        self.avatar_q = Queue(maxsize=10000)
        self.workers = []

        # Make workers:
        worker_args = (self.yolo_args, self.break_event, self.frame_q,
                       self.yolo_input_q, self.aux_params)
        self.workers.append(
            threading.Thread(target=pre_yolo_worker, args=worker_args))
        worker_args = (self.vibe_args, self.break_event, self.yolo_output_q,
                       self.vibe_input_q)
        self.workers.append(
            threading.Thread(target=pre_vibe_worker, args=worker_args))
        worker_args = (self.yolo, self.vibe, self.break_event,
                       self.yolo_input_q, self.yolo_output_q,
                       self.vibe_input_q, self.vibe_output_q)
        self.workers.append(
            threading.Thread(target=yolo_vibe_inference_worker,
                             args=worker_args))
        worker_args = (conf['renderer'], self.break_event, self.vibe_output_q,
                       self.avatar_q)
        self.workers.append(
            threading.Thread(target=renderer_worker, args=worker_args))
        worker_args = (self.break_event, self.avatar_q,
                       self.connector.send_data, self.connector.send_frame)
        self.workers.append(
            threading.Thread(target=send_worker, args=worker_args))
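
The constructor above only builds the queues and worker threads; it never starts them. A minimal sketch of a companion run/stop pair (hypothetical method names, assumed here; it follows the start / break_event.set() / join pattern that Code Example #10 uses inline):

    def run(self):
        # Start every pipeline worker thread:
        for w in self.workers:
            w.start()

    def stop(self):
        # Signal the workers to leave their loops, then wait for them:
        self.break_event.set()
        for w in self.workers:
            w.join()
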
Code Example #2
File: test_lwgan.py Project: darkAlert/ganportation
def test_adaptive_personalization(path_to_conf, save_results=False, train_size=50):
    # Load config:
    conf = parse_conf(path_to_conf)
    print('Config has been loaded from', path_to_conf)

    # Init LWGAN-RT model:
    lwgan, args = init_lwgan(conf['lwgan'])
    lwgan.mode = 'predefined'

    # Load test data:
    target_path = conf['input']['target_path']
    frames_dir = os.path.join(conf['input']['frames_dir'], target_path)
    smpls_dir = os.path.join(conf['input']['smpls_dir'], target_path)
    print('Loading test data...')
    test_data = load_data(frames_dir, smpls_dir, args.image_size)
    print('Test data has been loaded:', len(test_data))

    # Adaptive personalization:
    print('Running adaptive_personalization...')
    index = 5
    lwgan.run_adaptive_personalization(test_data[index]['lwgan_input'], test_data[index]['smpl'])

    # Inference:
    print('Inferencing...')
    steps = conf['input']['steps']
    view = parse_view_params(conf['input']['view'])
    delta = 360 / steps
    step_i = 0
    results = []
    start = time.time()

    for data in test_data:
        view['R'][0] = 0
        view['R'][1] = delta * step_i / 180.0 * np.pi
        view['R'][2] = 0

        step_i += 1
        if step_i >= steps:
            step_i = 0

        preds = lwgan.inference(data['lwgan_input'], data['smpl'], view)
        results.append(preds)

    elapsed = time.time() - start
    fps = len(test_data) / elapsed
    spf = elapsed / len(test_data)  # seconds per frame
    print('###Elapsed time:', elapsed, 'frames:', len(test_data), 'fps:', fps, 'spf:', spf)

    # Save the results:
    result_dir = conf['output']['result_dir']
    if save_results and result_dir is not None:
        print('Saving the results to', result_dir)
        if not os.path.exists(result_dir):
            os.makedirs(result_dir)
        for idx, preds in enumerate(results):
            out_path = os.path.join(result_dir, str(idx).zfill(5) + '.jpeg')
            save_cv2_img(preds, out_path, normalize=True)

    print('All done!')
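
Every example in this listing imports parse_conf from holoport.conf.conf_parser. Its body is not shown here; given the .yaml paths passed to it, a minimal sketch, assuming it simply loads a YAML file into a nested dict, would be:

import yaml

def parse_conf(path_to_conf):
    # Minimal sketch: read a YAML config into a nested dict so that
    # lookups like conf['lwgan'] and conf['input']['frames_dir'] resolve.
    with open(path_to_conf, 'r') as f:
        return yaml.safe_load(f)
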
Code Example #3
def test(path_to_conf, save_results=False):
    # Load config:
    conf = parse_conf(path_to_conf)
    print('Config has been loaded from', path_to_conf)

    # Init YOLO-RT model:
    conf['yolo']['gpu_id'] = '0'
    yolo, args = init_yolo(conf['yolo'])

    # Load test data:
    print('Loading test data...')
    frames_dir = os.path.join(conf['input']['frames_dir'],
                              conf['input']['target_path'])
    test_data = load_data(frames_dir, args.yolo_img_size)
    print('Test data has been loaded:', len(test_data))

    # Inference:
    print('Inferencing...')
    start = time.time()

    for data in test_data:
        output = yolo.inference(data['yolo_input'])
        data['yolo_output'] = output

    elapsed = time.time() - start
    fps = len(test_data) / elapsed
    spf = elapsed / len(test_data)  # seconds per frame
    print('###Elapsed time:', elapsed, 'frames:', len(test_data), 'fps:', fps,
          'spf:', spf)

    # Prepare output:
    for data in test_data:
        actual_size = [data['yolo_input'].shape[2:]]
        origin_size = [data['origin_frame'].shape]
        bboxes = convert_yolo_output_to_bboxes(data['yolo_output'],
                                               actual_size, origin_size)
        data['yolo_bbox'] = bboxes[0] if len(bboxes) else None

    # Save the results:
    result_dir = conf['output']['result_dir']
    if save_results and result_dir is not None:
        print('Saving the results to', result_dir)
        if not os.path.exists(result_dir):
            os.makedirs(result_dir)

        for idx, data in enumerate(test_data):
            if data['yolo_bbox'] is not None:
                pt1 = data['yolo_bbox'][0], data['yolo_bbox'][1]
                pt2 = data['yolo_bbox'][2], data['yolo_bbox'][3]
                img = cv2.rectangle(data['origin_frame'],
                                    pt1,
                                    pt2,
                                    color=(0, 0, 255))
                out_path = os.path.join(result_dir,
                                        str(idx).zfill(5) + '.jpeg')
                cv2.imwrite(out_path, img)
            # print ('{}: {}'.format(idx,data['yolo_bbox']))

    print('All done!')
Code Example #4
    def __init__(self,
                 connector,
                 label=None,
                 path_to_conf='retina-mobilenet-aus_live.yaml'):
        self.connector = connector
        self.connector.enable_frame_throw()
        self.name = 'RetinaAUsModel'

        # Load config:
        path_to_conf = os.path.join(os.path.dirname(__file__), path_to_conf)
        conf = parse_conf(path_to_conf)
        self.connector.logger.info(
            'Config has been loaded from {}'.format(path_to_conf))

        # Init models:
        self.retina, self.retina_args = init_retina(conf['retina'])
        self.aus_model, self.aus_args = init_aus(conf['aus'])

        # Warmup:
        if 'warmup_img' in conf['input']:
            img = cv2.imread(conf['input']['warmup_img'], 1)
            warmup_pipeline(img, self.retina, self.retina_args, self.aus_model,
                            self.aus_args)

        # Make queues, events and threads:
        self.break_event = threading.Event()
        self.frame_q = Queue(maxsize=10000)
        self.retina_input_q = Queue(maxsize=1000)
        self.retina_output_q = Queue(maxsize=1000)
        self.aus_input_q = Queue(maxsize=1000)
        self.aus_output_q = Queue(maxsize=1000)
        self.workers = []
        self.lock = threading.Lock()

        # Make workers:
        worker_args = (self.retina_args, self.break_event, self.frame_q,
                       self.retina_input_q)
        self.workers.append(
            threading.Thread(target=pre_retina_worker, args=worker_args))
        worker_args = (self.retina, self.lock, self.break_event,
                       self.retina_input_q, self.retina_output_q)
        self.workers.append(
            threading.Thread(target=retina_inference_worker, args=worker_args))
        worker_args = (self.retina_args, self.aus_args, self.break_event,
                       self.retina_output_q, self.aus_input_q)
        self.workers.append(
            threading.Thread(target=pre_aus_worker, args=worker_args))
        worker_args = (self.aus_model, self.aus_args, self.lock,
                       self.break_event, self.aus_input_q, self.aus_output_q)
        self.workers.append(
            threading.Thread(target=aus_inference_worker, args=worker_args))
        worker_args = (self.break_event, self.aus_output_q,
                       self.connector.send_data, self.connector.send_frame)
        self.workers.append(
            threading.Thread(target=send_worker, args=worker_args))
Code Example #5
def test(path_to_conf, save_results=False):
    # Load config:
    conf = parse_conf(path_to_conf)
    print('Config has been loaded from', path_to_conf)

    # Init VIBE-RT model:
    conf['vibe']['gpu_id'] = '0'
    vibe, args = init_vibe(conf['vibe'])

    # Load test data:
    print('Loading test data...')
    frames_dir = conf['input']['frames_dir']
    yolo_bboxes_dir = conf['input']['yolo_bboxes_dir']
    avatar_bboxes_dir = conf['input']['avatar_bboxes_dir']
    target_path = conf['input']['target_path']
    test_data = load_data(frames_dir, yolo_bboxes_dir, avatar_bboxes_dir,
                          target_path, args.bbox_scale, args.crop_size)
    print('Test data has been loaded:', len(test_data))

    # Inference:
    print('Inferencing...')
    start = time.time()

    for data in test_data:
        output = vibe.inference(data['vibe_input'])

        avatar_cam = convert_cam(cam=output['pred_cam'].numpy(),
                                 bbox1=data['yolo_cbbox'],
                                 bbox2=data['scene_cbbox'],
                                 truncated=True)
        data['smpl'] = {
            'pred_cam': output['pred_cam'].numpy(),
            'pose': output['pose'].numpy(),
            'betas': output['betas'].numpy(),
            'rotmat': output['rotmat'].numpy(),
            'avatar_cam': avatar_cam,
        }

    elapsed = time.time() - start
    fps = len(test_data) / elapsed
    spf = elapsed / len(test_data)  # seconds per frame
    print('###Elapsed time:', elapsed, 'frames:', len(test_data), 'fps:', fps,
          'spf:', spf)

    # Save the results:
    result_dir = conf['output']['result_dir']
    if save_results and result_dir is not None:
        pose, betas, rotmat, avatar_cam, frame_paths = [], [], [], [], []

        # Merge outputs:
        for data in test_data:
            pose.append(data['smpl']['pose'])
            betas.append(data['smpl']['betas'])
            rotmat.append(data['smpl']['rotmat'])
            avatar_cam.append(data['smpl']['avatar_cam'])
            frame_paths.append(data['path'])
        pose = np.concatenate(pose, axis=0)
        betas = np.concatenate(betas, axis=0)
        rotmat = np.concatenate(rotmat, axis=0)
        avatar_cam = np.concatenate(avatar_cam, axis=0)
        frame_paths = np.concatenate(frame_paths, axis=0)

        # Save:
        output_dir = os.path.join(result_dir, target_path)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        output_path = os.path.join(output_dir, 'smpl.npz')

        np.savez(output_path,
                 avatar_cam=avatar_cam,
                 pose=pose,
                 betas=betas,
                 rotmat=rotmat,
                 frame_paths=frame_paths)
        print('The results have been saved to', result_dir)

    print('All done!')
Code Example #6
    def __init__(self, connector, label=None, path_to_conf=None):
        self.connector = connector
        self.connector.enable_frame_throw()
        self.name = 'HoloportModel'

        # Load config:
        if path_to_conf is None:
            if label is not None and label == 'holoport_adaptive':
                path_to_conf = 'yolo-vibe-lwgan_adaptive.yaml'
            elif label is not None and \
                    any(label == l for l in ['holoport_latest', 'holoport_rotation_latest']):
                path_to_conf = 'yolo-vibe-lwgan_last.yaml'
            else:
                path_to_conf = 'yolo-vibe-lwgan_live.yaml'
        path_to_conf = os.path.join(os.path.dirname(__file__), path_to_conf)
        conf = parse_conf(path_to_conf)
        self.connector.logger.info('Config has been loaded from {}'.format(path_to_conf))

        # FPS:
        if 'target_fps' in conf['input']:
            self.target_fps = conf['input']['target_fps']
        else:
            self.target_fps = 15
        self.ms_per_frame = 1.0 / self.target_fps  # per-frame interval in seconds, despite the name

        # Init model:
        self.yolo, self.yolo_args = init_yolo(conf['yolo'])
        self.vibe, self.vibe_args = init_vibe(conf['vibe'])
        self.lwgan, self.lwgan_args = init_lwgan(conf['lwgan'])
        self.lwgan.mode = 'view'  # or: 'predefined'
        self.adaptive = False

        if label is not None:
            if label == 'holoport_andrey':
                img_path = os.path.join(os.path.dirname(__file__), 'assets/andrey_260_img.tensor')
                smpl_path = os.path.join(os.path.dirname(__file__), 'assets/andrey_260_smpl.tensor')
                self.lwgan.load_descriptor(img_path, smpl_path)
                self.lwgan.desc_smpl = self.lwgan.desc_smpl[0]
            elif label == 'holoport_yulia':
                img_path = os.path.join(os.path.dirname(__file__), 'assets/yulia_166_img.tensor')
                smpl_path = os.path.join(os.path.dirname(__file__), 'assets/yulia_166_smpl.tensor')
                self.lwgan.load_descriptor(img_path, smpl_path)
            elif label == 'holoport_adaptive':
                self.lwgan.ada_conf = conf['adaptive']
                self.adaptive = True
            elif label == 'holoport_rotation_latest':
                conf['input']['steps'] = 90
                self.lwgan.load_descriptor(conf['lwgan']['desc_img'], conf['lwgan']['desc_smpl'])
            elif label == 'holoport_latest':
                self.lwgan.load_descriptor(conf['lwgan']['desc_img'], conf['lwgan']['desc_smpl'])

        # Warmup:
        if 'warmup_img' in conf['input']:
            img = cv2.imread(conf['input']['warmup_img'], 1)
            warmup_holoport_pipeline(img, self.yolo, self.yolo_args,
                                     self.vibe, self.vibe_args,
                                     self.lwgan, self.lwgan_args)

        # Auxiliary params:
        self.aux_params = generate_aux_params(conf['input'])

        # Make queues, events and threads:
        self.break_event = threading.Event()
        self.frame_q = Queue(maxsize=10000)
        self.yolo_input_q = Queue(maxsize=10000)
        self.yolo_output_q = Queue(maxsize=10000)
        self.vibe_input_q = Queue(maxsize=10000)
        self.vibe_output_q = Queue(maxsize=10000)
        self.lwgan_input_q = Queue(maxsize=10000)
        self.lwgan_output_q = Queue(maxsize=10000)
        self.avatar_q = Queue(maxsize=10000)
        self.workers = []

        # Make workers:
        worker_args = (self.yolo_args, self.break_event, self.frame_q, self.yolo_input_q, self.aux_params)
        self.workers.append(threading.Thread(target=pre_yolo_worker, args=worker_args))
        worker_args = (self.vibe_args, self.break_event, self.yolo_output_q, self.vibe_input_q, 0.005, True)
        self.workers.append(threading.Thread(target=pre_vibe_worker, args=worker_args))
        worker_args = (self.lwgan_args, self.break_event, self.vibe_output_q, self.lwgan_input_q, self.aux_params)
        self.workers.append(threading.Thread(target=pre_lwgan_worker, args=worker_args))
        worker_args = (self.break_event, self.lwgan_output_q, self.avatar_q)
        self.workers.append(threading.Thread(target=postprocess_worker, args=worker_args))
        worker_args = (self.yolo, self.vibe, self.break_event, self.yolo_input_q,
                       self.yolo_output_q, self.vibe_input_q, self.vibe_output_q)
        self.workers.append(threading.Thread(target=yolo_vibe_inference_worker, args=worker_args))
        worker_args = (self.lwgan, self.break_event, self.lwgan_input_q, self.lwgan_output_q, 0.005, True, self.adaptive)
        self.workers.append(threading.Thread(target=lwgan_inference_worker, args=worker_args))
        worker_args = (self.break_event, self.avatar_q, self.connector.send_data, self.connector.send_frame)
        self.workers.append(threading.Thread(target=send_worker, args=worker_args))
Code Example #7
    def __init__(self, connector, label=None, path_to_conf=None):
        self.connector = connector
        self.connector.enable_frame_throw()
        self.name = 'HoloportAdaModel'

        # Load config:
        if path_to_conf is None:
            path_to_conf = 'adaptive_training.yaml'
        path_to_conf = os.path.join(os.path.dirname(__file__), path_to_conf)
        self.conf = parse_conf(path_to_conf)
        self.connector.logger.info('Config has been loaded from {}'.format(path_to_conf))

        # FPS:
        if 'target_fps' in self.conf['input']:
            self.target_fps = self.conf['input']['target_fps']
        else:
            self.target_fps = 15
        self.ms_per_frame = 1.0 / self.target_fps

        # Init model:
        self.yolo, self.yolo_args = init_yolo(self.conf['yolo'])
        self.vibe, self.vibe_args = init_vibe(self.conf['vibe'])
        self.pre_lwgan_args = lambda: 0  # lambda used as a throwaway attribute container
        self.pre_lwgan_args.image_size = self.conf['lwgan_ada']['image_size']
        self.dataset = []

        # Warmup:
        if 'warmup_img' in self.conf['input']:
            img = cv2.imread(self.conf['input']['warmup_img'], 1)
            warmup_holoport_pipeline(img, self.yolo, self.yolo_args,
                                     self.vibe, self.vibe_args)

        # Auxiliary params:
        self.aux_params = generate_aux_params(self.conf['input'])

        # Make queues, events and threads:
        self.break_event = threading.Event()
        self.break_send_event = threading.Event()
        self.dataset_is_ready = threading.Event()
        self.frame_q = Queue(maxsize=1000)
        self.yolo_input_q = Queue(maxsize=1000)
        self.yolo_output_q = Queue(maxsize=1000)
        self.vibe_input_q = Queue(maxsize=1000)
        self.vibe_output_q = Queue(maxsize=1000)
        self.draw_q = Queue(maxsize=1000)
        self.output_q = Queue(maxsize=1000)
        self.dataset_q = Queue(maxsize=1000)
        self.workers = []

        # Make workers:
        worker_args = (self.yolo_args, self.break_event, self.frame_q, self.yolo_input_q, self.aux_params)
        self.workers.append(threading.Thread(target=pre_yolo_worker, args=worker_args))
        worker_args = (self.vibe_args, self.break_event, self.yolo_output_q, self.vibe_input_q, 0.005, True)
        self.workers.append(threading.Thread(target=pre_vibe_worker, args=worker_args))
        worker_args = (self.pre_lwgan_args, self.break_event, self.vibe_output_q, self.draw_q, self.aux_params)
        self.workers.append(threading.Thread(target=pre_lwgan_worker, args=worker_args))
        worker_args = (self.yolo, self.vibe, self.break_event, self.yolo_input_q,
                       self.yolo_output_q, self.vibe_input_q, self.vibe_output_q)
        self.workers.append(threading.Thread(target=yolo_vibe_inference_worker, args=worker_args))
        worker_args = (self.break_event, self.dataset_is_ready, self.dataset,
                       self.conf['lwgan_ada']['dataset_size'], self.draw_q, self.output_q)
        self.workers.append(threading.Thread(target=draw_worker, args=worker_args))
        worker_args = (self.break_send_event, self.output_q, self.connector.send_data, self.connector.send_frame)
        self.s_worker = threading.Thread(target=send_worker, args=worker_args)
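
The lambda: 0 in the constructor above is a throwaway function object used only as an attribute container for pre_lwgan_worker. A sketch of the same idea with the standard library's types.SimpleNamespace (an equivalent alternative, not the project's code):

from types import SimpleNamespace

# Equivalent attribute container for the args object that
# pre_lwgan_worker reads image_size from:
pre_lwgan_args = SimpleNamespace(image_size=conf['lwgan_ada']['image_size'])
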
Code Example #8
File: hretina.py Project: darkAlert/ganportation
    return img


if __name__ == '__main__':
    from holoport.conf.conf_parser import parse_conf
    import time
    import os
    import pickle

    def write_pickle_file(pkl_path, data_dict):
        with open(pkl_path, 'wb') as fp:
            pickle.dump(data_dict, fp, protocol=2)

    # Init Retina:
    conf = parse_conf('./holoport/conf/local/retina-resnet50.yaml')
    # conf = parse_conf('./holoport/conf/local/retina-mobilenet.yaml')
    retina, args = init_retina(conf['retina'])

    # Load an image:
    img_path = conf['input']['warmup_img']
    img_raw = cv2.imread(img_path, cv2.IMREAD_COLOR)
    img = prepare_input(img_raw, conf['retina']['img_width'],
                        conf['retina']['img_hight'])

    # 20 identical runs to test:
    result_img, detections = None, None
    for i in range(20):
        # Inference:
        tic = time.time()
        boxes, landms, scores = retina.inference(img)
Code Example #9
def test(path_to_conf, save_results=True):
    # Load configs:
    conf = parse_conf(path_to_conf)
    print('Config has been loaded from', path_to_conf)

    # Init YOLO-RT model:
    conf['yolo']['gpu_id'] = '0'
    yolo, yolo_args = init_yolo(conf['yolo'])

    # Init VIBE-RT model:
    conf['vibe']['gpu_id'] = '0'
    vibe, vibe_args = init_vibe(conf['vibe'])

    # Init LWGAN-RT model:
    conf['lwgan']['gpu_ids'] = '1'
    lwgan, lwgan_args = init_lwgan(conf['lwgan'])

    # Warmup:
    if 'warmup_img' in conf['input']:
        img = cv2.imread(conf['input']['warmup_img'], 1)
        warmup_holoport_pipeline(img, yolo, yolo_args, vibe, vibe_args, lwgan,
                                 lwgan_args)

    # Load test data:
    print('Loading test data...')
    frames_dir = os.path.join(conf['input']['frames_dir'],
                              conf['input']['target_path'])
    n = int(conf['input']['max_frames']) if 'max_frames' in conf['input'] else None
    test_data = load_frames(frames_dir, max_frames=n)
    print('Test data has been loaded:', len(test_data))

    # Avatar view params:
    steps = conf['input']['steps']
    view = parse_view_params(conf['input']['view'])
    delta = 360 / steps
    step_i = 0

    # Dummy scene params:
    t = conf['input']['scene_bbox'].split(',')
    assert len(t) == 4
    dummy_scene_bbox = np.array([[int(t[0]), int(t[1]), int(t[2]), int(t[3])]],
                                dtype=np.int64)
    dummy_scene_cbbox = dummy_scene_bbox.copy()
    # (x,y,w,h) -> (cx,cy,w,h):
    dummy_scene_cbbox[:, 0] = dummy_scene_bbox[:, 0] + dummy_scene_bbox[:, 2] * 0.5
    dummy_scene_cbbox[:, 1] = dummy_scene_bbox[:, 1] + dummy_scene_bbox[:, 3] * 0.5

    # Inference:
    print('Inferencing...')
    start = time.time()
    pre_yolo_elapsed, post_yolo_elapsed = 0, 0
    pre_vibe_elapsed, post_vibe_elapsed = 0, 0
    pre_lwgan_elapsed, post_lwgan_elapsed = 0, 0

    for idx, data in enumerate(tqdm(test_data)):
        # Update avatar view:
        view['R'][0] = 0
        view['R'][1] = delta * step_i / 180.0 * np.pi
        view['R'][2] = 0
        data['lwgan_input_view'] = view

        # YOLO:
        t_start = time.time()
        data = pre_yolo(data, yolo_args)
        pre_yolo_elapsed += time.time() - t_start
        data['yolo_output'] = yolo.inference(data['yolo_input'])
        t_start = time.time()
        data = post_yolo(data)
        post_yolo_elapsed += time.time() - t_start

        if data['yolo_cbbox'] is None:
            print('Skip frame {}: person not found!'.format(idx))
            continue

        # Scene bbox and cbbox:
        data['scene_bbox'] = dummy_scene_bbox
        data['scene_cbbox'] = dummy_scene_cbbox

        # VIBE:
        t_start = time.time()
        data = pre_vibe(data, vibe_args)
        pre_vibe_elapsed += time.time() - t_start
        data['vibe_output'] = vibe.inference(data['vibe_input'])
        t_start = time.time()
        data = post_vibe(data)
        post_vibe_elapsed += time.time() - t_start

        # LWGAN:
        t_start = time.time()
        data = pre_lwgan(data, lwgan_args)
        pre_lwgan_elapsed += time.time() - t_start
        data['lwgan_output'] = lwgan.inference(data['lwgan_input_img'],
                                               data['lwgan_input_smpl'],
                                               data['lwgan_input_view'])
        t_start = time.time()
        data = post_lwgan(data)
        post_lwgan_elapsed += time.time() - t_start

        step_i += 1
        if step_i >= steps:
            step_i = 0

    elapsed = time.time() - start
    n = len(test_data)
    fps = n / elapsed
    spf = elapsed / len(test_data)  # seconds per frame
    print('###Elapsed time:', elapsed, 'frames:', n, 'fps:', fps, 'spf:', spf)
    print('Mean pre yolo:', pre_yolo_elapsed / n, ', post yolo:',
          post_yolo_elapsed / n)
    print('Mean pre vibe:', pre_vibe_elapsed / n, ', post vibe:',
          post_vibe_elapsed / n)
    print('Mean pre lwgan:', pre_lwgan_elapsed / n, ', post lwgan:',
          post_lwgan_elapsed / n)

    # Save the results:
    result_dir = conf['output']['result_dir']
    if save_results and result_dir is not None:
        print('Saving the results to', result_dir)
        if not os.path.exists(result_dir):
            os.makedirs(result_dir)
        for idx, data in enumerate(test_data):
            out_path = os.path.join(result_dir, str(idx).zfill(5) + '.jpeg')
            cv2.imwrite(out_path, data['avatar'])
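
Code Examples #2 and #9 step the avatar yaw by delta = 360 / steps degrees per frame, converting to radians for view['R'][1] and wrapping step_i back to zero after a full turn. A standalone check of that schedule:

import numpy as np

steps = 90           # frames per full 360-degree turn
delta = 360 / steps  # 4 degrees advanced per frame

for step_i in range(3):
    yaw = delta * step_i / 180.0 * np.pi      # radians, as in the examples
    print(step_i, round(np.degrees(yaw), 1))  # 0 0.0, 1 4.0, 2 8.0
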
Code Example #10
def test_multithreads(path_to_conf, save_results=True, realtime_ms=None):
    # Load configs:
    conf = parse_conf(path_to_conf)
    print('Config has been loaded from', path_to_conf)

    # Init YOLO-RT model:
    yolo, yolo_args = init_yolo(conf['yolo'])

    # Init VIBE-RT model:
    vibe, vibe_args = init_vibe(conf['vibe'])

    # Init LWGAN-RT model:
    lwgan, lwgan_args = init_lwgan(conf['lwgan'])

    # Warmup:
    if 'warmup_img' in conf['input']:
        img = cv2.imread(conf['input']['warmup_img'], 1)
        warmup_holoport_pipeline(img, yolo, yolo_args, vibe, vibe_args, lwgan, lwgan_args)

    # Load test data:
    print('Loading test data...')
    frames_dir = os.path.join(conf['input']['frames_dir'], conf['input']['target_path'])
    n = int(conf['input']['max_frames']) if 'max_frames' in conf['input'] else None
    test_data = load_frames(frames_dir, max_frames=n)
    print('Test data has been loaded:', len(test_data))

    # Avatar view params:
    steps = conf['input']['steps']
    view = parse_view_params(conf['input']['view'])

    # Dummy scene params:
    t = conf['input']['scene_bbox'].split(',')
    assert len(t) == 4
    dummy_scene_bbox = np.array([[int(t[0]), int(t[1]), int(t[2]), int(t[3])]], dtype=np.int64)
    dummy_scene_cbbox = dummy_scene_bbox.copy()
    dummy_scene_cbbox[:,0] = dummy_scene_bbox[:,0] + dummy_scene_bbox[:,2] * 0.5  # (x,y,w,h) -> (cx,cy,w,h)
    dummy_scene_cbbox[:,1] = dummy_scene_bbox[:,1] + dummy_scene_bbox[:,3] * 0.5

    # Set auxiliary params:
    aux_params = {}
    aux_params['scene_bbox'] = dummy_scene_bbox
    aux_params['scene_cbbox'] = dummy_scene_cbbox
    aux_params['steps'] = steps
    aux_params['view'] = view

    # Make queues, events and threads:
    break_event = threading.Event()
    frame_q = Queue(maxsize=10000)
    yolo_input_q = Queue(maxsize=1000)
    yolo_output_q = Queue(maxsize=1000)
    vibe_input_q = Queue(maxsize=1000)
    vibe_output_q = Queue(maxsize=1000)
    lwgan_input_q = Queue(maxsize=1000)
    lwgan_output_q = Queue(maxsize=1000)
    avatar_q = Queue(maxsize=10000)
    workers = []

    # Make workers:
    worker_args = (yolo_args, break_event, frame_q, yolo_input_q, aux_params)
    workers.append(threading.Thread(target=pre_yolo_worker, args=worker_args))
    worker_args = (vibe_args, break_event, yolo_output_q, vibe_input_q)
    workers.append(threading.Thread(target=pre_vibe_worker, args=worker_args))
    worker_args = (lwgan_args, break_event, vibe_output_q, lwgan_input_q)
    workers.append(threading.Thread(target=pre_lwgan_worker, args=worker_args))
    worker_args = (break_event, lwgan_output_q, avatar_q)
    workers.append(threading.Thread(target=postprocess_worker, args=worker_args))
    worker_args = (yolo, vibe, break_event, yolo_input_q, yolo_output_q, vibe_input_q, vibe_output_q)
    workers.append(threading.Thread(target=yolo_vibe_inference_worker, args=worker_args))
    if realtime_ms is None:
        worker_args = (lwgan, break_event, lwgan_input_q, lwgan_output_q)
    else:
        worker_args = (lwgan, break_event, lwgan_input_q, lwgan_output_q, 0.005, True)
    workers.append(threading.Thread(target=lwgan_inference_worker, args=worker_args))

    # Feed data:
    if realtime_ms is None:
        for data in test_data:
            frame_q.put(data)

    print('Inferencing... realtime frame interval:', realtime_ms)
    start = time.time()

    # Run workers:
    for w in workers:
        w.start()

    if realtime_ms is not None:
        # Simulate real-time frame capturing:
        for data in test_data:
            frame_q.put(data)
            print('{}/{}, yolo_in:{}, yolo_out:{}, vibe_in:{}, vibe_out:{}, lwgan_in:{}, lwgan_out:{}'.format(
                avatar_q.qsize(), len(test_data), yolo_input_q.qsize(),
                yolo_output_q.qsize(), vibe_input_q.qsize(), vibe_output_q.qsize(),
                lwgan_input_q.qsize(), lwgan_output_q.qsize()))
            time.sleep(realtime_ms)  # the value is used as seconds, despite the name

    else:
        # Wait for all the data to be processed
        while not frame_q.empty() or \
                not yolo_input_q.empty() or \
                not yolo_output_q.empty() or \
                not vibe_input_q.empty() or \
                not vibe_output_q.empty() or \
                not lwgan_input_q.empty() or \
                not lwgan_output_q.empty():
            print('{}/{}, yolo_in:{}, yolo_out:{}, vibe_in:{}, vibe_out:{}, lwgan_in:{}, lwgan_out:{}'.format(
                avatar_q.qsize(), len(test_data), yolo_input_q.qsize(),
                yolo_output_q.qsize(), vibe_input_q.qsize(), vibe_output_q.qsize(),
                lwgan_input_q.qsize(), lwgan_output_q.qsize()))
            time.sleep(0.1)

    # Stop workers:
    break_event.set()

    # Wait workers:
    for w in workers:
        w.join()

    # Log:
    elapsed = time.time() - start
    n = len(test_data)
    m = avatar_q.qsize()
    fps = n / elapsed
    spf = elapsed / len(test_data)  # seconds per frame
    print('###Elapsed time:', elapsed, 'processed:{}/{}'.format(m,n), 'fps:', fps, 'spf:', spf)

    # Save the results:
    result_dir = conf['output']['result_dir']
    if save_results and result_dir is not None:
        print('Saving the results to', result_dir)

        if not os.path.exists(result_dir):
            os.makedirs(result_dir)
        idx = 0

        while True:
            try:
                data = avatar_q.get(timeout=1)
                avatar_q.task_done()
                out_path = os.path.join(result_dir, str(idx).zfill(5) + '.jpeg')
                if 'not_found' in data:
                    dummy_output = np.zeros((100, 100, 3), dtype=np.uint8)
                    cv2.imwrite(out_path, dummy_output)
                else:
                    cv2.imwrite(out_path, data['avatar'])
                idx += 1
            except Empty:
                break

    print('All done!')
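
The drain loop at the end of this example catches Empty from the standard queue module; the snippet's imports are not shown, but it needs at least:

from queue import Queue, Empty
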
Code Example #11
File: haus.py Project: darkAlert/ganportation
    return img


if __name__ == '__main__':
    from holoport.conf.conf_parser import parse_conf
    import time
    import cv2
    import pickle

    def load_pickle_file(pkl_path):
        with open(pkl_path, 'rb') as f:
            data = pickle.load(f, encoding='latin1')
        return data

    # Init AUs model:
    conf = parse_conf('./holoport/conf/local/aus_resnet50.yaml')
    # conf = parse_conf('./holoport/conf/local/aus_resnet34.yaml')
    aus_model, args = init_aus(conf['aus'])

    # Load an image:
    img_path = conf['input']['warmup_img']
    img_raw = cv2.imread(img_path, cv2.IMREAD_COLOR)

    # Load landmarks for the image:
    faces = load_pickle_file(conf['input']['warmup_face'])
    landmarks = faces[0]['landms']
    face_img = prepare_input(img_raw, landmarks, args.au_face_size)

    # 20 identical runs to test:
    aus_raw = None
    for i in range(20):
        # (Truncated in the source listing; a single timed inference call
        # is assumed here, mirroring the Retina example above.)
        tic = time.time()
        aus_raw = aus_model.inference(face_img)