Example #1
def test_adaptive_personalization(path_to_conf, save_results=False, train_size=50):
    # Load config:
    conf = parse_conf(path_to_conf)
    print('Config has been loaded from', path_to_conf)

    # Init LWGAN-RT model:
    lwgan, args = init_lwgan(conf['lwgan'])
    lwgan.mode = 'predefined'

    # Load test data:
    target_path = conf['input']['target_path']
    frames_dir = os.path.join(conf['input']['frames_dir'], target_path)
    smpls_dir = os.path.join(conf['input']['smpls_dir'], target_path)
    print('Loading test data...')
    test_data = load_data(frames_dir, smpls_dir, args.image_size)
    print('Test data has been loaded:', len(test_data))

    # Adaptive personalization:
    print('Running adaptive_personalization...')
    index = 5  # an arbitrary frame used as the personalization source
    lwgan.run_adaptive_personalization(test_data[index]['lwgan_input'], test_data[index]['smpl'])

    # Inference:
    print('Inferencing...')
    steps = conf['input']['steps']
    view = parse_view_params(conf['input']['view'])
    delta = 360 / steps
    step_i = 0
    results = []
    start = time.time()

    for data in test_data:
        # Rotate the avatar about the vertical axis by `delta` degrees per
        # frame (converted to radians); a full turn takes `steps` frames:
        view['R'][0] = 0
        view['R'][1] = delta * step_i / 180.0 * np.pi
        view['R'][2] = 0

        step_i += 1
        if step_i >= steps:
            step_i = 0

        preds = lwgan.inference(data['lwgan_input'], data['smpl'], view)
        results.append(preds)

    elapsed = time.time() - start
    fps = len(test_data) / elapsed
    spf = elapsed / len(test_data)  # seconds per frame
    print('###Elapsed time:', elapsed, 'frames:', len(test_data), 'fps:', fps, 'spf:', spf)

    # Save the results:
    result_dir = conf['output']['result_dir']
    if save_results and result_dir is not None:
        print('Saving the results to', result_dir)
        if not os.path.exists(result_dir):
            os.makedirs(result_dir)
        for idx, preds in enumerate(results):
            out_path = os.path.join(result_dir, str(idx).zfill(5) + '.jpeg')
            save_cv2_img(preds, out_path, normalize=True)

    print('All done!')
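

# Usage sketch: a minimal entry point for the test above. The config path is
# a hypothetical placeholder; point it at a config providing the 'lwgan',
# 'input' and 'output' sections referenced in the function.
#
#   test_adaptive_personalization('conf/lwgan_conf.yaml', save_results=True)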
Example #2
def warmup_holoport_pipeline(img,
                             yolo,
                             yolo_args,
                             vibe=None,
                             vibe_args=None,
                             lwgan=None,
                             lwgan_args=None):
    print('Warming up holoport pipeline...')
    assert img is not None

    # Set dummy input:
    data = {'frame': img}
    view = parse_view_params('R=0,90,0/t=0,0,0')
    view['R'][0] = 0
    view['R'][1] = 0
    view['R'][2] = 0
    data['lwgan_input_view'] = view
    dummy_scene_bbox = np.array([[575, 150, 850, 850]], dtype=np.int64)
    dummy_scene_cbbox = dummy_scene_bbox.copy()
    # (x,y,w,h) -> (cx,cy,w,h):
    dummy_scene_cbbox[:, 0] = dummy_scene_bbox[:, 0] + dummy_scene_bbox[:, 2] * 0.5
    dummy_scene_cbbox[:, 1] = dummy_scene_bbox[:, 1] + dummy_scene_bbox[:, 3] * 0.5
    data['scene_bbox'] = dummy_scene_bbox
    data['scene_cbbox'] = dummy_scene_cbbox

    # YOLO:
    data = pre_yolo(data, yolo_args)
    data['yolo_output'] = yolo.inference(data['yolo_input'])
    data = post_yolo(data)
    print('YOLO has been warmed up!')

    assert data['yolo_cbbox'] is not None

    # VIBE:
    if vibe is None or vibe_args is None:
        return True
    data = pre_vibe(data, vibe_args)
    data['vibe_output'] = vibe.inference(data['vibe_input'])
    data = post_vibe(data)
    print('VIBE has been warmed up!')

    # LWGAN:
    if lwgan is None or lwgan_args is None:
        return True
    data = pre_lwgan(data, lwgan_args)
    data['lwgan_output'] = lwgan.inference(data['lwgan_input_img'],
                                           data['lwgan_input_smpl'],
                                           data['lwgan_input_view'])
    print('LWGAN has been warmed up!')

    return True
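

# Usage sketch: later stages are skipped when their model is None, so the
# pipeline can be warmed up incrementally (assuming init_yolo/init_vibe and a
# loaded config, as in Example #4):
#
#   yolo, yolo_args = init_yolo(conf['yolo'])
#   img = cv2.imread(conf['input']['warmup_img'], 1)
#   warmup_holoport_pipeline(img, yolo, yolo_args)            # YOLO only
#   warmup_holoport_pipeline(img, yolo, yolo_args,
#                            vibe, vibe_args)                 # YOLO + VIBE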
Example #3
def generate_aux_params(conf):
    # Avatar view params:
    steps = conf['steps']
    view = parse_view_params(conf['view'])

    # Dummy scene params:
    t = conf['scene_bbox'].split(',')
    assert len(t) == 4
    scene_bbox = np.array([[int(t[0]), int(t[1]), int(t[2]), int(t[3])]], dtype=np.int64)
    scene_cbbox = scene_bbox.copy()
    scene_cbbox[:, 0] = scene_bbox[:, 0] + scene_bbox[:, 2] * 0.5  # (x,y,w,h) -> (cx,cy,w,h)
    scene_cbbox[:, 1] = scene_bbox[:, 1] + scene_bbox[:, 3] * 0.5

    # Set auxiliary params:
    aux_params = {}
    aux_params['scene_bbox'] = scene_bbox
    aux_params['scene_cbbox'] = scene_cbbox
    aux_params['steps'] = steps
    aux_params['view'] = view

    return aux_params
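

# Usage sketch, assuming a conf dict shaped like the conf['input'] sections
# used in the tests below ('scene_bbox' is a comma-separated "x,y,w,h" string):
#
#   aux = generate_aux_params({'steps': 120,
#                              'view': 'R=0,90,0/t=0,0,0',
#                              'scene_bbox': '575,150,850,850'})
#   # aux['scene_cbbox'] holds the same box re-expressed as (cx,cy,w,h).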
Example #4
def test(path_to_conf, save_results=True):
    # Load configs:
    conf = parse_conf(path_to_conf)
    print('Config has been loaded from', path_to_conf)

    # Init YOLO-RT model:
    conf['yolo']['gpu_id'] = '0'
    yolo, yolo_args = init_yolo(conf['yolo'])

    # Init VIBE-RT model:
    conf['vibe']['gpu_id'] = '0'
    vibe, vibe_args = init_vibe(conf['vibe'])

    # Init LWGAN-RT model:
    conf['lwgan']['gpu_ids'] = '1'
    lwgan, lwgan_args = init_lwgan(conf['lwgan'])

    # Warmup:
    if 'warmup_img' in conf['input']:
        img = cv2.imread(conf['input']['warmup_img'], 1)
        warmup_holoport_pipeline(img, yolo, yolo_args, vibe, vibe_args, lwgan,
                                 lwgan_args)

    # Load test data:
    print('Loading test data...')
    frames_dir = os.path.join(conf['input']['frames_dir'],
                              conf['input']['target_path'])
    n = int(conf['input']['max_frames']) if 'max_frames' in conf['input'] else None
    test_data = load_frames(frames_dir, max_frames=n)
    print('Test data has been loaded:', len(test_data))

    # Avatar view params:
    steps = conf['input']['steps']
    view = parse_view_params(conf['input']['view'])
    delta = 360 / steps
    step_i = 0

    # Dummy scene params:
    t = conf['input']['scene_bbox'].split(',')
    assert len(t) == 4
    dummy_scene_bbox = np.array([[int(t[0]), int(t[1]), int(t[2]), int(t[3])]], dtype=np.int64)
    dummy_scene_cbbox = dummy_scene_bbox.copy()
    # (x,y,w,h) -> (cx,cy,w,h):
    dummy_scene_cbbox[:, 0] = dummy_scene_bbox[:, 0] + dummy_scene_bbox[:, 2] * 0.5
    dummy_scene_cbbox[:, 1] = dummy_scene_bbox[:, 1] + dummy_scene_bbox[:, 3] * 0.5

    # Inference:
    print('Inferencing...')
    start = time.time()
    pre_yolo_elapsed, post_yolo_elapsed = 0, 0
    pre_vibe_elapsed, post_vibe_elapsed = 0, 0
    pre_lwgan_elapsed, post_lwgan_elapsed = 0, 0

    for idx, data in enumerate(tqdm(test_data)):
        # Update avatar view:
        view['R'][0] = 0
        view['R'][1] = delta * step_i / 180.0 * np.pi
        view['R'][2] = 0
        data['lwgan_input_view'] = view

        # YOLO:
        t_start = time.time()
        data = pre_yolo(data, yolo_args)
        pre_yolo_elapsed += time.time() - t_start
        data['yolo_output'] = yolo.inference(data['yolo_input'])
        t_start = time.time()
        data = post_yolo(data)
        post_yolo_elapsed += time.time() - t_start

        if data['yolo_cbbox'] is None:
            print('Skip frame {}: person not found!'.format(idx))
            continue

        # Scene bbox and cbbox:
        data['scene_bbox'] = dummy_scene_bbox
        data['scene_cbbox'] = dummy_scene_cbbox

        # VIBE:
        t_start = time.time()
        data = pre_vibe(data, vibe_args)
        pre_vibe_elapsed += time.time() - t_start
        data['vibe_output'] = vibe.inference(data['vibe_input'])
        t_start = time.time()
        data = post_vibe(data)
        post_vibe_elapsed += time.time() - t_start

        # LWGAN:
        t_start = time.time()
        data = pre_lwgan(data, lwgan_args)
        pre_lwgan_elapsed += time.time() - t_start
        data['lwgan_output'] = lwgan.inference(data['lwgan_input_img'],
                                               data['lwgan_input_smpl'],
                                               data['lwgan_input_view'])
        t_start = time.time()
        data = post_lwgan(data)
        post_lwgan_elapsed += time.time() - t_start

        step_i += 1
        if step_i >= steps:
            step_i = 0

    elapsed = time.time() - start
    n = len(test_data)
    fps = n / elapsed
    spf = elapsed / len(test_data)  # seconds per frame
    print('###Elapsed time:', elapsed, 'frames:', n, 'fps:', fps, 'spf:', spf)
    print('Mean pre yolo:', pre_yolo_elapsed / n, ', post yolo:',
          post_yolo_elapsed / n)
    print('Mean pre vibe:', pre_vibe_elapsed / n, ', post vibe:',
          post_vibe_elapsed / n)
    print('Mean pre lwgan:', pre_lwgan_elapsed / n, ', post lwgan:',
          post_lwgan_elapsed / n)

    # Save the results:
    result_dir = conf['output']['result_dir']
    if save_results and result_dir is not None:
        print('Saving the results to', result_dir)
        if not os.path.exists(result_dir):
            os.makedirs(result_dir)
        for idx, data in enumerate(test_data):
            out_path = os.path.join(result_dir, str(idx).zfill(5) + '.jpeg')
            cv2.imwrite(out_path, data['avatar'])
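

# Usage sketch (hypothetical config path): run the sequential, single-threaded
# pipeline test above.
#
#   test('conf/holoport_conf.yaml', save_results=True)
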
def test_multithreads(path_to_conf, save_results=True, realtime_ms=None):
    # Load configs:
    conf = parse_conf(path_to_conf)
    print('Config has been loaded from', path_to_conf)

    # Init YOLO-RT model:
    yolo, yolo_args = init_yolo(conf['yolo'])

    # Init VIBE-RT model:
    vibe, vibe_args = init_vibe(conf['vibe'])

    # Init LWGAN-RT model:
    lwgan, lwgan_args = init_lwgan(conf['lwgan'])

    # Warmup:
    if 'warmup_img' in conf['input']:
        img = cv2.imread(conf['input']['warmup_img'], 1)
        warmup_holoport_pipeline(img, yolo, yolo_args, vibe, vibe_args, lwgan, lwgan_args)

    # Load test data:
    print('Loading test data...')
    frames_dir = os.path.join(conf['input']['frames_dir'], conf['input']['target_path'])
    n = int(conf['input']['max_frames']) if 'max_frames' in conf['input'] else None
    test_data = load_frames(frames_dir, max_frames=n)
    print('Test data has been loaded:', len(test_data))

    # Avatar view params:
    steps = conf['input']['steps']
    view = parse_view_params(conf['input']['view'])

    # Dummy scene params:
    t = conf['input']['scene_bbox'].split(',')
    assert len(t) == 4
    dummy_scene_bbox = np.array([[int(t[0]), int(t[1]), int(t[2]), int(t[3])]], dtype=np.int64)
    dummy_scene_cbbox = dummy_scene_bbox.copy()
    dummy_scene_cbbox[:, 0] = dummy_scene_bbox[:, 0] + dummy_scene_bbox[:, 2] * 0.5  # (x,y,w,h) -> (cx,cy,w,h)
    dummy_scene_cbbox[:, 1] = dummy_scene_bbox[:, 1] + dummy_scene_bbox[:, 3] * 0.5

    # Set auxiliary params:
    aux_params = {}
    aux_params['scene_bbox'] = dummy_scene_bbox
    aux_params['scene_cbbox'] = dummy_scene_cbbox
    aux_params['steps'] = steps
    aux_params['view'] = view
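
    # Note: the aux-params block above duplicates generate_aux_params() from
    # Example #3 and could be replaced by a single call with conf['input'].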

    # Make queues, events and threads:
    break_event = threading.Event()
    frame_q = Queue(maxsize=10000)
    yolo_input_q = Queue(maxsize=1000)
    yolo_output_q = Queue(maxsize=1000)
    vibe_input_q = Queue(maxsize=1000)
    vibe_output_q = Queue(maxsize=1000)
    lwgan_input_q = Queue(maxsize=1000)
    lwgan_output_q = Queue(maxsize=1000)
    avatar_q = Queue(maxsize=10000)
    workers = []
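
    # Dataflow between the worker threads:
    #   frame_q -> [pre_yolo] -> yolo_input_q -> [yolo inference] -> yolo_output_q
    #   -> [pre_vibe] -> vibe_input_q -> [vibe inference] -> vibe_output_q
    #   -> [pre_lwgan] -> lwgan_input_q -> [lwgan inference] -> lwgan_output_q
    #   -> [postprocess] -> avatar_q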

    # Make workers:
    worker_args = (yolo_args, break_event, frame_q, yolo_input_q, aux_params)
    workers.append(threading.Thread(target=pre_yolo_worker, args=worker_args))
    worker_args = (vibe_args, break_event, yolo_output_q, vibe_input_q)
    workers.append(threading.Thread(target=pre_vibe_worker, args=worker_args))
    worker_args = (lwgan_args, break_event, vibe_output_q, lwgan_input_q)
    workers.append(threading.Thread(target=pre_lwgan_worker, args=worker_args))
    worker_args = (break_event, lwgan_output_q, avatar_q)
    workers.append(threading.Thread(target=postprocess_worker, args=worker_args))
    worker_args = (yolo, vibe, break_event, yolo_input_q, yolo_output_q, vibe_input_q, vibe_output_q)
    workers.append(threading.Thread(target=yolo_vibe_inference_worker, args=worker_args))
    if realtime_ms is None:
        worker_args = (lwgan, break_event, lwgan_input_q, lwgan_output_q)
    else:
        worker_args = (lwgan, break_event, lwgan_input_q, lwgan_output_q, 0.005, True)
    workers.append(threading.Thread(target=lwgan_inference_worker, args=worker_args))

    # Feed data:
    if realtime_ms is None:
        for data in test_data:
            frame_q.put(data)

    print('Inferencing... realtime frame period (ms):', realtime_ms)
    start = time.time()

    # Run workers:
    for w in workers:
        w.start()

    if realtime_ms is not None:
        # Simulate real-time frame capturing:
        for data in test_data:
            frame_q.put(data)
            print('{}/{}, yolo_in:{}, yolo_out:{}, vibe_in:{}, vibe_out:{}, lwgan_in:{}, lwgan_out:{}'.format(
                avatar_q.qsize(), len(test_data), yolo_input_q.qsize(),
                yolo_output_q.qsize(), vibe_input_q.qsize(), vibe_output_q.qsize(),
                lwgan_input_q.qsize(), lwgan_output_q.qsize()))
            time.sleep(realtime_ms / 1000.0)  # realtime_ms is in milliseconds

    else:
        # Wait for all the data to be processed
        while not frame_q.empty() or \
                not yolo_input_q.empty() or \
                not yolo_output_q.empty() or \
                not vibe_input_q.empty() or \
                not vibe_output_q.empty() or \
                not lwgan_input_q.empty() or \
                not lwgan_output_q.empty():
            print('{}/{}, yolo_in:{}, yolo_out:{}, vibe_in:{}, vibe_out:{}, lwgan_in:{}, lwgan_out:{}'.format(
                avatar_q.qsize(), len(test_data), yolo_input_q.qsize(),
                yolo_output_q.qsize(), vibe_input_q.qsize(), vibe_output_q.qsize(),
                lwgan_input_q.qsize(), lwgan_output_q.qsize()))
            time.sleep(0.1)

    # Stop workers:
    break_event.set()

    # Wait for workers to finish:
    for w in workers:
        w.join()

    # Log:
    elapsed = time.time() - start
    n = len(test_data)
    m = avatar_q.qsize()
    fps = n / elapsed
    spf = elapsed / len(test_data)  # seconds per frame
    print('###Elapsed time:', elapsed, 'processed:{}/{}'.format(m,n), 'fps:', fps, 'spf:', spf)

    # Save the results:
    result_dir = conf['output']['result_dir']
    if save_results and result_dir is not None:
        print('Saving the results to', result_dir)

        if not os.path.exists(result_dir):
            os.makedirs(result_dir)
        idx = 0

        while True:
            try:
                data = avatar_q.get(timeout=1)
                avatar_q.task_done()
                out_path = os.path.join(result_dir, str(idx).zfill(5) + '.jpeg')
                if 'not_found' in data:
                    dummy_output = np.zeros((100, 100, 3), dtype=np.uint8)
                    cv2.imwrite(out_path, dummy_output)
                else:
                    cv2.imwrite(out_path, data['avatar'])
                idx += 1
            except Empty:
                break

    print('All done!')
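

# Usage sketch (hypothetical config path): process a recorded sequence as fast
# as the pipeline allows, or simulate a live camera by feeding one frame every
# realtime_ms milliseconds:
#
#   test_multithreads('conf/holoport_conf.yaml')                  # offline
#   test_multithreads('conf/holoport_conf.yaml', realtime_ms=33)  # ~30 fps capture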