Example #1
def test_single():
    with open('../test/depth_gt.npy', 'rb') as f:
        depth_gt = np.load(f)
    with open('../test/depth_res.npy', 'rb') as f:
        depth_res = np.load(f)
    vis = False
    params = sun3d.set_params()
    if not np.all(depth_gt.shape == depth_res.shape):
        depth_gt = cv2.resize(depth_gt,
                              (depth_res.shape[1], depth_res.shape[0]),
                              interpolation=cv2.INTER_NEAREST)

    sample_rate = [0.01, 0.05, 0.1, 0.2, 0.4, 0.8]
    acc = np.zeros(len(sample_rate), dtype=np.float32)
    uts.plot_images({'image': depth_gt})

    acc_o = eval_depth([depth_res], [depth_gt])

    for i, rate in enumerate(sample_rate):
        depth_gt_down = uts_3d.down_sample_depth(depth_gt,
                                                 method='uniform',
                                                 percent=rate,
                                                 K=params['intrinsic'])
        depth = uts_3d.xyz2depth(depth_gt_down, params['intrinsic'],
                                 depth_gt.shape)
        depth_up = upsampler.LaplacianDeform(depth_res, depth_gt_down,
                                             params['intrinsic'], False)

        acc[i] = eval_depth([depth_up], [depth_gt])

    if vis:
        plot_figure(np.append(0, sample_rate), np.append(acc_o, acc),
                    'depth_acc', 'sample rate', 'relative l1 error')
    else:
        print "rates: {}, thresholds {}".format(sample_rate, acc)
Example #2
def main(argv):
    """
    Main method for converting a TensorFlow parameter file to PaddlePaddle model files.
    :param argv: command-line arguments
    :return:
    """
    cmdparser = argparse.ArgumentParser(
        "Convert tensorflow parameter file to paddle model files.")
    cmdparser.add_argument(
        '-i', '--input', help='input filename of tensorflow parameters')
    cmdparser.add_argument('-l', '--layers', help='list of layer names')
    cmdparser.add_argument('-o', '--output', help='output file path of paddle model')

    args = cmdparser.parse_args(argv)

    params = sun3d.set_params('sun3d')
    params['stage'] = 5

    inputs = d_net.get_demon_inputs(params)

    # Add neural network config
    # outputs, out_field = d_net.get_demon_outputs(inputs, params, ext_inputs=inputs)
    outputs, out_field = d_net.get_demon_outputs(inputs, params, ext_inputs=None)

    # Create parameters
    parameters = paddle.parameters.create(outputs[out_field])
    # for name in parameters.names():
    #     print name

    name_match = OrderedDict([])
    if params['stage'] >= 1:
        name_match = gen_demon_flow_block_name_matcher(name_match)
    if params['stage'] >= 2:
        name_match = gen_demon_depth_block_name_matcher(name_match)
    if params['stage'] >= 3:
        # name_match = OrderedDict([])
        name_match = gen_demon_flow_block_name_matcher(name_match,
                                                       net_name='iter',
                                                       is_iter=True)
    if params['stage'] >= 4:
        name_match = gen_demon_depth_block_name_matcher(name_match,
                                                        net_name='iter',
                                                        is_iter=True)
    if params['stage'] >= 5:
        # name_match = OrderedDict([])
        name_match = gen_demon_refine_block_name_matcher(name_match)

    # for name in name_match.keys():
    #     print '{} : {}'.format(name, name_match[name])

    # Set default input/output paths
    if not args.input:
        args.input = './output/tf_weights/'

    if not args.output:
        args.output = './output/tf_model_' + str(params['stage']) + '.tar.gz'

    print "save parameters to {}".format(args.output)
    assign_weights(parameters, name_match, args.input, args.output)
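`assign_weights` itself is not shown. Under the assumption that `name_match` maps Paddle parameter names to the names of TensorFlow tensors dumped as individual .npy files in `args.input`, a rough sketch of the conversion step could look like this (per-layer layout conversions are glossed over):

import os
import gzip
import numpy as np

def assign_weights_sketch(parameters, name_match, tf_weight_dir, output_file):
    # Copy each matched TensorFlow weight into the Paddle parameter of the
    # same role, then save the parameter set as a tar.gz model file.
    for paddle_name, tf_name in name_match.items():
        weight = np.load(os.path.join(tf_weight_dir, tf_name + '.npy'))
        # Conv/FC weights may need a transpose here depending on the layer;
        # this sketch only reshapes to the expected Paddle shape.
        target_shape = parameters.get(paddle_name).shape
        parameters.set(paddle_name, weight.reshape(target_shape))
    with gzip.open(output_file, 'w') as f:
        parameters.to_tar(f)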
Example #3
def test_video():
    # create display windows
    cv2.namedWindow("frame")
    cv2.namedWindow("depth")
    cv2.namedWindow("normal")
    base_path = '/home/peng/Data/videos/'
    video_names = preprocess_util.list_files(base_path)
    prefix_len = len(base_path)
    for name in video_names[0:]:
        name = name[prefix_len:]
        output_path = base_path + name[:-4] + '/'
        if os.path.exists(output_path):
            continue
        video_path = base_path + name
        uts.save_video_to_images(video_path, output_path, max_frame=20000)

    # NOTE: this early return stops after dumping frames; the inference loop
    # below is currently unreachable.
    return

    paddle.init(use_gpu=True, gpu_id=FLAGS.gpu_id)
    params = sun3d.set_params()
    params['demon_model'] = 'output/tf_model_full_5.tar.gz'
    inputs = d_net.get_demon_inputs(params)
    geo_predictor = dp.DeMoNGeoPredictor(params, is_init=True)

    # load in video
    cap = cv2.VideoCapture(video_path)

    frame_id = 0
    frame_step = 10
    ret, last_frame = cap.read()
    height, width = last_frame.shape[0:2]

    while (cap.isOpened()):
        ret, frame = cap.read()
        frame_id += 1
        if frame_id % frame_step == 0:
            # pdb.set_trace()
            flow, normal, depth, motion = geo_predictor.demon_geometry(
                frame, last_frame)
            last_frame = frame

            depth = cv2.resize(depth, (width / 2, height / 2))
            depth = depth / np.amax(depth)

            normal = cv2.resize(normal, (width / 2, height / 2))
            normal = (normal + 1.) / 2.
            two_frame = np.concatenate([frame, last_frame], axis=1)
            cv2.imshow('frame', two_frame)
            cv2.imshow('depth', depth)
            cv2.imshow('normal', normal)
            print motion

            if cv2.waitKey(10) & 0xFF == ord('q'):
                break

    cap.release()
    cv2.destroyAllWindows()
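`uts.save_video_to_images` is used above to dump frames before the (currently unreachable) inference loop. A self-contained sketch of such a helper, assuming one numbered JPEG per frame:

import os
import cv2

def save_video_to_images_sketch(video_path, output_path, max_frame=20000):
    # Write every frame of the video as a numbered JPEG until max_frame frames
    # have been written or the video ends.
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    cap = cv2.VideoCapture(video_path)
    frame_id = 0
    while cap.isOpened() and frame_id < max_frame:
        ret, frame = cap.read()
        if not ret:
            break
        cv2.imwrite(os.path.join(output_path, '%06d.jpg' % frame_id), frame)
        frame_id += 1
    cap.release()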
Example #4
def test():
    # PaddlePaddle init, gpu_id=FLAGS.gpu_id
    paddle.init(use_gpu=True, gpu_id=FLAGS.gpu_id)
    # eval_tasks = {'flow':0, 'depth':1, 'normal':2}
    # tasks = ['flow', 'depth']
    tasks = ['flow', 'depth', 'normal']
    # tasks = ['normal']
    gt_name = [x + '_gt' for x in tasks]
    dict_task = dict(zip(gt_name, range(4, 4 + len(tasks))))

    params = sun3d.set_params()
    params['stage'] = 2
    # params['stage'] = 4

    inputs = d_net.get_demon_inputs(params)
    gts = d_net.get_ground_truth(params)

    # Add neural network config
    outputs, out_field = d_net.get_demon_outputs(inputs, params, ext_inputs=None)
    cost = gen_cost(outputs, gts, tasks, params)
    parameters = paddle.parameters.create(layers=cost)

    print("load parameters from {}".format(FLAGS.init_model))
    # if FLAGS.init_model:
    #     with gzip.open(FLAGS.init_model, 'r') as f:
    #         parameters_init = paddle.parameters.Parameters.from_tar(f)
    #     for name in parameters.names():
    #         parameters.set(name, parameters_init.get(name))

    optimizer = paddle.optimizer.Momentum(
        learning_rate=0,
        momentum=0,
        regularization=paddle.optimizer.L2Regularization(rate=0.0))
    trainer = paddle.trainer.SGD(cost=cost,
                parameters=parameters, update_equation=optimizer)

    feeding = {'image1': 0, 'image2': 1, 'weight': 2, 'intrinsic': 3}
    feeding.update(dict_task)

    print("start inference and evaluate")
    result = trainer.test(
        reader=paddle.batch(sun3d.test(params['test_scene'][0:2],
                                       height=params['size'][0],
                                       width=params['size'][1],
                                       tasks=tasks),
                            batch_size=32),
        feeding=feeding)
    print "Test with task {} and cost {}\n".format(tasks, result.cost)
Example #5
def test_geowarp():
    image_path1 = '/home/peng/Data/sun3d/brown_bm_1/' + \
                  'brown_bm_1/image/0001761-000059310235.jpg'
    image1 = cv2.imread(image_path1)
    with open('../test/depth_gt.npy', 'rb') as f:
        depth_gt = np.load(f)
    with open('../test/depth_res.npy', 'rb') as f:
        depth_res = np.load(f)

    if not np.all(depth_gt.shape == depth_res.shape):
        depth_gt = cv2.resize(depth_gt,
                              (depth_res.shape[1], depth_res.shape[0]),
                              interpolation=cv2.INTER_NEAREST)

    params = sun3d.set_params()
    rate = 0.05
    height, width = depth_gt.shape[0], depth_gt.shape[1]
    depth_gt_down = uts_3d.down_sample_depth(depth_gt,
                                             method='uniform',
                                             percent=rate,
                                             K=params['intrinsic'])
    depth = uts_3d.xyz2depth(depth_gt_down, params['intrinsic'],
                             depth_gt.shape)

    depth_up = LaplacianDeform(depth_res, depth_gt_down, params['intrinsic'],
                               True)

    inputs = d_net.get_demon_inputs(params)
    outputs, out_field = d_net.get_demon_outputs(inputs,
                                                 params,
                                                 ext_inputs=None)
    parameters, topo = paddle.parameters.create(outputs[out_field])
    # NOTE: `mask` is not defined in this snippet; it presumably marks the
    # pixels anchored by the sparse depth samples.
    uts.plot_images(OrderedDict([('image', image1), ('depth_gt', depth_gt),
                                 ('depth_down', depth),
                                 ('depth_res', depth_res), ('mask', mask),
                                 ('depth_up', depth_up)]),
                    layout=[4, 2])
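`uts_3d.down_sample_depth` and `uts_3d.xyz2depth` are used in several of these examples to build a sparse set of 3D anchor points from the ground-truth depth and to splat them back into a sparse depth map. A numpy-only sketch, assuming the intrinsics are normalized [fx, fy, cx, cy] as in the arrays used elsewhere in this listing:

import numpy as np

def down_sample_depth_uniform(depth, percent, K):
    # Keep roughly `percent` of the valid pixels and back-project them to 3D.
    h, w = depth.shape
    ys, xs = np.where(depth > 0)
    step = max(int(1. / percent), 1)
    ys, xs = ys[::step], xs[::step]
    z = depth[ys, xs]
    fx, fy, cx, cy = K
    x3 = (xs / float(w) - cx) / fx * z
    y3 = (ys / float(h) - cy) / fy * z
    return np.stack([x3, y3, z], axis=1)

def xyz2depth_sketch(points, K, shape):
    # Project sparse 3D points back into an otherwise-zero depth map.
    h, w = shape[:2]
    fx, fy, cx, cy = K
    depth = np.zeros((h, w), dtype=np.float32)
    z = points[:, 2]
    us = np.clip((points[:, 0] / z * fx + cx) * w, 0, w - 1).astype(np.int32)
    vs = np.clip((points[:, 1] / z * fy + cy) * h, 0, h - 1).astype(np.int32)
    depth[vs, us] = z
    return depth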
Example #6
def train():
    # PaddlePaddle init, gpu_id=FLAGS.gpu_id
    paddle.init(use_gpu=True, trainer_count=4, gpu_id=FLAGS.gpu_id)
    # paddle.init(use_gpu=True, trainer_count=2)
    data_source = 'sun3d'
    # tasks = ['flow', 'trans', 'depth', 'normal']
    tasks = ['flow', 'normal', 'depth']
    feeding_task = get_feeding(tasks)

    params = sun3d.set_params()
    params['stage'] = 2
    inputs = d_net.get_demon_inputs(params)
    gts = d_net.get_ground_truth(params)

    # Add neural network config
    outputs, out_field = d_net.get_demon_outputs(inputs, params)
    cost = gen_cost(outputs, gts, tasks, params)

    # Create parameters
    print "Loading pre trained model"
    parameters = paddle.parameters.create(cost)

    if FLAGS.init_model:
        with gzip.open(FLAGS.init_model, 'r') as f:
            parameters_init = paddle.parameters.Parameters.from_tar(f)
        for name in parameters.names():
            parameters.set(name, parameters_init.get(name))

    # # Create optimizer poly learning rate
    # momentum_optimizer = paddle.optimizer.Momentum(
    #     momentum=0.9,
    #     regularization=paddle.optimizer.L2Regularization(rate=0.0002 * params['batch_size']),
    #     learning_rate=0.1 / params['batch_size'],
    #     learning_rate_decay_a=0.1,
    #     learning_rate_decay_b=50000 * 100,
    #     learning_rate_schedule='discexp',
    #     batch_size=params['batch_size'])

    # Create optimizer poly learning rate
    adam_optimizer = paddle.optimizer.Adam(
        beta1=0.9,
        learning_rate=0.000015 / params['batch_size'],
        learning_rate_decay_a=0.8,
        learning_rate_decay_b=100000,
        learning_rate_schedule='discexp',
        regularization=paddle.optimizer.L2Regularization(rate=0.0002 *
                                                         params['batch_size']),
        batch_size=params['batch_size'] * FLAGS.trainer_count)

    # End batch and end pass event handler
    feeding = {'image1': 0, 'image2': 1, 'weight': 2, 'intrinsic': 3}
    feeding.update(feeding_task)

    def event_handler(event):
        if isinstance(event, paddle.event.EndIteration):
            if event.batch_id % 50 == 0:
                print "\nPass %d, Batch %d, Cost %f" % (
                    event.pass_id, event.batch_id, event.cost)
            else:
                sys.stdout.write('.')
                sys.stdout.flush()

        elif isinstance(event, paddle.event.EndPass):
            result = trainer.test(reader=paddle.batch(
                sun3d.test(params['test_scene'][0:4],
                           height=params['size'][0],
                           width=params['size'][1],
                           tasks=tasks),
                batch_size=2 * params['batch_size']),
                                  feeding=feeding)

            task_string = '_'.join(tasks)
            print "\nTask %s, Pass %d, Cost %f" % (task_string, event.pass_id,
                                                   result.cost)

            folder = params['output_path'] + '/' + data_source
            uts.mkdir_if_need(folder)
            model_name = folder + '/model_stage_' + str(
                params['stage']) + '_' + task_string + '.tar.gz'

            with gzip.open(model_name, 'w') as f:
                parameters.to_tar(f)
            print "\nsave with pass %d" % (event.pass_id)

    # Create trainer
    trainer = paddle.trainer.SGD(cost=cost,
                                 parameters=parameters,
                                 update_equation=adam_optimizer)

    reader = sun3d.train(scene_names=params['train_scene'],
                         height=params['size'][0],
                         width=params['size'][1],
                         tasks=tasks)

    batch_reader = paddle.batch(paddle.reader.shuffle(
        reader, buf_size=FLAGS.buffer_size),
                                batch_size=params['batch_size'])

    trainer.train(reader=batch_reader,
                  num_passes=100,
                  event_handler=event_handler,
                  feeding=feeding)
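As far as I recall from the legacy PaddlePaddle v2 API, the 'discexp' schedule used above is a discrete exponential decay of the base learning rate; a small sketch of the effective rate (treat the formula as an assumption, not documentation):

import math

def discexp_lr(base_lr, decay_a, decay_b, num_samples):
    # lr = base_lr * decay_a ** floor(num_samples / decay_b)
    return base_lr * decay_a ** math.floor(float(num_samples) / decay_b)

# With the settings above (decay_a=0.8, decay_b=100000), the rate drops by 20%
# for every 100k training samples seen.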
Example #7
def check_diff():

    # PaddlePaddle init
    paddle.init(use_gpu=True, gpu_id=FLAGS.gpu_id)
    # paddle.init(use_gpu=False)

    # setting parameters
    params = sun3d.set_params('sun3d')
    params['stage'] = 5
    layout = [2, 3]
    cur_level = 0
    inputs = d_net.get_demon_inputs(params)

    # define several external input here to avoid implementation difference
    inputs.update(
        d_net.get_cnn_input("image2_down", params['size_stage'][1], 3))
    inputs.update(d_net.get_cnn_input("image_warp", params['size_stage'][1],
                                      3))
    inputs.update(
        d_net.get_cnn_input("depth_trans", params['size_stage'][1], 1))
    inputs.update(d_net.get_cnn_input("flow", params['size_stage'][1], 2))

    # Add neural network config
    outputs, out_field = d_net.get_demon_outputs(inputs,
                                                 params,
                                                 ext_inputs=inputs)
    print('load parameters')
    with gzip.open('./output/' + FLAGS.model, 'r') as f:
        parameters_init = paddle.parameters.Parameters.from_tar(f)

    # print parameters_init.names()
    parameters = paddle.parameters.create(outputs[out_field])
    for name in parameters.names():
        # print "setting parameter {}".format(name)
        parameters.set(name, parameters_init.get(name))

    # load the input from saved example
    res_folder = 'output/example_output/'
    with open(res_folder + 'img_pair', 'rb') as f:
        tf_pair = np.load(f)
        tf_pair = tf_pair.squeeze()
    with open(res_folder + 'image2_down', 'rb') as f:
        image2_down = np.load(f)
        image2_down = image2_down.squeeze()
    intrinsic = np.array([0.89115971, 1.18821287, 0.5, 0.5])

    # load some extra inputs
    names = ['flow', 'depth', 'normal', 'rotation', 'translation']
    tf_names = [
        'predict_flow2', 'predict_depth2', 'predict_normal2',
        'predict_rotation', 'predict_translation'
    ]
    start_id = range(4, 4 + len(names))
    input_name_match = dict(zip(names, tf_names))
    results_names = dict(zip(names, start_id))
    boost_results = load_tf_boost_results(res_folder, input_name_match,
                                          params['stage'])

    test_data = [
        tf_pair[:3, :, :].flatten(), tf_pair[3:, :, :].flatten(),
        image2_down.flatten(), intrinsic
    ]
    test_data = [tuple(test_data + boost_results)]
    feeding = {'image1': 0, 'image2': 1, 'image2_down': 2, 'intrinsic': 3}
    feeding.update(results_names)

    # img_diff1 = tf_pair[:3, :, :] - image1_new.reshape((3, params['size'][0], params['size'][1]))
    # img_diff1 = img_diff1.transpose((1, 2, 0))
    # uts.plot_images({'img_diff': img_diff1}, layout=[1, 2])

    # print np.sum(np.abs(tf_pair[:3, :, :].flatten() - image1_new))
    # print np.sum(np.abs(tf_pair[3:, :, :].flatten() - image2_new))

    # return
    outputs_list = [outputs[x] for x in outputs.keys()]

    # pdb.set_trace()
    print len(test_data)
    print feeding.keys()

    conv = paddle.infer(output_layer=outputs_list,
                        parameters=parameters,
                        input=test_data,
                        feeding=feeding)

    height_list = [cp.g_layer_map[outputs[x].name].height \
                    for x in outputs.keys()]
    width_list = [cp.g_layer_map[outputs[x].name].width \
                    for x in outputs.keys()]

    conv = vec2img(inputs=conv, height=height_list, width=width_list)

    blob_name_match = get_name_matching(params['stage'])

    folder = './output/example_output/'
    # for name in outputs.keys()[cur_level:]:
    ob_names = outputs.keys()[cur_level:]
    # ob_names = ['depth_trans','geo_out']
    # ob_names = ['depth_0']

    for name in ob_names:
        i = outputs.keys().index(name)

        print name, ' ', blob_name_match[name]
        tf_conv_file = folder + str(params['stage']) + '_' + \
                       blob_name_match[name] + '.pkl'
        with open(tf_conv_file, 'rb') as f:
            tf_conv = np.load(f)

        print conv[i].shape, ' ', tf_conv.shape
        diff = conv[i] - tf_conv

        if len(diff.shape) <= 1:
            print '{} and {}, {}'.format(conv[i], tf_conv, diff)
        else:
            if len(diff.shape) == 2:
                diff = diff[:, :, np.newaxis]
            vis_dict = []
            for j in range(min(diff.shape[2], layout[0] * layout[1])):
                vis_dict.append(('diff_' + str(j), diff[:, :, j]))
            vis_dict = OrderedDict(vis_dict)
            uts.plot_images(OrderedDict(vis_dict), layout=layout)
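`vec2img` (used here and in the demo below) turns the flat vectors returned by paddle.infer back into images. A sketch that infers the channel count from the vector length; the real helper may use a different channel order:

import numpy as np

def vec2img_sketch(inputs, height, width):
    # Reshape each flat prediction vector into an (H, W, C) array.
    outputs = []
    for vec, h, w in zip(inputs, height, width):
        vec = np.asarray(vec).flatten()
        c = vec.size // (h * w)
        img = vec.reshape((c, h, w)).transpose((1, 2, 0))
        outputs.append(img.squeeze())
    return outputs if len(outputs) > 1 else outputs[0]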
Example #8
def test_demo():
    # PaddlePaddle init
    paddle.init(use_gpu=True, gpu_id=FLAGS.gpu_id)
    params = sun3d.set_params()
    inputs = d_net.get_demon_inputs(params)

    params['stage'] = 5
    # Add neural network config
    outputs, out_field = d_net.get_demon_outputs(inputs, params, ext_inputs=None)
    parameters, topo = paddle.parameters.create(outputs[out_field])

    # Read image pair 1, 2 flow
    for scene_name in params['train_scene'][1:]:
        image_list = preprocess_util.list_files(
            params['flow_path'] + scene_name + '/flow/')
        image2depth = sun3d.get_image_depth_matching(scene_name)

        for pair_name in image_list[0:2]:
            image1, image2, flow_gt, depth1_gt, normal1_gt = \
                sun3d.load_image_pair(scene_name, pair_name, image2depth)

            image1_new = uts.transform(image1.copy(),
                                       height=params['size'][0],
                                       width=params['size'][1])

            image2_new = uts.transform(image2.copy(),
                                       height=params['size'][0],
                                       width=params['size'][1])
            intrinsic = np.array([0.89115971, 1.18821287, 0.5, 0.5])

            test_data = [(image1_new, image2_new, intrinsic)]
            depth_name = 'depth' if params['stage'] < 5 else 'depth_0'
            out_fields = ['flow', depth_name, 'normal', 'rotation',
                          'translation']

            # out_fields = ['flow']
            # height_list = [cp.g_layer_map[outputs[x].name].height \
            #                 for x in ['flow']]
            # width_list = [cp.g_layer_map[outputs[x].name].width \
            #                 for x in ['flow']]
            output_list = [outputs[x] for x in out_fields]
            flow, depth, normal, rotation, translation = paddle.infer(
                                    output=topo,
                                    parameters=parameters,
                                    input=test_data,
                                    feeding={'image1': 0,
                                             'image2': 1,
                                             'intrinsic': 2})
            height_list = [cp.g_layer_map[outputs[x].name].height \
                            for x in ['flow', depth_name,'normal']]
            width_list = [cp.g_layer_map[outputs[x].name].width \
                            for x in ['flow', depth_name,'normal']]

            # flow = paddle.infer(output=output_list,
            #                     parameters=parameters,
            #                     input=test_data,
            #                     feeding={'image1': 0,
            #                              'image2': 1,
            #                              'intrinsic': 2});
            # flow = vec2img(inputs=[flow],
            #                height=height_list,
            #                width=width_list)

            # uts.plot_images(OrderedDict([('image1',image1),
            #                              ('image2',image2),
            #                              ('flow',flow),
            #                              ('flow_gt',flow_gt)]),
            #                 layout=[4,2])
            flow, depth, normal = vec2img(inputs=[flow, depth, normal],
                                          height=height_list,
                                          width=width_list)

            # visualize depth in 3D
            # image1_down = cv2.resize(image1,
            #     (depth.shape[1], depth.shape[0]))
            # visualize_prediction(
            #     depth=depth,
            #     image=np.uint8(image1_down.transpose([2, 0, 1])),
            #     rotation=rotation,
            #     translation=translation)
            uts.plot_images(OrderedDict([('image1',image1),
                                         ('image2',image2),
                                         ('flow',flow),
                                         ('flow_gt',flow_gt),
                                         ('depth', depth),
                                         ('depth_gt', depth1_gt)]),
                                         # ('normal', (normal + 1.0)/2.),
                                         # ('normal_gt', (normal1_gt + 1.0)/2)]),
                            layout=[4,2])
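`uts.transform` prepares an image for the network input layer. Its exact normalization is not shown in this listing; a guess at the essential steps (resize to the configured size, scale, and flatten channel-first):

import cv2
import numpy as np

def transform_sketch(image, height, width):
    # Resize to (height, width), scale to [0, 1], and flatten in CHW order.
    # The real uts.transform probably applies a different mean/scale.
    image = cv2.resize(image, (width, height)).astype(np.float32) / 255.
    return image.transpose((2, 0, 1)).flatten()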
Example #9
def test_refine_net(dataset='sun3d', split='train', vis=False):

    paddle.init(use_gpu=True, gpu_id=FLAGS.gpu_id)
    params = sun3d.set_params()
    part, part_id = [int(x) for x in FLAGS.part.split(',')]
    test_ids = partition(len(params[split + '_scene']), part, part_id)
    rate = 0.05
    is_inverse = False
    depth_name = 'depth_inv' if is_inverse else 'depth'

    process_scene_names = [params[split + '_scene'][x] for x in test_ids]
    inputs = u_net.get_inputs(params)
    outputs = u_net.refine_net(inputs, params)
    parameters, topo = paddle.parameters.create(outputs[depth_name])
    print('load parameters {}'.format(FLAGS.model))
    with gzip.open(FLAGS.model, 'r') as f:
        parameters = paddle.parameters.Parameters.from_tar(f)
    feeding = {'image1': 0, 'depth': 1}

    for scene_name in process_scene_names:
        id_img2depth = sun3d.get_image_depth_matching(scene_name)
        upsample_output_path = params['flow_path'] + scene_name + \
          '/pair_depth/' + str(rate) + '/'
        prefix_len = len(upsample_output_path)
        image_list = preprocess_util.list_files(upsample_output_path)

        for pair_name in image_list:
            print pair_name
            pair_image_name = pair_name.split('/')[-1]
            outfile = upsample_output_path + pair_image_name[:-4] + '.npy'
            depth_net = np.load(outfile)
            depth_net_in = depth_net.flatten()
            if is_inverse:
                depth_net_in = uts_3d.inverse_depth(depth_net)

            image_name1, _ = pair_image_name.split('_')
            image_path1 = params['data_path'] + scene_name + \
                          '/image/' + image_name1 + '.jpg'
            depth_path1 = params['data_path'] + scene_name + '/depth/' + \
                          id_img2depth[image_name1] + '.png'

            image1 = cv2.imread(image_path1)
            depth1 = uts.read_depth(depth_path1)

            image1_new = uts.transform(image1.copy(),
                                       height=params['size'][0],
                                       width=params['size'][1])
            test_data = [(
                image1_new,
                depth_net_in,
            )]

            print 'forward'
            depth_out = paddle.infer(output=topo,
                                     parameters=parameters,
                                     input=test_data,
                                     feeding=feeding)
            if is_inverse:
                depth_out = uts_3d.inverse_depth(depth_out)

            depth = uts.vec2img(inputs=depth_out,
                                height=params['size'][0],
                                width=params['size'][1])

            if vis:
                uts.plot_images(OrderedDict([('image', image1),
                                             ('depth1', depth1),
                                             ('depth_net', depth_net),
                                             ('depth', depth)]),
                                layout=[4, 2])
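`partition` together with FLAGS.part lets several processes each handle a slice of the scene list. A minimal sketch of what it presumably does:

def partition_sketch(n, part, part_id):
    # Split indices 0..n-1 into `part` roughly equal chunks and return chunk
    # number `part_id`.
    chunk = (n + part - 1) // part
    return list(range(n))[part_id * chunk:(part_id + 1) * chunk]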
Example #10
def sequencial_upsampleing(dataset='sun3d',
                           split='train',
                           max_num=None,
                           vis=False):

    # Read image pair 1, 2, generate depth
    if dataset == 'sun3d':
        params = sun3d.set_params()
        params['demon_model'] = '../output/tf_model_full_5.tar.gz'
    else:
        print "dataset {} is not supported".format(dataset)
        return

    deep_upsampler = DeepUpSampler(params)
    part, part_id = [int(x) for x in FLAGS.part.split(',')]
    test_ids = partition(len(params[split + '_scene']), part, part_id)
    rate = 0.05
    process_scene_names = [params[split + '_scene'][x] for x in test_ids]
    all_time = 0.
    all_count = 0.

    for scene_name in process_scene_names:
        image_list = preprocess_util.list_files(params['flow_path'] +
                                                scene_name + '/flow/')

        image2depth = sun3d.get_image_depth_matching(scene_name)
        image_num = len(image_list) if max_num is None \
                                    else min(len(image_list), max_num)
        image_id = range(0, len(image_list), len(image_list) / image_num)
        upsample_output_path = params['flow_path'] + scene_name + \
          '/pair_depth/' + str(rate) + '/'
        uts.mkdir_if_need(upsample_output_path)

        print "processing {} with images: {}".format(scene_name, len(image_id))

        image_name_list = [image_list[x] for x in image_id]
        for pair_name in image_name_list:
            pair_image_name = pair_name.split('/')[-1]
            outfile = upsample_output_path + pair_image_name[:-4] + '.npy'
            # if uts.exists(outfile):
            #   print "\t {} exists".format(pair_name)
            #   continue

            image1, image2, flow_gt, depth_gt = \
                sun3d.load_image_pair(scene_name, pair_name,
                  image2depth, False)

            print pair_name
            uts.plot_images(OrderedDict([('image', image1),
                                         ('depth_gt', depth_gt)]),
                            layout=[4, 2])
            # NOTE: this `continue` skips the upsampling below for every pair;
            # remove it to actually compute and save the upsampled depths.
            continue

            depth_gt_down = uts_3d.down_sample_depth(depth_gt,
                                                     method='uniform',
                                                     percent=rate,
                                                     K=params['intrinsic'])

            try:
                start_time = time.time()
                print "\t upsampling {}".format(pair_name)
                depth_up = deep_upsampler.UpSample(depth_gt_down,
                                                   [image1, image2])
                np.save(outfile, depth_up)
                print "\t  time: {}".format(time.time() - start_time)

                all_time += time.time() - start_time
                all_count += 1

            except Exception:
                print "{} failed".format(pair_name)

            if vis:
                uts.plot_images(OrderedDict([('image', image1),
                                             ('depth_gt', depth_gt),
                                             ('depth_up', depth_up)]),
                                layout=[4, 2])
    print "average run time {}\n".format(all_time / all_count)
Example #11
def train(is_test=True):
    # PaddlePaddle init, gpu_id=FLAGS.gpu_id
    gpu_ids = [int(x) for x in FLAGS.gpu_id.split(',')]
    trainer_count = len(gpu_ids)
    # cost_name = ['depth_l1', 'depth_gradient']
    cost_name = ['rel_l1', 'depth_gradient']
    # cost_name = ['rel_l1']
    is_inverse = False

    if len(gpu_ids) == 1:
        gpu_ids = gpu_ids[0]
        paddle.init(use_gpu=True, gpu_id=gpu_ids)
    else:
        paddle.init(use_gpu=True, trainer_count=trainer_count)

    # paddle.init(use_gpu=True, trainer_count=2)
    data_source = 'sun3d'
    tasks = ['depth']

    if data_source == 'sun3d':
        params = sun3d.set_params()

    inputs = u_net.get_inputs(params)
    gts = u_net.get_ground_truth(params)
    outputs = u_net.refine_net(inputs, params)
    cost = gen_cost(outputs, gts, params, cost_name)

    # Create parameters
    parameters, _ = paddle.parameters.create(cost)

    # Create optimizer poly learning rate
    optimizer = paddle.optimizer.Adam(
        beta1=0.9,
        learning_rate=FLAGS.learning_rate / params['batch_size'],
        learning_rate_decay_a=0.8,
        learning_rate_decay_b=10000,
        learning_rate_schedule='discexp',
        regularization=paddle.optimizer.L2Regularization(rate=0.0002 *
                                                         params['batch_size']),
        batch_size=params['batch_size'] * trainer_count)

    # optimizer = paddle.optimizer.Momentum(
    #     learning_rate=FLAGS.learning_rate / params['batch_size'],
    #     momentum=0.9,
    #     learning_rate_decay_b=50000,
    #     learning_rate_schedule='discexp',
    #     regularization=paddle.optimizer.L2Regularization(
    #         rate=0.0005 * params['batch_size']))

    # End batch and end pass event handler
    if is_inverse:
        feeding = {'image1': 0, 'depth_inv': 1, 'depth_gt_inv': 2, 'weight': 3}
    else:
        feeding = {'image1': 0, 'depth': 1, 'depth_gt': 2, 'weight': 3}

    def event_handler(event):
        if isinstance(event, paddle.event.EndIteration):
            if event.batch_id % 50 == 0:
                if not isinstance(event.cost, list):
                    cost = [event.cost]
                else:
                    cost = event.cost
                print "\nPass %d, Batch %d, " % (event.pass_id,
                                                 event.batch_id),
                for i in range(len(cost)):
                    print "%s: %f, " % (cost_name[i], cost[i]),
                print "\n"
            else:
                sys.stdout.write('.')
                sys.stdout.flush()

        elif (isinstance(event, paddle.event.EndPass) and \
              event.pass_id % 4 == 1):
            print "Testing",
            result = trainer.test(reader=paddle.batch(
                sun3d.test_upsampler(params['test_scene'][0:5],
                                     rate=params['sample_rate'],
                                     height=params['size'][0],
                                     width=params['size'][1]),
                batch_size=params['batch_size']),
                                  feeding=feeding)

            print "\nTask upsample, Pass %d," % (event.pass_id),
            if not isinstance(result.cost, list):
                cost = [result.cost]
            else:
                cost = result.cost
            for i in range(len(cost)):
                print "%s: %f, " % (cost_name[i], cost[i]),

            folder = params['output_path'] + '/upsampler/'
            uts.mkdir_if_need(folder)
            model_name = folder + '/upsample_model_' + \
                         FLAGS.suffix + '.tar.gz'
            with gzip.open(model_name, 'w') as f:
                parameters.to_tar(f)
            print "model saved at %s" % model_name

    # Create trainer
    trainer = paddle.trainer.SGD(cost=cost,
                                 parameters=parameters,
                                 update_equation=optimizer)

    if is_test:
        print("load parameters from {}".format(FLAGS.init_model))
        with gzip.open(FLAGS.init_model, 'r') as f:
            parameters = paddle.parameters.Parameters.from_tar(f)

        result = trainer.test(reader=paddle.batch(
            sun3d.test_upsampler(params['test_scene'][0:5],
                                 rate=params['sample_rate'],
                                 height=params['size'][0],
                                 width=params['size'][1]),
            batch_size=params['batch_size']),
                              feeding=feeding)
        print "Test cost {}\n".format(result.cost)

    else:
        reader = sun3d.train_upsampler(scene_names=params['train_scene'],
                                       rate=params['sample_rate'],
                                       height=params['size'][0],
                                       width=params['size'][1],
                                       max_num=32)

        batch_reader = paddle.batch(paddle.reader.shuffle(
            reader, buf_size=FLAGS.buffer_size),
                                    batch_size=params['batch_size'])
        trainer.train(reader=batch_reader,
                      num_passes=FLAGS.num_passes,
                      event_handler=event_handler,
                      feeding=feeding)
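`uts_3d.inverse_depth` backs the is_inverse option used both here and in test_refine_net above: the network then predicts 1/depth rather than depth. A small sketch that leaves invalid (non-positive) pixels at zero:

import numpy as np

def inverse_depth_sketch(depth, eps=1e-6):
    # Element-wise 1/depth for valid pixels; zeros stay zero.
    depth = np.asarray(depth, dtype=np.float32)
    inv = np.zeros_like(depth)
    valid = depth > eps
    inv[valid] = 1.0 / depth[valid]
    return inv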
def test_demo():
    # PaddlePaddle init
    paddle.init(use_gpu=True, gpu_id=FLAGS.gpu_id)
    params = sun3d.set_params()
    inputs = d_net.get_demon_inputs(params)

    # Add neural network config
    outputs_bs = d_net.bootstrap_net(inputs, params)
    outputs_it = d_net.iterative_net(inputs, params)
    outputs_re = d_net.refine_net(inputs, params)
    out_fields = ['flow', 'depth_inv', 'normal', 'rotation', 'translation']
    my_g_layer_map = {}
    parameters_bs, topo_bs = paddle.parameters.create(
        [outputs_bs[x] for x in out_fields])
    my_g_layer_map.update(cp.g_layer_map)
    parameters_it, topo_it = paddle.parameters.create(
        [outputs_it[x] for x in out_fields])
    my_g_layer_map.update(cp.g_layer_map)
    parameters_re, topo_re = paddle.parameters.create(outputs_re['depth_0'])
    my_g_layer_map.update(cp.g_layer_map)

    print('load parameters')
    with gzip.open(FLAGS.model, 'r') as f:
        parameters_init = paddle.parameters.Parameters.from_tar(f)

    for name in parameters_bs.names():
        parameters_bs.set(name, parameters_init.get(name))
    for name in parameters_it.names():
        parameters_it.set(name, parameters_init.get(name))
    for name in parameters_re.names():
        parameters_re.set(name, parameters_init.get(name))

    # Read image pair 1, 2 flow
    for scene_name in params['train_scene'][1:]:
        image_list = preprocess_util.list_files(params['flow_path'] +
                                                scene_name + '/flow/')
        image2depth = sun3d.get_image_depth_matching(scene_name)
        for pair_name in image_list[0:2]:
            image1, image2, flow_gt, depth1_gt, normal1_gt = \
                sun3d.load_image_pair(scene_name, pair_name, image2depth)

            #transform and yield
            image1_new = uts.transform(image1.copy(),
                                       height=params['size'][0],
                                       width=params['size'][1])
            image2_new = uts.transform(image2.copy(),
                                       height=params['size'][0],
                                       width=params['size'][1])
            intrinsic = np.array([0.89115971, 1.18821287, 0.5, 0.5])

            test_data_bs = [(image1_new, image2_new)]
            feeding_bs = {'image1': 0, 'image2': 1}
            flow, depth_inv, normal, rotation, translation = paddle.infer(
                output=topo_bs,
                parameters=parameters_bs,
                input=test_data_bs,
                feeding=feeding_bs)

            for i in range(3):
                test_data_it = [(image1_new, image2_new, intrinsic, rotation,
                                 translation, depth_inv, normal)]
                feeding_it = {
                    'image1': 0,
                    'image2': 1,
                    'intrinsic': 2,
                    'rotation': 3,
                    'translation': 4,
                    'depth_inv': 5,
                    'normal': 6
                }
                flow, depth_inv, normal, rotation, translation = paddle.infer(
                    output=topo_it,
                    parameters=parameters_it,
                    input=test_data_it,
                    feeding=feeding_it)

            test_data_re = [(image1_new, image2_new, depth_inv)]
            feeding_re = {'image1': 0, 'image2': 1, 'depth_inv': 2}
            depth = paddle.infer(output=topo_re,
                                 parameters=parameters_re,
                                 input=test_data_re,
                                 feeding=feeding_re)

            layer_names = [
                outputs_it['flow'].name, outputs_it['normal'].name,
                outputs_re['depth_0'].name
            ]
            height_list = [my_g_layer_map[x].height for x in layer_names]
            width_list = [my_g_layer_map[x].width for x in layer_names]

            flow, normal, depth = vec2img(inputs=[flow, normal, depth],
                                          height=height_list,
                                          width=width_list)

            # visualize depth in 3D
            # image1_down = cv2.resize(image1,
            #     (depth.shape[1], depth.shape[0]))

            # visualize_prediction(
            #     depth=depth,
            #     image=np.uint8(image1_down.transpose([2, 0, 1])),
            #     rotation=rotation,
            #     translation=translation)
            with open('./test/depth_gt.npy', 'wb') as f:
                np.save(f, depth1_gt)

            with open('./test/depth_res.npy', 'wb') as f:
                np.save(f, depth)

            uts.plot_images(OrderedDict([
                ('image1', image1), ('image2', image2), ('flow', flow),
                ('flow_gt', flow_gt), ('depth', depth),
                ('depth_gt', depth1_gt), ('normal', (normal + 1.0) / 2.),
                ('normal_gt', (normal1_gt + 1.0) / 2)
            ]),
                            layout=[4, 2])