def main(argv):
    """Convert a dumped tensorflow parameter file into paddle model files.

    :param argv: command line arguments.
    :return: None
    """
    cmdparser = argparse.ArgumentParser(
        "Convert tensorflow parameter file to paddle model files.")
    cmdparser.add_argument(
        '-i', '--input', help='input path of the dumped tensorflow parameters')
    cmdparser.add_argument('-l', '--layers', help='list of layer names')
    cmdparser.add_argument(
        '-o', '--output', help='output file path of paddle model')
    args = cmdparser.parse_args(argv)

    params = sun3d.set_params('sun3d')
    params['stage'] = 5

    inputs = d_net.get_demon_inputs(params)

    # Add neural network config
    outputs, out_field = d_net.get_demon_outputs(inputs, params,
                                                 ext_inputs=None)

    # Create parameters
    parameters = paddle.parameters.create(outputs[out_field])

    # Build the paddle-to-tensorflow layer name mapping, stage by stage
    name_match = OrderedDict([])
    if params['stage'] >= 1:
        name_match = gen_demon_flow_block_name_matcher(name_match)
    if params['stage'] >= 2:
        name_match = gen_demon_depth_block_name_matcher(name_match)
    if params['stage'] >= 3:
        name_match = gen_demon_flow_block_name_matcher(
            name_match, net_name='iter', is_iter=True)
    if params['stage'] >= 4:
        name_match = gen_demon_depth_block_name_matcher(
            name_match, net_name='iter', is_iter=True)
    if params['stage'] >= 5:
        name_match = gen_demon_refine_block_name_matcher(name_match)

    if not args.input:
        args.input = './output/tf_weights/'
    if not args.output:
        args.output = './output/tf_model_' + str(params['stage']) + '.tar.gz'

    print "save parameters to {}".format(args.output)
    assign_weights(parameters, name_match, args.input, args.output)
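
# `assign_weights` is defined elsewhere in this repo; the sketch below shows
# one plausible implementation, assuming the tensorflow weights were dumped
# as one .npy file per tensor, named by its tensorflow variable name (the
# actual on-disk layout is an assumption, not confirmed by this file).
def assign_weights_sketch(parameters, name_match, input_path, output_path):
    import os
    for paddle_name, tf_name in name_match.items():
        weight_file = os.path.join(input_path, tf_name + '.npy')
        with open(weight_file, 'rb') as f:
            weight = np.load(f)
        # paddle expects the array to match the parameter's registered shape
        parameters.set(paddle_name,
                       weight.reshape(parameters.get(paddle_name).shape))
    with gzip.open(output_path, 'w') as f:
        parameters.to_tar(f)
    print "saved {} parameters to {}".format(len(name_match), output_path)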
def test():
    # PaddlePaddle init, gpu_id=FLAGS.gpu_id
    paddle.init(use_gpu=True, gpu_id=FLAGS.gpu_id)

    tasks = ['flow', 'depth', 'normal']
    gt_name = [x + '_gt' for x in tasks]
    # ground truths occupy the sample slots after image1, image2, weight
    # and intrinsic, i.e. from index 4 on
    dict_task = dict(zip(gt_name, range(4, 4 + len(tasks))))

    params = sun3d.set_params()
    params['stage'] = 2

    inputs = d_net.get_demon_inputs(params)
    gts = d_net.get_ground_truth(params)

    # Add neural network config
    outputs, out_field = d_net.get_demon_outputs(inputs, params,
                                                 ext_inputs=None)
    cost = gen_cost(outputs, gts, tasks, params)
    parameters = paddle.parameters.create(layers=cost)

    print("load parameters from {}".format(FLAGS.init_model))
    if FLAGS.init_model:
        with gzip.open(FLAGS.init_model, 'r') as f:
            parameters_init = paddle.parameters.Parameters.from_tar(f)
        for name in parameters.names():
            parameters.set(name, parameters_init.get(name))

    optimizer = paddle.optimizer.Momentum(
        learning_rate=0,
        momentum=0,
        regularization=paddle.optimizer.L2Regularization(rate=0.0))
    trainer = paddle.trainer.SGD(cost=cost,
                                 parameters=parameters,
                                 update_equation=optimizer)

    feeding = {'image1': 0, 'image2': 1, 'weight': 2, 'intrinsic': 3}
    feeding.update(dict_task)

    print("start inference and evaluate")
    result = trainer.test(
        reader=paddle.batch(sun3d.test(params['test_scene'][0:2],
                                       height=params['size'][0],
                                       width=params['size'][1],
                                       tasks=tasks),
                            batch_size=32),
        feeding=feeding)
    print "Test with task {} and cost {}\n".format(tasks, result.cost)
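
# `gen_cost` is defined elsewhere in the repo; below is a minimal sketch of
# what it is assumed to do -- one regression cost per task, summed into a
# single optimization target. The real implementation likely uses
# task-specific losses (e.g. endpoint error for flow), so mse_cost and the
# `task + '_gt'` key convention are placeholders, not the confirmed API.
def gen_cost_sketch(outputs, gts, tasks, params):
    costs = []
    for task in tasks:
        costs.append(paddle.layer.mse_cost(input=outputs[task],
                                           label=gts[task + '_gt']))
    if len(costs) == 1:
        return costs[0]
    # sum the per-task costs into one scalar objective
    return paddle.layer.addto(input=costs, act=paddle.activation.Linear())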
def test_geowarp():
    image_path1 = '/home/peng/Data/sun3d/brown_bm_1/' + \
                  'brown_bm_1/image/0001761-000059310235.jpg'
    image1 = cv2.imread(image_path1)

    with open('../test/depth_gt.npy', 'rb') as f:
        depth_gt = np.load(f)
    with open('../test/depth_res.npy', 'rb') as f:
        depth_res = np.load(f)

    # bring the ground truth to the resolution of the predicted depth
    if not np.all(depth_gt.shape == depth_res.shape):
        depth_gt = cv2.resize(depth_gt,
                              (depth_res.shape[1], depth_res.shape[0]),
                              interpolation=cv2.INTER_NEAREST)

    params = sun3d.set_params()
    rate = 0.05
    height, width = depth_gt.shape[0], depth_gt.shape[1]
    depth_gt_down = uts_3d.down_sample_depth(depth_gt,
                                             method='uniform',
                                             percent=rate,
                                             K=params['intrinsic'])
    depth = uts_3d.xyz2depth(depth_gt_down,
                             params['intrinsic'],
                             depth_gt.shape)
    depth_up = LaplacianDeform(depth_res, depth_gt_down,
                               params['intrinsic'], True)

    uts.plot_images(OrderedDict([('image', image1),
                                 ('depth_gt', depth_gt),
                                 ('depth_down', depth),
                                 ('depth_res', depth_res),
                                 ('depth_up', depth_up)]),
                    layout=[4, 2])
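
# `uts_3d.down_sample_depth` and `uts_3d.xyz2depth` are repo utilities; the
# sketch below only illustrates the assumed round trip with normalized
# intrinsics [fx, fy, cx, cy]: back-project a uniform subset of depth pixels
# to 3D points, then splat them back into a sparse depth map. The sampling
# scheme and intrinsic convention are assumptions.
def depth_round_trip_sketch(depth, K, percent):
    h, w = depth.shape
    v, u = np.where(depth > 0)
    keep = np.random.rand(len(u)) < percent       # uniform sub-sampling
    u, v = u[keep], v[keep]
    d = depth[v, u]
    # back-project with normalized pixel coordinates
    x = (u.astype(np.float32) / w - K[2]) / K[0] * d
    y = (v.astype(np.float32) / h - K[3]) / K[1] * d
    xyz = np.stack([x, y, d], axis=1)
    sparse = np.zeros_like(depth)
    sparse[v, u] = d                              # xyz2depth analogue
    return xyz, sparse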
def train():
    # PaddlePaddle init, gpu_id=FLAGS.gpu_id
    paddle.init(use_gpu=True, trainer_count=4, gpu_id=FLAGS.gpu_id)

    data_source = 'sun3d'
    tasks = ['flow', 'normal', 'depth']
    feeding_task = get_feeding(tasks)

    params = sun3d.set_params()
    params['stage'] = 2

    inputs = d_net.get_demon_inputs(params)
    gts = d_net.get_ground_truth(params)

    # Add neural network config
    outputs, out_field = d_net.get_demon_outputs(inputs, params)
    cost = gen_cost(outputs, gts, tasks, params)

    # Create parameters
    print "Loading pre-trained model"
    parameters = paddle.parameters.create(cost)
    if FLAGS.init_model:
        with gzip.open(FLAGS.init_model, 'r') as f:
            parameters_init = paddle.parameters.Parameters.from_tar(f)
        for name in parameters.names():
            parameters.set(name, parameters_init.get(name))

    # Create optimizer with exponentially decayed ('discexp') learning rate
    adam_optimizer = paddle.optimizer.Adam(
        beta1=0.9,
        learning_rate=0.000015 / params['batch_size'],
        learning_rate_decay_a=0.8,
        learning_rate_decay_b=100000,
        learning_rate_schedule='discexp',
        regularization=paddle.optimizer.L2Regularization(
            rate=0.0002 * params['batch_size']),
        batch_size=params['batch_size'] * FLAGS.trainer_count)

    feeding = {'image1': 0, 'image2': 1, 'weight': 2, 'intrinsic': 3}
    feeding.update(feeding_task)

    # End batch and end pass event handler
    def event_handler(event):
        if isinstance(event, paddle.event.EndIteration):
            if event.batch_id % 50 == 0:
                print "\nPass %d, Batch %d, Cost %f" % (
                    event.pass_id, event.batch_id, event.cost)
            else:
                sys.stdout.write('.')
                sys.stdout.flush()
        elif isinstance(event, paddle.event.EndPass):
            result = trainer.test(
                reader=paddle.batch(sun3d.test(params['test_scene'][0:4],
                                               height=params['size'][0],
                                               width=params['size'][1],
                                               tasks=tasks),
                                    batch_size=2 * params['batch_size']),
                feeding=feeding)
            task_string = '_'.join(tasks)
            print "\nTask %s, Pass %d, Cost %f" % (
                task_string, event.pass_id, result.cost)

            folder = params['output_path'] + '/' + data_source
            uts.mkdir_if_need(folder)
            model_name = folder + '/model_stage_' + str(
                params['stage']) + '_' + task_string + '.tar.gz'
            with gzip.open(model_name, 'w') as f:
                parameters.to_tar(f)
            print "\nsave with pass %d" % event.pass_id

    # Create trainer
    trainer = paddle.trainer.SGD(cost=cost,
                                 parameters=parameters,
                                 update_equation=adam_optimizer)
    reader = sun3d.train(scene_names=params['train_scene'],
                         height=params['size'][0],
                         width=params['size'][1],
                         tasks=tasks)
    batch_reader = paddle.batch(paddle.reader.shuffle(
        reader, buf_size=FLAGS.buffer_size),
        batch_size=params['batch_size'])
    trainer.train(reader=batch_reader,
                  num_passes=100,
                  event_handler=event_handler,
                  feeding=feeding)
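
# The `feeding` dict maps each input layer to a position inside the tuples
# the reader yields, so `sun3d.train` must produce samples laid out as below.
# This generator is a hypothetical stand-in for the real reader, meant only
# to document the expected slot ordering:
def fake_reader_sketch(params, tasks):
    def reader():
        h, w = params['size'][0], params['size'][1]
        for _ in range(4):
            sample = [np.zeros(3 * h * w, dtype=np.float32),   # image1
                      np.zeros(3 * h * w, dtype=np.float32),   # image2
                      np.ones(h * w, dtype=np.float32),        # weight
                      np.array([0.89, 1.19, 0.5, 0.5],
                               dtype=np.float32)]              # intrinsic
            for _ in tasks:    # one ground-truth slot per task, from index 4
                sample.append(np.zeros(h * w, dtype=np.float32))
            yield tuple(sample)
    return reader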
def check_diff():
    # PaddlePaddle init
    paddle.init(use_gpu=True, gpu_id=FLAGS.gpu_id)

    # setting parameters
    params = sun3d.set_params('sun3d')
    params['stage'] = 5
    layout = [2, 3]
    cur_level = 0

    inputs = d_net.get_demon_inputs(params)

    # define several external inputs here to avoid implementation differences
    inputs.update(
        d_net.get_cnn_input("image2_down", params['size_stage'][1], 3))
    inputs.update(
        d_net.get_cnn_input("image_warp", params['size_stage'][1], 3))
    inputs.update(
        d_net.get_cnn_input("depth_trans", params['size_stage'][1], 1))
    inputs.update(d_net.get_cnn_input("flow", params['size_stage'][1], 2))

    # Add neural network config
    outputs, out_field = d_net.get_demon_outputs(inputs, params,
                                                 ext_inputs=inputs)

    print('load parameters')
    with gzip.open('./output/' + FLAGS.model, 'r') as f:
        parameters_init = paddle.parameters.Parameters.from_tar(f)
    parameters = paddle.parameters.create(outputs[out_field])
    for name in parameters.names():
        parameters.set(name, parameters_init.get(name))

    # load the input from saved example
    res_folder = 'output/example_output/'
    with open(res_folder + 'img_pair', 'rb') as f:
        tf_pair = np.load(f)
    tf_pair = tf_pair.squeeze()
    with open(res_folder + 'image2_down', 'rb') as f:
        image2_down = np.load(f)
    image2_down = image2_down.squeeze()
    intrinsic = np.array([0.89115971, 1.18821287, 0.5, 0.5])

    # load some extra inputs
    names = ['flow', 'depth', 'normal', 'rotation', 'translation']
    tf_names = ['predict_flow2', 'predict_depth2', 'predict_normal2',
                'predict_rotation', 'predict_translation']
    start_id = range(4, 4 + len(names))
    input_name_match = dict(zip(names, tf_names))
    results_names = dict(zip(names, start_id))
    boost_results = load_tf_boost_results(res_folder, input_name_match,
                                          params['stage'])

    test_data = [tf_pair[:3, :, :].flatten(),
                 tf_pair[3:, :, :].flatten(),
                 image2_down.flatten(),
                 intrinsic]
    test_data = [tuple(test_data + boost_results)]
    feeding = {'image1': 0, 'image2': 1, 'image2_down': 2, 'intrinsic': 3}
    feeding.update(results_names)

    outputs_list = [outputs[x] for x in outputs.keys()]
    print len(test_data)
    print feeding.keys()
    conv = paddle.infer(output_layer=outputs_list,
                        parameters=parameters,
                        input=test_data,
                        feeding=feeding)

    height_list = [cp.g_layer_map[outputs[x].name].height
                   for x in outputs.keys()]
    width_list = [cp.g_layer_map[outputs[x].name].width
                  for x in outputs.keys()]
    conv = vec2img(inputs=conv, height=height_list, width=width_list)

    # compare each layer output with the corresponding tensorflow blob
    blob_name_match = get_name_matching(params['stage'])
    folder = './output/example_output/'
    ob_names = outputs.keys()[cur_level:]
    for name in ob_names:
        i = outputs.keys().index(name)
        print name, ' ', blob_name_match[name]
        tf_conv_file = folder + str(params['stage']) + '_' + \
                       blob_name_match[name] + '.pkl'
        with open(tf_conv_file, 'rb') as f:
            tf_conv = np.load(f)
        print conv[i].shape, ' ', tf_conv.shape

        diff = conv[i] - tf_conv
        if len(diff.shape) <= 1:
            print '{} and {}, {}'.format(conv[i], tf_conv, diff)
        else:
            if len(diff.shape) == 2:
                diff = diff[:, :, np.newaxis]
            vis_dict = []
            for j in range(min(diff.shape[2], layout[0] * layout[1])):
                vis_dict.append(('diff_' + str(j), diff[:, :, j]))
            uts.plot_images(OrderedDict(vis_dict), layout=layout)
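
# `vec2img` is used above to turn paddle's flat per-layer inference outputs
# back into images; a minimal sketch, assuming each output arrives as a flat
# CHW vector whose channel count can be inferred from the layer's height and
# width (the real utility lives elsewhere in the repo):
def vec2img_sketch(inputs, height, width):
    imgs = []
    for vec, h, w in zip(inputs, height, width):
        c = vec.size // (h * w)
        img = vec.reshape((c, h, w)).transpose((1, 2, 0)).squeeze()
        imgs.append(img)
    return imgs if len(imgs) > 1 else imgs[0]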
def test_demo():
    # PaddlePaddle init
    paddle.init(use_gpu=True, gpu_id=FLAGS.gpu_id)
    params = sun3d.set_params()
    inputs = d_net.get_demon_inputs(params)
    params['stage'] = 5

    # Add neural network config
    outputs, out_field = d_net.get_demon_outputs(inputs, params,
                                                 ext_inputs=None)
    parameters, topo = paddle.parameters.create(outputs[out_field])

    # Read image pair 1, 2 and flow
    for scene_name in params['train_scene'][1:]:
        image_list = preprocess_util.list_files(
            params['flow_path'] + scene_name + '/flow/')
        image2depth = sun3d.get_image_depth_matching(scene_name)

        for pair_name in image_list[0:2]:
            image1, image2, flow_gt, depth1_gt, normal1_gt = \
                sun3d.load_image_pair(scene_name, pair_name, image2depth)
            image1_new = uts.transform(image1.copy(),
                                       height=params['size'][0],
                                       width=params['size'][1])
            image2_new = uts.transform(image2.copy(),
                                       height=params['size'][0],
                                       width=params['size'][1])
            intrinsic = np.array([0.89115971, 1.18821287, 0.5, 0.5])

            test_data = [(image1_new, image2_new, intrinsic)]
            depth_name = 'depth' if params['stage'] < 5 else 'depth_0'
            out_fields = ['flow', depth_name, 'normal',
                          'rotation', 'translation']
            output_list = [outputs[x] for x in out_fields]

            flow, depth, normal, rotation, translation = paddle.infer(
                output=topo,
                parameters=parameters,
                input=test_data,
                feeding={'image1': 0, 'image2': 1, 'intrinsic': 2})

            height_list = [cp.g_layer_map[outputs[x].name].height
                           for x in ['flow', depth_name, 'normal']]
            width_list = [cp.g_layer_map[outputs[x].name].width
                          for x in ['flow', depth_name, 'normal']]
            flow, depth, normal = vec2img(inputs=[flow, depth, normal],
                                          height=height_list,
                                          width=width_list)

            # optionally visualize the depth in 3D:
            # image1_down = cv2.resize(image1,
            #                          (depth.shape[1], depth.shape[0]))
            # visualize_prediction(
            #     depth=depth,
            #     image=np.uint8(image1_down.transpose([2, 0, 1])),
            #     rotation=rotation,
            #     translation=translation)

            uts.plot_images(OrderedDict([('image1', image1),
                                         ('image2', image2),
                                         ('flow', flow),
                                         ('flow_gt', flow_gt),
                                         ('depth', depth),
                                         ('depth_gt', depth1_gt)]),
                            # ('normal', (normal + 1.0) / 2.),
                            # ('normal_gt', (normal1_gt + 1.0) / 2.),
                            layout=[4, 2])
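
# `uts.transform` is assumed to resize an HWC uint8 image to the network
# resolution and flatten it into a normalized CHW float vector; the sketch
# below illustrates that assumed behaviour (the exact normalization constants
# used by the repo are not shown in this file):
def transform_sketch(image, height, width):
    resized = cv2.resize(image, (width, height),
                         interpolation=cv2.INTER_LINEAR)
    chw = resized.astype(np.float32).transpose((2, 0, 1))
    return (chw / 255.0 - 0.5).flatten()    # assumed zero-centered scaling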