Example #1
    def _test_result_1d(self, dtype):
        # Downsampling a 1x5 row keeps the 3x3 median at every second pixel.
        A = np.array([[1, 2, 3, 4, 5]], dtype=dtype)
        tensor = sops.median3x3_downsample(A)
        B = tensor.eval()
        correct = np.array([[1, 3, 5]], dtype=dtype)
        self.assertAllEqual(B, correct)

        # The op must behave the same on the transposed (5x1) input.
        tensor = sops.median3x3_downsample(A.transpose())
        B = tensor.eval()
        self.assertAllEqual(B, correct.transpose())
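The expected values above are consistent with a 3x3 median taken at every second pixel with replicated borders. A minimal NumPy sketch of that reference behaviour (the helper name is hypothetical, and the border mode is inferred from the test cases, not from the op's documentation):

import numpy as np

def median3x3_downsample_ref(img):
    # Pad by replicating the border pixels, then take the median of each
    # 3x3 window centered on the even-indexed pixels (stride 2).
    padded = np.pad(img, 1, mode='edge')
    h, w = img.shape
    out = np.empty(((h + 1) // 2, (w + 1) // 2), dtype=img.dtype)
    for y in range(0, h, 2):
        for x in range(0, w, 2):
            out[y // 2, x // 2] = np.median(padded[y:y + 3, x:x + 3])
    return out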
Example #2
    def test_gpu_equals_cpu(self):
        if not tf.test.is_gpu_available():
            return  # no GPU available; nothing to compare
        for dtype in TYPES:
            A = np.random.rand(10, 13).astype(dtype)
            with self.test_session(use_gpu=False, force_gpu=False):
                tensor_cpu = sops.median3x3_downsample(A)
                result_cpu = tensor_cpu.eval()

            with self.test_session(use_gpu=True, force_gpu=True):
                tensor_gpu = sops.median3x3_downsample(A)
                result_gpu = tensor_gpu.eval()

            self.assertAllEqual(result_cpu, result_gpu)
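Exact equality is a reasonable check here even for floating-point types: a 3x3 median selects one of the nine window values rather than computing a new one, so the CPU and GPU kernels should agree bit for bit.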
Example #3
def recursive_median_downsample(inp, iterations):
    """Recursively downsamples the input with a 3x3 median filter.

    inp: Tensor
        tensor to downsample
    iterations: int
        number of downsampling steps

    Returns a tuple with one tensor per downsampling step.
    """
    result = []
    for _ in range(iterations):
        # the first step reads the input; later steps read the previous level
        if not result:
            tmp_inp = inp
        else:
            tmp_inp = result[-1]
        result.append(sops.median3x3_downsample(tmp_inp))
    return tuple(result)
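A minimal usage sketch, assuming NCHW inputs of the size used by the prediction script below (the placeholder shape and session setup are assumptions for illustration):

import tensorflow as tf

# hypothetical input: a batch of single-channel 192x256 images, NCHW layout
images = tf.placeholder(tf.float32, shape=[None, 1, 192, 256])
pyramid = recursive_median_downsample(images, 2)
# pyramid[0] has resolution 96x128, pyramid[1] has 48x64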
Example #4
def create_prediction_file(dataset, dataset_dir):
    """Creates a hdf5 file with the predictions
    
    dataset: str
        name of the dataset
    dataset_dir: str
        path to the directory containing the datasets
    Returns the path to the created file
    """
 
    if tf.test.is_gpu_available(True):
        data_format = 'channels_first'
    else:  # running on the CPU requires the channels_last data format
        data_format = 'channels_last'
    print('Using data_format "{0}"'.format(data_format))
 
    ds = dataset
    # destination file
    prediction_file = '{0}_prediction.h5'.format(ds)
 
    # data types requested from the reader op
    data_tensors_keys = ('IMAGE_PAIR', 'MOTION', 'DEPTH', 'INTRINSICS')
 
    reader_params = {
             'batch_size': 1,
             'test_phase': True,   # deactivates randomization
             'builder_threads': 1, # must be 1 in test phase
             'inverse_depth': True,
             'motion_format': 'ANGLEAXIS6',
             'norm_trans_scale_depth': True,
             # input resolution for DeMoN
             'scaled_height': 192,
             'scaled_width': 256,
             'scene_pool_size': 5,
             # no augmentation
             'augment_rot180': 0,
             'augment_mirror_x': 0,
             'top_output': data_tensors_keys,
             'source': [{'path': os.path.join(dataset_dir,'{0}_test.h5'.format(ds))}],
            }
 
    reader_tensors = multi_vi_h5_data_reader(len(data_tensors_keys), json.dumps(reader_params))
 
    # create a dict to make the distinct data tensors accessible via keys
    data_dict = dict(zip(data_tensors_keys, reader_tensors[2]))
    info_tensor = reader_tensors[0]
    sample_ids_tensor = reader_tensors[1]
    image1, image2 = tf.split(data_dict['IMAGE_PAIR'], 2, axis=1)
 
    # downsample second image
    image2_2 = sops.median3x3_downsample(sops.median3x3_downsample(image2))
 
    gpu_options = tf.GPUOptions()
    gpu_options.per_process_gpu_memory_fraction = 0.8  # leave some memory to other processes
    session = tf.InteractiveSession(config=tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options))
 
 
    # init networks
    bootstrap_net = BootstrapNet(session)
    iterative_net = IterativeNet(session)
    refine_net = RefinementNet(session)
 
    session.run(tf.global_variables_initializer())
    
    # load weights
    saver = tf.train.Saver()
    saver.restore(session, args.checkpoint)
 
    fetch_dict = {
        'INFO': info_tensor,
        'SAMPLE_IDS': sample_ids_tensor,
        'image1': image1,
        'image2_2': image2_2,
    }
    fetch_dict.update(data_dict)
 
    if data_format == 'channels_last':
        for k in ('image1', 'image2_2', 'IMAGE_PAIR',):
            fetch_dict[k] = convert_NCHW_to_NHWC(fetch_dict[k])
 
    with h5py.File(prediction_file, 'w') as f:
 
        number_of_test_iterations = 1 # will be set to the correct value in the while loop
        test_iteration = 0
        while test_iteration < number_of_test_iterations:
 
            data = session.run(fetch_dict)
 
            # get number of iterations from the info vector
            number_of_test_iterations = int(data['INFO'][0])
 
            # create group for the current test sample and save the sample id.
            group = f.require_group('snapshot_1/{0}'.format(test_iteration))
            sample_id = (''.join(map(chr, data['SAMPLE_IDS']))).strip()
            group.attrs['sample_id'] = np.string_(sample_id)
 
            # save intrinsics
            group['intrinsics'] = data['INTRINSICS']
 
            # run the network and save outputs for each network iteration 'i'.
            # iteration 0 corresponds to the bootstrap network.
            # we also store the refined depth for each iteration.
            for i in range(4):
                if i == 0:
                    result = bootstrap_net.eval(data['IMAGE_PAIR'], data['image2_2'])
                else:
                    result = iterative_net.eval(
                        data['IMAGE_PAIR'],
                        data['image2_2'],
                        result['predict_depth2'],
                        result['predict_normal2'],
                        result['predict_rotation'],
                        result['predict_translation']
                    )
                # write predictions
                if data_format == 'channels_last':
                    group['predicted_flow/{0}'.format(i)] = result['predict_flow2'][0].transpose([2,0,1])
                    group['predicted_depth/{0}'.format(i)] = result['predict_depth2'][0,:,:,0]
                else:
                    group['predicted_flow/{0}'.format(i)] = result['predict_flow2'][0]
                    group['predicted_depth/{0}'.format(i)] = result['predict_depth2'][0,0]
 
                predict_motion = np.concatenate((result['predict_rotation'], result['predict_translation']), axis=1)
                group['predicted_motion/{0}'.format(i)] = predict_motion[0]
 
                # run refinement network
                result_refined = refine_net.eval(data['image1'], result['predict_depth2'], result['predict_normal2'])
              
                # write refined depth prediction
                if data_format == 'channels_last':
                    group['predicted_depth/{0}_refined'.format(i)] = result_refined['predict_depth0'][0,:,:,0]
                else:
                    group['predicted_depth/{0}_refined'.format(i)] = result_refined['predict_depth0'][0,0]
 
            test_iteration += 1
 
    del session
    tf.reset_default_graph()
    return prediction_file
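A minimal invocation sketch; the dataset name and directory are assumptions, and the function expects args.checkpoint to be defined at module scope before it is called:

prediction_file = create_prediction_file('sun3d', '/path/to/datasets')
print('Predictions written to', prediction_file)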
Example #5
    def _test_single_element(self, dtype):
        # a single pixel is its own 3x3 median; the output equals the input
        A = np.array([[1]], dtype=dtype)
        tensor = sops.median3x3_downsample(A)
        B = tensor.eval()
        self.assertAllEqual(B, A)