def run_bilateral_slice_apply(self, dev, grid_data, guide_data, input_data,
                              has_offset=False):
  with tf.device(dev):
    grid_tensor = tf.convert_to_tensor(grid_data, name='grid', dtype=tf.float32)
    guide_tensor = tf.convert_to_tensor(guide_data, name='guide', dtype=tf.float32)
    input_tensor = tf.convert_to_tensor(input_data, name='input', dtype=tf.float32)
    output_tensor = ops.bilateral_slice_apply(
        grid_tensor, guide_tensor, input_tensor, has_offset=has_offset)

  with self.test_session() as sess:
    output_data = sess.run(output_tensor)

  return output_data
def bilateral_slice_apply(grid, guide, input_image, has_offset=True, name=None):
  """Slices into a bilateral grid using the guide map.

  Args:
    grid: (Tensor) [batch_size, grid_h, grid_w, depth, n_outputs] grid to
      slice from. If has_offset is True, the last dimension holds
      (n_input + 1) * n_outputs affine coefficients (one offset per output
      channel).
    guide: (Tensor) [batch_size, h, w] guide map to slice along.
    input_image: (Tensor) [batch_size, h, w, n_input] input data onto which
      to apply the affine transform.
    has_offset: (bool) whether each affine transform includes an offset term.
    name: (string) name for the operation.

  Returns:
    sliced: (Tensor) [batch_size, h, w, n_outputs] sliced output.
  """
  with tf.name_scope(name):
    gridshape = grid.get_shape().as_list()
    if len(gridshape) == 6:
      # Fold the trailing [n_outputs, n_input] coefficient axes into a single
      # channel dimension: the op expects a 5-D grid.
      gs = tf.shape(grid)
      grid = tf.reshape(
          grid, tf.stack([gs[0], gs[1], gs[2], gs[3], gs[4] * gs[5]]))
    # Typical shapes:
    #   grid:        [-1, 16, 16, 8, 12]   [batch_size, grid_h, grid_w, depth, n_outputs]
    #   guide:       [-1, 512, 512]
    #   input_image: [-1, 512, 512, 3]
    sliced = hdrnet_ops.bilateral_slice_apply(
        grid, guide, input_image, has_offset=has_offset)
    return sliced
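# A short usage sketch (the random tensors below are illustrative stand-ins,
# not part of the library): with has_offset=True the grid's last dimension
# carries (n_input + 1) * n_outputs affine coefficients, e.g.
# 12 = (3 + 1) * 3 for an RGB-to-RGB transform, matching the typical shapes
# noted above.
#
#   grid = tf.random_uniform([1, 16, 16, 8, 12])
#   guide = tf.random_uniform([1, 512, 512])    # guide values in [0, 1]
#   image = tf.random_uniform([1, 512, 512, 3])
#   out = bilateral_slice_apply(grid, guide, image, has_offset=True)
#   # out: [1, 512, 512, 3]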
def test_guide_gradient(self):
  # TODO: this does not work yet; the tent weight max(1 - abs(x), 0) has
  # non-differentiable kinks, so the guide gradient check fails near grid
  # cell boundaries. Needs a differentiable 'max' in the tent?
  for dev in ['/gpu:0']:
    batch_size = 1
    h = 6
    w = 15
    gh = 3
    gw = 9
    d = 7
    i_chans = 1
    o_chans = 1
    grid_shape = [batch_size, gh, gw, d, (i_chans + 1) * o_chans]
    guide_shape = [batch_size, h, w]
    input_shape = [batch_size, h, w, i_chans]
    output_shape = [batch_size, h, w, o_chans]

    grid_data = np.random.rand(*grid_shape).astype(np.float32)
    guide_data = np.random.rand(*guide_shape).astype(np.float32)
    input_data = np.random.rand(*input_shape).astype(np.float32)

    with tf.device(dev):
      grid_tensor = tf.convert_to_tensor(grid_data, name='data', dtype=tf.float32)
      guide_tensor = tf.convert_to_tensor(guide_data, name='guide', dtype=tf.float32)
      input_tensor = tf.convert_to_tensor(input_data, name='input', dtype=tf.float32)
      output_tensor = ops.bilateral_slice_apply(
          grid_tensor, guide_tensor, input_tensor, has_offset=True)

    with self.test_session():
      # compute_gradient returns the (theoretical, numerical) Jacobians.
      jacob_t, jacob_n = tf.test.compute_gradient(
          guide_tensor, guide_shape, output_tensor, output_shape)
      margin = 1e-2
      # Report the (guide, output) index pairs whose Jacobian entries
      # disagree, to help localize the failure.
      idx = np.where(np.abs(jacob_t - jacob_n) >= margin)
      for i in range(len(idx[0])):
        guide_idx = np.unravel_index(idx[0][i], guide_shape)
        output_idx = np.unravel_index(idx[1][i], output_shape)
        print(guide_idx, output_idx)
      err = tf.test.compute_gradient_error(
          guide_tensor, guide_shape, output_tensor, output_shape)
      self.assertLess(err, margin)
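# Sketch of the kink behind the TODO above (illustrative only, not part of
# the op; assumes numpy imported as np, as elsewhere in this file): each axis
# of the trilinear slice weights contributions by the tent max(1 - |x|, 0),
# whose derivative jumps at x = 0 and |x| = 1. One hypothetical smooth
# surrogate replaces max(z, 0) with a scaled softplus; |x| would need a
# similar treatment, e.g. sqrt(x**2 + eps).
def _tent(x):
  # Hard tent weight used along each grid axis; kinks at x = 0 and |x| = 1.
  return np.maximum(1.0 - np.abs(x), 0.0)

def _smooth_tent(x, beta=50.0):
  # softplus(beta * z) / beta -> max(z, 0) as beta -> infinity.
  z = 1.0 - np.abs(x)
  return np.log1p(np.exp(beta * z)) / beta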
def test_grid_gradient(self):
  for dev in ['/gpu:0']:
    batch_size = 3
    h = 8
    w = 5
    gh = 6
    gw = 3
    d = 7
    i_chans = 3
    o_chans = 3
    grid_shape = [batch_size, gh, gw, d, (1 + i_chans) * o_chans]
    guide_shape = [batch_size, h, w]
    input_shape = [batch_size, h, w, i_chans]
    output_shape = [batch_size, h, w, o_chans]

    grid_data = np.random.rand(*grid_shape).astype(np.float32)
    # Keep guide values in [0.1, 0.9], away from the ends of the grid's
    # depth range.
    guide_data = 0.8 * np.random.rand(*guide_shape).astype(np.float32) + 0.1
    input_data = np.random.rand(*input_shape).astype(np.float32)

    with tf.device(dev):
      grid_tensor = tf.convert_to_tensor(grid_data, name='data', dtype=tf.float32)
      guide_tensor = tf.convert_to_tensor(guide_data, name='guide', dtype=tf.float32)
      input_tensor = tf.convert_to_tensor(input_data, name='input', dtype=tf.float32)
      output_tensor = ops.bilateral_slice_apply(
          grid_tensor, guide_tensor, input_tensor, has_offset=True)

    with self.test_session():
      err = tf.test.compute_gradient_error(
          grid_tensor, grid_shape, output_tensor, output_shape)
      self.assertLess(err, 3e-4)
def test_input_gradient(self):
  for dev in ['/gpu:0']:
    batch_size = 1
    h = 8
    w = 5
    gh = 6
    gw = 3
    d = 7
    i_chans = 3
    o_chans = 3
    grid_shape = [batch_size, gh, gw, d, (1 + i_chans) * o_chans]
    guide_shape = [batch_size, h, w]
    input_shape = [batch_size, h, w, i_chans]
    output_shape = [batch_size, h, w, o_chans]

    grid_data = np.random.rand(*grid_shape).astype(np.float32)
    guide_data = np.random.rand(*guide_shape).astype(np.float32)
    input_data = np.random.rand(*input_shape).astype(np.float32)

    with tf.device(dev):
      grid_tensor = tf.convert_to_tensor(grid_data, name='data', dtype=tf.float32)
      guide_tensor = tf.convert_to_tensor(guide_data, name='guide', dtype=tf.float32)
      input_tensor = tf.convert_to_tensor(input_data, name='input', dtype=tf.float32)
      output_tensor = ops.bilateral_slice_apply(
          grid_tensor, guide_tensor, input_tensor, has_offset=True)

    with self.test_session():
      err = tf.test.compute_gradient_error(
          input_tensor, input_shape, output_tensor, output_shape)
      self.assertLess(err, 3e-4)
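# A pure-NumPy reference for what the op computes at one pixel (a minimal
# sketch under the shape conventions used in these tests; the exact sampling
# offsets of the underlying kernel may differ slightly, and numpy is assumed
# imported as np). For output channel c: out[c] = sum over the 8 enclosing
# grid cells of tent_weight * (A[c, :n_in] . pixel + A[c, n_in]).
def _slice_apply_pixel(grid, guide_val, pixel, y, x, h, w):
  """grid: [gh, gw, d, (n_in + 1) * n_out]; pixel: [n_in]; returns [n_out]."""
  gh, gw, d, nc = grid.shape
  n_in = pixel.shape[0]
  n_out = nc // (n_in + 1)
  # Continuous grid coordinates for this pixel and its guide value.
  gx = (x + 0.5) * gw / float(w) - 0.5
  gy = (y + 0.5) * gh / float(h) - 0.5
  gz = guide_val * d - 0.5
  out = np.zeros(n_out, dtype=np.float32)
  # Trilinear slice: accumulate tent-weighted affine coefficients from the
  # two nearest cells along each axis, then apply the affine transform.
  for i in range(int(np.floor(gy)), int(np.floor(gy)) + 2):
    for j in range(int(np.floor(gx)), int(np.floor(gx)) + 2):
      for k in range(int(np.floor(gz)), int(np.floor(gz)) + 2):
        wgt = (max(1.0 - abs(gy - i), 0.0) * max(1.0 - abs(gx - j), 0.0)
               * max(1.0 - abs(gz - k), 0.0))
        ii = np.clip(i, 0, gh - 1)
        jj = np.clip(j, 0, gw - 1)
        kk = np.clip(k, 0, d - 1)
        coeffs = grid[ii, jj, kk].reshape(n_out, n_in + 1)
        # Matrix part times the input channels, plus the offset column.
        out += wgt * (coeffs[:, :n_in].dot(pixel) + coeffs[:, n_in])
  return out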