def test_upsample_transposed_convolution_jacobian_random(self):
  """Tests that the jacobian of the upsampling op is correct."""
  filter_count = 6
  kernel_width = 1
  initial_data = np.random.uniform(size=(2, 5, filter_count))
  identity_pool_map = _batch_sparse_eye((2,), 5, np.float64)
  conv_transpose = tf.keras.layers.Conv2DTranspose(
      filters=filter_count,
      kernel_size=(1, kernel_width),
      strides=(1, kernel_width),
      padding='valid',
      dtype='float64')
  # A first call builds the variables of the transposed convolution layer.
  gp.upsample_transposed_convolution(
      initial_data,
      identity_pool_map,
      sizes=None,
      kernel_size=kernel_width,
      transposed_convolution_op=conv_transpose)

  def upsample_fn(data):
    return gp.upsample_transposed_convolution(
        data,
        identity_pool_map,
        sizes=None,
        kernel_size=kernel_width,
        transposed_convolution_op=conv_transpose)

  # Initializes variables of the transpose conv layer.
  self.evaluate(tf.compat.v1.global_variables_initializer())

  self.assert_jacobian_is_correct_fn(upsample_fn, [initial_data])
def test_upsample_transposed_convolution_exception_raised_types(
    self, err_msg, data_type, pool_map_type, sizes_type):
  """Tests the correct exceptions are raised for invalid types.

  Args:
    err_msg: Regular expression the raised TypeError message must match.
    data_type: Numpy dtype used to build the `data` argument.
    pool_map_type: Numpy dtype used to build the `pool_map` argument.
    sizes_type: Numpy dtype used to build the `sizes` argument.
  """
  data = np.ones((2, 3, 3), dtype=data_type)
  pool_map = _dense_to_sparse(np.ones((2, 3, 3), dtype=pool_map_type))
  sizes = np.array(((1, 2), (2, 3)), dtype=sizes_type)

  # assertRaisesRegexp is a deprecated alias removed in Python 3.12; use
  # assertRaisesRegex instead.
  with self.assertRaisesRegex(TypeError, err_msg):
    gp.upsample_transposed_convolution(
        data, pool_map, sizes, kernel_size=1, transposed_convolution_op=None)
def test_upsample_transposed_convolution_exception_raised_callable(self):
  """Tests the correct exception is raised for an invalid convolution op."""
  data = np.ones((5, 3))
  pool_map = _dense_to_sparse(np.eye(5))
  err_msg = "'transposed_convolution_op' must be callable."

  # assertRaisesRegexp is a deprecated alias removed in Python 3.12; use
  # assertRaisesRegex instead.
  with self.assertRaisesRegex(TypeError, err_msg):
    gp.upsample_transposed_convolution(
        data,
        pool_map,
        sizes=None,
        kernel_size=1,
        transposed_convolution_op=1)
def test_upsample_transposed_convolution_exception_raised_shapes(
    self, err_msg, data_shape, pool_map_shape, sizes_shape):
  """Tests the correct exceptions are raised for invalid shapes.

  Args:
    err_msg: Regular expression the raised ValueError message must match.
    data_shape: Shape used to build the `data` argument.
    pool_map_shape: Shape used to build the `pool_map` argument.
    sizes_shape: Shape used to build the `sizes` argument, or None to pass
      `sizes=None`.
  """
  data = np.ones(data_shape, dtype=np.float32)
  pool_map = _dense_to_sparse(np.ones(pool_map_shape, dtype=np.float32))
  if sizes_shape is not None:
    sizes = np.ones(sizes_shape, dtype=np.int32)
  else:
    sizes = None

  # assertRaisesRegexp is a deprecated alias removed in Python 3.12; use
  # assertRaisesRegex instead.
  with self.assertRaisesRegex(ValueError, err_msg):
    gp.upsample_transposed_convolution(
        data, pool_map, sizes, kernel_size=1, transposed_convolution_op=None)
def test_upsample_transposed_convolution_jacobian_random_padding(self):
  """Tests that the jacobian is correct when the data is padded."""
  if not tf.executing_eagerly():
    return
  filter_count = 6
  sizes = ((2, 4), (3, 5))
  initial_data = np.random.uniform(size=(2, 3, filter_count))
  # Zero out the padded vertex of the first batch element.
  initial_data[0, -1, :] = 0.
  data = tf.convert_to_tensor(value=initial_data)
  dense_pool_map = np.array(
      (((0.5, 0.5, 0., 0., 0.), (0., 0., 0.5, 0.5, 0.),
        (0., 0., 0., 0., 0.)),
       ((1., 0., 0., 0., 0.), (0., 1. / 3., 1. / 3., 1. / 3., 0.),
        (0., 0., 0., 0., 1.))),
      dtype=initial_data.dtype)
  pool_map = _dense_to_sparse(dense_pool_map)
  kernel_width = 2
  conv_transpose = tf.keras.layers.Conv2DTranspose(
      filters=filter_count,
      kernel_size=(1, kernel_width),
      strides=(1, kernel_width),
      padding='valid')
  upsampled = gp.upsample_transposed_convolution(
      data,
      pool_map,
      sizes=sizes,
      kernel_size=kernel_width,
      transposed_convolution_op=conv_transpose)

  self.assert_jacobian_is_correct(data, initial_data, upsampled)
def test_upsample_transposed_convolution_zero_kernel(
    self, num_vertices, num_features, kernel_size, data_type):
  """Tests the upsampling with a zero kernel."""
  if not tf.executing_eagerly():
    return
  data = np.random.uniform(size=(num_vertices, num_features)).astype(data_type)
  # Each vertex pools uniformly over its kernel_size upsampled slots.
  pool_map = np.zeros(
      shape=(num_vertices, num_vertices * kernel_size), dtype=data_type)
  for row in range(num_vertices):
    start = kernel_size * row
    pool_map[row, start:start + kernel_size] = 1.0 / kernel_size
  pool_map = _dense_to_sparse(pool_map)
  # A transposed convolution with an all-zeros kernel and no bias maps any
  # input to zeros.
  zero_conv_transpose = tf.keras.layers.Conv2DTranspose(
      filters=num_features,
      kernel_size=(1, kernel_size),
      strides=(1, kernel_size),
      padding='valid',
      use_bias=False,
      kernel_initializer=tf.compat.v1.keras.initializers.zeros())
  upsampled = gp.upsample_transposed_convolution(
      data,
      pool_map,
      sizes=None,
      kernel_size=kernel_size,
      transposed_convolution_op=zero_conv_transpose)

  self.assertAllEqual(
      tf.shape(input=upsampled), (num_vertices * kernel_size, num_features))
  self.assertAllEqual(upsampled, tf.zeros_like(upsampled))
def gp_upsample_transposed_convolution(data):
  """Closure wrapping the upsampling op for jacobian evaluation on `data`."""
  # pool_map, sizes, kernel_size and transposed_convolution_op are captured
  # from the enclosing test's scope.
  upsample_kwargs = dict(
      sizes=sizes,
      kernel_size=kernel_size,
      transposed_convolution_op=transposed_convolution_op)
  return gp.upsample_transposed_convolution(data, pool_map, **upsample_kwargs)
def test_upsample_transposed_convolution_jacobian_random_padding(self):
  """Tests that the jacobian is correct when the data is padded."""
  filter_count = 6
  sizes = ((2, 4), (3, 5))
  initial_data = np.random.uniform(size=(2, 3, filter_count))
  # Zero out the padded vertex of the first batch element.
  initial_data[0, -1, :] = 0.
  dense_pool_map = np.array(
      (((0.5, 0.5, 0., 0., 0.), (0., 0., 0.5, 0.5, 0.),
        (0., 0., 0., 0., 0.)),
       ((1., 0., 0., 0., 0.), (0., 1. / 3., 1. / 3., 1. / 3., 0.),
        (0., 0., 0., 0., 1.))),
      dtype=initial_data.dtype)
  pool_map = _dense_to_sparse(dense_pool_map)
  kernel_width = 2
  conv_transpose = tf.keras.layers.Conv2DTranspose(
      filters=filter_count,
      kernel_size=(1, kernel_width),
      strides=(1, kernel_width),
      padding='valid',
      dtype='float64')
  # A first call builds the variables of the transposed convolution layer.
  gp.upsample_transposed_convolution(
      initial_data,
      pool_map,
      sizes=sizes,
      kernel_size=kernel_width,
      transposed_convolution_op=conv_transpose)

  def upsample_fn(data):
    return gp.upsample_transposed_convolution(
        data,
        pool_map,
        sizes=sizes,
        kernel_size=kernel_width,
        transposed_convolution_op=conv_transpose)

  # Initializes variables of the transpose conv layer.
  self.evaluate(tf.compat.v1.global_variables_initializer())

  self.assert_jacobian_is_correct_fn(upsample_fn, [initial_data])
def test_upsample_transposed_convolution_selector_kernel_random(
    self, num_vertices, num_features, kernel_size, kernel_index,
    feature1_index, feature2_index):
  """Tests the upsampling with an indicator kernel."""
  data = np.random.uniform(
      size=(num_vertices, num_features)).astype(np.float32)
  # Each vertex pools uniformly over its kernel_size upsampled slots.
  pool_map = np.zeros(
      shape=(num_vertices, num_vertices * kernel_size), dtype=np.float32)
  for row in range(num_vertices):
    start = kernel_size * row
    pool_map[row, start:start + kernel_size] = 1.0 / kernel_size
  pool_map = _dense_to_sparse(pool_map)
  # A kernel with a single 1 copies input feature column feature2_index into
  # output feature column feature1_index at offset kernel_index inside each
  # kernel window.
  selection = np.zeros(
      shape=(1, kernel_size, num_features, num_features), dtype=np.float32)
  selection[0, kernel_index, feature1_index, feature2_index] = 1.
  initializer = tf.compat.v1.constant_initializer(value=selection)
  conv_transpose = tf.keras.layers.Conv2DTranspose(
      filters=num_features,
      kernel_size=(1, kernel_size),
      strides=(1, kernel_size),
      padding='valid',
      use_bias=False,
      kernel_initializer=initializer)
  expected = np.zeros(
      shape=(num_vertices * kernel_size, num_features), dtype=np.float32)
  expected[kernel_index::kernel_size, feature1_index] = data[:, feature2_index]
  upsampled = gp.upsample_transposed_convolution(
      data,
      pool_map,
      sizes=None,
      kernel_size=kernel_size,
      transposed_convolution_op=conv_transpose)
  # Initializes variables of the transpose conv layer.
  self.evaluate(tf.compat.v1.global_variables_initializer())

  self.assertAllEqual(upsampled, expected)
def test_upsample_transposed_convolution_jacobian_random(self):
  """Tests that the jacobian of the upsampling op is correct."""
  if not tf.executing_eagerly():
    return
  filter_count = 6
  kernel_width = 1
  initial_data = np.random.uniform(size=(2, 5, filter_count))
  data = tf.convert_to_tensor(value=initial_data)
  identity_pool_map = _batch_sparse_eye((2,), 5, np.float64)
  conv_transpose = tf.keras.layers.Conv2DTranspose(
      filters=filter_count,
      kernel_size=(1, kernel_width),
      strides=(1, kernel_width),
      padding='valid')
  upsampled = gp.upsample_transposed_convolution(
      data,
      identity_pool_map,
      sizes=None,
      kernel_size=kernel_width,
      transposed_convolution_op=conv_transpose)

  self.assert_jacobian_is_correct(data, initial_data, upsampled)
def test_upsample_transposed_convolution_preset_padded(self):
  """Tests upsampling with presets."""
  data = np.reshape(np.arange(12).astype(np.float32), (2, 3, 2))
  # The last vertex of the first batch element is padding.
  data[0, -1, :] = 0.
  sizes = ((2, 3), (3, 3))
  pool_map = _dense_to_sparse(
      np.array((((0.5, 0.5, 0.), (0., 0., 1.), (0., 0., 0.)),
                ((1., 0., 0.), (0., 1., 0.), (0., 0., 1.))),
               dtype=np.float32))
  kernel = np.ones(shape=(1, 2, 2, 2), dtype=np.float32)
  # Use tf.compat.v1.constant_initializer for consistency with the other
  # tests in this file.
  initializer = tf.compat.v1.constant_initializer(value=kernel)
  transposed_convolution_op = tf.keras.layers.Conv2DTranspose(
      filters=2,
      kernel_size=(1, 2),
      strides=(1, 2),
      padding='valid',
      use_bias=False,
      kernel_initializer=initializer)
  # Convolving with an all-ones kernel is equal to summation of the input.
  data_sum = np.tile(np.sum(data, axis=-1, keepdims=True), (1, 1, 2))
  true = np.zeros(shape=(2, 3, 2), dtype=np.float32)
  true[0, :, :] = data_sum[0, (0, 0, 1), :]
  true[1, :, :] = data_sum[1, :, :]
  upsampled = gp.upsample_transposed_convolution(
      data,
      pool_map,
      sizes=sizes,
      kernel_size=2,
      transposed_convolution_op=transposed_convolution_op)
  # Initializes variables of the transpose conv layer.
  self.evaluate(tf.compat.v1.global_variables_initializer())

  self.assertAllEqual(upsampled.shape, (2, 3, 2))
  self.assertAllClose(upsampled, true)