def test_feature_steered_convolution_padding_random(self, batch_size,
                                                    num_vertices, in_channels,
                                                    out_channels,
                                                    num_weight_matrices):
  """Test mixed-topology batches (random vertices and neighbors)."""
  data, neighbors, sizes = _random_data(
      batch_size,
      num_vertices,
      in_channels,
      padding=True,
      only_self_edges=False)
  u, v, c, w, b = _random_variables(in_channels, out_channels,
                                    num_weight_matrices)
  with self.subTest(name="if_w_is_0_then_y_is_b"):
    y = gc.feature_steered_convolution(
        data=data,
        neighbors=neighbors,
        sizes=sizes,
        var_u=u,
        var_v=v,
        var_c=c,
        var_w=tf.zeros_like(w),
        var_b=b)
    for k in range(batch_size):
      y_crop = y[k, :sizes[k], :]
      y_expected = tf.broadcast_to(b, y_crop.shape)
      self.assertAllEqual(y_crop, y_expected)
      # Check for zeros in the padded region.
      self.assertAllEqual(y[k, sizes[k]:, :],
                          tf.zeros((num_vertices - sizes[k], out_channels)))
  with self.subTest(name="convolve_with_constant"):
    # Copy `data` so that writing the constant signal below does not mutate
    # the original array through aliasing.
    constant_data = data.copy()
    for k in range(batch_size):
      constant_data[k, :sizes[k], :] = np.tile(data[k, 0, :], (sizes[k], 1))
    y = gc.feature_steered_convolution(
        data=constant_data,
        neighbors=neighbors,
        sizes=sizes,
        var_u=u,
        var_v=v,
        var_c=c,
        var_w=w,
        var_b=b)
    for k in range(batch_size):
      y_crop = y[k, :sizes[k], :]
      y_const = tf.broadcast_to(y_crop[0, :], y_crop.shape)
      # A constant input signal on row-normalized neighborhoods must give a
      # constant output over the un-padded vertices.
      self.assertAllClose(y_crop, y_const)
      # Check for zeros in the padded region.
      self.assertAllEqual(y[k, sizes[k]:, :],
                          tf.zeros((num_vertices - sizes[k], out_channels)))
def test_feature_steered_convolution_jacobian_random(self, batch_size,
                                                     num_vertices, in_channels,
                                                     num_weight_matrices,
                                                     padding):
  """Test the Jacobian for random input data."""
  random_data = _random_data(
      batch_size,
      num_vertices,
      in_channels,
      padding,
      only_self_edges=False,
      data_type=np.float64,
      neighbors_type=np.float64)
  data_init = random_data[0]
  neighbors = random_data[1]
  sizes = random_data[2] if padding else None
  u, v, c, w, b = _random_variables(
      in_channels, in_channels, num_weight_matrices, dtype=np.float64)
  data = tf.convert_to_tensor(value=data_init)
  y = gc.feature_steered_convolution(
      data=data,
      neighbors=neighbors,
      sizes=sizes,
      var_u=u,
      var_v=v,
      var_c=c,
      var_w=w,
      var_b=b)
  self.assert_jacobian_is_correct(data, data_init, y)
def test_feature_steered_convolution_exception_not_raised_types(
    self, data_type, neighbors_type, sizes_type, var_type):
  """Check that no exceptions are raised for valid input types."""
  data, neighbors, sizes = _random_data(1, 5, 3, True, False, data_type,
                                        neighbors_type, sizes_type)
  u, v, c, w, b = _random_variables(3, 3, 1, var_type)
  try:
    gc.feature_steered_convolution(
        data=data,
        neighbors=neighbors,
        sizes=sizes,
        var_u=u,
        var_v=v,
        var_c=c,
        var_w=w,
        var_b=b)
  except Exception as e:  # pylint: disable=broad-except
    self.fail("Exception raised: %s" % str(e))
def feature_steered_convolution(data):
  # A closure over `neighbors` and the filter variables, exposing only `data`
  # as an argument for differentiation.
  return gc.feature_steered_convolution(
      data=data,
      neighbors=neighbors,
      sizes=None,
      var_u=u,
      var_v=v,
      var_c=c,
      var_w=w,
      var_b=b)
def test_feature_steered_convolution_exception_raised_shapes(self):
  """Check that invalid input shapes trigger the right exceptions."""
  with self.assertRaisesRegexp(ValueError, "must have a rank of 2"):
    data, neighbors = _dummy_data(1, 5, 2)
    u, v, c, w, b = _dummy_variables(2, 2, 1)
    data = data[0, :]
    _ = gc.feature_steered_convolution(
        data=data,
        neighbors=neighbors,
        sizes=None,
        var_u=u,
        var_v=v,
        var_c=c,
        var_w=w,
        var_b=b)
  with self.assertRaisesRegexp(ValueError, "must have a rank greater than 1"):
    u, v, c, w, b = _dummy_variables(2, 2, 1)
    data = np.ones(shape=(5,), dtype=np.float32)
    neighbors = _dense_to_sparse(np.ones(shape=(5,), dtype=np.float32))
    _ = gc.feature_steered_convolution(
        data=data,
        neighbors=neighbors,
        sizes=None,
        var_u=u,
        var_v=v,
        var_c=c,
        var_w=w,
        var_b=b)
  with self.assertRaisesRegexp(ValueError,
                               "Not all batch dimensions are identical."):
    data, neighbors = _dummy_data(1, 5, 2)
    u, v, c, w, b = _dummy_variables(2, 2, 1)
    _ = gc.feature_steered_convolution(
        data=data,
        neighbors=neighbors,
        sizes=(1, 1),
        var_u=u,
        var_v=v,
        var_c=c,
        var_w=w,
        var_b=b)
def call(self, inputs, **kwargs):
  # pyformat: disable
  """Executes the convolution.

  The shorthands used below are
    `V`: The number of vertices.
    `C`: The number of channels in the input data.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    inputs: A list of two tensors `[data, neighbors]`. `data` is a `float`
      tensor with shape `[A1, ..., An, V, C]`. `neighbors` is a `SparseTensor`
      with the same type as `data` and with shape `[A1, ..., An, V, V]`
      representing vertex neighborhoods. The neighborhood of a vertex defines
      the support region for convolution. For a mesh, a common choice for the
      neighborhood of vertex `i` would be the vertices in the K-ring of `i`
      (including `i` itself). Each vertex must have at least one neighbor. For
      a faithful implementation of the FeaStNet paper, `neighbors` should be a
      row-normalized weight matrix corresponding to the graph adjacency matrix
      with self-edges: `neighbors[A1, ..., An, i, j] > 0` if vertex `j` is a
      neighbor of vertex `i`, `neighbors[A1, ..., An, i, i] > 0` for all `i`,
      and `sum(neighbors, axis=-1)[A1, ..., An, i] == 1.0` for all `i`. These
      requirements are relaxed in this implementation.
    **kwargs: A dictionary containing the key `sizes`, which is an `int` tensor
      of shape `[A1, ..., An]` indicating the true input sizes in case of
      padding (`sizes=None` indicates no padding). `sizes[A1, ..., An] <= V`.
      If `data` and `neighbors` are 2-D, `sizes` will be ignored. As an example
      usage of `sizes`, consider an input consisting of three graphs `G0`,
      `G1`, and `G2` with `V0`, `V1`, and `V2` vertices respectively. The
      padded input would have the shapes `data.shape = [3, V, C]` and
      `neighbors.shape = [3, V, V]`, where `V = max([V0, V1, V2])`. The true
      sizes of each graph will be specified by `sizes=[V0, V1, V2]`.
      `data[i, :Vi, :]` and `neighbors[i, :Vi, :Vi]` will be the vertex and
      neighborhood data of graph `Gi`. The `SparseTensor` `neighbors` should
      have no nonzero entries in the padded regions.

  Returns:
    Tensor with shape `[A1, ..., An, V, num_output_channels]`.
  """
  # pyformat: enable
  sizes = kwargs.get('sizes', None)
  return gc.feature_steered_convolution(
      data=inputs[0],
      neighbors=inputs[1],
      sizes=sizes,
      var_u=self.var_u,
      var_v=self.var_v,
      var_c=self.var_c,
      var_w=self.var_w,
      var_b=self.var_b)
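# A minimal usage sketch for `call`, as a commented example. Assumptions not
# shown in this section: `layer` stands for an already-built instance of the
# enclosing layer class. Two padded graphs with true sizes 4 and 3, so V = 4
# and C = 3:
#
#   data = tf.zeros(shape=(2, 4, 3), dtype=tf.float32)       # [2, V, C]
#   adjacency = np.zeros(shape=(2, 4, 4), dtype=np.float32)  # [2, V, V]
#   adjacency[0, ...] = np.eye(4)     # G0: self-edges only, row-normalized.
#   adjacency[1, :3, :3] = np.eye(3)  # G1: zeros in the padded region.
#   neighbors = tf.sparse.from_dense(adjacency)
#   y = layer([data, neighbors], sizes=tf.constant([4, 3]))
#   # y.shape == (2, 4, num_output_channels)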
def test_feature_steered_convolution_exception_raised_types(
    self, err_msg, data_type, neighbors_type, sizes_type, var_type):
  """Check the type errors raised for invalid input types."""
  data, neighbors, sizes = _random_data(1, 5, 3, True, False, data_type,
                                        neighbors_type, sizes_type)
  u, v, c, w, b = _random_variables(3, 3, 1, var_type)
  with self.assertRaisesRegexp(TypeError, err_msg):
    _ = gc.feature_steered_convolution(
        data=data,
        neighbors=neighbors,
        sizes=sizes,
        var_u=u,
        var_v=v,
        var_c=c,
        var_w=w,
        var_b=b)
def test_feature_steered_convolution_padding_preset(self, data, neighbors, u,
                                                    v, c, w, b, expected):
  """Test expected result for preset data and filter values."""
  data, neighbors, expected = (
      np.array(i) for i in (data, neighbors, expected))
  u, v, c, w, b = (
      tf.convert_to_tensor(value=np.array(i).astype(data.dtype))
      for i in (u, v, c, w, b))
  y = gc.feature_steered_convolution(
      data=data,
      neighbors=_dense_to_sparse(neighbors),
      sizes=None,
      var_u=u,
      var_v=v,
      var_c=c,
      var_w=w,
      var_b=b)
  self.assertAllClose(y, expected)
def test_feature_steered_convolution_output_shape(self, batch_size,
                                                  num_vertices, in_channels,
                                                  out_channels,
                                                  num_weight_matrices):
  """Check that the output of convolution has the correct shape."""
  data, neighbors = _dummy_data(batch_size, num_vertices, in_channels)
  u, v, c, w, b = _dummy_variables(in_channels, out_channels,
                                   num_weight_matrices)
  y = gc.feature_steered_convolution(
      data=data,
      neighbors=neighbors,
      sizes=None,
      var_u=u,
      var_v=v,
      var_c=c,
      var_w=w,
      var_b=b)
  y_shape = y.shape.as_list()
  self.assertEqual(y_shape[-1], out_channels)
  self.assertAllEqual(y_shape[:-1], data.shape[:-1])
def test_feature_steered_convolution_jacobian_preset(self, num_vertices,
                                                     num_channels,
                                                     data_multiplier):
  """Test the Jacobian is correct for preset inputs."""
  # Corner cases include one vertex, one channel, and all-zero features.
  data_init = data_multiplier * np.random.uniform(
      size=(num_vertices, num_channels)).astype(np.float64)
  neighbors = tf.sparse.eye(num_vertices, dtype=tf.float64)
  u, v, c, w, b = _random_variables(
      num_channels, num_channels, 1, dtype=np.float64)
  data = tf.convert_to_tensor(value=data_init)
  y = gc.feature_steered_convolution(
      data=data,
      neighbors=neighbors,
      sizes=None,
      var_u=u,
      var_v=v,
      var_c=c,
      var_w=w,
      var_b=b)
  self.assert_jacobian_is_correct(data, data_init, y)
def feature_steered_convolution_layer(
    data,
    neighbors,
    sizes,
    translation_invariant=True,
    num_weight_matrices=8,
    num_output_channels=None,
    initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.1),
    name=None,
    var_name=None):
  # pyformat: disable
  """Wraps the function `feature_steered_convolution` as a TensorFlow layer.

  The shorthands used below are
    `V`: The number of vertices.
    `C`: The number of channels in the input data.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    data: A `float` tensor with shape `[A1, ..., An, V, C]`.
    neighbors: A `SparseTensor` with the same type as `data` and with shape
      `[A1, ..., An, V, V]` representing vertex neighborhoods. The neighborhood
      of a vertex defines the support region for convolution. For a mesh, a
      common choice for the neighborhood of vertex `i` would be the vertices in
      the K-ring of `i` (including `i` itself). Each vertex must have at least
      one neighbor. For a faithful implementation of the FeaStNet paper,
      `neighbors` should be a row-normalized weight matrix corresponding to the
      graph adjacency matrix with self-edges:
      `neighbors[A1, ..., An, i, j] > 0` if vertex `j` is a neighbor of vertex
      `i`, `neighbors[A1, ..., An, i, i] > 0` for all `i`, and
      `sum(neighbors, axis=-1)[A1, ..., An, i] == 1.0` for all `i`. These
      requirements are relaxed in this implementation.
    sizes: An `int` tensor of shape `[A1, ..., An]` indicating the true input
      sizes in case of padding (`sizes=None` indicates no padding).
      `sizes[A1, ..., An] <= V`. If `data` and `neighbors` are 2-D, `sizes`
      will be ignored. As an example, consider an input consisting of three
      graphs `G0`, `G1`, and `G2` with `V0`, `V1`, and `V2` vertices
      respectively. The padded input would have the following shapes:
      `data.shape = [3, V, C]` and `neighbors.shape = [3, V, V]`, where
      `V = max([V0, V1, V2])`. The true sizes of each graph will be specified
      by `sizes=[V0, V1, V2]`. `data[i, :Vi, :]` and `neighbors[i, :Vi, :Vi]`
      will be the vertex and neighborhood data of graph `Gi`. The
      `SparseTensor` `neighbors` should have no nonzero entries in the padded
      regions.
    translation_invariant: A `bool`. If `True` the assignment of features to
      weight matrices will be invariant to translation.
    num_weight_matrices: An `int` specifying the number of weight matrices
      used in the convolution.
    num_output_channels: An optional `int` specifying the number of channels
      in the output. If `None` then `num_output_channels = C`.
    initializer: An initializer for the trainable variables.
    name: A (name_scope) name for this op. Passed through to
      `feature_steered_convolution()`.
    var_name: A (var_scope) name for the variables. Defaults to
      `graph_convolution_feature_steered_convolution_weights`.

  Returns:
    Tensor with shape `[A1, ..., An, V, num_output_channels]`.
  """
  # pyformat: enable
  with tf.compat.v1.variable_scope(
      var_name,
      default_name='graph_convolution_feature_steered_convolution_weights'):
    # Skips shape validation to avoid redundancy with
    # feature_steered_convolution().
    data = tf.convert_to_tensor(value=data)
    in_channels = tf.compat.v1.dimension_value(data.shape[-1])
    if num_output_channels is None:
      out_channels = in_channels
    else:
      out_channels = num_output_channels
    var_u = tf.compat.v1.get_variable(
        shape=(in_channels, num_weight_matrices),
        dtype=data.dtype,
        initializer=initializer,
        name='u')
    if translation_invariant:
      # Translation invariance is achieved by tying `var_v = -var_u`.
      var_v = -var_u
    else:
      var_v = tf.compat.v1.get_variable(
          shape=(in_channels, num_weight_matrices),
          dtype=data.dtype,
          initializer=initializer,
          name='v')
    var_c = tf.compat.v1.get_variable(
        shape=(num_weight_matrices,),
        dtype=data.dtype,
        initializer=initializer,
        name='c')
    var_w = tf.compat.v1.get_variable(
        shape=(num_weight_matrices, in_channels, out_channels),
        dtype=data.dtype,
        initializer=initializer,
        name='w')
    var_b = tf.compat.v1.get_variable(
        shape=(out_channels,),
        dtype=data.dtype,
        initializer=initializer,
        name='b')
    return gc.feature_steered_convolution(
        data=data,
        neighbors=neighbors,
        sizes=sizes,
        var_u=var_u,
        var_v=var_v,
        var_c=var_c,
        var_w=var_w,
        var_b=var_b,
        name=name)
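# A minimal usage sketch of `feature_steered_convolution_layer`, as a
# commented example. The shapes and hyper-parameters below are illustrative
# assumptions, not values prescribed by the library. A single un-batched
# graph with V = 4 vertices and C = 3 channels, using identity
# self-neighborhoods, which are trivially row-normalized:
#
#   data = tf.zeros(shape=(4, 3), dtype=tf.float32)
#   neighbors = tf.sparse.eye(4, dtype=tf.float32)
#   y = feature_steered_convolution_layer(
#       data=data,
#       neighbors=neighbors,
#       sizes=None,
#       translation_invariant=True,
#       num_weight_matrices=2,
#       num_output_channels=8)
#   # y.shape == (4, 8)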
def test_feature_steered_convolution_only_self_edges(self, batch_size,
                                                     num_vertices, in_channels,
                                                     out_channels,
                                                     num_weight_matrices):
  """Test convolution when the graph has only self-edges."""
  data, neighbors = _random_data(
      batch_size,
      num_vertices,
      in_channels,
      padding=False,
      only_self_edges=True)
  u, v, c, w, b = _random_variables(in_channels, out_channels,
                                    num_weight_matrices)

  with self.subTest(name="w=0_expect_output=b"):
    y = gc.feature_steered_convolution(
        data=data,
        neighbors=neighbors,
        sizes=None,
        var_u=u,
        var_v=v,
        var_c=c,
        var_w=tf.zeros_like(w),
        var_b=b)
    y_expected = tf.broadcast_to(b, y.shape)
    self.assertAllEqual(y, y_expected)

  with self.subTest(name="translation_invariant_self_edges"):
    # With `var_v = -var_u` and self-edges only, the soft assignment of each
    # vertex to the weight matrices reduces to `softmax(c)`, so the
    # convolution becomes a single shared linear map plus bias.
    y = gc.feature_steered_convolution(
        data=data,
        neighbors=neighbors,
        sizes=None,
        var_u=u,
        var_v=-u,
        var_c=c,
        var_w=w,
        var_b=b)
    q = tf.reshape(
        tf.exp(c) / tf.reduce_sum(input_tensor=tf.exp(c)),
        (num_weight_matrices, 1, 1))
    if batch_size > 0:
      q_times_w = tf.reduce_sum(input_tensor=q * w, axis=0, keepdims=True)
      q_times_w = tf.tile(q_times_w, (batch_size, 1, 1))
    else:
      q_times_w = tf.reduce_sum(input_tensor=q * w, axis=0)
    y_expected = tf.matmul(data, q_times_w) + tf.broadcast_to(b, y.shape)
    self.assertAllClose(y, y_expected)

  with self.subTest(name="constant_signal"):
    if batch_size > 0:
      constant_data = np.tile(
          np.random.uniform(size=(batch_size, 1, in_channels)).astype(
              np.float32), (1, num_vertices, 1))
    else:
      constant_data = np.tile(
          np.random.uniform(size=(1, in_channels)).astype(np.float32),
          (num_vertices, 1))
    y = gc.feature_steered_convolution(
        data=constant_data,
        neighbors=neighbors,
        sizes=None,
        var_u=u,
        var_v=v,
        var_c=c,
        var_w=w,
        var_b=b)
    if batch_size > 0:
      y_expected = tf.tile(y[:, :1, :], (1, num_vertices, 1))
    else:
      y_expected = tf.tile(y[:1, :], (num_vertices, 1))
    self.assertAllClose(y, y_expected)
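# Worked sketch of the closed form checked in the translation-invariant
# subtest above (a standalone NumPy mirror of the TF computation, offered as
# an assumption-free derivation of the expected value, not library code).
# With `var_v = -var_u` and self-edges only, the per-vertex assignment
# weights `softmax(u^T x_i + v^T x_i + c) = softmax(c)` are independent of
# `x_i`, so for un-batched `data` of shape (V, C_in), `w` of shape
# (M, C_in, C_out), `c` of shape (M,), and `b` of shape (C_out,):
#
#   import numpy as np
#   q = np.exp(c) / np.sum(np.exp(c))      # soft assignment, shape (M,)
#   w_eff = np.einsum("m,mio->io", q, w)   # blended weights, (C_in, C_out)
#   y_expected = data @ w_eff + b          # one shared linear map plus bias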