Code Example #1
File: edge.py Project: jackd/deep-cloud
def distribute_node_features(node_features_a, node_features_b,
                             flat_edge_features, flat_node_indices,
                             row_splits):
    """
    Distribute node features amongst edges.

    Args:
        node_features_a: [n_a, f] float tensor of node features in set `a`.
        node_features_b: [n_b, f] float tensor of node features in set `b`.
        flat_edge_features: [n_e, f] float tensor of edge features.
        flat_node_indices: indices of set `b` in ragged ordering of set `a`.
        row_splits: to form ragged ordering with flat_node_indices.

    Returns:
        [n_e, f] float tensor resulting from adding the distributed node
            features to the existing edge features.
    """
    # No short-cut for symmetric version.
    from more_keras.ops import utils
    assert_flat_tensor('node_features_a', node_features_a, 2, FLOAT_TYPES)
    assert_flat_tensor('node_features_b', node_features_b, 2, FLOAT_TYPES)
    assert_flat_tensor('flat_edge_features', flat_edge_features, 2,
                       FLOAT_TYPES)
    assert_flat_tensor('flat_node_indices', flat_node_indices, 1, INT_TYPES)
    assert_flat_tensor('row_splits', row_splits, 1, INT_TYPES)
    node_features_a = op_utils.repeat(node_features_a,
                                      utils.diff(row_splits),
                                      axis=0)
    node_features_b = tf.gather(node_features_b, flat_node_indices)
    return tf.add_n([node_features_a, node_features_b, flat_edge_features])
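The repeat/diff/gather pattern above is the core of the ragged distribution. Below is a minimal sketch with made-up data (not from the repository), using plain TensorFlow ops in place of op_utils.repeat and utils.diff:

import tensorflow as tf

# Hypothetical sizes: 2 nodes in set `a`, 3 nodes in set `b`, 4 edges.
node_features_a = tf.constant([[1.0], [2.0]])                    # [n_a, f]
node_features_b = tf.constant([[10.0], [20.0], [30.0]])          # [n_b, f]
flat_edge_features = tf.constant([[0.1], [0.2], [0.3], [0.4]])   # [n_e, f]
flat_node_indices = tf.constant([0, 1, 1, 2])   # index into set `b` per edge
row_splits = tf.constant([0, 1, 4])             # node 0 of `a` has 1 edge, node 1 has 3

row_lengths = row_splits[1:] - row_splits[:-1]                 # what utils.diff computes here
a_per_edge = tf.repeat(node_features_a, row_lengths, axis=0)   # stands in for op_utils.repeat
b_per_edge = tf.gather(node_features_b, flat_node_indices)
result = a_per_edge + b_per_edge + flat_edge_features
# result: [[11.1], [22.2], [22.3], [32.4]]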
Code Example #2
File: utils_test.py Project: jackd/more-keras
    def test_diff(self):
        dims = (4, 5, 6)
        x = np.arange(np.prod(dims)).reshape(dims)
        axes = 0, 1, 2, -1, -2, -3
        ns = 1, 2
        for axis in axes:
            for n in ns:
                expected = np.diff(x, n=n, axis=axis)
                actual = self.evaluate(utils.diff(x, n=n, axis=axis))
                self.assertAllClose(actual, expected)
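The test only pins down the semantics: utils.diff should agree with np.diff for the tested n and axis values. A minimal TensorFlow sketch with that behaviour (the actual more_keras implementation may differ):

import numpy as np
import tensorflow as tf

def diff_sketch(x, n=1, axis=-1):
    """n-th discrete difference along `axis`, mirroring np.diff."""
    x = tf.convert_to_tensor(x)
    ax = axis % x.shape.ndims
    for _ in range(n):
        # subtract each slice from its successor along `axis`
        upper = tuple([slice(None)] * ax + [slice(1, None)])
        lower = tuple([slice(None)] * ax + [slice(None, -1)])
        x = x[upper] - x[lower]
    return x

x = np.arange(4 * 5 * 6).reshape(4, 5, 6)
np.testing.assert_allclose(diff_sketch(x, n=2, axis=1).numpy(),
                           np.diff(x, n=2, axis=1))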
Code Example #3
    def __init__(self,
                 row_splits_or_k,
                 initial_units,
                 network_fn,
                 dense_factory=Dense):
        # rank 1 (row_splits) -> ragged neighborhoods; rank 0 -> constant size k
        self.is_ragged = row_splits_or_k.shape.ndims
        if self.is_ragged:
            self.row_lengths = op_utils.diff(row_splits_or_k)
        else:
            self.k = row_splits_or_k
        self.network_fn = network_fn
        self.initial_units = initial_units
        self.dense_factory = dense_factory
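The rank check above is worth spelling out: a rank-0 row_splits_or_k is treated as a constant neighborhood size k, while a rank-1 tensor is interpreted as ragged row_splits whose first differences are the per-node row lengths. A small illustration with assumed values:

import tensorflow as tf

row_splits = tf.constant([0, 2, 5, 9])   # rank 1 -> ndims == 1 -> ragged
k = tf.constant(8)                       # rank 0 -> ndims == 0 -> constant neighborhood size
print(row_splits.shape.ndims, k.shape.ndims)   # 1 0
print(row_splits[1:] - row_splits[:-1])        # row lengths [2, 3, 4]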
Code Example #4
File: conv.py Project: jackd/deep-cloud
def _reduce_unweighted_flat_mean(x, row_splits_or_k, eps):
    if row_splits_or_k.shape.ndims == 0:
        # constant neighborhood size
        x = utils.reshape_leading_dim(x, (-1, row_splits_or_k))
        _assert_is_rank(3, x, 'x')
        denom = tf.cast(row_splits_or_k, x.dtype)
    else:
        # ragged
        x = tf.RaggedTensor.from_row_splits(x, row_splits_or_k)
        assert (x.shape.ndims == 3)
        denom = tf.expand_dims(tf.cast(utils.diff(row_splits_or_k), x.dtype),
                               axis=-1)
        assert (denom.shape.ndims == 2)

    if eps is not None:
        denom += eps
    return tf.reduce_sum(x, axis=1) / denom
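In the ragged branch this reduces to a per-neighborhood mean of the flat features, with the optional eps added to the denominator as a guard against empty neighborhoods. A numeric sketch with assumed data:

import tensorflow as tf

x = tf.constant([[1.0], [3.0], [5.0], [7.0], [9.0]])   # 5 flat neighbor features
row_splits = tf.constant([0, 2, 5])                    # neighborhood sizes 2 and 3
ragged = tf.RaggedTensor.from_row_splits(x, row_splits)
denom = tf.cast(row_splits[1:] - row_splits[:-1], x.dtype)[:, tf.newaxis]
mean = tf.reduce_sum(ragged, axis=1) / denom
# mean: [[2.0], [7.0]]  -- (1 + 3) / 2 and (5 + 7 + 9) / 3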
Code Example #5
def very_dense_semantic_segmenter(input_spec,
                                  output_spec,
                                  dense_factory=Dense,
                                  features_fn=very_dense_features):
    num_classes = output_spec.shape[-1]
    inputs = spec.inputs(input_spec)
    class_masks = inputs.pop('class_masks', None)
    node_features, edge_features, global_features = features_fn(inputs)
    del edge_features, global_features
    node_features = [nf[0] for nf in node_features]  # high res features
    preds = [dense_factory(num_classes)(n) for n in node_features]
    if_false = tf.fill(tf.shape(preds[0]),
                       value=tf.constant(-np.inf, dtype=tf.float32))
    outer_row_splits = inputs['outer_row_splits'][0]
    outer_row_lengths = op_utils.diff(outer_row_splits)
    if class_masks is not None:
        class_masks = tf.repeat(class_masks, outer_row_lengths, axis=0)
        preds = [tf.where(class_masks, pred, if_false) for pred in preds]
    # from_row_splits = tf.keras.layers.Lambda(_from_row_splits)
    # preds = [from_row_splits([pred, outer_row_splits]) for pred in preds]
    inputs = tf.nest.flatten(inputs)
    return tf.keras.Model(inputs=inputs, outputs=preds)
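The class-mask handling above repeats masks that are defined per outer element so they line up with the per-node predictions, then writes -inf into disallowed class logits so a subsequent softmax gives those classes zero probability. A small sketch with hypothetical shapes:

import numpy as np
import tensorflow as tf

preds = tf.constant([[0.2, 1.0, -0.5],
                     [0.7, 0.1, 0.4],
                     [0.0, 0.3, 0.9]])            # [n_nodes, num_classes]
class_masks = tf.constant([[True, True, False],
                           [True, False, True]])  # [n_outer, num_classes]
outer_row_splits = tf.constant([0, 1, 3])         # outer element 0 owns 1 node, element 1 owns 2
outer_row_lengths = outer_row_splits[1:] - outer_row_splits[:-1]

masks_per_node = tf.repeat(class_masks, outer_row_lengths, axis=0)
if_false = tf.fill(tf.shape(preds), tf.constant(-np.inf, dtype=tf.float32))
masked = tf.where(masks_per_node, preds, if_false)
# tf.nn.softmax(masked) assigns zero probability to the masked-out classes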
Code Example #6
File: conv.py Project: jackd/deep-cloud
def flat_expanding_global_deconv(global_features, coord_features,
                                 row_splits_or_k):
    """
    Global deconvolution operation.

    Args:
        global_features: [pi, fi]
        coord_features: [po, fk]
        row_splits_or_k: [pi+1]

    Returns:
        convolved features: [po, fi*fk]
    """
    if row_splits_or_k.shape.ndims == 0:
        raise NotImplementedError

    global_features = utils.repeat(global_features,
                                   utils.diff(row_splits_or_k),
                                   axis=0)
    merged = utils.outer(global_features, coord_features)
    merged = utils.flatten_final_dims(merged, 2)
    return merged
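The outer/flatten step above takes, per output point, the outer product of the repeated global features with the coordinate features and flattens the last two dimensions into a single [po, fi*fk] feature matrix. A minimal sketch using tf.einsum and tf.reshape as assumed stand-ins for the repository's utils helpers:

import tensorflow as tf

global_per_point = tf.constant([[1.0, 2.0],
                                [3.0, 4.0]])          # [po, fi], already repeated
coord_features = tf.constant([[10.0, 20.0, 30.0],
                              [1.0, 0.0, -1.0]])      # [po, fk]
outer = tf.einsum('pi,pk->pik', global_per_point, coord_features)   # [po, fi, fk]
merged = tf.reshape(outer, (tf.shape(outer)[0], -1))                # [po, fi * fk]
# merged[0] == [10, 20, 30, 20, 40, 60]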
Code Example #7
def multi_pointnet_transpose(node_layer,
                             coord_layer,
                             activation,
                             node_features,
                             coord_features,
                             indices,
                             row_splits,
                             gather_first=False):
    row_lengths = op_utils.diff(row_splits)
    if gather_first:
        node_features = repeat(node_features, row_lengths, axis=0)
        node_features = node_layer(node_features)
    else:
        node_features = node_layer(node_features)
        node_features = repeat(node_features, row_lengths, axis=0)
    coord_features = coord_layer(coord_features)
    features = node_features + coord_features
    if activation is not None:
        features = activation(features)
    features = tf.math.unsorted_segment_max(
        features, indices, num_segments=tf.reduce_max(indices) + 1)
    return features
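The final reduction above is a segment-wise max pool: per-edge features are collapsed into one feature vector per output node, with the mapping given by `indices`. A tiny sketch with assumed data:

import tensorflow as tf

features = tf.constant([[1.0], [5.0], [2.0], [4.0]])   # per-edge features
indices = tf.constant([0, 0, 1, 1])                    # output node of each edge
pooled = tf.math.unsorted_segment_max(
    features, indices, num_segments=tf.reduce_max(indices) + 1)
# pooled: [[5.0], [4.0]]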
Code Example #8
def ragged_convolution(activation,
                       layer,
                       node_features,
                       coord_features,
                       indices,
                       row_splits,
                       weights=None,
                       gather_first=False):
    coord_dims = coord_features.shape[-1]
    assert (coord_dims is not None)
    units = layer.units // coord_dims
    row_lengths = op_utils.diff(row_splits)
    if gather_first:
        node_features = repeat(node_features, row_lengths, axis=0)
        node_features = layer(node_features)
    else:
        node_features = layer(node_features)
        node_features = repeat(node_features, row_lengths, axis=0)

    node_features = tf.reshape(node_features, (-1, units, coord_dims))
    return _ragged_conv_combine(activation, node_features, coord_features,
                                indices, weights)
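_ragged_conv_combine is not shown here, but the reshape makes the intent of the decomposition clear: the dense layer emits units * coord_dims values per point, viewed as one coefficient per (output unit, coordinate feature) pair. A purely illustrative sketch of that reshape and one plausible, hypothetical contraction (the actual combine step in deep-cloud may differ):

import tensorflow as tf

units, coord_dims = 4, 3
per_edge = tf.random.normal((6, units * coord_dims))       # dense output, already repeated per edge
per_edge = tf.reshape(per_edge, (-1, units, coord_dims))   # [n_e, units, coord_dims]
coord_features = tf.random.normal((6, coord_dims))         # [n_e, coord_dims]
# Hypothetical combine: contract the coordinate dimension per edge; a segment
# reduction over `indices` (as in the other examples) could then pool edges into nodes.
combined = tf.einsum('eud,ed->eu', per_edge, coord_features)   # [n_e, units]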