Example #1
def set_abstraction(
    features,
    neighborhood,
    edge_mlp,
    reduction=tf.reduce_max,
    node_mlp=None,
    coords_as_features=True,
):
    """
    Reimplementation of the original `pointnet_sa_module` function.

    Args:
        features: [b, n_i?, filters_in] float32 tensor of flattened batched point
            features.
        neighborhood: `deepcloud.neigh.Neighborhood` instance with `n_i` inputs
            and `n_o` output points.
        edge_mlp: callable acting on each edge's features.
        reduction: operation to reduce neighborhoods to point features.
        node_mlp: callable acting on each point's features after reduction.
        coords_as_features: if True, concatenate each edge's relative coords
            onto the gathered point features when forming edge features.

    Returns:
        features: [b, n_o?, filters_o] float32 tensor, where filters_o is the
          number of output features of `edge_mlp` if `node_mlp` is None else the
          number of output features of `node_mlp`.
    """
    def flat_rel_coords():
        return b.as_batched_model_input(
            neighborhood.rel_coords.flat_values).flat_values

    offset_batched_neighbors = neighborhood.offset_batched_neighbors
    if features is None:
        features = edge_mlp(flat_rel_coords())
    else:
        features = layer_utils.flatten_leading_dims(features)
        if coords_as_features:
            features = tf.gather(features,
                                 offset_batched_neighbors.flat_values)
            features = layers.Lambda(tf.concat, arguments=dict(axis=-1))(
                [features, flat_rel_coords()])
            features = edge_mlp(features)
        else:
            # applying edge_mlp before the gather is more efficient than the
            # original implementation: it transforms n_i rows rather than one
            # row per edge
            features = edge_mlp(features)
            features = tf.gather(features,
                                 offset_batched_neighbors.flat_values)

    # features is now flat, [B, f]
    features = tf.RaggedTensor.from_nested_row_splits(
        features, offset_batched_neighbors.nested_row_splits)
    # features is now [b, n_o?, k?, E]
    features = ragged_lambda(reduction, arguments=dict(axis=-2))(features)
    # features is now [b, n_o?, E]
    if node_mlp is not None:
        if isinstance(features, tf.RaggedTensor):
            features = tf.ragged.map_flat_values(node_mlp, features)
        else:
            features = node_mlp(features)

    return features
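Stripped of the repo-specific `Neighborhood`, batching, and `ragged_lambda` helpers, the pattern above (gather neighbor features, apply an edge MLP, reduce each neighborhood) can be sketched in plain TensorFlow; names and sizes below are illustrative only:

import tensorflow as tf

# 5 flat input points with 4-d features, grouped into 2 output
# neighborhoods of sizes 3 and 2
features = tf.random.normal((5, 4))
neighbors = tf.ragged.constant([[0, 1, 2], [3, 4]])

edge_mlp = tf.keras.layers.Dense(8)

# gather per-edge features, apply the edge MLP, then max-reduce each
# neighborhood back to a single output-point feature
edge_features = edge_mlp(tf.gather(features, neighbors.flat_values))
edge_features = tf.RaggedTensor.from_row_splits(edge_features,
                                                neighbors.row_splits)
point_features = tf.reduce_max(edge_features, axis=1)  # [2, 8]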
Example #2
 def global_conv(self, features, coord_features, row_splits, filters_out):
     return conv.mlp_edge_conv(
         features,
         utils.flatten_leading_dims(coord_features, 2),
         None,
         row_splits,
         lambda features: self._global_fn(features, filters_out),
         weights=None)
Example #3
 def global_conv(self, features, coord_features, row_splits, filters_out):
     features = conv.flat_expanding_edge_conv(
         features, utils.flatten_leading_dims(coord_features, 2), None,
         row_splits)
     if filters_out is not None:
         features = Dense(filters_out)(features)
     if self._global_activation is not None:
         features = self._global_activation(features)
     return features
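Examples #2 and #3 are two variants of the same global convolution: both collapse a flat point cloud to one feature vector per cloud using `row_splits`, but #2 folds the output transform into `conv.mlp_edge_conv` via `self._global_fn`, while #3 reduces first with `conv.flat_expanding_edge_conv` and applies a `Dense` layer plus activation afterwards. Assuming the reduction is a per-cloud sum over combined point and coordinate features, a plain-TF analogue is:

import tensorflow as tf

# 5 points across 2 clouds: row_splits [0, 3, 5]
features = tf.random.normal((5, 8))
coord_features = tf.random.normal((5, 3))
row_splits = tf.constant([0, 3, 5], dtype=tf.int64)

# every point is an 'edge' to a single global node per cloud
edge_features = tf.concat([features, coord_features], axis=-1)
per_cloud = tf.RaggedTensor.from_row_splits(edge_features, row_splits)
global_features = tf.reduce_sum(per_cloud, axis=1)            # [2, 11]
global_features = tf.keras.layers.Dense(16)(global_features)  # filters_out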
Example #4
 def _batched_ragged(self, rt):
     # memoized: each pre-batch ragged input maps to a single batched output
     if rt in self._batched_inputs_dict:
         return self._batched_inputs_dict[rt]
     size = self._batched_fixed_tensor(
         ragged_layers.ragged_lambda(lambda x: x.nrows())(rt))
     nested_row_lengths = ragged_layers.nested_row_lengths(rt)
     nested_row_lengths = [
         self._batched_tensor(rl) for rl in nested_row_lengths
     ]
     nested_row_lengths = [
         layer_utils.flatten_leading_dims(rl, 2)
         for rl in nested_row_lengths
     ]
     values = layer_utils.flatten_leading_dims(
         self._batched_tensor(rt.flat_values), 2)
     out = ragged_layers.ragged_from_nested_row_lengths(
         values, [size] + nested_row_lengths)
     self._batched_inputs_dict[rt] = out
     return out
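The method rebuilds one batched ragged tensor from components that were batched independently: the per-example row count (`nrows`), each level of row lengths, and the flat values. With made-up numbers, the final reassembly step looks like:

import tensorflow as tf

size = tf.constant([2, 1])            # rows per example
row_lengths = tf.constant([3, 1, 2])  # inner row lengths, flattened
flat_values = tf.range(6)             # all values, concatenated

batched = tf.RaggedTensor.from_nested_row_lengths(
    flat_values, [size, row_lengths])
# <tf.RaggedTensor [[[0, 1, 2], [3]], [[4, 5]]]>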
Example #5
    def __call__(self, global_features, local_features, edge_features):
        """
        Args:
            global_features: [B, fg] float global features. Can be `None`.
            local_features: [N, fl] float flat local features, e.g. normals
            edge_features: [N, fe] float flat edge features or None.
                Since all nodes are connected here to a single 'global' node,
                this will have the same number of rows as `local_features` -
                and it is concatenated onto `local_features` immediately. It
                could be thought of differently however, e.g. as coordinates
                (possibly relative to the mean).

        Returns:
            flat local features, output of `network_fn`.
        """
        if edge_features is not None:
            if local_features is not None:
                local_features = tf.concat([local_features, edge_features],
                                           axis=-1)
            else:
                local_features = edge_features
        elif local_features is None:
            raise ValueError(
                'At least one of `local_features` or `edge_features` must be '
                'non-None')
        if global_features is None:
            local_features = self.dense_factory(
                self.initial_units)(local_features)
        else:
            local_features, global_features = block_dense(
                self.dense_factory, self.initial_units, local_features,
                global_features)

            if self.is_ragged:
                global_features = layer_utils.repeat(global_features,
                                                     self.row_lengths,
                                                     axis=0)

                local_features = tf.add_n([local_features, global_features])
            else:
                local_features = layer_utils.reshape_leading_dim(
                    local_features, (-1, self.k))
                global_features = tf.expand_dims(global_features, axis=-2)
                # `tf.math.add_n` requires identical shapes, so add the
                # [B, 1, f] global features with a broadcasting `+`
                local_features = local_features + global_features
                local_features = layer_utils.flatten_leading_dims(
                    local_features)
        local_features = self.network_fn(local_features)

        return local_features
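In the non-ragged branch the per-cloud structure is implicit in a fixed neighborhood size `k`, so broadcasting the global features is just a reshape round-trip; a minimal standalone version with illustrative shapes:

import tensorflow as tf

B, k, f = 2, 3, 4
local_features = tf.random.normal((B * k, f))  # flat local features
global_features = tf.random.normal((B, f))     # one vector per cloud

local = tf.reshape(local_features, (B, k, f))
local = local + tf.expand_dims(global_features, axis=-2)  # broadcast over k
local_features = tf.reshape(local, (B * k, f))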
Example #6
def post_batch_map(
    features,
    labels,
    weights=None,
    #    include_outer_row_splits=False
):
    all_coords, rel_coords, feature_weights, node_indices, sample_indices = (
        features[k] for k in ('all_coords', 'rel_coords', 'feature_weights',
                              'node_indices', 'sample_indices'))

    row_splits = tf.nest.map_structure(lambda rt: rt.nested_row_splits[1],
                                       node_indices)

    all_coords, sample_indices = tf.nest.map_structure(
        ragged_batching.post_batch_ragged, (all_coords, sample_indices))
    offsets = [op_utils.get_row_offsets(c) for c in all_coords]
    flat_rel_coords = tf.nest.map_structure(lambda x: x.flat_values,
                                            rel_coords)
    feature_weights = tf.nest.map_structure(lambda x: x.flat_values,
                                            feature_weights)

    depth = (len(node_indices) + 1) // 2
    flat_node_indices = [
        layer_utils.apply_row_offset(node_indices[0], offsets[0]).flat_values
    ]
    flat_sample_indices = []
    for i in range(depth - 1):
        flat_node_indices.extend(
            (layer_utils.apply_row_offset(ni, offsets[i + 1]).flat_values
             for ni in node_indices[2 * i + 1:2 * i + 3]))

        flat_sample_indices.append(
            layer_utils.apply_row_offset(sample_indices[i],
                                         offsets[i]).flat_values)

    flat_node_indices = tuple(flat_node_indices)
    flat_sample_indices = tuple(flat_sample_indices)

    class_index = features.get('class_index')

    normals = features.get('normals')

    features = dict(
        all_coords=all_coords,
        flat_rel_coords=flat_rel_coords,
        flat_node_indices=flat_node_indices,
        feature_weights=feature_weights,
        row_splits=row_splits,
        sample_indices=flat_sample_indices,
    )
    # if include_outer_row_splits:
    #     features['outer_row_splits'] = tuple(
    #         op_utils.get_row_splits(c) for c in all_coords)
    if normals is not None:
        normals = ragged_batching.post_batch_ragged(normals)
        normals = layer_utils.flatten_leading_dims(normals)
        features['normals'] = normals

    if class_index is not None:
        features['class_index'] = class_index
    labels, weights = get_current_problem().post_batch_map(labels, weights)

    if isinstance(labels, tf.Tensor) and labels.shape.ndims == 2:
        assert (isinstance(weights, tf.Tensor) and weights.shape.ndims == 2)
        labels = tf.reshape(labels, (-1, ))
        weights = tf.reshape(weights, (-1, ))

    return ((features, labels) if weights is None else
            (features, labels, weights))
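The heavy lifting above is `apply_row_offset`: per-example indices refer to each example's own points, so once examples are concatenated after batching, each example's indices must be shifted by that example's start row before the ragged structure is dropped. A self-contained illustration of that (assumed) behaviour:

import tensorflow as tf

coords = tf.ragged.constant([[[0., 0.], [1., 0.], [0., 1.]],
                             [[2., 2.], [3., 3.]]], ragged_rank=1)
indices = tf.ragged.constant([[0, 2], [1]])  # per-example indices

offsets = tf.cast(coords.row_starts(), indices.dtype)  # [0, 3]
flat_indices = (indices + tf.expand_dims(offsets, axis=-1)).flat_values
# [0, 2, 4]: valid indices into coords.flat_values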
Example #7
def cls_head(coords,
             normals=None,
             r0=0.1,
             initial_filters=(16, ),
             initial_activation=cls_head_activation,
             filters=(32, 64, 128, 256),
             global_units='combined',
             query_fn=core.query_pairs,
             radii_fn=core.constant_radii,
             coords_transform=None,
             weights_transform=None,
             convolver=None):
    if convolver is None:
        convolver = c.ExpandingConvolver(activation=cls_head_activation)
    if coords_transform is None:
        coords_transform = t.polynomial_transformer(max_order=1)
    if weights_transform is None:
        weights_transform = t.ctg_transformer()
        # weights_transform = lambda *args, **kwargs: None

    n_res = len(filters)
    unscaled_radii2 = radii_fn(n_res)

    if isinstance(unscaled_radii2, tf.Tensor):
        assert (unscaled_radii2.shape == (n_res, ))
        radii2 = utils.lambda_call(tf.math.scalar_mul, r0**2, unscaled_radii2)
        radii2 = tf.keras.layers.Lambda(tf.unstack,
                                        arguments=dict(axis=0))(radii2)
        for i, radius2 in enumerate(radii2):
            tb.add_custom_scalar('radius{}'.format(i), tf.sqrt(radius2))
            # tf.compat.v1.summary.scalar('r%d' % i,
            #                             tf.sqrt(radius2),
            #                             family='radii')
    else:
        radii2 = unscaled_radii2 * (r0**2)

    def maybe_feed(r2, r20):
        if isinstance(r2, (tf.Tensor, tf.Variable)):
            r = tf.keras.layers.Lambda(tf.sqrt)(r2)
            return cache.get_cached(r, r20)
        else:
            return np.sqrt(r2)

    features = b.as_batched_model_input(normals)
    for f in initial_filters:
        layer = Dense(f)
        features = tf.ragged.map_flat_values(layer, features)
        features = tf.ragged.map_flat_values(initial_activation, features)

    features = utils.flatten_leading_dims(features, 2)
    global_features = []

    default_r0 = r0
    for i, radius2 in enumerate(radii2):
        neighbors, sample_rate = query_fn(coords,
                                          maybe_feed(radius2, default_r0**2),
                                          name='query%d' % i)
        default_r0 *= 2
        if not isinstance(radius2, tf.Tensor):
            radius2 = source.constant(radius2, dtype=tf.float32)
        neighborhood = n.InPlaceNeighborhood(coords, neighbors)
        features, nested_row_splits = core.convolve(features, radius2,
                                                    filters[i], neighborhood,
                                                    coords_transform,
                                                    weights_transform,
                                                    convolver.in_place_conv)
        if global_units == 'combined':
            coord_features = coords_transform(neighborhood.out_coords, None)
            global_features.append(
                convolver.global_conv(features, coord_features,
                                      nested_row_splits[-2], filters[i]))

        if i < n_res - 1:
            sample_indices = sample.sample(
                sample_rate,
                tf.keras.layers.Lambda(lambda s: tf.size(s) // 4)(sample_rate))
            neighborhood = n.SampledNeighborhood(neighborhood, sample_indices)
            features, nested_row_splits = core.convolve(
                features, radius2, filters[i + 1], neighborhood,
                coords_transform, weights_transform, convolver.resample_conv)

            coords = neighborhood.out_coords

    # global_conv
    if global_units == 'combined':
        features = tf.keras.layers.Lambda(
            tf.concat, arguments=dict(axis=-1))(global_features)
    else:
        coord_features = coords_transform(coords, None)
        features = convolver.global_conv(features, coord_features,
                                         nested_row_splits[-2], global_units)

    return features
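For orientation, the resolution schedule the loop implements: each level doubles the query radius (`default_r0 *= 2`) and samples roughly a quarter of the points (`tf.size(s) // 4`). With the default `r0 = 0.1` and a hypothetical 1024-point cloud:

r0 = 0.1
n_points = 1024
for i in range(4):
    print('level %d: radius ~%.1f, ~%d points' %
          (i, r0 * 2**i, n_points // 4**i))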