Example #1
    def validate_model_independence(self, labels, log_probs, task_parameters):
        """Partition gradients into those assumed active and inactive."""
        num_task_parameters = len(task_parameters)
        # pylint: disable=g-complex-comprehension
        on_gradients = [[
            tf.norm(tensor=on_gradient) for on_gradient in on_gradients
        ] for on_gradients in [
            tf.gradients(ys=tf.gather(log_probs,
                                      tf.compat.v1.where(tf.equal(labels, i))),
                         xs=task_parameters[i * num_task_parameters:(i + 1) *
                                            num_task_parameters])
            for i in range(1)
        ]]
        # The "off" gradients come from examples *not* labeled with class i.
        off_gradients = [[
            tf.norm(tensor=off_gradient) for off_gradient in off_gradients
        ] for off_gradients in [
            tf.gradients(ys=tf.gather(log_probs,
                                      tf.compat.v1.where(tf.not_equal(labels, i))),
                         xs=task_parameters[i * num_task_parameters:(i + 1) *
                                            num_task_parameters])
            for i in range(1)
        ]]
        # pylint: enable=g-complex-comprehension

        return (list(itertools.chain.from_iterable(on_gradients)),
                list(itertools.chain.from_iterable(off_gradients)))
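The snippet above is graph-mode code (tf.gradients, tf.compat.v1.where). A minimal eager-mode sketch of the same idea, assuming a hypothetical toy model with one weight vector per class, uses tf.GradientTape instead; with a softmax coupling the classes, the "off" norm is nonzero, which is what the validation is meant to detect:

import tensorflow as tf

# Hypothetical toy setup: 4 examples, 2 classes, one weight vector per class.
labels = tf.constant([0, 1, 0, 1])
features = tf.random.normal([4, 3])
weights = [tf.Variable(tf.random.normal([3])) for _ in range(2)]

with tf.GradientTape() as tape:
    logits = tf.stack(
        [tf.linalg.matvec(features, w) for w in weights], axis=1)
    log_probs = tf.nn.log_softmax(logits)
    # Summed log-probability of the examples carrying class label 0.
    class0_sum = tf.reduce_sum(
        tf.gather(log_probs[:, 0], tf.where(tf.equal(labels, 0))))

grads = tape.gradient(class0_sum, weights)
on_norm = tf.norm(grads[0])   # gradient norm on the class-0 parameters
off_norm = tf.norm(grads[1])  # would be ~0 if the classes were independent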
  def projection_dist(states):
    inner = tf.multiply(states - starting_states, goals - starting_states)
    upper = tf.reduce_sum(inner, -1)

    # Scalar projection of (states - starting_states) onto the goal direction.
    result = tf.math.divide(upper, tf.norm(goals - starting_states, ord=2))

    term_1 = tf.norm(states - starting_states, 2)

    return result - term_1
  def normalized_dist(states):
    inner = tf.multiply(states - starting_states, goals - starting_states)
    upper = tf.reduce_sum(inner, -1)
    sign = tf.sign(upper)

    # Signed squared projection onto the goal direction.
    result = sign * tf.square(
        tf.math.divide(upper, tf.norm(goals - starting_states, ord=2)))

    term_1 = tf.square(tf.norm(states - starting_states, 2))
    term_2 = tf.square(
        tf.math.divide(upper, tf.norm(goals - starting_states, ord=2)))

    return tf.sqrt(epsilon + tf.abs(result - alpha * (term_1 - term_2)))
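Both helpers project the displacement states - starting_states onto the goal direction (note that they call tf.norm without an axis, so with batched goals the norm is taken over the whole tensor). A quick numeric check of the projection geometry, with made-up single-row vectors where the start is the origin and the goal lies on the x-axis:

import tensorflow as tf

starting_states = tf.constant([[0.0, 0.0]])
goals = tf.constant([[4.0, 0.0]])
states = tf.constant([[3.0, 1.0]])

delta = states - starting_states
goal_dir = goals - starting_states
# Scalar projection along the goal direction: (delta . goal_dir) / ||goal_dir||.
proj = tf.reduce_sum(delta * goal_dir, -1) / tf.norm(goal_dir, ord=2)  # [3.0]
# Residual distance off the goal line, via the Pythagorean identity.
cross = tf.sqrt(tf.reduce_sum(delta * delta, -1) - tf.square(proj))    # [1.0]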
def _prepare_lidar_points(inputs, lidar_names):
    """Integrates and returns the lidar points in vehicle coordinate frame."""
    points_position = []
    points_intensity = []
    points_elongation = []
    points_normal = []
    points_in_image_frame_xy = []
    points_in_image_frame_id = []
    for lidar_name in lidar_names:
        lidar_location = tf.reshape(
            inputs['lidars/%s/extrinsics/t' % lidar_name], [-1, 3])
        inside_no_label_zone = tf.reshape(
            inputs['lidars/%s/pointcloud/inside_nlz' % lidar_name], [-1])
        valid_points_mask = tf.math.logical_not(inside_no_label_zone)
        points_position_current_lidar = tf.boolean_mask(
            inputs['lidars/%s/pointcloud/positions' % lidar_name],
            valid_points_mask)
        points_position.append(points_position_current_lidar)
        points_intensity.append(
            tf.boolean_mask(
                inputs['lidars/%s/pointcloud/intensity' % lidar_name],
                valid_points_mask))
        points_elongation.append(
            tf.boolean_mask(
                inputs['lidars/%s/pointcloud/elongation' % lidar_name],
                valid_points_mask))
        points_to_lidar_vectors = lidar_location - points_position_current_lidar
        points_normal_direction = points_to_lidar_vectors / tf.expand_dims(
            tf.norm(points_to_lidar_vectors, axis=1), axis=1)
        points_normal.append(points_normal_direction)
        points_in_image_frame_xy.append(
            tf.boolean_mask(
                inputs['lidars/%s/camera_projections/positions' % lidar_name],
                valid_points_mask))
        points_in_image_frame_id.append(
            tf.boolean_mask(
                inputs['lidars/%s/camera_projections/ids' % lidar_name],
                valid_points_mask))
    points_position = tf.concat(points_position, axis=0)
    points_intensity = tf.concat(points_intensity, axis=0)
    points_elongation = tf.concat(points_elongation, axis=0)
    points_normal = tf.concat(points_normal, axis=0)
    points_in_image_frame_xy = tf.concat(points_in_image_frame_xy, axis=0)
    points_in_image_frame_id = tf.cast(tf.concat(points_in_image_frame_id,
                                                 axis=0),
                                       dtype=tf.int32)
    points_in_image_frame_yx = tf.cast(tf.reverse(points_in_image_frame_xy,
                                                  axis=[-1]),
                                       dtype=tf.int32)

    return (points_position, points_intensity, points_elongation,
            points_normal, points_in_image_frame_yx, points_in_image_frame_id)
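The normal-direction step above divides each point-to-lidar vector by its length to get unit vectors. The same pattern in isolation, with made-up vectors (tf.math.divide_no_nan is an optional hardening against zero-length vectors, not part of the original):

import tensorflow as tf

vectors = tf.constant([[3.0, 0.0, 4.0], [0.0, 2.0, 0.0]])
lengths = tf.expand_dims(tf.norm(vectors, axis=1), axis=1)  # [[5.0], [2.0]]
unit_vectors = tf.math.divide_no_nan(vectors, lengths)      # unit-length rows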
Example #5
  def proto_maml_fc_bias(self, prototypes, zero_pad_to_max_way=False):
    """Computes the Prototypical MAML fc layer's bias.

    Args:
      prototypes: Tensor of shape [num_classes, embedding_size]
      zero_pad_to_max_way: Whether to zero-pad to the maximum number of ways.

    Returns:
      fc_bias: Tensor of shape [num_classes] or [self.logit_dim]
        when zero_pad_to_max_way is True.
    """
    fc_bias = -tf.square(tf.norm(prototypes, axis=1))
    if zero_pad_to_max_way:
      paddings = [[0, self.logit_dim - tf.shape(fc_bias)[0]]]
      fc_bias = tf.pad(fc_bias, paddings, 'CONSTANT', constant_values=0)
    return fc_bias
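The bias follows from expanding the negative squared Euclidean distance to a prototype: -||x - p||^2 = 2 x.p - ||p||^2 - ||x||^2, where ||x||^2 is shared by all classes. A sketch verifying this, with a hypothetical 2 * prototypes weight matrix standing in for the companion fc-weights computation:

import tensorflow as tf

prototypes = tf.random.normal([5, 8])  # [num_classes, embedding_size]
queries = tf.random.normal([3, 8])     # [num_query, embedding_size]

fc_weights = 2.0 * prototypes                      # hypothetical companion weights
fc_bias = -tf.square(tf.norm(prototypes, axis=1))  # as computed above
linear_logits = tf.matmul(queries, fc_weights, transpose_b=True) + fc_bias

neg_sq_dists = -tf.reduce_sum(
    tf.square(tf.expand_dims(queries, 1) - tf.expand_dims(prototypes, 0)),
    axis=2)
# Each row of the difference is the per-example constant ||x||^2, so the
# linear layer reproduces the prototype logits up to a class-independent shift.
per_row_constant = linear_logits - neg_sq_dists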
    def compute_logits(self, support_embeddings, query_embeddings,
                       onehot_support_labels):
        """Computes the class logits for the episode.

        Args:
          support_embeddings: A Tensor of size [num_support_images, embedding dim].
          query_embeddings: A Tensor of size [num_query_images, embedding dim].
          onehot_support_labels: A Tensor of size [batch size, way].

        Returns:
          The query set logits as a [num_query_images, way] matrix.

        Raises:
          ValueError: Distance must be one of l2 or cosine.
        """

        if self.knn_in_fc:
            # Recompute the support and query embeddings that were originally computed
            # in self.forward_pass() to be the fc layer activations.
            support_embeddings = self.forward_pass_fc(support_embeddings)
            query_embeddings = self.forward_pass_fc(query_embeddings)

        # ------------------------ K-NN look up -------------------------------
        # For each testing example in an episode, we use its embedding
        # vector to look for the closest neighbor in all the training examples'
        # embeddings from the same episode and then assign the training example's
        # class label to the testing example as the predicted class label for it.
        if self.distance == 'l2':
            #  [1, num_support, embed_dims]
            support_embeddings = tf.expand_dims(support_embeddings, axis=0)
            #  [num_query, 1, embed_dims]
            query_embeddings = tf.expand_dims(query_embeddings, axis=1)
            #  [num_query, num_support]
            distance = tf.norm(query_embeddings - support_embeddings, axis=2)
        elif self.distance == 'cosine':
            support_embeddings = tf.nn.l2_normalize(support_embeddings, axis=1)
            query_embeddings = tf.nn.l2_normalize(query_embeddings, axis=1)
            distance = -1 * tf.matmul(
                query_embeddings, support_embeddings, transpose_b=True)
        else:
            raise ValueError('Distance must be one of l2 or cosine.')
        #  [num_query]
        _, indices = tf.nn.top_k(-distance, k=1)
        indices = tf.squeeze(indices, axis=1)
        #  [num_query, num_classes]
        query_logits = tf.gather(onehot_support_labels, indices)
        return query_logits
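A toy episode, with made-up support and query points, runs the l2 branch end to end:

import tensorflow as tf

support = tf.constant([[0.0, 0.0], [10.0, 10.0]])
onehot_support_labels = tf.constant([[1, 0], [0, 1]])
query = tf.constant([[1.0, 0.0], [9.0, 9.0]])

# Pairwise L2 distances: [num_query, num_support].
distance = tf.norm(
    tf.expand_dims(query, 1) - tf.expand_dims(support, 0), axis=2)
_, indices = tf.nn.top_k(-distance, k=1)
query_logits = tf.gather(onehot_support_labels,
                         tf.squeeze(indices, axis=1))  # [[1, 0], [0, 1]]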
Example #7
def points_to_normals_unbatched(points,
                                k,
                                distance_upper_bound,
                                viewpoint=None,
                                noise_magnitude=1e-4,
                                method='pca'):
    """Computes normals for the points in a point cloud.

    Args:
      points: A tf.float32 tensor of size [N, 3].
      k: An integer determining the size of the neighborhood.
      distance_upper_bound: Maximum distance of the neighbor points. If None,
        no cap is placed on the distance.
      viewpoint: A tf.float32 tensor of size [3]. Normals will be flipped to
        point towards the viewpoint. If None, it won't be used.
      noise_magnitude: Noise magnitude to be added to the input of svd. If
        None, no noise is added.
      method: The normal prediction method; options are `pca` and `cross`
        (cross product).

    Returns:
      normals: A tf.float32 tensor of size [N, 3].
    """
    if method == 'pca':
        if k <= 3:
            raise ValueError(
                'At least 3 neighbors are required for computing PCA.')
    elif method == 'cross':
        if k <= 2:
            raise ValueError(
                'At least 2 neighbors are required for computing cross.')
    else:
        raise ValueError('Unknown method of normal prediction %s' % method)
    n = tf.shape(points)[0]
    d = points.get_shape().as_list()[1]
    if d != 3:
        raise ValueError('Points dimension is not 3.')
    _, knn_adjacencies = knn_graph_from_points_unbatched(
        points=points, k=k, distance_upper_bound=distance_upper_bound)
    knn_adjacencies = knn_adjacencies[:, 1:]
    knn_adjacencies = tf.reshape(knn_adjacencies, [n * (k - 1)])
    adjacency_points = tf.gather(points, indices=knn_adjacencies)
    adjacency_points = tf.reshape(adjacency_points, [n, (k - 1), d])
    if method == 'pca':
        adjacency_relative_points = adjacency_points - tf.expand_dims(points,
                                                                      axis=1)
        if noise_magnitude is not None:
            adjacency_relative_points += tf.random.uniform(
                tf.shape(adjacency_relative_points),
                minval=-noise_magnitude,
                maxval=noise_magnitude,
                dtype=tf.float32)
        _, _, v = tf.linalg.svd(adjacency_relative_points)
        normals = v[:, 2, :]
    elif method == 'cross':
        v1 = adjacency_points[:, 0, :] - points
        v2 = adjacency_points[:, 1, :] - points
        normals = tf.linalg.cross(v1, v2)
        normals_length = tf.expand_dims(tf.norm(normals, axis=1), axis=1)
        if noise_magnitude is not None:
            normals_length += noise_magnitude
        normals /= normals_length
    else:
        raise ValueError('Unknown method of normal prediction %s' % method)
    if viewpoint is not None:
        normals = flip_normals_towards_viewpoint(points=points,
                                                 normals=normals,
                                                 viewpoint=viewpoint)
    return normals
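For a single point with two fixed neighbors (standing in for knn_graph_from_points_unbatched, which comes from the surrounding library), the `cross` branch reduces to one cross product plus a tf.norm normalization:

import tensorflow as tf

points = tf.constant([[0.0, 0.0, 0.0]])
# [N, k-1, 3]: two neighbors spanning the x-y plane.
adjacency_points = tf.constant([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]])

v1 = adjacency_points[:, 0, :] - points
v2 = adjacency_points[:, 1, :] - points
normals = tf.linalg.cross(v1, v2)                            # [[0., 0., 1.]]
normals /= tf.expand_dims(tf.norm(normals, axis=1), axis=1)  # unit length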
Example #8
  def _gen_norm(cls, y, y_hat):
    """Returns the norm of the residual between y and y_hat."""
    return tf.norm(y - y_hat)
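With no axis or ord argument, tf.norm reduces the whole residual tensor to a single Euclidean (Frobenius, for matrices) scalar; passing an axis gives per-row norms instead. A quick check with made-up values:

import tensorflow as tf

y = tf.constant([[1.0, 2.0], [3.0, 4.0]])
y_hat = tf.zeros_like(y)
total = tf.norm(y - y_hat)            # sqrt(1 + 4 + 9 + 16) = sqrt(30)
per_row = tf.norm(y - y_hat, axis=1)  # [sqrt(5), 5.0]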
Example #9
def prepare_waymo_open_dataset(inputs,
                               valid_object_classes=None,
                               max_object_distance_from_source=74.88):
  """Maps the fields from loaded input to standard fields.

  Args:
    inputs: A dictionary of input tensors.
    valid_object_classes: List of valid object classes. If None, it is ignored.
    max_object_distance_from_source: Maximum distance of objects from source. It
      will be ignored if None.

  Returns:
    A dictionary of input tensors with standard field names.
  """
  prepared_inputs = {}
  if standard_fields.InputDataFields.point_positions in inputs:
    prepared_inputs[standard_fields.InputDataFields.point_positions] = inputs[
        standard_fields.InputDataFields.point_positions]
  if standard_fields.InputDataFields.point_intensities in inputs:
    prepared_inputs[standard_fields.InputDataFields.point_intensities] = inputs[
        standard_fields.InputDataFields.point_intensities]
  if standard_fields.InputDataFields.point_elongations in inputs:
    prepared_inputs[standard_fields.InputDataFields.point_elongations] = inputs[
        standard_fields.InputDataFields.point_elongations]
  if standard_fields.InputDataFields.point_normals in inputs:
    prepared_inputs[standard_fields.InputDataFields.point_normals] = inputs[
        standard_fields.InputDataFields.point_normals]
  if 'cameras/front/intrinsics/K' in inputs:
    prepared_inputs[standard_fields.InputDataFields
                    .camera_intrinsics] = inputs['cameras/front/intrinsics/K']
  if 'cameras/front/extrinsics/R' in inputs:
    prepared_inputs[
        standard_fields.InputDataFields
        .camera_rotation_matrix] = inputs['cameras/front/extrinsics/R']
  if 'cameras/front/extrinsics/t' in inputs:
    prepared_inputs[standard_fields.InputDataFields
                    .camera_translation] = inputs['cameras/front/extrinsics/t']
  if 'cameras/front/image' in inputs:
    prepared_inputs[standard_fields.InputDataFields
                    .camera_image] = inputs['cameras/front/image']
    prepared_inputs[standard_fields.InputDataFields
                    .camera_raw_image] = inputs['cameras/front/image']
    prepared_inputs[standard_fields.InputDataFields
                    .camera_original_image] = inputs['cameras/front/image']
  if 'scene_name' in inputs and 'frame_name' in inputs:
    prepared_inputs[
        standard_fields.InputDataFields.camera_image_name] = tf.strings.join(
            [inputs['scene_name'], inputs['frame_name']], separator='_')
  if 'objects/pose/R' in inputs:
    prepared_inputs[standard_fields.InputDataFields
                    .objects_rotation_matrix] = inputs['objects/pose/R']
  if 'objects/pose/t' in inputs:
    prepared_inputs[standard_fields.InputDataFields
                    .objects_center] = inputs['objects/pose/t']
  if 'objects/shape/dimension' in inputs:
    prepared_inputs[
        standard_fields.InputDataFields.objects_length] = tf.reshape(
            inputs['objects/shape/dimension'][:, 0], [-1, 1])
    prepared_inputs[standard_fields.InputDataFields.objects_width] = tf.reshape(
        inputs['objects/shape/dimension'][:, 1], [-1, 1])
    prepared_inputs[
        standard_fields.InputDataFields.objects_height] = tf.reshape(
            inputs['objects/shape/dimension'][:, 2], [-1, 1])
  if 'objects/category/label' in inputs:
    prepared_inputs[standard_fields.InputDataFields.objects_class] = tf.reshape(
        inputs['objects/category/label'], [-1, 1])
  if valid_object_classes is not None:
    valid_objects_mask = tf.cast(
        tf.zeros_like(
            prepared_inputs[standard_fields.InputDataFields.objects_class],
            dtype=tf.int32),
        dtype=tf.bool)
    for object_class in valid_object_classes:
      valid_objects_mask = tf.logical_or(
          valid_objects_mask,
          tf.equal(
              prepared_inputs[standard_fields.InputDataFields.objects_class],
              object_class))
    valid_objects_mask = tf.reshape(valid_objects_mask, [-1])
    for key in standard_fields.get_input_object_fields():
      if key in prepared_inputs:
        prepared_inputs[key] = tf.boolean_mask(prepared_inputs[key],
                                               valid_objects_mask)

  if max_object_distance_from_source is not None:
    if standard_fields.InputDataFields.objects_center in prepared_inputs:
      object_distances = tf.norm(
          prepared_inputs[
              standard_fields.InputDataFields.objects_center][:, 0:2],
          axis=1)
      valid_mask = tf.less(object_distances, max_object_distance_from_source)
      for key in standard_fields.get_input_object_fields():
        if key in prepared_inputs:
          prepared_inputs[key] = tf.boolean_mask(prepared_inputs[key],
                                                 valid_mask)

  return prepared_inputs
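The final block keeps only objects whose x-y range from the source is below the threshold. The same filter in isolation, with made-up object centers:

import tensorflow as tf

objects_center = tf.constant([[10.0, 5.0, 1.0], [80.0, 30.0, 1.0]])
# Range in the x-y plane only, matching the [:, 0:2] slice above.
object_distances = tf.norm(objects_center[:, 0:2], axis=1)
valid_mask = tf.less(object_distances, 74.88)
near_objects = tf.boolean_mask(objects_center, valid_mask)  # first row only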