Example #1
  def update_state(self, inputs, outputs):
    """Function that updates the metric state at each example.

    Args:
      inputs: A dictionary containing input tensors.
      outputs: A dictionary containing output tensors.

    Returns:
      Update op.
    """
    detections_score = tf.reshape(
        outputs[standard_fields.DetectionResultFields.objects_score], [-1])
    detections_class = tf.reshape(
        outputs[standard_fields.DetectionResultFields.objects_class], [-1])
    num_detections = tf.shape(detections_score)[0]
    detections_instance_mask = tf.reshape(
        outputs[
            standard_fields.DetectionResultFields.instance_segments_voxel_mask],
        [num_detections, -1])
    gt_class = tf.reshape(inputs[standard_fields.InputDataFields.objects_class],
                          [-1])
    num_gt = tf.shape(gt_class)[0]
    gt_voxel_instance_ids = tf.reshape(
        inputs[standard_fields.InputDataFields.object_instance_id_voxels], [-1])
    gt_instance_masks = tf.transpose(
        tf.one_hot(gt_voxel_instance_ids - 1, depth=num_gt, dtype=tf.float32))
    for c in self.class_range:
      gt_mask_c = tf.equal(gt_class, c)
      num_gt_c = tf.math.reduce_sum(tf.cast(gt_mask_c, dtype=tf.int32))
      gt_instance_masks_c = tf.boolean_mask(gt_instance_masks, gt_mask_c)
      detections_mask_c = tf.equal(detections_class, c)
      num_detections_c = tf.math.reduce_sum(
          tf.cast(detections_mask_c, dtype=tf.int32))
      if num_detections_c == 0:
        continue
      det_scores_c = tf.boolean_mask(detections_score, detections_mask_c)
      det_instance_mask_c = tf.boolean_mask(detections_instance_mask,
                                            detections_mask_c)
      det_scores_c, sorted_indices = tf.math.top_k(
          det_scores_c, k=num_detections_c)
      det_instance_mask_c = tf.gather(det_instance_mask_c, sorted_indices)
      tp_c = tf.zeros([num_detections_c], dtype=tf.int32)
      if num_gt_c > 0:
        ious_c = instance_segmentation_utils.points_mask_iou(
            masks1=gt_instance_masks_c, masks2=det_instance_mask_c)
        max_overlap_gt_ids = tf.cast(
            tf.math.argmax(ious_c, axis=0), dtype=tf.int32)
        is_gt_box_detected = tf.zeros([num_gt_c], dtype=tf.int32)
        for i in tf.range(num_detections_c):
          gt_id = max_overlap_gt_ids[i]
          if (ious_c[gt_id, i] > self.iou_threshold and
              is_gt_box_detected[gt_id] == 0):
            tp_c = tf.maximum(
                tf.one_hot(i, num_detections_c, dtype=tf.int32), tp_c)
            is_gt_box_detected = tf.maximum(
                tf.one_hot(gt_id, num_gt_c, dtype=tf.int32), is_gt_box_detected)
      self.tp[c] = tf.concat([self.tp[c], tp_c], axis=0)
      self.scores[c] = tf.concat([self.scores[c], det_scores_c], axis=0)
      self.num_gt[c] += num_gt_c
    return tf.no_op()
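The greedy matching loop above can be traced in isolation. Below is a minimal sketch of that logic with a hand-made IoU matrix (rows are ground-truth boxes, columns are score-sorted detections); it assumes TF2 eager execution and an arbitrary iou_threshold, and is not part of the original class:

import tensorflow as tf

ious_c = tf.constant([[0.80, 0.55, 0.05],
                      [0.10, 0.20, 0.70]])
iou_threshold = 0.5
num_gt_c, num_detections_c = 2, 3

tp_c = tf.zeros([num_detections_c], dtype=tf.int32)
is_gt_box_detected = tf.zeros([num_gt_c], dtype=tf.int32)
max_overlap_gt_ids = tf.cast(tf.math.argmax(ious_c, axis=0), dtype=tf.int32)
for i in tf.range(num_detections_c):
  gt_id = max_overlap_gt_ids[i]
  # A detection is a true positive if it overlaps an unmatched ground-truth box
  # above the threshold; each ground-truth box can be matched at most once.
  if ious_c[gt_id, i] > iou_threshold and is_gt_box_detected[gt_id] == 0:
    tp_c = tf.maximum(tf.one_hot(i, num_detections_c, dtype=tf.int32), tp_c)
    is_gt_box_detected = tf.maximum(
        tf.one_hot(gt_id, num_gt_c, dtype=tf.int32), is_gt_box_detected)

print(tp_c.numpy())  # [1 0 1]: the second detection hits an already-matched box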
Example #2
File: ppo.py, Project: XinyiYS/reaver
    def loss_fn(self, policy=None, value=None):
        adv = tf.placeholder(tf.float32, [None], name="advantages")
        returns = tf.placeholder(tf.float32, [None], name="returns")
        logli_old = tf.placeholder(tf.float32, [None], name="logli_old")
        value_old = tf.placeholder(tf.float32, [None], name="value_old")

        if not self.subenvs:
            ratio = tf.exp(self.policy.logli - logli_old)
            clipped_ratio = tf.clip_by_value(ratio, 1 - self.clip_ratio,
                                             1 + self.clip_ratio)

            value_err = (self.value - returns)**2
            if self.clip_value > 0.0:
                clipped_value = tf.clip_by_value(self.value,
                                                 value_old - self.clip_value,
                                                 value_old + self.clip_value)
                clipped_value_err = (clipped_value - returns)**2
                value_err = tf.maximum(value_err, clipped_value_err)

            policy_loss = -tf.reduce_mean(
                tf.minimum(adv * ratio, adv * clipped_ratio))
            value_loss = tf.reduce_mean(value_err) * self.value_coef
            entropy_loss = tf.reduce_mean(
                self.policy.entropy) * self.entropy_coef

        else:
            assert policy is not None and value is not None, "Missing variables representing <policy> and <value>"

            ratio = tf.exp(policy.logli - logli_old)
            clipped_ratio = tf.clip_by_value(ratio, 1 - self.clip_ratio,
                                             1 + self.clip_ratio)

            value_err = (value - returns)**2
            if self.clip_value > 0.0:
                clipped_value = tf.clip_by_value(value,
                                                 value_old - self.clip_value,
                                                 value_old + self.clip_value)
                clipped_value_err = (clipped_value - returns)**2
                value_err = tf.maximum(value_err, clipped_value_err)

            policy_loss = -tf.reduce_mean(
                tf.minimum(adv * ratio, adv * clipped_ratio))
            value_loss = tf.reduce_mean(value_err) * self.value_coef
            entropy_loss = tf.reduce_mean(policy.entropy) * self.entropy_coef

        # We want to reduce the policy and value errors and maximize entropy,
        # but since the optimizer minimizes, the entropy term gets the opposite sign.
        full_loss = policy_loss + value_loss - entropy_loss

        return full_loss, [policy_loss, value_loss,
                           entropy_loss], [adv, returns, logli_old, value_old]
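For intuition, the clipped surrogate objective used above can be evaluated on toy numbers. This is a minimal sketch assuming TF2 eager execution and made-up advantages and log-likelihoods, not the way the class feeds its placeholders:

import tensorflow as tf

clip_ratio = 0.2
adv = tf.constant([1.0, -1.0, 2.0])           # made-up advantages
logli_old = tf.constant([-1.0, -0.5, -2.0])   # log-likelihoods under the old policy
logli_new = tf.constant([-0.5, -0.1, -2.2])   # log-likelihoods under the new policy

ratio = tf.exp(logli_new - logli_old)         # pi_new / pi_old
clipped_ratio = tf.clip_by_value(ratio, 1 - clip_ratio, 1 + clip_ratio)
# Taking the element-wise minimum removes any incentive to push the ratio
# outside the clipping range.
policy_loss = -tf.reduce_mean(tf.minimum(adv * ratio, adv * clipped_ratio))
print(policy_loss.numpy())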
Example #3
def crop_and_pad_voxels(voxels, start_coordinates, end_coordinates):
    """Crops a voxel region and pads past the boundaries with zeros.

  This accepts start and end coordinates past the limits of the voxel grid,
  and uses it to calculate how much top/left/right/bottom padding to add.

  Args:
    voxels: A tf.float32 tensor of shape [x, y, z, f] to crop
    start_coordinates: A list of len 4 with the [x, y, z, f] starting location
      of our crop. This can be negative, which indicates left/top padding.
    end_coordinates: A list of len 4 with the [x, y, z, f] ending location of
      our crop. This can be beyond the size of the voxel tensor, which indicates
      padding.

  Returns:
    cropped_and_padded_voxels: A voxel grid with shape
      [end_coordinates[0] - start_coordinates[0],
       end_coordinates[1] - start_coordinates[1],
       end_coordinates[2] - start_coordinates[2],
       end_coordinates[3] - start_coordinates[3]]
  Raises:
    ValueError: If requested crop and pad is outside the bounds of what the
      function supports.
  """
    if len(start_coordinates) != 4:
        raise ValueError('start_coordinates should be of length 4')
    if len(end_coordinates) != 4:
        raise ValueError('end_coordinates should be of length 4')
    if any([coord <= 0 for coord in end_coordinates]):
        raise ValueError('Requested end coordinates should be > 0')

    start_coordinates = tf.convert_to_tensor(start_coordinates, tf.int32)
    end_coordinates = tf.convert_to_tensor(end_coordinates, tf.int32)

    # Clip the coordinates to within the voxel grid
    clipped_start_coordinates = tf.maximum(0, start_coordinates)
    clipped_end_coordinates = tf.minimum(voxels.shape, end_coordinates)

    cropped_voxels = tf.slice(voxels,
                              begin=clipped_start_coordinates,
                              size=(clipped_end_coordinates -
                                    clipped_start_coordinates))

    top_and_left_padding = tf.maximum(0, -start_coordinates)
    bottom_and_right_padding = tf.maximum(0, end_coordinates - voxels.shape)

    padding = tf.stack([top_and_left_padding, bottom_and_right_padding],
                       axis=1)
    return tf.pad(cropped_voxels, padding)
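A minimal usage sketch, assuming eager execution and a statically shaped voxel grid; the negative start coordinate requests one slab of zero padding in front of the x axis:

import tensorflow as tf

voxels = tf.ones([4, 4, 4, 1], dtype=tf.float32)
out = crop_and_pad_voxels(voxels,
                          start_coordinates=[-1, 0, 0, 0],
                          end_coordinates=[3, 4, 4, 1])
print(out.shape)  # (4, 4, 4, 1)
# The first x slab is padding (all zeros), the rest comes from the crop.
print(float(tf.reduce_sum(out[0])), float(tf.reduce_sum(out[1])))  # 0.0 16.0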
Example #4
File: utils.py, Project: novaintrovert/mesh
def learning_rate_schedule_noam(train_steps,
                                warmup_steps=10000,
                                linear_decay_fraction=0.1,
                                multiplier=1.0):
    """Noam's favorite learning-rate schedule.

  (rsqrt(max(step_num, warmup_steps))
   * multiplier
   * min(1.0, (train_steps-step_num)/(train_steps*linear_decay_fraction)))

  Args:
    train_steps: a number
    warmup_steps: a number
    linear_decay_fraction: a number
    multiplier: a number
  Returns:
    a tf.scalar
  """
    train_steps = float(train_steps)
    step_num = tf.cast(tf.train.get_global_step(), tf.float32)
    learning_rate = tf.math.rsqrt(tf.maximum(step_num, warmup_steps))
    learning_rate *= multiplier
    if linear_decay_fraction > 0:
        learning_rate *= tf.minimum(1.0, (train_steps - step_num) /
                                    (train_steps * linear_decay_fraction))
    return learning_rate
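The schedule is easiest to read as plain arithmetic. A quick sanity check with hypothetical settings (train_steps=100000, warmup_steps=10000, linear_decay_fraction=0.1, multiplier=1.0), written as a pure-Python mirror of the formula for inspection only:

import math

def noam_lr(step, train_steps=100000.0, warmup_steps=10000.0,
            linear_decay_fraction=0.1, multiplier=1.0):
  lr = multiplier / math.sqrt(max(step, warmup_steps))
  lr *= min(1.0, (train_steps - step) / (train_steps * linear_decay_fraction))
  return lr

print(noam_lr(100))    # warmup region: 1 / sqrt(10000) = 0.01
print(noam_lr(40000))  # rsqrt decay:   1 / sqrt(40000) = 0.005
print(noam_lr(95000))  # linear tail:   1 / sqrt(95000) * 0.5 ~= 0.00162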
Example #5
def quantile_loss(y, y_hat, k=4):
    k = np.linspace(0., 1., k)
    loss = 0.
    y = tf.squeeze(y, axis=2)
    for idx, q in enumerate(k):
        error = tf.subtract(y, y_hat[:, :, idx])
        # Pinball loss: the second branch scales the error, it does not divide by it.
        loss += tf.reduce_mean(tf.maximum(q * error, (q - 1) * error), axis=-1)
    return tf.reduce_mean(loss)
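A toy invocation, assuming TF2 eager execution; y has shape [batch, time, 1] and y_hat carries one prediction per quantile:

import numpy as np
import tensorflow as tf

batch, time, num_quantiles = 2, 3, 4
y = tf.random.normal([batch, time, 1])
y_hat = tf.random.normal([batch, time, num_quantiles])
print(quantile_loss(y, y_hat, k=num_quantiles).numpy())  # scalar pinball loss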
Example #6
def compute_kitti_difficulty(boxes, occlusions, truncations, image_height):
    """Computes box difficulty as Hard(1), Moderate(2), Easy(3) or 0 (Super hard).

  Easy: height >=40 Px, occlusion <= 0, truncation <= 0.15
  Moderate: height >=25 Px, occlusion <= 1, truncation <= 0.30
  Hard: height >=25 Px, occlusion <= 2, truncation <= 0.50

  Note that 'Hard' box is also 'Moderate' and 'Easy'.

  Returns a (N, 1) tensor containing object difficulty with following labelmap:
    0: SuperHard
    1: Hard
    2: Moderate
    3: Easy

  TODO(abhijitkundu): Since difficulty level is very specific to kitti, this
  function should be in kitti evaluation rather than detection preprocessor.

  Args:
    boxes: (N, 4) tensor of 2d boxes with [ymin, xmin, ymax, xmax] each row.
    occlusions: (N, 1) tensor containing box occlusion level
    truncations: (N, 1) tensor containing box truncation level
    image_height: Image height.

  Returns:
  A (N, 1) int32 tensor containing per box difficulty labels with 0 (SuperHard),
  1 (Hard), 2 (Moderate) and 3 (Easy).
  """
    # box heights in pixels
    heights = tf.reshape(
        (boxes[:, 2] - boxes[:, 0]), [-1, 1]) * tf.cast(image_height,
                                                        dtype=tf.float32)

    # compute binary masks for each difficulty level
    is_easy = (heights >= 40.0) & (occlusions <= 0) & (truncations <= 0.15)
    is_moderate = (heights >= 25.0) & (occlusions <= 1) & (truncations <= 0.30)
    is_hard = (heights >= 25.0) & (occlusions <= 2) & (truncations <= 0.50)

    # set difficulty map
    difficulty = tf.maximum(
        tf.maximum(
            tf.cast(is_hard, dtype=tf.int32) * ObjectDifficulty.HARD,
            tf.cast(is_moderate, dtype=tf.int32) * ObjectDifficulty.MODERATE),
        tf.cast(is_easy, dtype=tf.int32) * ObjectDifficulty.EASY)
    return difficulty
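A small usage sketch, assuming eager execution. ObjectDifficulty is defined elsewhere in the project; the stand-in below only mirrors the labelmap from the docstring, and the boxes are given in normalized coordinates so that multiplying by image_height yields pixel heights:

import tensorflow as tf

class ObjectDifficulty:
  SUPER_HARD, HARD, MODERATE, EASY = 0, 1, 2, 3  # stand-in for the project enum

boxes = tf.constant([[0.1, 0.1, 0.30, 0.2],    # height 0.20 * 375 = 75 px
                     [0.1, 0.1, 0.15, 0.2]])   # height 0.05 * 375 ~= 19 px
occlusions = tf.constant([[0], [0]])
truncations = tf.constant([[0.0], [0.0]])
print(compute_kitti_difficulty(boxes, occlusions, truncations,
                               image_height=375).numpy())  # [[3], [0]]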
Example #7
def pointcloud_to_voxel_grid(points,
                             features,
                             grid_cell_size,
                             start_location,
                             end_location,
                             segment_func=tf.math.unsorted_segment_mean):
  """Converts a pointcloud into a voxel grid.

  Args:
    points: A tf.float32 tensor of size [N, 3].
    features: A tf.float32 tensor of size [N, F].
    grid_cell_size: A tf.float32 tensor of size [3].
    start_location: A tf.float32 tensor of size [3].
    end_location: A tf.float32 tensor of size [3].
    segment_func: A tensorflow function that operates on segments. Expect one
      of tf.math.unsorted_segment_{min/max/mean/prod/sum}. Defaults to
      tf.math.unsorted_segment_mean

  Returns:
    voxel_features: A tf.float32 tensor of
      size [grid_x_len, grid_y_len, grid_z_len, F].
    segment_ids: A tf.int32 tensor of IDs for each point indicating
      which (flattened) voxel cell its data was mapped to.
    point_indices: A tf.int32 tensor of size [num_points, 3] containing the
      location of each point in the 3d voxel grid.
  """
  grid_cell_size = tf.convert_to_tensor(grid_cell_size, dtype=tf.float32)
  start_location = tf.convert_to_tensor(start_location, dtype=tf.float32)
  end_location = tf.convert_to_tensor(end_location, dtype=tf.float32)
  point_indices = tf.cast(
      (points - tf.expand_dims(start_location, axis=0)) /
      tf.expand_dims(grid_cell_size, axis=0),
      dtype=tf.int32)
  grid_size = tf.cast(
      tf.math.ceil((end_location - start_location) / grid_cell_size),
      dtype=tf.int32)
  # Note: all points outside the grid are added to the edges
  # Cap index at grid_size - 1 (so a 10x10x10 grid's max cell is (9,9,9))
  point_indices = tf.minimum(point_indices, tf.expand_dims(grid_size - 1,
                                                           axis=0))
  # Don't allow any points below index (0, 0, 0)
  point_indices = tf.maximum(point_indices, 0)
  segment_ids = tf.reduce_sum(
      point_indices * tf.stack(
          [grid_size[1] * grid_size[2], grid_size[2], 1], axis=0),
      axis=1)
  voxel_features = segment_func(
      data=features,
      segment_ids=segment_ids,
      num_segments=(grid_size[0] * grid_size[1] * grid_size[2]))
  return (tf.reshape(voxel_features,
                     [grid_size[0],
                      grid_size[1],
                      grid_size[2],
                      features.get_shape().as_list()[1]]),
          segment_ids,
          point_indices)
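A small end-to-end call, assuming eager execution. Two points land in different cells of a 2x2x2 grid, and tf.math.unsorted_segment_mean averages the features per cell (empty cells stay zero):

import tensorflow as tf

points = tf.constant([[0.05, 0.05, 0.05],
                      [0.15, 0.05, 0.05]])
features = tf.constant([[1.0], [3.0]])
voxel_features, segment_ids, point_indices = pointcloud_to_voxel_grid(
    points=points,
    features=features,
    grid_cell_size=[0.1, 0.1, 0.1],
    start_location=[0.0, 0.0, 0.0],
    end_location=[0.2, 0.2, 0.2])
print(voxel_features.shape)   # (2, 2, 2, 1)
print(segment_ids.numpy())    # [0 4] -> flattened cells (0,0,0) and (1,0,0)
print(point_indices.numpy())  # [[0 0 0] [1 0 0]]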
Example #8
def linear_decay(initial_epsilon: float, final_epsilon: float,
                 step: tf.Variable, decay_time: int) -> float:
    """
    Linear decay from initial_epsilon to final_epsilon in the given decay_time as measured by step.
    It is assumed that initial_epsilon > final_epsilon.
    """
    return tf.cast(
        tf.maximum(
            initial_epsilon - (initial_epsilon - final_epsilon) *
            (step / decay_time), final_epsilon), tf.float32)
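A quick check of the decay, assuming TF2 eager execution: halfway through decay_time the value sits halfway between the two epsilons, and past decay_time it is clamped at final_epsilon:

import tensorflow as tf

step = tf.Variable(500)
print(linear_decay(1.0, 0.1, step, decay_time=1000).numpy())  # 0.55
step.assign(5000)
print(linear_decay(1.0, 0.1, step, decay_time=1000).numpy())  # 0.1 (clamped)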
Example #9
def classification_loss_using_mask_iou_func_unbatched(
        embeddings, instance_ids, sampled_embeddings, sampled_instance_ids,
        sampled_class_labels, sampled_logits, similarity_strategy,
        is_balanced):
    """Classification loss using mask iou.

  Args:
    embeddings: A tf.float32 tensor of size [n, f].
    instance_ids: A tf.int32 tensor of size [n].
    sampled_embeddings: A tf.float32 tensor of size [num_samples, f].
    sampled_instance_ids: A tf.int32 tensor of size [num_samples].
    sampled_class_labels: A tf.int32 tensor of size [num_samples, 1].
    sampled_logits: A tf.float32 tensor of size [num_samples, num_classes].
    similarity_strategy: Defines the method for computing similarity between
                         embedding vectors. Possible values are 'dotproduct' and
                         'distance'.
    is_balanced: If True, the per-voxel losses are re-weighted to have equal
      total weight for foreground vs. background voxels.

  Returns:
    A tf.float32 loss scalar tensor.
  """
    predicted_soft_masks = metric_learning_utils.embedding_centers_to_soft_masks(
        embedding=embeddings,
        centers=sampled_embeddings,
        similarity_strategy=similarity_strategy)
    predicted_masks = tf.cast(tf.greater(predicted_soft_masks, 0.5),
                              dtype=tf.float32)
    gt_masks = tf.cast(tf.equal(tf.expand_dims(sampled_instance_ids, axis=1),
                                tf.expand_dims(instance_ids, axis=0)),
                       dtype=tf.float32)
    pairwise_iou = instance_segmentation_utils.points_mask_pairwise_iou(
        masks1=predicted_masks, masks2=gt_masks)
    num_classes = sampled_logits.get_shape().as_list()[1]
    sampled_class_labels_one_hot = tf.one_hot(indices=tf.reshape(
        sampled_class_labels, [-1]),
                                              depth=num_classes)
    sampled_class_labels_one_hot_fg = sampled_class_labels_one_hot[:, 1:]
    iou_coefs = tf.tile(tf.reshape(pairwise_iou, [-1, 1]),
                        [1, num_classes - 1])
    sampled_class_labels_one_hot_fg *= iou_coefs
    sampled_class_labels_one_hot_bg = tf.maximum(
        1.0 - tf.math.reduce_sum(
            sampled_class_labels_one_hot_fg, axis=1, keepdims=True), 0.0)
    sampled_class_labels_one_hot = tf.concat(
        [sampled_class_labels_one_hot_bg, sampled_class_labels_one_hot_fg],
        axis=1)
    params = {}
    if is_balanced:
        weights = loss_utils.get_balanced_loss_weights_multiclass(
            labels=tf.expand_dims(sampled_instance_ids, axis=1))
        params['weights'] = weights
    return classification_loss_fn(logits=sampled_logits,
                                  labels=sampled_class_labels_one_hot,
                                  **params)
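The label-construction trick in the middle of this function (scale the foreground one-hot columns by mask IoU, then let the background column absorb the remaining mass) can be shown on its own. A minimal sketch with made-up labels and IoUs, assuming eager execution and class 0 as background; it uses broadcasting instead of tf.tile, which is equivalent here:

import tensorflow as tf

num_classes = 3                            # background + 2 foreground classes
sampled_class_labels = tf.constant([[1], [2]])
pairwise_iou = tf.constant([0.8, 0.25])    # predicted-vs-gt mask IoU per sample

one_hot = tf.one_hot(tf.reshape(sampled_class_labels, [-1]), depth=num_classes)
fg = one_hot[:, 1:] * tf.reshape(pairwise_iou, [-1, 1])
bg = tf.maximum(1.0 - tf.reduce_sum(fg, axis=1, keepdims=True), 0.0)
soft_labels = tf.concat([bg, fg], axis=1)
print(soft_labels.numpy())
# [[0.2  0.8  0.  ]   poorly matching masks shift probability mass
#  [0.75 0.   0.25]]  toward the background class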
Example #10
  def _build_eval_metric(self):
    """Build a network to evaluate the metric between all prototypical states.

    For each pair of states (s, t) we return max(d(s, t), d(t, s)), since the
    approximant cannot in general guarantee symmetry.

    Returns:
      An op computing the euclidean distance between the representations of all
        pairs of states in self.eval_states_ph.
    """
    self.eval_states_ph = tf.placeholder(tf.float64, (self.num_states, 2),
                                         name='eval_states_ph')
    distances = tf.maximum(
        self.online_network(self._concat_states(self.eval_states_ph)),
        self.online_network(self._concat_states(self.eval_states_ph,
                                                transpose=True)))
    return distances
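Conceptually, the op keeps max(d(s, t), d(t, s)) for every pair because the learned approximant is not guaranteed to be symmetric. A toy sketch of that idea on a precomputed matrix of pairwise approximations, assuming eager execution (the real op feeds both state orderings through the online network instead):

import tensorflow as tf

# Hypothetical approximate distances; entry [i, j] estimates d(state_i, state_j).
d = tf.constant([[0.0, 0.3],
                 [0.5, 0.0]])
symmetric_d = tf.maximum(d, tf.transpose(d))
print(symmetric_d.numpy())  # [[0.  0.5] [0.5 0. ]]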
Example #11
 def loss_fn():
     """Loss function."""
     num_classes = logits.get_shape().as_list()[-1]
     if num_classes is None:
         raise ValueError('Number of classes is unknown.')
     labels_one_hot = tf.one_hot(indices=(labels - 1),
                                 depth=(num_classes - 1))
     inverse_distance_coef = tf.maximum(
         tf.minimum(
             1.0 -
             normalized_center_distance / max_positive_normalized_distance,
             1.0), 0.0)
     labels_one_hot = tf.reshape(inverse_distance_coef,
                                 [-1, 1]) * labels_one_hot
     background_label = 1.0 - tf.math.reduce_sum(
         labels_one_hot, axis=1, keepdims=True)
     labels_one_hot = tf.concat([background_label, labels_one_hot], axis=1)
     loss = classification_loss_fn(logits=logits,
                                   labels=labels_one_hot,
                                   **params)
     return loss
Example #12
  def _build_train_op(self, optimizer):
    """Build the TensorFlow graph used to learn the bisimulation metric.

    Args:
      optimizer: a tf.train optimizer.
    Returns:
      A TensorFlow op to minimize the bisimulation loss.
    """
    self.online_network = tf.make_template('Online',
                                           self._network_template)
    self.target_network = tf.make_template('Target',
                                           self._network_template)
    self.s1_ph = tf.placeholder(tf.float64, (self.batch_size, 2),
                                name='s1_ph')
    self.s2_ph = tf.placeholder(tf.float64, (self.batch_size, 2),
                                name='s2_ph')
    self.s1_online_distances = self.online_network(
        self._concat_states(self.s1_ph))
    self.s1_target_distances = self.target_network(
        self._concat_states(self.s1_ph))
    self.s2_target_distances = self.target_network(
        self._concat_states(self.s2_ph))
    self.action_ph = tf.placeholder(tf.int32, (self.batch_size,))
    self.rewards_ph = tf.placeholder(tf.float64, (self.batch_size,))
    # We use an expanding horizon for computing the distances.
    self.bisim_horizon_ph = tf.placeholder(tf.float64, ())
    # bisimulation_target_1 = rew_diff + gamma * next_distance.
    bisimulation_target_1 = tf.stop_gradient(self._build_bisimulation_target())
    # bisimulation_target_2 = curr_distance.
    bisimulation_target_2 = tf.stop_gradient(self.s1_target_distances)
    # We slowly taper in the maximum according to the bisim horizon.
    bisimulation_target = tf.maximum(
        bisimulation_target_1, bisimulation_target_2 * self.bisim_horizon_ph)
    # We zero-out diagonal entries, since those are estimating the distance
    # between a state and itself, which we know to be 0.
    diagonal_mask = 1.0 - tf.diag(tf.ones(self.batch_size, dtype=tf.float64))
    diagonal_mask = tf.reshape(diagonal_mask, (self.batch_size**2, 1))
    bisimulation_target *= diagonal_mask
    bisimulation_estimate = self.s1_online_distances
    # We start with a mask that includes everything.
    loss_mask = tf.ones(tf.shape(bisimulation_estimate))
    # We have to enforce that states are only compared when they were reached
    # via the same action.
    indicators = self.action_ph
    indicators = tf.cast(indicators, tf.float64)
    # indicators initially has shape [batch_size]; we first tile it:
    square_ids = tf.tile([indicators], [self.batch_size, 1])
    # We subtract square_ids from its transpose:
    square_ids = square_ids - tf.transpose(square_ids)
    # At this point all zero-entries are the ones with equal IDs.
    # Now we would like to convert the zeros in this matrix to 1s, and make
    # everything else a 0. We can do this with the following operation:
    loss_mask = 1 - tf.abs(tf.sign(square_ids))
    # Now reshape to match the shapes of the estimate and target.
    loss_mask = tf.reshape(loss_mask, (self.batch_size**2, 1))
    larger_targets = bisimulation_target - bisimulation_estimate
    larger_targets_count = tf.reduce_sum(
        tf.cast(larger_targets > 0., tf.float64))
    tf.summary.scalar('Learning/LargerTargets', larger_targets_count)
    tf.summary.scalar('Learning/NumUpdates', tf.count_nonzero(loss_mask))
    tf.summary.scalar('Learning/BisimHorizon', self.bisim_horizon_ph)
    bisimulation_loss = tf.losses.mean_squared_error(
        bisimulation_target,
        bisimulation_estimate,
        weights=loss_mask)
    tf.summary.scalar('Learning/loss', bisimulation_loss)
    # Plot average distance between sampled representations.
    average_distance = tf.reduce_mean(bisimulation_estimate)
    tf.summary.scalar('Approx/AverageDistance', average_distance)
    return optimizer.minimize(bisimulation_loss)
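The same-action masking trick near the end (tile the action ids, subtract the transpose, then turn zeros into ones) can be checked in isolation. A small sketch with a hypothetical batch of three actions, assuming eager execution:

import tensorflow as tf

indicators = tf.constant([0., 1., 0.], dtype=tf.float64)  # one action id per state
square_ids = tf.tile([indicators], [3, 1])
square_ids = square_ids - tf.transpose(square_ids)
# Zero entries mark pairs with equal action ids; map zeros -> 1, everything else -> 0.
loss_mask = 1 - tf.abs(tf.sign(square_ids))
print(loss_mask.numpy())
# [[1. 0. 1.]
#  [0. 1. 0.]
#  [1. 0. 1.]]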
Example #13
def geometric_augmentation(images,
                           flow = None,
                           mask = None,
                           crop_height = 640,
                           crop_width = 640,
                           probability_flip_left_right = 0.5,
                           probability_flip_up_down = 0.1,
                           probability_scale = 0.8,
                           probability_relative_scale = 0.,
                           probability_stretch = 0.8,
                           probability_rotation = 0.0,
                           probability_relative_rotation = 0.0,
                           probability_crop_offset = 0.0,
                           min_bound_scale = -0.2,
                           max_bound_scale = 0.6,
                           max_strech_scale = 0.2,
                           min_bound_relative_scale = -0.1,
                           max_bound_relative_scale = 0.1,
                           max_rotation_deg = 15,
                           max_relative_rotation_deg = 3,
                           max_relative_crop_offset = 5,
                           return_full_scale=False):

  """Applies geometric augmentations to an image pair and corresponding flow.

  Args:
    images: Image pair of shape [2, height, width, channels].
    flow: Corresponding forward flow field of shape [height, width, 2].
    mask: Mask indicating which positions in the flow field hold valid flow
      vectors of shape [height, width, 1]. Non-valid positions are encoded with
      0, valid positions with 1.
    crop_height: Height of the final augmented output.
    crop_width: Width of the final augmented output.
    probability_flip_left_right: Probability of applying left/right flip.
    probability_flip_up_down: Probability of applying an up/down flip.
    probability_scale: Probability of applying scale augmentation.
    probability_relative_scale: Probability of applying scale augmentation to
      only the second frame of the image pair.
    probability_stretch: Probability of applying stretch augmentation (scale
      without keeping the aspect ratio).
    probability_rotation: Probability of applying rotation augmentation.
    probability_relative_rotation: Probability of applying rotation augmentation
      to only the second frame of the image pair.
    probability_crop_offset: Probability of applying a relative offset while
      cropping.
    min_bound_scale: Defines the smallest possible scaling factor as
      2**min_bound_scale.
    max_bound_scale: Defines the largest possible scaling factor as
      2**max_bound_scale.
    max_strech_scale: Defines the smallest and largest possible stretching
      factor as 2**-max_strech_scale and 2**max_strech_scale.
    min_bound_relative_scale: Defines the smallest possible scaling factor for
      the relative scaling as 2**min_bound_relative_scale.
    max_bound_relative_scale: Defines the largest possible scaling factor for
      the relative scaling as 2**max_bound_relative_scale.
    max_rotation_deg: Defines the maximum angle of rotation in degrees.
    max_relative_rotation_deg: Defines the maximum angle of rotation in degrees
      for the relative rotation.
    max_relative_crop_offset: Defines the maximum relative offset in pixels for
      cropping.
    return_full_scale: bool. If this is passed, the full size images will be
      returned in addition to the geometrically augmented (cropped and / or
      resized) images. In addition to the resized images, the crop height,
      width, and any padding applied will be returned.

  Returns:
    if return_full_scale is False:
      Augmented images, flow and mask (if not None).
    if return_full_scale is True:
      Augmented images, flow, mask, full_size_images, crop_h, crop_w, pad_h,
       and pad_w.
  """

  # apply geometric augmentation
  if probability_flip_left_right > 0:
    images, flow, mask = random_flip_left_right(
        images, flow, mask, probability_flip_left_right)

  if probability_flip_up_down > 0:
    images, flow, mask = random_flip_up_down(
        images, flow, mask, probability_flip_up_down)

  if probability_scale > 0 or probability_stretch > 0:
    images, flow, mask = random_scale(
        images,
        flow,
        mask,
        min_scale=min_bound_scale,
        max_scale=max_bound_scale,
        max_strech=max_strech_scale,
        probability_scale=probability_scale,
        probability_strech=probability_stretch)

  if probability_relative_scale > 0:
    images, flow, mask = random_scale_second(
        images, flow, mask,
        min_scale=min_bound_relative_scale,
        max_scale=max_bound_relative_scale,
        probability_scale=probability_relative_scale)

  if probability_rotation > 0:
    images, flow, mask = random_rotation(
        images, flow, mask,
        probability=probability_rotation,
        max_rotation=max_rotation_deg, not_empty_crop=True)

  if probability_relative_rotation > 0:
    images, flow, mask = random_rotation_second(
        images, flow, mask,
        probability=probability_relative_rotation,
        max_rotation=max_relative_rotation_deg, not_empty_crop=True)

  images_uncropped = images
  images, flow, mask, offset_h, offset_w = random_crop(
      images, flow, mask, crop_height, crop_width,
      relative_offset=max_relative_crop_offset,
      probability_crop_offset=probability_crop_offset)
  # Add 200 / 400 pixels (100 / 200 per side) to crop height / width for the
  # full-scale warp.
  pad_to_size_h = crop_height + 200
  pad_to_size_w = crop_width + 400
  if return_full_scale:
    if pad_to_size_w:
      uncropped_shape = tf.shape(images_uncropped)
      if images.shape[1] > uncropped_shape[1] or images.shape[
          2] > uncropped_shape[2]:
        images_uncropped = images
        uncropped_shape = tf.shape(images_uncropped)
        offset_h = tf.zeros_like(offset_h)
        offset_w = tf.zeros_like(offset_w)

      if uncropped_shape[1] > pad_to_size_h:
        crop_ht = offset_h - (200 // 2)
        crop_hb = offset_h + crop_height + (200 // 2)
        crop_hb += tf.maximum(0, -crop_ht)
        crop_ht -= tf.maximum(0, -(uncropped_shape[1] - crop_hb))
        crop_ht = tf.maximum(crop_ht, 0)
        crop_hb = tf.minimum(crop_hb, uncropped_shape[1])
        offset_h -= crop_ht
        images_uncropped = images_uncropped[:, crop_ht:crop_hb, :, :]

      if uncropped_shape[2] > pad_to_size_w:
        crop_wt = offset_w - (400 // 2)
        crop_wb = offset_w + crop_width + (400 // 2)
        crop_wb += tf.maximum(0, -crop_wt)
        crop_wt -= tf.maximum(0, -(uncropped_shape[2] - crop_wb))
        crop_wt = tf.maximum(crop_wt, 0)
        crop_wb = tf.minimum(crop_wb, uncropped_shape[2])
        offset_w -= crop_wt
        images_uncropped = images_uncropped[:, :, crop_wt:crop_wb, :]

      uncropped_shape = tf.shape(images_uncropped)
      # Pad the remaining height / width to reach pad_to_size_h / pad_to_size_w.
      pad_h = pad_to_size_h - uncropped_shape[1]
      pad_w = pad_to_size_w - uncropped_shape[2]
      with tf.control_dependencies([
          tf.compat.v1.assert_greater_equal(pad_h, 0),
          tf.compat.v1.assert_greater_equal(pad_w, 0)
      ]):
        images_uncropped = tf.pad(images_uncropped,
                                  [[0, 0], [pad_h, 0], [pad_w, 0], [0, 0]])
      images_uncropped = tf.ensure_shape(images_uncropped,
                                         [2, pad_to_size_h, pad_to_size_w, 3])
    return images, flow, mask, images_uncropped, offset_h, offset_w, pad_h, pad_w

  return images, flow, mask
Example #14
    def update_state(self, inputs, outputs):
        """Function that updates the metric state at each example.

    Args:
      inputs: A dictionary containing input tensors.
      outputs: A dictionary containing output tensors.

    Returns:
      Update op.
    """
        detections_score = tf.reshape(
            outputs[standard_fields.DetectionResultFields.objects_score], [-1])
        detections_class = tf.reshape(
            outputs[standard_fields.DetectionResultFields.objects_class], [-1])
        detections_length = tf.reshape(
            outputs[standard_fields.DetectionResultFields.objects_length],
            [-1])
        detections_height = tf.reshape(
            outputs[standard_fields.DetectionResultFields.objects_height],
            [-1])
        detections_width = tf.reshape(
            outputs[standard_fields.DetectionResultFields.objects_width], [-1])
        detections_center = tf.reshape(
            outputs[standard_fields.DetectionResultFields.objects_center],
            [-1, 3])
        detections_rotation_matrix = tf.reshape(
            outputs[
                standard_fields.DetectionResultFields.objects_rotation_matrix],
            [-1, 3, 3])
        gt_class = tf.reshape(
            inputs[standard_fields.InputDataFields.objects_class], [-1])
        gt_length = tf.reshape(
            inputs[standard_fields.InputDataFields.objects_length], [-1])
        gt_height = tf.reshape(
            inputs[standard_fields.InputDataFields.objects_height], [-1])
        gt_width = tf.reshape(
            inputs[standard_fields.InputDataFields.objects_width], [-1])
        gt_center = tf.reshape(
            inputs[standard_fields.InputDataFields.objects_center], [-1, 3])
        gt_rotation_matrix = tf.reshape(
            inputs[standard_fields.InputDataFields.objects_rotation_matrix],
            [-1, 3, 3])
        for c in self.class_range:
            gt_mask_c = tf.equal(gt_class, c)
            num_gt_c = tf.math.reduce_sum(tf.cast(gt_mask_c, dtype=tf.int32))
            gt_length_c = tf.boolean_mask(gt_length, gt_mask_c)
            gt_height_c = tf.boolean_mask(gt_height, gt_mask_c)
            gt_width_c = tf.boolean_mask(gt_width, gt_mask_c)
            gt_center_c = tf.boolean_mask(gt_center, gt_mask_c)
            gt_rotation_matrix_c = tf.boolean_mask(gt_rotation_matrix,
                                                   gt_mask_c)
            detections_mask_c = tf.equal(detections_class, c)
            num_detections_c = tf.math.reduce_sum(
                tf.cast(detections_mask_c, dtype=tf.int32))
            if num_detections_c == 0:
                continue
            det_length_c = tf.boolean_mask(detections_length,
                                           detections_mask_c)
            det_height_c = tf.boolean_mask(detections_height,
                                           detections_mask_c)
            det_width_c = tf.boolean_mask(detections_width, detections_mask_c)
            det_center_c = tf.boolean_mask(detections_center,
                                           detections_mask_c)
            det_rotation_matrix_c = tf.boolean_mask(detections_rotation_matrix,
                                                    detections_mask_c)
            det_scores_c = tf.boolean_mask(detections_score, detections_mask_c)
            det_scores_c, sorted_indices = tf.math.top_k(det_scores_c,
                                                         k=num_detections_c)
            det_length_c = tf.gather(det_length_c, sorted_indices)
            det_height_c = tf.gather(det_height_c, sorted_indices)
            det_width_c = tf.gather(det_width_c, sorted_indices)
            det_center_c = tf.gather(det_center_c, sorted_indices)
            det_rotation_matrix_c = tf.gather(det_rotation_matrix_c,
                                              sorted_indices)
            tp_c = tf.zeros([num_detections_c], dtype=tf.int32)
            if num_gt_c > 0:
                ious_c = box_ops.iou3d(
                    boxes1_length=gt_length_c,
                    boxes1_height=gt_height_c,
                    boxes1_width=gt_width_c,
                    boxes1_center=gt_center_c,
                    boxes1_rotation_matrix=gt_rotation_matrix_c,
                    boxes2_length=det_length_c,
                    boxes2_height=det_height_c,
                    boxes2_width=det_width_c,
                    boxes2_center=det_center_c,
                    boxes2_rotation_matrix=det_rotation_matrix_c)
                max_overlap_gt_ids = tf.cast(tf.math.argmax(ious_c, axis=0),
                                             dtype=tf.int32)
                is_gt_box_detected = tf.zeros([num_gt_c], dtype=tf.int32)
                for i in tf.range(num_detections_c):
                    gt_id = max_overlap_gt_ids[i]
                    if (ious_c[gt_id, i] > self.iou_threshold
                            and is_gt_box_detected[gt_id] == 0):
                        tp_c = tf.maximum(
                            tf.one_hot(i, num_detections_c, dtype=tf.int32),
                            tp_c)
                        is_gt_box_detected = tf.maximum(
                            tf.one_hot(gt_id, num_gt_c, dtype=tf.int32),
                            is_gt_box_detected)
            self.tp[c] = tf.concat([self.tp[c], tp_c], axis=0)
            self.scores[c] = tf.concat([self.scores[c], det_scores_c], axis=0)
            self.num_gt[c] += num_gt_c
        return tf.no_op()
Example #15
def preprocess(inputs,
               output_keys=None,
               is_training=False,
               using_sequence_dataset=False,
               num_frame_to_load=1,
               transform_points_fn=None,
               image_preprocess_fn_dic=None,
               images_points_correspondence_fn=None,
               compute_semantic_labels_fn=None,
               compute_motion_labels_fn=None,
               view_names=(),
               points_key='points',
               colors_key='colors',
               normals_key='normals',
               intensities_key='intensities',
               elongations_key='elongations',
               semantic_labels_key='semantic_labels',
               motion_labels_key='motion_labels',
               spin_coords_key=None,
               points_in_image_frame_key=None,
               num_points_to_randomly_sample=None,
               x_min_degree_rotation=None,
               x_max_degree_rotation=None,
               y_min_degree_rotation=None,
               y_max_degree_rotation=None,
               z_min_degree_rotation=None,
               z_max_degree_rotation=None,
               points_pad_or_clip_size=None,
               voxels_pad_or_clip_size=None,
               voxel_grid_cell_size=(0.1, 0.1, 0.1),
               num_offset_bins_x=4,
               num_offset_bins_y=4,
               num_offset_bins_z=4,
               point_feature_keys=('point_offsets', ),
               point_to_voxel_segment_func=tf.math.unsorted_segment_mean,
               x_random_crop_size=None,
               y_random_crop_size=None,
               min_scale_ratio=None,
               max_scale_ratio=None,
               semantic_labels_offset=0,
               ignore_labels=(),
               remove_unlabeled_images_and_points=False,
               labeled_view_name=None,
               only_keep_first_return_lidar_points=False):
    """Preprocesses a dictionary of `Tensor` inputs.

  If is_training=True, it will randomly rotate the points around the z axis,
  and will randomly flip the points with respect to x and/or y axis.

  Note that the preprocessor function does not correct normal vectors if they
  exist in the inputs.
  Note that the preprocessing affects all values of `inputs` that are `Tensors`.

  Args:
    inputs: A dictionary of inputs. Each value must be a `Tensor`.
    output_keys: Either None, or a list of strings containing the keys in the
      dictionary that is returned by the preprocess function.
    is_training: Whether we're training or testing.
    using_sequence_dataset: if true, the inputs will contain scene and multiple
      frames data.
    num_frame_to_load: If greater than 1, load multiframe point cloud point
      positions and its correspondence.
    transform_points_fn: Fn to transform other frames to a specific frame's
      coordinate.
    image_preprocess_fn_dic: Image preprocessing function. Maps view names to
      their image preprocessing functions. Set it to None if there are no
      images to preprocess or you are not interested in preprocessing images.
    images_points_correspondence_fn: The function that computes correspondence
      between images and points.
    compute_semantic_labels_fn: If not None, semantic labels will be computed
      using this function.
    compute_motion_labels_fn: If not None, motion labels will be computed using
      this function.
    view_names: Names corresponding to 2d views of the scene.
    points_key: The key used for `points` in the inputs.
    colors_key: The key used for `colors` in the inputs.
    normals_key: The key used for 'normals' in the inputs.
    intensities_key: The key used for 'intensities' in the inputs.
    elongations_key: The key used for 'elongations' in the inputs.
    semantic_labels_key: The key used for 'semantic_labels' in the inputs.
    motion_labels_key: The key used for 'motion_labels' in the inputs.
    spin_coords_key: The key used for 'spin_coords' in the inputs. In Waymo
      data, spin_coords is a [num_points, 3] tensor that contains scan_index,
      shot_index, return_index. In Waymo data, return_index of the first return
      points is 0.
    points_in_image_frame_key: A string that identifies the tensor that contains
      the points_in_image_frame tensor. If None, it won't be used.
    num_points_to_randomly_sample: Number of points to randomly sample. If None,
      it will keep the original points and will not perform sampling.
    x_min_degree_rotation: Min degree of rotation around the x axis.
    x_max_degree_rotation: Max degree of rotation around the x axis.
    y_min_degree_rotation: Min degree of rotation around the y axis.
    y_max_degree_rotation: Max degree of rotation around the y axis.
    z_min_degree_rotation: Min degree of rotation around the z axis.
    z_max_degree_rotation: Max degree of rotation around the z axis.
    points_pad_or_clip_size: Number of target points to pad or clip to. If None,
      it will not perform the point padding.
    voxels_pad_or_clip_size: Number of target voxels to pad or clip to. If None,
      it will not perform the voxel padding.
    voxel_grid_cell_size: A three dimensional tuple determining the voxel grid
      size.
    num_offset_bins_x: Number of bins for point offsets in x direction.
    num_offset_bins_y: Number of bins for point offsets in y direction.
    num_offset_bins_z: Number of bins for point offsets in z direction.
    point_feature_keys: The keys used to form the voxel features.
    point_to_voxel_segment_func: The function used to aggregate the features
      of the points that fall in the same voxel.
    x_random_crop_size: Size of the random crop in x dimension. If None, random
      crop will not take place on x dimension.
    y_random_crop_size: Size of the random crop in y dimension. If None, random
      crop will not take place on y dimension.
    min_scale_ratio: Minimum scale ratio. Used for scaling point cloud.
    max_scale_ratio: Maximum scale ratio. Used for scaling point cloud.
    semantic_labels_offset: An integer offset that will be added to labels.
    ignore_labels: A tuple containing labels that should be ignored when
      computing the loss and metrics.
    remove_unlabeled_images_and_points: If True, removes the images that are not
      labeled and also removes the points that are associated with those images.
    labeled_view_name: The name of the view that is labeled, otherwise None.
    only_keep_first_return_lidar_points: If True, we only keep the first return
      lidar points.

  Returns:
    The mean subtracted points with an optional rotation applied.

  Raises:
    ValueError: if `inputs` doesn't contain the points_key.
    ValueError: if `points_in_image_frame` does not have rank 3.
  """
    inputs = dict(inputs)

    if using_sequence_dataset:
        all_frame_inputs = inputs
        scene = all_frame_inputs['scene']
        frame1 = all_frame_inputs['frame1']
        frame_start_index = all_frame_inputs['frame_start_index']
        inputs = dict(
            all_frame_inputs['frame0']
        )  # so that the following processing code can be unchanged.

    # Initializing empty dictionary for mesh, image, indices_2d and non tensor
    # inputs.
    non_tensor_inputs = {}
    view_image_inputs = {}
    view_indices_2d_inputs = {}
    mesh_inputs = {}

    if image_preprocess_fn_dic is None:
        image_preprocess_fn_dic = {}

    # Convert all float64 to float32 and all int64 to int32.
    for key in sorted(inputs):
        if isinstance(inputs[key], tf.Tensor):
            if inputs[key].dtype == tf.float64:
                inputs[key] = tf.cast(inputs[key], dtype=tf.float32)
            if inputs[key].dtype == tf.int64:
                inputs[key] = tf.cast(inputs[key], dtype=tf.int32)

    if points_key in inputs:
        inputs[standard_fields.InputDataFields.
               point_positions] = inputs[points_key]
    if colors_key is not None and colors_key in inputs:
        inputs[
            standard_fields.InputDataFields.point_colors] = inputs[colors_key]
    if normals_key is not None and normals_key in inputs:
        inputs[standard_fields.InputDataFields.
               point_normals] = inputs[normals_key]
    if intensities_key is not None and intensities_key in inputs:
        inputs[standard_fields.InputDataFields.
               point_intensities] = inputs[intensities_key]
    if elongations_key is not None and elongations_key in inputs:
        inputs[standard_fields.InputDataFields.
               point_elongations] = inputs[elongations_key]
    if semantic_labels_key is not None and semantic_labels_key in inputs:
        inputs[standard_fields.InputDataFields.
               object_class_points] = inputs[semantic_labels_key]
    if motion_labels_key is not None and motion_labels_key in inputs:
        inputs[standard_fields.InputDataFields.
               object_flow_points] = inputs[motion_labels_key]
    if spin_coords_key is not None and spin_coords_key in inputs:
        inputs[standard_fields.InputDataFields.
               point_spin_coordinates] = inputs[spin_coords_key]

    # Acquire point / image correspondences.
    if images_points_correspondence_fn is not None:
        fn_outputs = images_points_correspondence_fn(inputs)
        if 'points_position' in fn_outputs:
            inputs[standard_fields.InputDataFields.
                   point_positions] = fn_outputs['points_position']
        if 'points_intensity' in fn_outputs and intensities_key is not None:
            inputs[standard_fields.InputDataFields.
                   point_intensities] = fn_outputs['points_intensity']
        if 'points_elongation' in fn_outputs and elongations_key is not None:
            inputs[standard_fields.InputDataFields.
                   point_elongations] = fn_outputs['points_elongation']
        if 'points_label' in fn_outputs and semantic_labels_key is not None:
            inputs[standard_fields.InputDataFields.
                   object_class_points] = fn_outputs['points_label']
        if 'view_images' in fn_outputs:
            for key in sorted(fn_outputs['view_images']):
                if len(fn_outputs['view_images'][key].shape) != 4:
                    raise ValueError(('%s image should have rank 4.' % key))
            view_image_inputs = fn_outputs['view_images']
        if 'view_indices_2d' in fn_outputs:
            for key in sorted(fn_outputs['view_indices_2d']):
                if len(fn_outputs['view_indices_2d'][key].shape) != 3:
                    raise ValueError(
                        ('%s indices_2d should have rank 3.' % key))
            view_indices_2d_inputs = fn_outputs['view_indices_2d']
    else:
        if points_in_image_frame_key is not None:
            inputs['rgb_view/features'] = inputs['image']
            inputs['rgb_view/indices_2d'] = inputs[points_in_image_frame_key]
            if len(inputs['rgb_view/indices_2d'].shape) != 3:
                raise ValueError('`points_in_image_frame` should have rank 3.')

    frame0 = inputs.copy()
    if num_frame_to_load > 1:
        point_positions_list = [
            frame0[standard_fields.InputDataFields.point_positions]
        ]
        if view_indices_2d_inputs:
            view_indices_2d_list = [view_indices_2d_inputs[view_names[0]]]
        frame_source_list = [
            tf.zeros([
                tf.shape(
                    frame0[standard_fields.InputDataFields.point_positions])[0]
            ], tf.int32)
        ]
        for i in range(1, num_frame_to_load):
            target_frame_key = 'frame' + str(i)
            if images_points_correspondence_fn is not None:
                frame_i = images_points_correspondence_fn(
                    all_frame_inputs[target_frame_key])
            else:
                raise ValueError(
                    'images_points_correspondence_fn is needed for loading multi-frame pointclouds.'
                )
            transformed_point_positions = transform_points_fn(
                scene, frame_i['points_position'], frame_start_index,
                i + frame_start_index)
            point_positions_list.append(transformed_point_positions)
            if view_indices_2d_inputs:
                view_indices_2d_list.append(
                    frame_i['view_indices_2d'][view_names[0]])
            frame_source_list.append(
                tf.ones([tf.shape(transformed_point_positions)[0]], tf.int32) *
                i)

        # add multi-frame info to override inputs and view_indices_2d_inputs
        inputs[standard_fields.InputDataFields.
               point_frame_index] = tf.expand_dims(tf.concat(frame_source_list,
                                                             axis=0),
                                                   axis=1)
        inputs[standard_fields.InputDataFields.point_positions] = tf.concat(
            point_positions_list, axis=0)
        if view_indices_2d_inputs:
            view_indices_2d_inputs[view_names[0]] = tf.concat(
                view_indices_2d_list, axis=1)

    # Validate inputs.
    if standard_fields.InputDataFields.point_positions not in inputs:
        raise ValueError('`inputs` must contain a point_positions')
    if inputs[
            standard_fields.InputDataFields.point_positions].shape.ndims != 2:
        raise ValueError('points must be of rank 2.')
    if inputs[standard_fields.InputDataFields.point_positions].shape[1] != 3:
        raise ValueError('point should be 3 dimensional.')

    # Remove normal nans.
    if standard_fields.InputDataFields.point_normals in inputs:
        inputs[standard_fields.InputDataFields.point_normals] = tf.where(
            tf.math.is_nan(
                inputs[standard_fields.InputDataFields.point_normals]),
            tf.zeros_like(
                inputs[standard_fields.InputDataFields.point_normals]),
            inputs[standard_fields.InputDataFields.point_normals])

    # Compute semantic labels if compute_semantic_labels_fn is not None
    # An example is when the ground-truth contains 3d object boxes and not per
    # point labels. This would be a function that infers point labels from boxes.
    if compute_semantic_labels_fn is not None:
        inputs[standard_fields.InputDataFields.
               object_class_points] = compute_semantic_labels_fn(
                   inputs=frame0,
                   points_key=standard_fields.InputDataFields.point_positions)
    if compute_motion_labels_fn is not None:
        inputs[standard_fields.InputDataFields.
               object_flow_points] = compute_motion_labels_fn(
                   scene=scene,
                   frame0=frame0,
                   frame1=frame1,
                   frame_start_index=frame_start_index,
                   points_key=standard_fields.InputDataFields.point_positions)

    # Splitting inputs to {view_image_inputs,
    #                      view_indices_2d_inputs,
    #                      mesh_inputs,
    #                      non_tensor_inputs}
    mesh_keys = []
    for key in [
            standard_fields.InputDataFields.point_positions,
            standard_fields.InputDataFields.point_colors,
            standard_fields.InputDataFields.point_normals,
            standard_fields.InputDataFields.point_intensities,
            standard_fields.InputDataFields.point_elongations,
            standard_fields.InputDataFields.object_class_points,
            standard_fields.InputDataFields.point_spin_coordinates,
            standard_fields.InputDataFields.object_flow_points,
            standard_fields.InputDataFields.point_frame_index,
    ]:
        if key is not None and key in inputs:
            mesh_keys.append(key)
    view_image_names = [('%s/features' % key) for key in view_names]
    view_indices_2d_names = [('%s/indices_2d' % key) for key in view_names]

    # Additional key collecting
    for k, v in six.iteritems(inputs):
        if k in view_image_names:
            view_image_inputs[k] = v
        elif k in view_indices_2d_names:
            view_indices_2d_inputs[k] = v
        elif k in mesh_keys:
            if num_frame_to_load > 1:
                pad_size = tf.shape(
                    inputs[standard_fields.InputDataFields.
                           point_positions])[0] - tf.shape(v)[0]
                if k == standard_fields.InputDataFields.object_class_points:
                    pad_value = -1
                else:
                    pad_value = 0
                v = tf.pad(v, [[0, pad_size], [0, 0]],
                           constant_values=pad_value)
            mesh_inputs[k] = v
        else:
            non_tensor_inputs[k] = v

    # Remove points that are not in the lidar first return (optional)
    if only_keep_first_return_lidar_points:
        _remove_second_return_lidar_points(
            mesh_inputs=mesh_inputs,
            view_indices_2d_inputs=view_indices_2d_inputs)

    # Randomly sample points
    preprocessor_utils.randomly_sample_points(
        mesh_inputs=mesh_inputs,
        view_indices_2d_inputs=view_indices_2d_inputs,
        target_num_points=num_points_to_randomly_sample)

    # Add weights if it does not exist in inputs. The weight of the points with
    # label in `ignore_labels` is set to 0. This helps the loss and metrics to
    # ignore those labels.
    use_weights = (
        standard_fields.InputDataFields.object_class_points in mesh_inputs
        or standard_fields.InputDataFields.object_flow_points in mesh_inputs)
    if use_weights:
        if num_frame_to_load > 1:
            num_valid_points_frame0 = tf.shape(
                frame0[standard_fields.InputDataFields.point_positions])[0]
            num_additional_frame_points = tf.shape(
                mesh_inputs[standard_fields.InputDataFields.
                            object_class_points])[0] - num_valid_points_frame0
            weights = tf.concat([
                tf.ones([num_valid_points_frame0, 1], tf.float32),
                tf.zeros([num_additional_frame_points, 1], tf.float32)
            ],
                                axis=0)
        else:
            weights = tf.ones_like(mesh_inputs[
                standard_fields.InputDataFields.object_class_points],
                                   dtype=tf.float32)

    if standard_fields.InputDataFields.object_class_points in mesh_inputs:
        mesh_inputs[
            standard_fields.InputDataFields.object_class_points] = tf.cast(
                mesh_inputs[
                    standard_fields.InputDataFields.object_class_points],
                dtype=tf.int32)
        for ignore_label in ignore_labels:
            weights *= tf.cast(tf.not_equal(
                mesh_inputs[
                    standard_fields.InputDataFields.object_class_points],
                ignore_label),
                               dtype=tf.float32)
        mesh_inputs[
            standard_fields.InputDataFields.point_loss_weights] = weights
        mesh_inputs[standard_fields.InputDataFields.
                    object_class_points] += semantic_labels_offset

    # We normalize the intensities and elongations to be in a smaller range.
    if standard_fields.InputDataFields.point_intensities in mesh_inputs:
        mesh_inputs[standard_fields.InputDataFields.
                    point_intensities] = change_intensity_range(
                        intensities=mesh_inputs[
                            standard_fields.InputDataFields.point_intensities])
    if standard_fields.InputDataFields.point_elongations in mesh_inputs:
        mesh_inputs[
            standard_fields.InputDataFields.point_elongations] = (tf.cast(
                mesh_inputs[standard_fields.InputDataFields.point_elongations],
                dtype=tf.float32) * 2.0 / 255.0) - 1.0

    # Random scale the points.
    if min_scale_ratio is not None and max_scale_ratio is not None:
        scale_ratio = tf.random.uniform([],
                                        minval=min_scale_ratio,
                                        maxval=max_scale_ratio,
                                        dtype=tf.float32)
        mesh_inputs[
            standard_fields.InputDataFields.point_positions] *= scale_ratio
        if standard_fields.InputDataFields.object_flow_points in mesh_inputs:
            mesh_inputs[standard_fields.InputDataFields.
                        object_flow_points] *= scale_ratio

    # Random crop the points.
    randomly_crop_points(mesh_inputs=mesh_inputs,
                         view_indices_2d_inputs=view_indices_2d_inputs,
                         x_random_crop_size=x_random_crop_size,
                         y_random_crop_size=y_random_crop_size)

    # If training, pick the best labeled image and points that project to it.
    # In many datasets, only one image is labeled anyways.
    if remove_unlabeled_images_and_points:
        pick_labeled_image(mesh_inputs=mesh_inputs,
                           view_image_inputs=view_image_inputs,
                           view_indices_2d_inputs=view_indices_2d_inputs,
                           view_name=labeled_view_name)

    # Process images.
    preprocessor_utils.preprocess_images(
        view_image_inputs=view_image_inputs,
        view_indices_2d_inputs=view_indices_2d_inputs,
        image_preprocess_fn_dic=image_preprocess_fn_dic,
        is_training=is_training)

    # Record the original points.
    original_points = mesh_inputs[
        standard_fields.InputDataFields.point_positions]
    if standard_fields.InputDataFields.point_colors in mesh_inputs:
        original_colors = mesh_inputs[
            standard_fields.InputDataFields.point_colors]
    if standard_fields.InputDataFields.point_normals in mesh_inputs:
        original_normals = mesh_inputs[
            standard_fields.InputDataFields.point_normals]

    # Update feature visibility count.
    if 'feature_visibility_count' in mesh_inputs:
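        # Clamp the visibility count to at least 1 to avoid dividing by zero below.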
        mesh_inputs['feature_visibility_count'] = tf.maximum(
            mesh_inputs['feature_visibility_count'], 1)
        mesh_inputs['features'] /= tf.cast(
            mesh_inputs['feature_visibility_count'], dtype=tf.float32)

    # Subtract mean from points.
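    # Centering at the origin also means the random rotation below acts about the
    # cloud's centroid rather than the sensor origin.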
    mean_points = tf.reduce_mean(
        mesh_inputs[standard_fields.InputDataFields.point_positions], axis=0)
    mesh_inputs[
        standard_fields.InputDataFields.point_positions] -= tf.expand_dims(
            mean_points, axis=0)

    # Rotate points randomly.
    if standard_fields.InputDataFields.point_normals in mesh_inputs:
        normals = mesh_inputs[standard_fields.InputDataFields.point_normals]
    else:
        normals = None

    if standard_fields.InputDataFields.object_flow_points in mesh_inputs:
        motions = mesh_inputs[
            standard_fields.InputDataFields.object_flow_points]
    else:
        motions = None

    (mesh_inputs[standard_fields.InputDataFields.point_positions],
     rotated_normals, rotated_motions) = rotate_randomly(
         points=mesh_inputs[standard_fields.InputDataFields.point_positions],
         normals=normals,
         motions=motions,
         x_min_degree_rotation=x_min_degree_rotation,
         x_max_degree_rotation=x_max_degree_rotation,
         y_min_degree_rotation=y_min_degree_rotation,
         y_max_degree_rotation=y_max_degree_rotation,
         z_min_degree_rotation=z_min_degree_rotation,
         z_max_degree_rotation=z_max_degree_rotation)

    # Random flipping in x and y directions.
    (mesh_inputs[standard_fields.InputDataFields.point_positions],
     flipped_normals,
     flipped_motions) = flip_randomly_points_and_normals_motions(
         points=mesh_inputs[standard_fields.InputDataFields.point_positions],
         normals=rotated_normals,
         motions=rotated_motions,
         is_training=is_training)
    if standard_fields.InputDataFields.point_normals in mesh_inputs:
        mesh_inputs[
            standard_fields.InputDataFields.point_normals] = flipped_normals
    if standard_fields.InputDataFields.object_flow_points in mesh_inputs:
        mesh_inputs[standard_fields.InputDataFields.
                    object_flow_points] = flipped_motions
    # Normalize RGB to [-1.0, 1.0].
    if standard_fields.InputDataFields.point_colors in mesh_inputs:
        mesh_inputs[standard_fields.InputDataFields.point_colors] = tf.cast(
            mesh_inputs[standard_fields.InputDataFields.point_colors],
            dtype=tf.float32)
        mesh_inputs[standard_fields.InputDataFields.point_colors] *= (2.0 /
                                                                      255.0)
        mesh_inputs[standard_fields.InputDataFields.point_colors] -= 1.0

    # Add original points to mesh inputs.
    mesh_inputs[standard_fields.InputDataFields.
                point_positions_original] = original_points
    if standard_fields.InputDataFields.point_colors in mesh_inputs:
        mesh_inputs[standard_fields.InputDataFields.
                    point_colors_original] = original_colors
    if standard_fields.InputDataFields.point_normals in mesh_inputs:
        mesh_inputs[standard_fields.InputDataFields.
                    point_normals_original] = original_normals

    # Pad or clip the point tensors.
    pad_or_clip(mesh_inputs=mesh_inputs,
                view_indices_2d_inputs=view_indices_2d_inputs,
                pad_or_clip_size=points_pad_or_clip_size)
    if num_frame_to_load > 1:
        # Note: num_valid_points is currently the sum of num_points_per_frame,
        # where num_points_per_frame is each frame's number of valid points.
        # TODO(huangrui): if random sampling is applied earlier, the counts here
        # are not guaranteed to be in frame order and would need sorting.
        if num_points_to_randomly_sample is not None:
            raise ValueError(
                'Random sampling is not yet compatible with padding '
                'multi-frame point clouds.')
        _, _, mesh_inputs[standard_fields.InputDataFields.
                          num_valid_points_per_frame] = tf.unique_with_counts(
                              tf.reshape(
                                  mesh_inputs[standard_fields.InputDataFields.
                                              point_frame_index], [-1]))
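        # Illustration (not part of the original code): for a flattened frame
        # index tensor such as [0, 0, 0, 1, 1], tf.unique_with_counts returns
        # (y=[0, 1], idx=[0, 0, 0, 1, 1], count=[3, 2]), so `count` holds the
        # number of valid points in each frame.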
        if points_pad_or_clip_size is not None:
            padded_points = tf.where_v2(
                tf.greater(
                    points_pad_or_clip_size, mesh_inputs[
                        standard_fields.InputDataFields.num_valid_points]),
                points_pad_or_clip_size -
                mesh_inputs[standard_fields.InputDataFields.num_valid_points],
                0)

            # Correct the per-frame counts for the optional zero padding of the
            # point frame index: padded points all receive frame index 0, so
            # their count is subtracted from the first entry only.
            mesh_inputs[
                standard_fields.InputDataFields.
                num_valid_points_per_frame] -= tf.pad(
                    tf.expand_dims(padded_points, 0), [[
                        0,
                        tf.shape(mesh_inputs[standard_fields.InputDataFields.
                                             num_valid_points_per_frame])[0] -
                        1
                    ]])

    # Put the dictionaries back together.
    processed_inputs = mesh_inputs.copy()
    processed_inputs.update(non_tensor_inputs)
    for key in sorted(view_image_inputs):
        processed_inputs[('%s/features' % key)] = view_image_inputs[key]
    for key in sorted(view_indices_2d_inputs):
        processed_inputs[('%s/indices_2d' % key)] = view_indices_2d_inputs[key]

    # Create features that do not exist
    if 'point_offsets' in point_feature_keys:
        preprocessor_utils.add_point_offsets(
            inputs=processed_inputs, voxel_grid_cell_size=voxel_grid_cell_size)
    if 'point_offset_bins' in point_feature_keys:
        preprocessor_utils.add_point_offset_bins(
            inputs=processed_inputs,
            voxel_grid_cell_size=voxel_grid_cell_size,
            num_bins_x=num_offset_bins_x,
            num_bins_y=num_offset_bins_y,
            num_bins_z=num_offset_bins_z)

    # Voxelize point features
    preprocessor_utils.voxelize_point_features(
        inputs=processed_inputs,
        voxels_pad_or_clip_size=voxels_pad_or_clip_size,
        voxel_grid_cell_size=voxel_grid_cell_size,
        point_feature_keys=point_feature_keys,
        point_to_voxel_segment_func=point_to_voxel_segment_func,
        num_frame_to_load=num_frame_to_load)

    # Voxelize point / image correspondence indices
    preprocessor_utils.voxelize_point_to_view_correspondences(
        inputs=processed_inputs,
        view_indices_2d_inputs=view_indices_2d_inputs,
        voxels_pad_or_clip_size=voxels_pad_or_clip_size,
        voxel_grid_cell_size=voxel_grid_cell_size)

    # Voxelizing the semantic labels
    preprocessor_utils.voxelize_semantic_labels(
        inputs=processed_inputs,
        voxels_pad_or_clip_size=voxels_pad_or_clip_size,
        voxel_grid_cell_size=voxel_grid_cell_size)

    # Voxelizing the loss weights
    preprocessor_utils.voxelize_property_tensor(
        inputs=processed_inputs,
        point_tensor_key=standard_fields.InputDataFields.point_loss_weights,
        corresponding_voxel_tensor_key=standard_fields.InputDataFields.
        voxel_loss_weights,
        voxels_pad_or_clip_size=voxels_pad_or_clip_size,
        voxel_grid_cell_size=voxel_grid_cell_size,
        segment_func=tf.math.unsorted_segment_max)

    # Voxelizing the object flow
    if standard_fields.InputDataFields.object_flow_points in processed_inputs:
        preprocessor_utils.voxelize_property_tensor(
            inputs=processed_inputs,
            point_tensor_key=standard_fields.InputDataFields.
            object_flow_points,
            corresponding_voxel_tensor_key='object_flow_voxels_max',
            voxels_pad_or_clip_size=voxels_pad_or_clip_size,
            voxel_grid_cell_size=voxel_grid_cell_size,
            segment_func=tf.math.unsorted_segment_max)
        preprocessor_utils.voxelize_property_tensor(
            inputs=processed_inputs,
            point_tensor_key=standard_fields.InputDataFields.
            object_flow_points,
            corresponding_voxel_tensor_key='object_flow_voxels_min',
            voxels_pad_or_clip_size=voxels_pad_or_clip_size,
            voxel_grid_cell_size=voxel_grid_cell_size,
            segment_func=tf.math.unsorted_segment_min)
        processed_inputs[standard_fields.InputDataFields.
                         object_flow_voxels] = processed_inputs[
                             'object_flow_voxels_max'] + processed_inputs[
                                 'object_flow_voxels_min']

    if num_frame_to_load > 1:
        mesh_inputs[
            standard_fields.InputDataFields.num_valid_points] = mesh_inputs[
                standard_fields.InputDataFields.num_valid_points_per_frame][0]

    # Filter processed_inputs by output_keys if it is not None.
    if output_keys is not None:
        processed_inputs = {
            k: v
            for k, v in six.iteritems(processed_inputs) if k in output_keys
        }
    return processed_inputs
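A rough sketch of how a preprocessing function like the one above is typically wired into a tf.data pipeline (my illustration only; the function name `preprocess`, its exact signature, and the argument values are assumptions, not taken from the original example):

import functools
import tensorflow as tf

# Bind the preprocessing options once. The keyword names mirror variables used
# in the body above, but the exact signature is assumed here.
preprocess_fn = functools.partial(
    preprocess,
    is_training=True,
    points_pad_or_clip_size=131072,
    voxels_pad_or_clip_size=65536,
    voxel_grid_cell_size=(0.2, 0.2, 0.2))

# Hypothetical usage: map over a dataset whose elements are dictionaries of
# decoded tensors (assumes the bound function accepts such a dictionary).
# dataset = raw_dataset.map(preprocess_fn, num_parallel_calls=tf.data.AUTOTUNE)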
Example #16
def l2_norm(x, axis=2):
    squared = tf.reduce_sum(tf.square(x), axis=axis, keepdims=True)
    norm = tf.sqrt(tf.maximum(squared, 1e-6))
    return norm
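A minimal usage sketch for the example above (my addition, assuming eager execution):

x = tf.constant([[[3.0, 4.0, 0.0]]])  # shape [1, 1, 3]
l2_norm(x)                            # [[[5.0]]], since sqrt(3**2 + 4**2) = 5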
Example #17
def smape(y, y_hat, eps=0.1):
    summ = tf.maximum(tf.abs(y) + tf.abs(y_hat) + eps, 0.5 + eps)
    smape = tf.div(2. * tf.abs(y_hat - y), summ, name="smape")
    return smape
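A quick worked check for the example above (my own numbers, assuming a TF1-style environment, since tf.div is the TF1-era alias of tf.math.divide):

y = tf.constant([100.0])
y_hat = tf.constant([110.0])
smape(y, y_hat)  # 2 * 10 / max(210.1, 0.6) ≈ [0.0952]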