Example #1
def make_block_pos_features(block_ids):
  """Creates feature with block relative positions in the original document."""
  block_ids_expanded_0 = tf.expand_dims(block_ids, 0)
  x = tf.cast(
      tf.logical_and(
          tf.equal(tf.expand_dims(block_ids, 1), block_ids_expanded_0),
          tf.not_equal(block_ids_expanded_0, 0)), tf.int32)

  # pylint: disable=line-too-long
  # `tf.linalg.band_part(x, -1, 0)` keeps only the lower triangular part of the matrix; entries above the diagonal are set to 0.
  # See https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/linalg/band_part
  # for more details.
  # pylint: enable=line-too-long
  return tf.reduce_sum(tf.linalg.band_part(x, -1, 0), 1)
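A minimal usage sketch (the block ids are illustrative and not from the original source; assumes a TF1-style graph/session environment with the function above in scope):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # needed when running under TF 2.x

block_ids = tf.constant([7, 7, 7, 3, 0], dtype=tf.int32)
positions = make_block_pos_features(block_ids)
with tf.Session() as sess:
  # Each block gets its 1-based position among blocks sharing its document id;
  # padding blocks (id 0) map to 0.
  print(sess.run(positions))  # expected: [1 2 3 1 0]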
    def _match_when_rows_are_non_empty():
      """Performs matching when the rows of similarity matrix are non empty.

      Returns:
        matches:  int32 tensor indicating the row each column matches to.
      """
      # Matches for each column
      matches = tf.argmax(similarity_matrix, 0, output_type=tf.int32)

      # Deal with matched and unmatched threshold
      if self._matched_threshold is not None:
        # Get logical indices of ignored and unmatched columns as tf.int64
        matched_vals = tf.reduce_max(similarity_matrix, 0)
        below_unmatched_threshold = tf.greater(self._unmatched_threshold,
                                               matched_vals)
        between_thresholds = tf.logical_and(
            tf.greater_equal(matched_vals, self._unmatched_threshold),
            tf.greater(self._matched_threshold, matched_vals))

        if self._negatives_lower_than_unmatched:
          matches = self._set_values_using_indicator(matches,
                                                     below_unmatched_threshold,
                                                     -1)
          matches = self._set_values_using_indicator(matches,
                                                     between_thresholds,
                                                     -2)
        else:
          matches = self._set_values_using_indicator(matches,
                                                     below_unmatched_threshold,
                                                     -2)
          matches = self._set_values_using_indicator(matches,
                                                     between_thresholds,
                                                     -1)

      if self._force_match_for_each_row:
        similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
            similarity_matrix)
        force_match_column_ids = tf.argmax(similarity_matrix, 1,
                                           output_type=tf.int32)
        force_match_column_indicators = tf.one_hot(
            force_match_column_ids, depth=similarity_matrix_shape[1])
        force_match_row_ids = tf.argmax(force_match_column_indicators, 0,
                                        output_type=tf.int32)
        force_match_column_mask = tf.cast(
            tf.reduce_max(force_match_column_indicators, 0), tf.bool)
        final_matches = tf.where(force_match_column_mask,
                                 force_match_row_ids, matches)
        return final_matches
      else:
        return matches
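The helper self._set_values_using_indicator is not shown in this snippet; a common way to implement such a helper (a sketch under that assumption, not code taken from this repository) is to blend the sentinel value in wherever the boolean indicator is set:

import tensorflow.compat.v1 as tf

def set_values_using_indicator(x, indicator, val):
  """Returns x with entries where indicator is True replaced by val."""
  indicator = tf.cast(indicator, x.dtype)
  return x * (1 - indicator) + val * indicator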
Example #3
    def _is_not_finished(i, unused_alive_seq, alive_log_probs,
                         unused_finished_seq, finished_scores,
                         unused_finished_in_finished, unused_states):
        """Checking termination condition.

    We terminate when we have decoded up to decode_length or when the lowest
    scoring item in finished has a greater score than the highest-probability
    item in alive divided by the max length penalty.

    Args:
      i: loop index
      alive_log_probs: probabilities of the beams. [batch_size, beam_size]
      finished_scores: scores for each of these sequences.
        [batch_size, beam_size]

    Returns:
      Bool.
    """
        max_length_penalty = tf.pow(((5. + to_float(decode_length)) / 6.),
                                    alpha)
        # The best possible score of the most likely alive sequence.
        lower_bound_alive_scores = alive_log_probs[:, 0] / max_length_penalty

        if not stop_early:
            # by considering the min score (in the top N beams) we ensure that
            # the decoder will keep decoding until there is at least one beam
            # (in the top N) that can be improved (w.r.t. the alive beams).
            # any unfinished beam will have score -INF - thus the min
            # will always be -INF if there is at least one unfinished beam -
            # which means the bound_is_met condition cannot be true in this case.
            lowest_score_of_finished_in_finished = tf.reduce_min(
                finished_scores)
        else:
            # by taking the max score we only care about the first beam;
            # as soon as this first beam cannot be beaten from the alive beams
            # the beam decoder can stop.
            # similarly to the above, if the top beam is not completed, its
            # finished_score is -INF, thus it will not activate the
            # bound_is_met condition. (i.e., decoder will keep going on).
            # note we need to find the max for every sequence separately - so, we
            # need to keep the batch dimension (see axis=1)
            lowest_score_of_finished_in_finished = tf.reduce_max(
                finished_scores, axis=1)

        bound_is_met = tf.reduce_all(
            tf.greater(lowest_score_of_finished_in_finished,
                       lower_bound_alive_scores))

        return tf.logical_and(tf.less(i, decode_length),
                              tf.logical_not(bound_is_met))
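For intuition, max_length_penalty above is just ((5 + decode_length) / 6) ** alpha. A small sketch with illustrative values (decode_length = 50, alpha = 0.6; tf.to_float stands in for the snippet's local to_float alias):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # needed when running under TF 2.x

decode_length, alpha = 50, 0.6
max_length_penalty = tf.pow((5. + tf.to_float(decode_length)) / 6., alpha)
with tf.Session() as sess:
  print(sess.run(max_length_penalty))  # approximately 3.78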
Example #4
 def get_candidate_labels(self, candidate_starts, candidate_ends,
                          labeled_starts, labeled_ends, labels):
     # [num_labeled, num_candidates]
     same_start = tf.equal(tf.expand_dims(labeled_starts, 1),
                           tf.expand_dims(candidate_starts, 0))
     # [num_labeled, num_candidates]
     same_end = tf.equal(tf.expand_dims(labeled_ends, 1),
                         tf.expand_dims(candidate_ends, 0))
     # [num_labeled, num_candidates]
     same_span = tf.logical_and(same_start, same_end)
     candidate_labels = tf.matmul(tf.expand_dims(
         labels, 0), tf.to_int32(same_span))  # [1, num_candidates]
     candidate_labels = tf.squeeze(candidate_labels, 0)  # [num_candidates]
     return candidate_labels
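A standalone sketch of the same span-matching trick with toy values (all values here are illustrative, not from the original source; assumes a TF1 session):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # needed when running under TF 2.x

candidate_starts = tf.constant([0, 1, 2])
candidate_ends = tf.constant([1, 2, 3])
labeled_starts = tf.constant([1])
labeled_ends = tf.constant([2])
labels = tf.constant([7])

same_start = tf.equal(tf.expand_dims(labeled_starts, 1),
                      tf.expand_dims(candidate_starts, 0))
same_end = tf.equal(tf.expand_dims(labeled_ends, 1),
                    tf.expand_dims(candidate_ends, 0))
same_span = tf.logical_and(same_start, same_end)
candidate_labels = tf.squeeze(
    tf.matmul(tf.expand_dims(labels, 0), tf.to_int32(same_span)), 0)
with tf.Session() as sess:
  # Only the candidate span (1, 2) matches the labeled span, so it gets label 7.
  print(sess.run(candidate_labels))  # expected: [0 7 0]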
def pad_to_bounding_box(image, offset_height, offset_width, target_height,
                        target_width, pad_value):
    """Pads the given image with the given pad_value.

  Works like tf.image.pad_to_bounding_box, except it can pad the image
  with any given arbitrary pad value and also handle images whose sizes are not
  known during graph construction.
  Args:
    image: 3-D tensor with shape [height, width, channels]
    offset_height: Number of rows of padding to add on top.
    offset_width: Number of columns of padding to add on the left.
    target_height: Height of output image.
    target_width: Width of output image.
    pad_value: Value to pad the image tensor with.

  Returns:
    3-D tensor of shape [target_height, target_width, channels].
  Raises:
    ValueError: If the shape of image is incompatible with the offset_* or
    target_* arguments.
  """
    image_rank = tf.rank(image)
    image_rank_assert = tf.Assert(
        tf.equal(image_rank, 3),
        ['Wrong image tensor rank [Expected] [Actual]', 3, image_rank])
    with tf.control_dependencies([image_rank_assert]):
        image -= pad_value
    image_shape = tf.shape(image)
    height, width = image_shape[0], image_shape[1]
    target_width_assert = tf.Assert(tf.greater_equal(target_width, width),
                                    ['target_width must be >= width'])
    target_height_assert = tf.Assert(tf.greater_equal(target_height, height),
                                     ['target_height must be >= height'])
    with tf.control_dependencies([target_width_assert]):
        after_padding_width = target_width - offset_width - width
    with tf.control_dependencies([target_height_assert]):
        after_padding_height = target_height - offset_height - height
    offset_assert = tf.Assert(
        tf.logical_and(tf.greater_equal(after_padding_width, 0),
                       tf.greater_equal(after_padding_height, 0)),
        ['target size not possible with the given target offsets'])

    height_params = tf.stack([offset_height, after_padding_height])
    width_params = tf.stack([offset_width, after_padding_width])
    channel_params = tf.stack([0, 0])
    with tf.control_dependencies([offset_assert]):
        paddings = tf.stack([height_params, width_params, channel_params])
    padded = tf.pad(image, paddings)
    return padded + pad_value
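A minimal usage sketch (illustrative sizes; assumes a TF1 graph/session environment with pad_to_bounding_box defined as above):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # needed when running under TF 2.x

image = tf.zeros([2, 2, 1], dtype=tf.float32)  # 2x2 single-channel image
padded = pad_to_bounding_box(image, offset_height=1, offset_width=1,
                             target_height=4, target_width=4, pad_value=255.0)
with tf.Session() as sess:
  # The original pixels stay 0; the surrounding border is filled with 255.
  print(sess.run(padded)[:, :, 0])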
  def _continue_search(self, state):
    """Return whether to continue the search loop.

    The loop should terminate
      1) when decode length has been reached, or
      2) when the worst score in the finished sequences is better than the best
         score in the alive sequences (i.e. the finished sequences are provably
         unchanging)

    Args:
      state: A dictionary with the current loop state.

    Returns:
      Bool tensor with value True if loop should continue, False if loop should
      terminate.
    """
    i = state[_StateKeys.CUR_INDEX]
    alive_log_probs = state[_StateKeys.ALIVE_LOG_PROBS]
    finished_scores = state[_StateKeys.FINISHED_SCORES]
    finished_flags = state[_StateKeys.FINISHED_FLAGS]

    not_at_max_decode_length = tf.less(i, self.max_decode_length)

    # Calculate largest length penalty (the larger penalty, the better score).
    max_length_norm = _length_normalization(self.alpha, self.max_decode_length,
                                            dtype=self.dtype)
    # Get the best possible scores from alive sequences.
    best_alive_scores = alive_log_probs[:, 0] / max_length_norm

    # Compute worst score in finished sequences for each batch element
    finished_scores *= tf.cast(finished_flags,
                               self.dtype)  # set filler scores to zero
    lowest_finished_scores = tf.reduce_min(finished_scores, axis=1)

    # If there are no finished sequences in a batch element, then set the lowest
    # finished score to -INF for that element.
    finished_batches = tf.reduce_any(finished_flags, 1)
    lowest_finished_scores += ((1.0 -
                                tf.cast(finished_batches, self.dtype)) *
                               -inf(self.dtype))

    worst_finished_score_better_than_best_alive_score = tf.reduce_all(
        tf.greater(lowest_finished_scores, best_alive_scores)
    )

    return tf.logical_and(
        not_at_max_decode_length,
        tf.logical_not(worst_finished_score_better_than_best_alive_score)
    )
Example #7
    def make_counter():
        with tf.variable_scope("counter", reuse=tf.AUTO_REUSE, use_resource=True):
            itr_counter = tf.get_variable("iterations", [], tf.int32, trainable=False)
            increment_counter = tf.assign_add(itr_counter, 1)
            mod_itrs = tf.math.floormod(increment_counter, iterations_per_step)
            last_itr = tf.equal(mod_itrs, 0, name="last_update_itr")

            # Add accumulation counter if pipelined
            if opts.pipeline:
                grad_counter = internal_ops.get_current_iteration_counter()
                last_grad_itr = tf.equal(grad_counter, opts.gradient_accumulation_count-1, name="last_grad_itr")

                last_itr = tf.logical_and(last_itr, last_grad_itr, name="last_itr")

        return last_itr
Example #8
    def filter_by_num_objects(self, d):
        if "visibility" not in d:
            return tf.constant(True)
        min_num_objects = self.max_num_objects or 0
        max_num_objects = self.max_num_objects or 6

        min_predicate = tf.greater_equal(
            tf.reduce_sum(d["visibility"]),
            tf.constant(min_num_objects - 1e-5, dtype=tf.float32),
        )
        max_predicate = tf.less_equal(
            tf.reduce_sum(d["visibility"]),
            tf.constant(max_num_objects + 1e-5, dtype=tf.float32),
        )
        return tf.logical_and(min_predicate, max_predicate)
Example #9
def verb_refs_to_lengths(task, verb_refs, include_eos=True):
    """Computes the length of a sequence."""
    eos_positions = tf.to_int32(
        tf.expand_dims(tf.where(tf.equal(task, 1))[:, 1], 1))
    seq_mask = tf.logical_not(
        tf.cast(
            tf.cumsum(tf.to_int32(
                tf.logical_and(tf.equal(verb_refs[:, :, 0], eos_positions),
                               tf.equal(verb_refs[:, :, 1],
                                        eos_positions + 1))),
                      axis=-1), tf.bool))
    lengths = tf.reduce_sum(tf.to_float(seq_mask), axis=-1)
    if include_eos:
        lengths = lengths + 1
    return lengths
Example #10
def _clamp_and_filter_result(pixel_x, pixel_y, z):
  """Clamps and masks out out-of-bounds pixel coordinates.

  Args:
    pixel_x: a tf.Tensor containing x pixel coordinates in an image.
    pixel_y: a tf.Tensor containing y pixel coordinates in an image.
    z: a tf.Tensor containing the depth at each (pixel_y, pixel_x). All shapes
      are [B, H, W].

  Returns:
    pixel_x, pixel_y, mask, where pixel_x and pixel_y are the original ones,
    except:
    - Values that fall out of the image bounds, which are [0, W-1) in x and
      [0, H-1) in y, are clamped to the bounds
    - NaN values in pixel_x, pixel_y are replaced by zeros
    mask is False at all points where:
    - Clamping in pixel_x or pixel_y was performed
    - NaNs were replaced by zeros
    - z is non-positive,
    and True everywhere else, that is, where pixel_x, pixel_y are finite and
    fall within the frame.
  """
  with tf.name_scope('Clamp', values=[pixel_x, pixel_y, z]):
    _, height, width = tf.unstack(tf.shape(pixel_x))

    def _tensor(x):
      return tf.to_float(tf.convert_to_tensor(x))

    x_not_underflow = pixel_x >= 0.0
    y_not_underflow = pixel_y >= 0.0
    x_not_overflow = pixel_x < _tensor(width - 1)
    y_not_overflow = pixel_y < _tensor(height - 1)
    z_positive = z > 0.0
    x_not_nan = tf.math.logical_not(tf.is_nan(pixel_x))
    y_not_nan = tf.math.logical_not(tf.is_nan(pixel_y))
    not_nan = tf.logical_and(x_not_nan, y_not_nan)
    not_nan_mask = tf.to_float(not_nan)
    pixel_x *= not_nan_mask
    pixel_y *= not_nan_mask
    pixel_x = tf.clip_by_value(pixel_x, 0.0, _tensor(width - 1))
    pixel_y = tf.clip_by_value(pixel_y, 0.0, _tensor(height - 1))
    mask_stack = tf.stack([
        x_not_underflow, y_not_underflow, x_not_overflow, y_not_overflow,
        z_positive, not_nan
    ],
                          axis=0)
    mask = tf.reduce_all(mask_stack, axis=0)
    return pixel_x, pixel_y, mask
Example #11
def prune_small_boxes(boxlist, min_side, scope=None):
    """Prunes small boxes in the boxlist which have a side smaller than min_side.

  Args:
    boxlist: BoxList holding N boxes.
    min_side: Minimum width AND height of box to survive pruning.
    scope: name scope.

  Returns:
    A pruned boxlist.
  """
    with tf.name_scope(scope, 'PruneSmallBoxes'):
        height, width = height_width(boxlist)
        is_valid = tf.logical_and(tf.greater_equal(width, min_side),
                                  tf.greater_equal(height, min_side))
        return gather(boxlist, tf.reshape(tf.where(is_valid), [-1]))
def assert_box_normalized(boxes, maximum_normalized_coordinate=1.1):
    """Asserts the input box tensor is normalized.
  Args:
    boxes: a tensor of shape [N, 4] where N is the number of boxes.
    maximum_normalized_coordinate: Maximum coordinate value to be considered
      as normalized, default to 1.1.
  Returns:
    a tf.Assert op which fails when the input box tensor is not normalized.
  Raises:
    ValueError: When the input box tensor is not normalized.
  """
    box_minimum = tf.reduce_min(boxes)
    box_maximum = tf.reduce_max(boxes)
    return tf.Assert(
        tf.logical_and(
            tf.less_equal(box_maximum, maximum_normalized_coordinate),
            tf.greater_equal(box_minimum, 0)), [boxes])
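A minimal sketch of wiring the assert into the graph (box values are illustrative):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # needed when running under TF 2.x

boxes = tf.constant([[0.1, 0.2, 0.5, 0.9]])
with tf.control_dependencies([assert_box_normalized(boxes)]):
  boxes = tf.identity(boxes)
with tf.Session() as sess:
  sess.run(boxes)  # passes; coordinates outside [0, 1.1] would trigger the Assert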
def randomly_replace_msa_with_unknown(protein, replace_proportion):
    """Replace a proportion of the MSA with 'X'."""
    msa_mask = (tf.random.uniform(shape_helpers.shape_list(protein['msa'])) <
                replace_proportion)
    x_idx = 20
    gap_idx = 21
    msa_mask = tf.logical_and(msa_mask, protein['msa'] != gap_idx)
    protein['msa'] = tf.where(msa_mask,
                              tf.ones_like(protein['msa']) * x_idx,
                              protein['msa'])
    aatype_mask = (tf.random.uniform(
        shape_helpers.shape_list(protein['aatype'])) < replace_proportion)

    protein['aatype'] = tf.where(aatype_mask,
                                 tf.ones_like(protein['aatype']) * x_idx,
                                 protein['aatype'])
    return protein
Example #14
def mask_attention(attention, seq_len1, seq_len2):
    """Masks an attention matrix.

  Args:
    attention: <tf.float32>[batch, seq_len1, seq_len2]
    seq_len1: <tf.int32>[batch]
    seq_len2: <tf.int32>[batch]

  Returns:
    the masked scores <tf.float32>[batch, seq_len1, seq_len2]
  """
    dim1 = tensor_utils.shape(attention, 1)
    dim2 = tensor_utils.shape(attention, 2)
    m1 = tf.sequence_mask(seq_len1, dim1)
    m2 = tf.sequence_mask(seq_len2, dim2)
    joint_mask = tf.logical_and(tf.expand_dims(m1, 2), tf.expand_dims(m2, 1))
    return ops.mask_logits(attention, joint_mask)
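A standalone sketch of how the joint mask is built from the two sequence-length masks (toy lengths; the ops.mask_logits call is omitted here):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # needed when running under TF 2.x

m1 = tf.sequence_mask(tf.constant([2, 3]), 3)  # [batch, dim1]
m2 = tf.sequence_mask(tf.constant([3, 1]), 3)  # [batch, dim2]
joint_mask = tf.logical_and(tf.expand_dims(m1, 2), tf.expand_dims(m2, 1))
with tf.Session() as sess:
  print(sess.run(joint_mask[0]).astype(int))
  # expected for the first example (seq_len1=2, seq_len2=3):
  # [[1 1 1]
  #  [1 1 1]
  #  [0 0 0]]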
Example #15
    def add_loss(self, separated_waveforms):
        """Add loss for given separated_waveforms."""
        # Permute separated to match references through self.loss_fns.
        _, separated_waveforms = groupwise.apply(self.loss_fns,
                                                 self.signal_types,
                                                 self.source_waveforms,
                                                 separated_waveforms,
                                                 self.unique_signal_types)
        separated_waveforms_nonzero = tf.boolean_mask(
            separated_waveforms, self.source_is_nonzero)[:, tf.newaxis]
        separated_waveforms_zero = tf.boolean_mask(
            separated_waveforms, self.source_is_zero)[:, tf.newaxis]
        # Use eventual loss function as log_mse_loss.
        # Loss for zero references only if self.loss_zero_ref_weight is nonzero.
        if self.loss_zero_ref_weight:

            def loss_zero_fn():
                return tf.reduce_sum(
                    self.loss_weights_zero *
                    log_mse_loss(self.source_waveforms_zero,
                                 separated_waveforms_zero,
                                 max_snr=self.max_snr_for_zero_sources,
                                 bias_ref_signal=self.mixture_waveforms_zero))

            calc_losses_zero_ref = tf.logical_and(
                self.count_zero > 0, self.count_zero < self.total_count)
            loss_zero = tf.cond(calc_losses_zero_ref, loss_zero_fn,
                                lambda: 0.0)
            loss_weight = self.loss_zero_ref_weight / self.total_count
            loss_zero = tf.identity(loss_weight * loss_zero,
                                    name='loss_ref_zero')
            tf.losses.add_loss(loss_zero)

        # Loss for nonzero references.
        def loss_nonzero_fn():
            return tf.reduce_sum(self.loss_weights_nonzero *
                                 log_mse_loss(self.source_waveforms_nonzero,
                                              separated_waveforms_nonzero,
                                              max_snr=self.max_snr))

        loss_nonzero = tf.cond(self.count_nonzero > 0.0, loss_nonzero_fn,
                               lambda: 0.0)
        loss_nonzero = tf.identity(loss_nonzero / self.total_count,
                                   name='loss_ref_nonzero')
        tf.losses.add_loss(loss_nonzero)
        return separated_waveforms
Example #16
def _weights_for_num_sources(source_waveforms,
                             num_sources,
                             consider_as_zero=None):
    """Return shape (batch, source) weights for examples with num_sources."""
    source_norms = tf.sqrt(tf.reduce_mean(tf.square(source_waveforms),
                                          axis=-1))
    max_sources = signal_util.static_or_dynamic_dim_size(source_waveforms, 1)
    consider_nonzero = tf.greater(source_norms, 1e-8)
    if consider_as_zero is not None:
        consider_nonzero = tf.logical_and(consider_nonzero,
                                          tf.logical_not(consider_as_zero))
    num_sources_per_example = tf.reduce_sum(tf.cast(consider_nonzero,
                                                    tf.float32),
                                            axis=1,
                                            keepdims=True)
    has_num_sources = tf.equal(num_sources_per_example, num_sources)
    return tf.tile(has_num_sources, (1, max_sources))
Example #17
def _calculate_aggregate_mask(answer, output_layer_aggregation,
                              output_bias_agg, output_weights_agg,
                              cell_select_pref, label_ids):
    """Finds examples where the model should select cells with no aggregation.

  Returns a mask that determines for which examples should the model select
  answers directly from the table, without any aggregation function. If the
  answer is a piece of text the case is unambiguous as aggregation functions
  only apply to numbers. If the answer is a number but does not appear in the
  table then we must use some aggregation case. The ambiguous case is when the
  answer is a number that also appears in the table. In this case we use the
  aggregation function probabilities predicted by the model to decide whether
  to select or aggregate. The threshold for this is a hyperparameter
  `cell_select_pref`.

  Args:
    answer: <float32>[batch_size]
    output_layer_aggregation: <float32>[batch_size, hidden_size]
    output_bias_agg: <float32>[num_aggregation_labels]
    output_weights_agg: <float32>[num_aggregation_labels, hidden_size_agg]
    cell_select_pref: Preference for cell selection in ambiguous cases.
    label_ids: <int32>[batch_size, seq_length]

  Returns:
    aggregate_mask: <float32>[batch_size] A mask set to 1 for examples that
      should use aggregation functions.
  """
    # <float32>[batch_size]
    aggregate_mask_init = tf.cast(tf.logical_not(tf.is_nan(answer)),
                                  tf.float32)
    logits_aggregation = _calculate_aggregation_logits(
        output_layer_aggregation, output_weights_agg, output_bias_agg)
    dist_aggregation = tfp.distributions.Categorical(logits=logits_aggregation)
    aggregation_ops_total_mass = tf.reduce_sum(
        _get_probs(dist_aggregation)[:, 1:], axis=1)
    # Cell selection examples according to current model.
    is_pred_cell_selection = aggregation_ops_total_mass <= cell_select_pref
    # Examples with non-empty cell selection supervision.
    is_cell_supervision_available = tf.reduce_sum(label_ids, axis=1) > 0
    aggregate_mask = tf.where(
        tf.logical_and(is_pred_cell_selection, is_cell_supervision_available),
        tf.zeros_like(aggregate_mask_init, dtype=tf.float32),
        aggregate_mask_init)
    aggregate_mask = tf.stop_gradient(aggregate_mask)
    return aggregate_mask
def _crop(image, offset_height, offset_width, crop_height, crop_width):
    """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.
  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    The cropped (and resized) image.
  Raises:
    ValueError: if `image` doesn't have rank of 3.
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
    original_shape = tf.shape(image)

    if len(image.get_shape().as_list()) != 3:
        raise ValueError('input must have rank of 3')
    original_channels = image.get_shape().as_list()[2]

    rank_assertion = tf.Assert(tf.equal(tf.rank(image), 3),
                               ['Rank of image must be equal to 3.'])
    with tf.control_dependencies([rank_assertion]):
        cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

    size_assertion = tf.Assert(
        tf.logical_and(tf.greater_equal(original_shape[0], crop_height),
                       tf.greater_equal(original_shape[1], crop_width)),
        ['Crop size greater than the image size.'])

    offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

    # Use tf.slice instead of crop_to_bounding_box as it accepts tensors to
    # define the crop size.
    with tf.control_dependencies([size_assertion]):
        image = tf.slice(image, offsets, cropped_shape)
    image = tf.reshape(image, cropped_shape)
    image.set_shape([crop_height, crop_width, original_channels])
    return image
def metrics(expected_box_encodings, expected_scores, actual_box_encodings,
            actual_scores):
    """Calculate metrics from expected and actual blazeface outputs.

  Args:
    expected_box_encodings: box encodings from model
    expected_scores: classifications from model
    actual_box_encodings: golden box encodings
    actual_scores: golden classifications

  Returns:
    two-item list with classification error and localization error
  """
    squashed_expected_scores = tf.math.divide(
        1.0, 1.0 + tf.math.exp(-expected_scores))
    squashed_actual_scores = tf.math.divide(1.0,
                                            1.0 + tf.math.exp(-actual_scores))
    kld_metric = kl_divergence.symmetric_kl_divergence(expected_scores,
                                                       actual_scores)
    # ML Kit uses 0.5 as the threshold. We use
    # 0.1 to use more possible boxes based on experimentation with the model.
    high_scoring_indices = tf.math.logical_or(
        tf.math.greater(squashed_expected_scores, 0.1),
        tf.math.greater(squashed_actual_scores, 0.1))

    high_scoring_actual_boxes = tf.where(condition=tf.broadcast_to(
        input=high_scoring_indices, shape=tf.shape(actual_box_encodings)),
                                         x=actual_box_encodings,
                                         y=expected_box_encodings)
    box_diff = high_scoring_actual_boxes - expected_box_encodings
    box_squared_diff = tf.math.pow(box_diff, 2)
    # MSE is calculated over the high-scoring boxes.
    box_mse = tf.divide(
        tf.math.reduce_sum(box_squared_diff),
        tf.math.maximum(
            tf.math.count_nonzero(high_scoring_indices, dtype=tf.float32),
            1.0))
    # Thresholds were determined experimentally by running validation on a variety
    # of devices. Known good devices give KLD ~10e-7 and MSE ~10e-12. A buggy
    # NNAPI implementation gives KLD > 200 and MSE > 100.
    ok = tf.logical_and(kld_metric < 0.1, box_mse < 0.01)

    return [kld_metric, box_mse, ok]
Example #20
  def apply_increasing_monotonic_fn(self, wrapper, fn, *args, **parameters):
    if fn.__name__ != 'relu':
      # Fallback to regular interval bound propagation for unsupported
      # operations.
      logging.warn('"%s" is not supported by RelativeSymbolicBounds. '
                   'Fallback on RelativeIntervalBounds.', fn.__name__)
      interval_bounds = relative_bounds.RelativeIntervalBounds.convert(self)
      converted_args = [relative_bounds.RelativeIntervalBounds.convert(b)
                        for b in args]
      interval_bounds = interval_bounds._increasing_monotonic_fn(  # pylint: disable=protected-access
          fn, *converted_args)
      return self.convert(interval_bounds)

    concrete = self.concretize()
    lb, ub = concrete.lower_offset, concrete.upper_offset
    is_ambiguous = tf.logical_and(ub > -self._nominal, lb < -self._nominal)
    # Ensure denominator is always positive, even when not needed.
    ambiguous_denom = tf.where(is_ambiguous, ub - lb, tf.ones_like(ub))
    scale = tf.where(
        is_ambiguous, (self._nominal + ub) / ambiguous_denom,
        tf.where(lb >= -self._nominal, tf.ones_like(lb), tf.zeros_like(lb)))
    scale_complement = tf.where(
        is_ambiguous, -(self._nominal + lb) / ambiguous_denom,
        tf.where(lb >= -self._nominal, tf.zeros_like(lb), tf.ones_like(lb)))
    # Need lb_out.b = scale * (nom_in + lb_in.b) - nom_out
    # and ub_out.b = scale * (nom_in + ub_in.b - min(nom_in + lb, 0)) - nom_out
    lower_bias = (scale * (tf.minimum(self._nominal, 0.)) +
                  scale_complement * tf.minimum(-self._nominal, 0.))
    upper_bias = (scale * tf.maximum(tf.minimum(-self._nominal, 0.) - lb,
                                     tf.minimum(self._nominal, 0.)) +
                  scale_complement * tf.minimum(-self._nominal, 0.))
    lb_out = LinearExpression(
        w=tf.expand_dims(scale, 1) * self.lower.w,
        b=scale * self.lower.b + lower_bias,
        lower=self.lower.lower, upper=self.lower.upper)
    ub_out = LinearExpression(
        w=tf.expand_dims(scale, 1) * self.upper.w,
        b=scale * self.upper.b + upper_bias,
        lower=self.upper.lower, upper=self.upper.upper)

    nominal_out = tf.nn.relu(self._nominal)
    return RelativeSymbolicBounds(
        lb_out, ub_out, nominal_out).with_priors(wrapper.output_bounds)
def pano_forwards_condition(trip):
    """Checks if a pano is in a forward condition."""
    ref_pose = trip.pose[1, :, :]
    pano_pose = trip.pose[3, :, :]
    ref_twds = -1.0 * ref_pose[:, 2]

    # make sure max_depth>forward motion>median_depth
    t_vec = pano_pose[:, 3] - ref_pose[:, 3]
    ref_depth = trip.depth[1, :, :, 0]
    ref_depth = tf.where(tf.equal(ref_depth, 0.0),
                         tf.reduce_max(ref_depth) * tf.ones_like(ref_depth),
                         ref_depth)
    max_depth = tf.reduce_max(ref_depth)
    median_depth = tf.contrib.distributions.percentile(ref_depth, 0.5)

    min_depth_cond = tf.greater(tf.reduce_sum(ref_twds * t_vec), median_depth)
    max_depth_cond = tf.less(tf.reduce_sum(ref_twds * t_vec), max_depth)

    return tf.logical_and(min_depth_cond, max_depth_cond)
def check_min_image_dim(min_dim, image_tensor):
    """Checks that the image width/height are greater than some number.

    This function is used to check that the width and height of an image are above
    a certain value. If the image shape is static, this function will perform the
    check at graph construction time. Otherwise, if the image shape varies, an
    Assertion control dependency will be added to the graph.

    Args:
      min_dim: The minimum number of pixels along the width and height of the
               image.
      image_tensor: The image tensor to check size for.

    Returns:
      If `image_tensor` has dynamic size, return `image_tensor` with a Assert
      control dependency. Otherwise returns image_tensor.

    Raises:
      ValueError: if `image_tensor`'s width or height is smaller than `min_dim`.
    """
    image_shape = image_tensor.get_shape()
    image_height = static_shape.get_height(image_shape)
    image_width = static_shape.get_width(image_shape)
    if image_height is None or image_width is None:
        shape_assert = tf.Assert(
            tf.logical_and(
                tf.greater_equal(tf.shape(image_tensor)[1], min_dim),
                tf.greater_equal(tf.shape(image_tensor)[2], min_dim),
            ),
            [
                "image size must be >= {} in both height and width.".format(
                    min_dim)
            ],
        )
        with tf.control_dependencies([shape_assert]):
            return tf.identity(image_tensor)

    if image_height < min_dim or image_width < min_dim:
        raise ValueError(
            "image size must be >= %d in both height and width; image dim = %d,%d"
            % (min_dim, image_height, image_width))

    return image_tensor
Example #23
        def body_fn(i, partial, outputs):
            """Body function for while_loop.

      Args:
        i: integer scalar
        partial: dictionary of Tensor (partially-constructed example)
        outputs: dictionary of TensorArray
      Returns:
        A triple containing the new values of the inputs.
      """
            can_append = True
            one_example = {}
            for k in keys:
                val = tf.cast(x[k][i], tf.int32)
                val = val[:tf.reduce_sum(
                    tf.cast(tf.not_equal(val, 0), tf.int32))]
                one_example[k] = val
            for k in keys:
                can_append = tf.logical_and(
                    can_append,
                    tf.less_equal(
                        tf.size(partial[k]) + tf.size(one_example[k]),
                        length[k]))

            def false_fn():
                return write_packed_example(partial, outputs)

            def true_fn():
                return partial, outputs

            partial, outputs = tf.cond(can_append, true_fn, false_fn)
            new_partial = {}
            for k in keys:
                new_seq = one_example[k][:length[k]]
                new_seq_len = tf.size(new_seq)
                new_partial[k] = tf.concat([partial[k], new_seq], 0)
                new_partial[k + "_position"] = tf.concat([
                    partial[k + "_position"],
                    tf.range(new_seq_len, dtype=tf.int32)
                ], 0)
            partial = new_partial
            return i + 1, partial, outputs
Example #24
def _get_anchor_positive_triplet_mask(labels):
    """Return a 2D mask where mask[a, p] is True iff a and p are distinct and have same label.
    Args:
        labels: tf.int32 `Tensor` with shape [batch_size]
    Returns:
        mask: tf.bool `Tensor` with shape [batch_size, batch_size]
    """
    # Check that i and j are distinct
    indices_equal = tf.cast(tf.eye(tf.shape(labels)[0]), tf.bool)
    indices_not_equal = tf.logical_not(indices_equal)

    # Check if labels[i] == labels[j]
    # Uses broadcasting where the 1st argument has shape (1, batch_size) and the 2nd (batch_size, 1)
    labels_equal = tf.equal(tf.expand_dims(labels, 0),
                            tf.expand_dims(labels, 1))

    # Combine the two masks
    mask = tf.logical_and(indices_not_equal, labels_equal)

    return mask
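A quick check of the mask with toy labels (illustrative values; assumes a TF1 session):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # needed when running under TF 2.x

labels = tf.constant([0, 1, 0])
mask = _get_anchor_positive_triplet_mask(labels)
with tf.Session() as sess:
  # Only the two distinct positions sharing label 0 form valid (anchor, positive) pairs.
  print(sess.run(mask).astype(int))
  # [[0 0 1]
  #  [0 0 0]
  #  [1 0 0]]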
Example #25
def crop_image(x, offset_height, offset_width, crop_height, crop_width):
    """Crops the given image using the provided offsets and sizes."""
    original_shape = tf.shape(x)

    rank_assertion = tf.Assert(tf.equal(tf.rank(x), 3),
                               ['Rank of image must be equal to 3.'])
    with tf.control_dependencies([rank_assertion]):
        cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

    size_assertion = tf.Assert(
        tf.logical_and(tf.greater_equal(original_shape[0], crop_height),
                       tf.greater_equal(original_shape[1], crop_width)),
        ['Crop size greater than the image size.'])

    offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

    with tf.control_dependencies([size_assertion]):
        x = tf.slice(x, offsets, cropped_shape)
        x = tf.reshape(x, cropped_shape)
    return x
        def _match_when_rows_are_non_empty():

            matches = tf.argmax(similarity_matrix, 0, output_type=tf.int32)

            if self._matched_threshold is not None:
                matched_vals = tf.reduce_max(similarity_matrix, 0)
                below_unmatched_threshold = tf.greater(
                    self._unmatched_threshold, matched_vals)
                between_thresholds = tf.logical_and(
                    tf.greater_equal(matched_vals, self._unmatched_threshold),
                    tf.greater(self._matched_threshold, matched_vals))

                if self._negatives_lower_than_unmatched:
                    matches = self._set_values_using_indicator(
                        matches, below_unmatched_threshold, -1)
                    matches = self._set_values_using_indicator(
                        matches, between_thresholds, -2)
                else:
                    matches = self._set_values_using_indicator(
                        matches, below_unmatched_threshold, -2)
                    matches = self._set_values_using_indicator(
                        matches, between_thresholds, -1)

            if self._force_match_for_each_row:
                similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
                    similarity_matrix)
                force_match_column_ids = tf.argmax(similarity_matrix,
                                                   1,
                                                   output_type=tf.int32)
                force_match_column_indicators = tf.one_hot(
                    force_match_column_ids, depth=similarity_matrix_shape[1])
                force_match_row_ids = tf.argmax(force_match_column_indicators,
                                                0,
                                                output_type=tf.int32)
                force_match_column_mask = tf.cast(
                    tf.reduce_max(force_match_column_indicators, 0), tf.bool)
                final_matches = tf.where(force_match_column_mask,
                                         force_match_row_ids, matches)
                return final_matches
            else:
                return matches
    def _extract_proposal_features(self, preprocessed_inputs, scope):
        """Extracts first stage RPN features.

    Args:
      preprocessed_inputs: A [batch, height, width, channels] float32 tensor
        representing a batch of images.
      scope: A scope name.

    Returns:
      rpn_feature_map: A tensor with shape [batch, height, width, depth]
      activations: A dictionary mapping feature extractor tensor names to
        tensors

    Raises:
      InvalidArgumentError: If the spatial size of `preprocessed_inputs`
        (height or width) is less than 33.
      ValueError: If the created network is missing the required activation.
    """

        preprocessed_inputs.get_shape().assert_has_rank(4)
        shape_assert = tf.Assert(
            tf.logical_and(
                tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
                tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
            ['image size must at least be 33 in both height and width.'])

        with tf.control_dependencies([shape_assert]):
            with tf.variable_scope('InceptionV2',
                                   reuse=self._reuse_weights) as scope:
                with _batch_norm_arg_scope(
                    [slim.conv2d, slim.separable_conv2d],
                        batch_norm_scale=True,
                        train_batch_norm=self._train_batch_norm):
                    _, activations = inception_v2.inception_v2_base(
                        preprocessed_inputs,
                        final_endpoint='Mixed_4e',
                        min_depth=self._min_depth,
                        depth_multiplier=self._depth_multiplier,
                        scope=scope)

        return activations['Mixed_4e'], activations
Example #28
def one_hot_segments(start_indices, end_indices, num_samples):
    """Returns a one-hot, float matrix of segments at each timestep.

  All integers in the inclusive range of start_indices and end_indices are used.
  This allows start and end timestamps to be mapped to the same index and the
  segment will not be omitted.

  Args:
    start_indices: a 1d tensor of integer indices for the start of each
      segment.
    end_indices: a tensor of integer indices for the end of each segment.
      Must be the same shape as start_indices. Values should be >= start_indices
      but this is not strictly enforced.
    num_samples: the number of rows in the output. Indices should be <
      num_samples, but this is not strictly enforced.
  Returns:
    (segments, indicator)
    segments: A [num_samples, num_elements(start_indices)] tensor where in each
      column the rows with indices >= start_indices[column] and
      <= end_indices[column] are 1.0 and all other values are 0.0.
    indicator: a tensor of 1.0 values with shape [num_samples, 1]. If padded
      with zeros to align sizes, the indicator marks where segments is valid.
  """
    start_indices = tf.convert_to_tensor(start_indices)
    end_indices = tf.convert_to_tensor(end_indices)
    start_indices.shape.assert_is_compatible_with(end_indices.shape)
    start_indices.shape.assert_has_rank(1)
    end_indices.shape.assert_has_rank(1)
    # create a matrix of the index at each row with a column per segment.
    indices = tf.to_int64(
        tf.tile(tf.transpose(tf.expand_dims(tf.range(num_samples), 0)),
                [1, tf.shape(start_indices)[0]]))
    # switch to one hot encoding of segments (includes start and end indices)
    segments = tf.to_float(
        tf.logical_and(tf.greater_equal(indices, start_indices),
                       tf.less_equal(indices, end_indices)))
    # create a tensor of ones everywhere there's an annotation. If padded with
    # zeros later, element-wise multiplication of the loss will mask out the
    # padding.
    indicator = tf.ones(shape=[num_samples, 1], dtype=tf.float32)
    return segments, indicator
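A toy invocation (illustrative values; the indices are passed as int64 so the comparisons match the tf.to_int64 cast inside the function):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # needed when running under TF 2.x

segments, indicator = one_hot_segments(
    tf.constant([0, 3], dtype=tf.int64),
    tf.constant([1, 4], dtype=tf.int64),
    num_samples=5)
with tf.Session() as sess:
  # Column 0 covers rows 0-1, column 1 covers rows 3-4.
  print(sess.run(segments))
  # [[1. 0.]
  #  [1. 0.]
  #  [0. 0.]
  #  [0. 1.]
  #  [0. 1.]]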
Example #29
def _weights_for_nonzero_refs(source_waveforms, consider_as_zero=None):
    """Return shape (batch, source) weights for signals that are nonzero.

  Args:
    source_waveforms: A tensor (batch, source, samples), dtype=tf.float32.
    consider_as_zero: An optional tensor (batch, source), dtype=tf.bool which
      indicates some entries as being zero even if they are not exactly zero.
      This can be used to indicate some source types (e.g. noise) as being
      zero regardless of their norm.
  Returns:
    consider_nonzero: A tensor (batch, source), dtype=tf.bool marking sources
      that are considered non-zero: their norm is greater than 1e-8 and, if
      consider_as_zero is given, they are not marked there.
  """
    source_norms = tf.sqrt(tf.reduce_mean(tf.square(source_waveforms),
                                          axis=-1))
    consider_nonzero = tf.greater(source_norms, 1e-8)
    if consider_as_zero is not None:
        consider_nonzero = tf.logical_and(consider_nonzero,
                                          tf.logical_not(consider_as_zero))
    return consider_nonzero
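A toy batch with one all-zero source (illustrative values; assumes a TF1 session):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # needed when running under TF 2.x

source_waveforms = tf.constant([[[0.1, -0.2, 0.3],
                                 [0.0, 0.0, 0.0]]])  # (batch=1, source=2, samples=3)
weights = _weights_for_nonzero_refs(source_waveforms)
with tf.Session() as sess:
  print(sess.run(weights))  # expected: [[ True False]]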
def resize_im(image, image_size, pad_val, channels, features=None):
    """Decodes and resizes the image.

  Args:
    image: Image to resize.
    image_size: The desired max image size.
    pad_val: The value to pad with.
    channels: The number of channels in the image.
    features: Other features to resize.

  Returns:
    Resized image with possible padded regions,
    and possibly the resized elements boxes.
  """
    [height, width, got_channels] = preprocess_utils.resolve_shape(image,
                                                                   rank=3)

    new_height, new_width = get_resize_dim(height, width, image_size)

    image = tf.reshape(image, [height, width, -1])
    image = tf.cond(
        tf.logical_and(channels == 3, tf.equal(got_channels, 1)),
        true_fn=lambda: tf.image.grayscale_to_rgb(image),
        false_fn=lambda: image,
    )

    image = tf.image.resize_images(image, [new_height, new_width])

    image = preprocess_utils.pad_to_bounding_box(image, 0, 0, image_size,
                                                 image_size, pad_val)
    if features is not None:
        width, height = tf.to_float(width), tf.to_float(height)
        max_dim = tf.to_float(tf.maximum(width, height))
        features[ELEMENTS_BOX_ID] = features[ELEMENTS_BOX_ID] / max_dim
        if GROUNDTRUTH_XMIN_ID in features:
            features[GROUNDTRUTH_XMIN_ID] *= width / max_dim
            features[GROUNDTRUTH_XMAX_ID] *= width / max_dim
            features[GROUNDTRUTH_YMIN_ID] *= height / max_dim
            features[GROUNDTRUTH_YMAX_ID] *= height / max_dim
    return image