Example #1
def f(example: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:
    # Gather the inputs in the order the numpy function expects.
    input_list = [example[k] for k in preprocess_input_names]
    output_types = list(preprocess_output_types.values())
    output_list = tf.numpy_function(preprocess_with_list_input_fn,
                                    input_list, output_types)
    # Re-associate the positional outputs with their names.
    return dict(zip(preprocess_output_types.keys(), output_list))
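A minimal sketch of how this wrapper might be driven, assuming hypothetical values for the closure names `preprocess_input_names`, `preprocess_output_types`, and `preprocess_with_list_input_fn` (none of these definitions come from the original snippet; they pair with the `f` above):

from typing import Dict  # needed by the definition of f above

import numpy as np
import tensorflow as tf

# Hypothetical closure values; the snippet above captures these from
# its enclosing scope.
preprocess_input_names = ['image', 'label']
preprocess_output_types = {'image': tf.float32, 'label': tf.int64}

def preprocess_with_list_input_fn(image, label):
    # Runs as plain numpy inside tf.numpy_function.
    return image.astype(np.float32) / 255.0, label.astype(np.int64)

ds = tf.data.Dataset.from_tensor_slices({
    'image': np.zeros((4, 8, 8), dtype=np.uint8),
    'label': np.arange(4, dtype=np.int64),
}).map(f)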
Example #2
    def tf_set_priority(self, indices, priorities):
        """Sets the priorities for the given indices.

        Args:
          indices: tf.Tensor with dtype int32 and shape [n].
          priorities: tf.Tensor with dtype float and shape [n].

        Returns:
          A tf op setting the priorities for prioritized sampling.
        """
        return tf.numpy_function(
            self.memory.set_priority, [indices, priorities], [],
            name='prioritized_replay_set_priority_py_func')
Example #3
    def tf_get_priority(self, indices):
        """Gets the priorities for the given indices.

        Args:
          indices: tf.Tensor with dtype int32 and shape [n].

        Returns:
          priorities: tf.Tensor with dtype float and shape [n], the priorities
            at the given indices.
        """
        return tf.numpy_function(
            self.memory.get_priority, [indices],
            tf.float32,
            name='prioritized_replay_get_priority_py_func')
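Both wrappers delegate to the Python-side replay memory. Note that `tf_set_priority` passes an empty `Tout` (`[]`), so the returned op has no outputs and exists only for its side effect. A minimal eager sketch with a hypothetical stand-in memory (`_ToyMemory` is not part of the original code):

import numpy as np
import tensorflow as tf

class _ToyMemory:
    """Hypothetical stand-in for the real prioritized replay memory."""

    def __init__(self, capacity):
        self._priorities = np.zeros(capacity, dtype=np.float32)

    def set_priority(self, indices, priorities):
        self._priorities[indices] = priorities

    def get_priority(self, indices):
        return self._priorities[indices]

memory = _ToyMemory(capacity=8)
indices = tf.constant([1, 3], dtype=tf.int32)
# Empty Tout: runs purely for its side effect on the Python memory.
tf.numpy_function(memory.set_priority, [indices, tf.constant([0.5, 2.0])], [])
print(tf.numpy_function(memory.get_priority, [indices], tf.float32))
# tf.Tensor([0.5 2. ], shape=(2,), dtype=float32)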
Example #4
def compress(summary, epsilon):
  """Compress a summary to within `epsilon` accuracy.

  The compression step is needed to keep the summary sizes small after merging,
  and also used to return the final target boundaries. It finds the new bins
  based on interpolating cumulative weight percentages from the large summary.
  Taking the difference of the cumulative weights from the previous bin's
  cumulative weight will give the new weight for that bin.

  Args:
      summary: 2D `np.ndarray` summary to be compressed.
      epsilon: A `float32` that determines the approximate desired precision.

  Returns:
      A 2D `np.ndarray` that is a compressed summary. First column is the
      interpolated partition values, the second is the weights (counts).
  """
  # TODO(b/184863356): remove the numpy escape hatch here.
  return tf.numpy_function(
      lambda s: _compress_summary_numpy(s, epsilon), [summary], tf.float32)
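The TODO marks this as an escape hatch into plain numpy. Below is a hedged sketch of the interpolation the docstring describes, not necessarily the library's exact `_compress_summary_numpy`; it assumes the summary is a `[2, n]` array with partition values in row 0 and weights in row 1:

import numpy as np

def _compress_summary_numpy(summary, epsilon):
  """Sketch: compress a [2, n] summary to roughly 1/epsilon bins."""
  if summary.shape[1] * epsilon < 1:
    return summary  # already small enough
  # Target cumulative-weight percentages, spaced epsilon apart.
  percents = epsilon + np.arange(0.0, 1.0, epsilon)
  cum_weights = summary[1].cumsum()
  cum_weight_percents = cum_weights / cum_weights[-1]
  # Interpolate partition values at the target percentages.
  new_bins = np.interp(percents, cum_weight_percents, summary[0])
  new_cum = np.interp(percents, cum_weight_percents, cum_weights)
  # Each bin's weight is the difference from the previous cumulative weight.
  new_weights = np.diff(new_cum, prepend=0.0)
  return np.stack((new_bins, new_weights)).astype(np.float32)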
Example #5
        def convert(idx, ex):
            # Create input and target feature sequences
            input_, target_ = task_converter(ex)

            # Tokenize inputs & targets
            output_types = [tf.int64, tf.int64, tf.int64]
            input_ids, attention_mask, token_type_ids = tf.numpy_function(
                py_tokenize_example, [input_], output_types)
            target_ids, _, _ = tf.numpy_function(py_tokenize_example,
                                                 [target_], output_types)

            max_seq_len = self.max_seq_len
            input_ids.set_shape([max_seq_len])
            attention_mask.set_shape([max_seq_len])
            token_type_ids.set_shape([max_seq_len])
            target_ids.set_shape([max_seq_len])

            # Log first 5 inputs and targets for each dataset
            if idx < LOG_EXAMPLES and decode:
                tf.numpy_function(py_decode_and_log, [idx, 'Input', input_ids],
                                  [])
                tf.numpy_function(py_decode_and_log,
                                  [idx, 'Target', target_ids], [])

            # Prepare input dictionary
            input_dict = {
                'input_ids': input_ids,
                'attention_mask': attention_mask,
                'token_type_ids': token_type_ids,
                'task': dataset,
            }

            if self.config.is_encoder_decoder:
                if train:
                    start_ids = tf.constant(self.config.decoder_start_token_id,
                                            shape=[1],
                                            dtype=tf.int64)
                    decoder_ids = tf.concat([start_ids, target_ids[:-1]],
                                            axis=-1)
                    decoder_ids.set_shape([max_seq_len])
                    input_dict['decoder_input_ids'] = decoder_ids
                else:
                    input_dict['decoder_input_ids'] = input_ids

            # Add an extra dimension to targets to support temporal class weights in tf
            target_ids = tf.expand_dims(target_ids, axis=-1)

            return input_dict, target_ids, attention_mask
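`convert` takes an `(index, example)` pair, so it would typically be applied through `Dataset.enumerate()`; a minimal sketch, where `raw_dataset` is a hypothetical placeholder for the upstream dataset. The `set_shape` calls above are required because `tf.numpy_function` returns tensors with unknown static shape:

ds = raw_dataset.enumerate().map(  # raw_dataset: hypothetical upstream dataset
    convert, num_parallel_calls=tf.data.AUTOTUNE)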
Example #6
def tf_metric_fixed_point(action_cost_matrix, gamma):
    return tf.numpy_function(metric_fixed_point, [action_cost_matrix, gamma],
                             Tout=tf.float32)
Example #7
        def _filter_top_k(x):
            # This loses the static shape.
            x = tf.numpy_function(_identity, (x,), tf.float32)

            return metrics_utils._filter_top_k(x=x, k=2)
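The `_identity` round trip deliberately erases the static shape before `_filter_top_k` is exercised. If the static shape were needed downstream, it could be re-attached with `tf.ensure_shape` or `set_shape`; a minimal sketch (the `g` wrapper and the `[None, 5]` shape are illustrative assumptions):

import tensorflow as tf

def _identity(x):
    return x  # numpy round trip; TF loses the static shape

@tf.function
def g(x):
    y = tf.numpy_function(_identity, (x,), tf.float32)
    # Hypothetical: re-attach the known static shape for downstream ops.
    return tf.ensure_shape(y, [None, 5])

print(g(tf.zeros([3, 5])).shape)  # (3, 5); the traced graph sees (None, 5)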