def flatten_image(x, label=False):
    if label:
        return (tf.divide(
            tf.dtypes.cast(tf.reshape(x["image"], (1, 28 * 28)), tf.float32),
            256.0), x["label"])
    else:
        return (tf.divide(
            tf.dtypes.cast(tf.reshape(x["image"], (1, 28 * 28)), tf.float32),
            256.0))
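A minimal usage sketch (assumptions: dataset elements are dicts with a 28x28 uint8 "image" and an integer "label", as tfds.load("mnist") would yield; the synthetic element below only keeps the sketch self-contained):

import tensorflow as tf

# Hypothetical one-element dataset standing in for MNIST.
sample = {
    "image": tf.zeros((28, 28, 1), dtype=tf.uint8),
    "label": tf.constant(5, dtype=tf.int64),
}
ds = tf.data.Dataset.from_tensors(sample)

# With label=True each element becomes a (features, label) pair,
# the structure tf.keras.Model.fit expects.
for features, label in ds.map(lambda x: flatten_image(x, label=True)):
    print(features.shape, label.numpy())  # (1, 784) 5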
Example #2
    def __call__(self, step):
        with tf.name_scope(self.name or "PolynomialDecay") as name:
            initial_learning_rate = tf.convert_to_tensor(
                self.initial_learning_rate, name="initial_learning_rate")
            dtype = initial_learning_rate.dtype
            end_learning_rate = tf.cast(self.end_learning_rate, dtype)
            power = tf.cast(self.power, dtype)

            global_step_recomp = tf.cast(step, dtype)
            decay_steps_recomp = tf.cast(self.decay_steps, dtype)
            if self.cycle:
                # Find the first multiple of decay_steps that is bigger than
                # global_step. If global_step is zero set the multiplier to 1
                multiplier = tf.where(
                    tf.equal(global_step_recomp, 0), 1.0,
                    tf.math.ceil(global_step_recomp / self.decay_steps))
                decay_steps_recomp = tf.multiply(decay_steps_recomp,
                                                 multiplier)
            else:
                # Make sure that the global_step used is not bigger than decay_steps.
                global_step_recomp = tf.minimum(global_step_recomp,
                                                decay_steps_recomp)

            p = tf.divide(global_step_recomp, decay_steps_recomp)
            return tf.add(tf.multiply(
                initial_learning_rate - end_learning_rate,
                tf.pow(1 - p, power)),
                          end_learning_rate,
                          name=name)
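This __call__ matches Keras' built-in polynomial schedule; a usage sketch with the public API (values assume power=2.0, cycle=False):

import tensorflow as tf

schedule = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=0.1,
    decay_steps=1000,
    end_learning_rate=0.01,
    power=2.0,
    cycle=False)

print(schedule(0).numpy())     # 0.1
print(schedule(500).numpy())   # 0.01 + 0.09 * (1 - 0.5)**2 = 0.0325
print(schedule(1000).numpy())  # 0.01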
Example #3
def read_tensor_from_image_file(file_name,
                                input_height=299,
                                input_width=299,
                                input_mean=0,
                                input_std=255):
    input_name = "file_reader"
    output_name = "normalized"
    file_reader = tf.read_file(file_name, input_name)
    if file_name.endswith(".png"):
        image_reader = tf.image.decode_png(file_reader,
                                           channels=3,
                                           name='png_reader')
    elif file_name.endswith(".gif"):
        image_reader = tf.squeeze(
            tf.image.decode_gif(file_reader, name='gif_reader'))
    elif file_name.endswith(".bmp"):
        image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')
    else:
        image_reader = tf.image.decode_jpeg(file_reader,
                                            channels=3,
                                            name='jpeg_reader')
    float_caster = tf.cast(image_reader, tf.float32)
    dims_expander = tf.expand_dims(float_caster, 0)
    resized = tf.image.resize_bilinear(dims_expander,
                                       [input_height, input_width])
    normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
    sess = tf.Session()
    result = sess.run(normalized)

    return result
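The snippet above uses TF1-era APIs (tf.read_file, tf.image.resize_bilinear, tf.Session). A sketch of an eager TF2 equivalent, assuming the same preprocessing is wanted:

import tensorflow as tf

def read_tensor_from_image_file_v2(file_name,
                                   input_height=299,
                                   input_width=299,
                                   input_mean=0,
                                   input_std=255):
    # decode_image handles PNG, GIF, BMP and JPEG; expand_animations=False
    # collapses GIFs to a single frame so the result is always rank 3.
    image = tf.io.decode_image(
        tf.io.read_file(file_name), channels=3, expand_animations=False)
    image = tf.cast(image, tf.float32)
    image = tf.expand_dims(image, 0)
    image = tf.image.resize(image, [input_height, input_width])  # bilinear
    return (image - input_mean) / input_std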
Example #4
File: reduction.py, Project: zy009197/keras
    def call(self, inputs, weights=None):
        # If we are not weighting the inputs we can immediately reduce the data
        # and return it.
        if weights is None:
            return get_reduce_op(self.reduction)(inputs, axis=self.axis)

        # TODO(momernick): Add checks for this and a decent error message if the
        # weight shape isn't compatible.
        if weights.shape.rank + 1 == inputs.shape.rank:
            weights = tf.compat.v1.expand_dims(weights, -1)

        weighted_inputs = tf.multiply(inputs, weights)

        # Weighted sum and prod can be expressed as reductions over the weighted
        # values, as can min and max.
        if self.reduction in ("sum", "prod", "min", "max"):
            return get_reduce_op(self.reduction)(weighted_inputs,
                                                 axis=self.axis)

        # Weighted mean is a bit more complicated: we have to do a sum of the
        # weighted values and divide by the sum of the weights.
        if self.reduction == "mean":
            input_sum = tf.reduce_sum(weighted_inputs, axis=self.axis)
            weight_sum = tf.reduce_sum(weights, axis=self.axis)
            return tf.divide(input_sum, weight_sum)

        # sqrtn is also more complicated: it's like mean but with a normalized
        # divisor.
        if self.reduction == "sqrtn":
            logging.warning(
                "Reduction `sqrtn` is deprecated and will be removed "
                "2021-01-01. Please use the `sum` reduction and divide "
                "the output by the normalized weights instead.")
            input_sum = tf.reduce_sum(weighted_inputs, axis=self.axis)
            squared_weights = tf.pow(weights, 2)
            squared_weights_sum = tf.reduce_sum(squared_weights,
                                                axis=self.axis)
            sqrt_weights_sum = tf.sqrt(squared_weights_sum)
            return tf.divide(input_sum, sqrt_weights_sum)

        raise ValueError("%s is not a supported weighted reduction." %
                         self.reduction)
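The weighted-mean branch in isolation, as a self-contained sketch (get_reduce_op is defined elsewhere in the layer, so plain reductions are used here):

import tensorflow as tf

inputs = tf.constant([[1.0, 2.0, 3.0],
                      [4.0, 5.0, 6.0]])
weights = tf.constant([[1.0, 1.0, 2.0],
                       [0.0, 1.0, 1.0]])

# Sum of the weighted values divided by the sum of the weights.
weighted_mean = tf.divide(
    tf.reduce_sum(inputs * weights, axis=1),
    tf.reduce_sum(weights, axis=1))
print(weighted_mean.numpy())  # [2.25 5.5 ]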
Example #5
    def _variational_recurrent_dropout_value(self, unused_index, value, noise,
                                             keep_prob):
        """Performs dropout given the pre-calculated noise tensor."""
        # uniform [keep_prob, 1.0 + keep_prob)
        random_tensor = keep_prob + noise

        # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
        binary_tensor = tf.floor(random_tensor)
        ret = tf.divide(value, keep_prob) * binary_tensor
        ret.set_shape(value.get_shape())
        return ret
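The masking trick in isolation: with noise drawn from U[0, 1), floor(keep_prob + noise) is 1 with probability keep_prob and 0 otherwise, and dividing the kept values by keep_prob preserves the expected activation. A minimal sketch:

import tensorflow as tf

keep_prob = 0.8
value = tf.ones((4, 3))
noise = tf.random.uniform(tf.shape(value))  # sampled once, reused per step

binary_mask = tf.floor(keep_prob + noise)
dropped = tf.divide(value, keep_prob) * binary_mask
print(dropped.numpy())  # entries are 1.25 where kept, 0.0 where dropped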
Example #6
    def __call__(self, step):
        with tf.name_scope(self.name or "InverseTimeDecay") as name:
            initial_learning_rate = tf.convert_to_tensor(
                self.initial_learning_rate, name="initial_learning_rate")
            dtype = initial_learning_rate.dtype
            decay_steps = tf.cast(self.decay_steps, dtype)
            decay_rate = tf.cast(self.decay_rate, dtype)

            global_step_recomp = tf.cast(step, dtype)
            p = global_step_recomp / decay_steps
            if self.staircase:
                p = tf.floor(p)
            const = tf.cast(tf.constant(1), dtype)
            denom = tf.add(const, tf.multiply(decay_rate, p))
            return tf.divide(initial_learning_rate, denom, name=name)
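This matches Keras' built-in schedule, which computes lr(step) = initial_learning_rate / (1 + decay_rate * step / decay_steps). A usage sketch:

import tensorflow as tf

schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
    initial_learning_rate=0.1,
    decay_steps=100,
    decay_rate=0.5,
    staircase=False)

print(schedule(0).numpy())    # 0.1
print(schedule(100).numpy())  # 0.1 / 1.5, about 0.0667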
Example #7
def cal_longest_subsequence(softmaxed_logits):

    int_logits = tf.dtypes.cast(tf.round(softmaxed_logits), dtype=tf.int32)
    index_tensor = tf.range(softmaxed_logits.shape[1], dtype=tf.int32)
    t_index = tf.reshape(index_tensor, [softmaxed_logits.shape[1], 1])
    new_seq = tf.transpose(tf.matmul(int_logits, t_index))[0].numpy().tolist()

    # new_seq = [3,2,4,5,6,5,5,6,7]
    # print(new_seq)
    subseq = []
    indexseq = []
    for i in range(len(new_seq)):
        if i == 0:
            subseq.append(new_seq[i])
            indexseq.append(i)
        else:
            if new_seq[i] > subseq[-1]:
                subseq.append(new_seq[i])
                indexseq.append(i)
            elif new_seq[i] < subseq[0]:
                subseq[0] = new_seq[i]
                indexseq[0] = i
            else:
                index = binarySearch(subseq, 0, len(subseq) - 1, new_seq[i])
                if index != -1:
                    subseq[index] = new_seq[i]
                    indexseq[index] = i
    # print(subseq)
    # print(indexseq)

    subseq_tensor = tf.reshape(subseq, [1, -1])
    index_tensor = tf.reshape(indexseq, [1, -1])
    # print(subseq_tensor,index_tensor)
    minus_result = tf.square(tf.subtract(subseq_tensor, index_tensor))
    one_tensor = tf.ones([1, len(subseq)], tf.int32)

    result = tf.divide(one_tensor, tf.add(one_tensor, minus_result))

    # return tf.reduce_sum(result)
    return subseq
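binarySearch is not defined in the snippet. In the tails-array longest-increasing-subsequence algorithm it is the ceiling search that returns the index of the smallest element not less than the key; a sketch under that assumption:

def binarySearch(subseq, lo, hi, key):
    # Assumed helper: leftmost index in subseq[lo..hi] whose value is >= key
    # (ceiling search), or -1 if every element is smaller.
    result = -1
    while lo <= hi:
        mid = (lo + hi) // 2
        if subseq[mid] >= key:
            result = mid
            hi = mid - 1
        else:
            lo = mid + 1
    return result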
Example #8
        def pearson_correlation(x, y):
            """Computes the Pearson correlation coefficient between tensors of data.

            This routine computes a vector correlation (a la cosine distance).

            Args:
              x: one of two input arrays.
              y: second of two input arrays.

            Returns:
              scalar correlation coefficient.
            """
            # From: https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
            x_m = x - tf.math.reduce_mean(x, axis=0)
            y_m = y - tf.math.reduce_mean(y, axis=0)
            return tf.divide(
                tf.math.reduce_sum(tf.multiply(x_m, y_m), axis=0),
                tf.multiply(
                    tf.math.sqrt(
                        tf.math.reduce_sum(tf.math.square(x_m), axis=0)),
                    tf.math.sqrt(
                        tf.math.reduce_sum(tf.math.square(y_m), axis=0))))
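A quick numeric check, assuming the nested function is lifted out of its enclosing scope: perfectly anti-correlated columns should give -1.

import tensorflow as tf

x = tf.constant([[1.0], [2.0], [3.0]])
y = tf.constant([[3.0], [2.0], [1.0]])
print(pearson_correlation(x, y).numpy())  # [-1.]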
Example #9
    def call(self, x, y):
        """The actual correlation calculation. See class notes above.

        Args:
          x: first data array of size num_frames x num_features.
          y: second data array of the same size as x.

        Returns:
          A vector with num_frames individual *negative* correlations. The
          negative of the correlations are returned so that this can be used
          as a loss.
        """
        if x.shape != y.shape:
            raise ValueError(
                'Two correlation arrays must have the same size, not '
                '%s vs %s.' % (x.shape, y.shape))
        x_m = x - tf.math.reduce_mean(x, axis=0)
        y_m = y - tf.math.reduce_mean(y, axis=0)

        x_std = tf.math.reduce_sum(tf.math.square(x_m), axis=0)
        y_std = tf.math.reduce_sum(tf.math.square(y_m), axis=0)

        power = tf.sqrt(tf.multiply(x_std, y_std))
        return -tf.math.reduce_sum(tf.divide(tf.multiply(x_m, y_m), power),
                                   axis=-1)
Example #10
    def scale_errors(error_component, tol_scale):
        abs_square_error = rk_util.abs_square(error_component)
        abs_square_tol_scale = rk_util.abs_square(tol_scale)
        return tf.divide(abs_square_error, abs_square_tol_scale)
Example #11
    def _head(self, neck_outputs):
        # Shape : [time * batch]
        path_ids = neck_outputs[constants.PATH_ID]
        path_ids = tf.transpose(
            tf.reshape(
                path_ids,
                [self._current_num_timesteps, self._current_batch_size]))

        # <tf.float32>[time * batch_size, 1, hidden_dim]
        visual_feature = neck_outputs['visual_feature']
        # <tf.float32>[time * batch_size, num_tokens, hidden_dim]
        raw_text_feature = tf.reshape(
            neck_outputs['text_feature'],
            [self._current_num_timesteps, self._current_batch_size] +
            neck_outputs['text_feature'].shape[1:].as_list())
        # Shape = [batch_size, time, num_tokens, hidden_dim]
        raw_text_feature = tf.transpose(raw_text_feature, perm=[1, 0, 2, 3])

        # <tf.float32>[time, batch_size, 1, hidden_dim]
        visual_feature = tf.reshape(
            visual_feature,
            [self._current_num_timesteps, self._current_batch_size] +
            visual_feature.shape[1:].as_list())

        # <tf.float32>[batch_size, time, hidden_dim]
        visual_feature = tf.squeeze(visual_feature, axis=2)
        visual_feature = tf.transpose(visual_feature, [1, 0, 2])

        first_true = utils.get_first_true_column(
            tf.reshape(
                neck_outputs[constants.DISC_MASK],
                [self._current_num_timesteps, self._current_batch_size]))
        first_true = tf.transpose(first_true)

        # Sanity Check: path_ids are consistent for first_true and last_true.
        last_true = utils.get_last_true_column(
            tf.reshape(
                neck_outputs[constants.DISC_MASK],
                [self._current_num_timesteps, self._current_batch_size]))
        last_true = tf.transpose(last_true)
        path_ids_first_true = tf.cond(
            tf.keras.backend.any(first_true),
            lambda: tf.boolean_mask(path_ids, first_true),
            lambda: path_ids[:, 0])
        path_ids_last_true = tf.cond(
            tf.keras.backend.any(last_true),
            lambda: tf.boolean_mask(path_ids, last_true),
            lambda: path_ids[:, 0])
        tf.debugging.assert_equal(path_ids_first_true, path_ids_last_true)

        # <tf.float32>[batch_size, num_tokens, hidden_dim]
        text_feature = tf.cond(
            tf.keras.backend.any(first_true),
            lambda: tf.boolean_mask(raw_text_feature, first_true),
            lambda: raw_text_feature[:, 0, :, :])

        text_feature_last_true = tf.cond(
            tf.keras.backend.any(last_true),
            lambda: tf.boolean_mask(raw_text_feature, last_true),
            lambda: raw_text_feature[:, 0, :, :])
        tf.debugging.assert_equal(text_feature, text_feature_last_true)
        # visual_feature = tf.nn.l2_normalize(visual_feature, axis=2)
        # text_feature = tf.nn.l2_normalize(text_feature, axis=2)

        # <tf.float32>[batch_size, time, num_tokens]
        alpha_i_j = tf.matmul(visual_feature,
                              tf.transpose(text_feature, perm=[0, 2, 1]))
        # <tf.float32>[batch, time, num_tokens]
        c_i_j = tf.nn.softmax(alpha_i_j)
        # <tf.float32>[batch_size, time, num_tokens]
        mask = tf.cast(
            tf.transpose(tf.reshape(
                neck_outputs[constants.DISC_MASK],
                [self._current_num_timesteps, self._current_batch_size]),
                         perm=[1, 0]), tf.float32)

        # <tf.float32>[batch, time]
        score = tf.reduce_sum(c_i_j * alpha_i_j, 2)

        # Compute softmin(x) = softmax(-x)
        # Use stable softmax since softmax(x) = softmax(x+c) for any constant c.
        # Here we use constant c = max(-x).
        negative_score = -1.0 * score
        escore = tf.exp(negative_score - tf.reduce_max(negative_score)) * mask
        sum_escore = tf.tile(tf.expand_dims(tf.reduce_sum(escore, 1), 1),
                             [1, tf.shape(escore)[1]])
        score_weight = tf.divide(escore, sum_escore)

        similarities = tf.reduce_sum(mask * score * score_weight, 1)
        similarities = tf.expand_dims(similarities, axis=0)
        # shape: [time * batch_size]
        similarities = tf.reshape(
            tf.tile(similarities, [self._current_num_timesteps, 1]), [-1])

        # Apply an affine transform.
        similarities = similarities * self.affine_a + self.affine_b

        output_a = tf.reshape(tf.convert_to_tensor(self.affine_a), [1, 1])
        output_b = tf.reshape(tf.convert_to_tensor(self.affine_b), [1, 1])

        # shape: [time * batch]
        output_a = tf.reshape(
            tf.tile(output_a,
                    [self._current_num_timesteps, self._current_batch_size]),
            [-1])
        output_b = tf.reshape(
            tf.tile(output_b,
                    [self._current_num_timesteps, self._current_batch_size]),
            [-1])

        return common.AgentOutput(policy_logits=similarities,
                                  baseline=(output_a, output_b))
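The stable softmin used above, in isolation: softmin(x) = softmax(-x), and shifting by max(-x) before exponentiating avoids overflow without changing the result.

import tensorflow as tf

score = tf.constant([[1.0, 2.0, 3.0]])
negative_score = -score
escore = tf.exp(negative_score - tf.reduce_max(negative_score))
weights = escore / tf.reduce_sum(escore, axis=1, keepdims=True)
print(weights.numpy())                # matches tf.nn.softmax(-score)
print(tf.nn.softmax(-score).numpy())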
Example #12
def digital_net_sample(generating_matrices: types.IntTensor,
                       num_results: types.IntTensor,
                       num_digits: types.IntTensor,
                       sequence_indices: types.IntTensor = None,
                       scrambling_matrices: types.IntTensor = None,
                       digital_shift: types.IntTensor = None,
                       apply_tent_transform: bool = False,
                       validate_args: bool = False,
                       dtype: tf.DType = None,
                       name: str = None) -> types.IntTensor:
  r"""Constructs a digital net from a generating matrix.

  #### Examples

  ```python
  import tf_quant_finance as tff

  # Example: Sampling 1,000 points from 2D Sobol generating matrices.

  dim = 2
  num_results = 1000
  num_digits = 10

  tff.math.qmc.digital_net_sample(
      tff.math.qmc.sobol_generating_matrices(dim, num_results, num_digits),
      num_results,
      num_digits)
  # ==> tf.Tensor([
  #             [0.,         0.        ],
  #             [0.5,        0.5       ],
  #             [0.25,       0.75      ],
  #             ...
  #             [0.65527344, 0.9736328 ],
  #             [0.40527344, 0.7236328 ],
  #             [0.90527344, 0.22363281],
  #         ], shape=(1000, 2), dtype=float32)
  ```

  Args:
    generating_matrices: Positive scalar `Tensor` of integers with rank 2. The
      matrix from which to sample points.
    num_results: Positive scalar `Tensor` of integers with rank 0. The maximum
      number of points to sample from `generating_matrices`.
    num_digits: Positive scalar `Tensor` of integers with rank 0. The base-2
      precision of the points sampled from `generating_matrices`.
    sequence_indices: Optional positive scalar `Tensor` of integers with rank 1.
      The elements of the sequence to return specified by their position in the
      sequence.
      Default value: `None` which corresponds to the `[0, num_results)` range.
    scrambling_matrices: Optional positive scalar `Tensor` of integers with the
      same shape as `generating_matrices`. The left matrix scramble to apply to
      the generating matrices.
      Default value: `None`.
    digital_shift: Optional positive scalar `Tensor` of integers with shape
      (`dim`) where `dim = tf.shape(generating_matrices)[0]`. The digital shift
      to apply to all the sampled points via a bitwise xor.
      Default value: `None`.
    apply_tent_transform: Python `bool` indicating whether to apply a tent
      transform to the sampled points.
      Default value: `False`.
    validate_args: Python `bool` indicating whether to validate arguments.
      Default value: `False`.
    dtype: Optional `dtype`. The `dtype` of the output `Tensor` (either
      `float32` or `float64`).
      Default value: `None` which maps to `float32`.
    name: Python `str` name prefixed to ops created by this function.
      Default value: `None` which maps to `sample_digital_net`.

  Returns:
    A `Tensor` of samples from the digital net with `shape`
    `(num_samples, dim)` where `num_samples = min(num_results,
    size(sequence_indices))` and `dim = tf.shape(generating_matrices)[0]`.
  """

  with tf.name_scope(name or 'sample_digital_net'):
    # shape: (dim, log_num_results)
    generating_matrices = tf.convert_to_tensor(
        generating_matrices, name='generating_matrices')
    if scrambling_matrices is not None:
      scrambling_matrices = tf.convert_to_tensor(
          scrambling_matrices, name='scrambling_matrices')

    dim = utils.get_shape(generating_matrices)[0]
    int_dtype = generating_matrices.dtype
    real_dtype = dtype or tf.float32

    num_results = tf.convert_to_tensor(
        num_results, dtype=int_dtype, name='num_results')
    num_digits = tf.convert_to_tensor(
        num_digits, dtype=int_dtype, name='num_digits')

    log_num_results = tf.cast(
        tf.math.ceil(utils.log2(tf.cast(num_results, tf.float32))), int_dtype,
        'log_num_results')

    # shape: (num_samples,)
    if sequence_indices is not None:
      sequence_indices = tf.cast(
          sequence_indices, int_dtype, name='sequence_indices')

    control_deps = []
    if validate_args:
      control_deps.append(
          tf.debugging.assert_equal(
              tf.rank(generating_matrices),
              2,
              message='generating_matrices must have rank 2'))
      control_deps.append(
          tf.debugging.assert_positive(
              num_results, message='num_results must be positive'))
      control_deps.append(
          tf.debugging.assert_positive(
              num_digits, message='num_digits must be positive'))
      control_deps.append(
          tf.debugging.assert_less(
              log_num_results,
              tf.cast(32, int_dtype),
              message='log2(num_results) must be less than 32'))
      if sequence_indices is not None:
        control_deps.append(
            tf.debugging.assert_equal(
                tf.rank(sequence_indices),
                1,
                message='sequence_indices must have rank 1'))
        control_deps.append(
            tf.debugging.assert_less(
                sequence_indices,
                num_results,
                message='values in sequence_indices must be less than num_results'
            ))
      if scrambling_matrices is not None:
        control_deps.append(
            tf.debugging.assert_equal(
                utils.get_shape(scrambling_matrices),
                utils.get_shape(generating_matrices),
                message='scrambling_matrices must have the same shape as ' +
                'generating_matrices'))
      if digital_shift is not None:
        control_deps.append(
            tf.debugging.assert_equal(
                tf.rank(digital_shift),
                1,
                message='digital_shift must have rank 1'))
        control_deps.append(
            tf.debugging.assert_equal(
                tf.size(digital_shift),
                dim,
                message='digital_shift must have size ' +
                'tf.shape(generating_matrices)[0]'))

    # shape: (num_samples,)
    if sequence_indices is None:
      sequence_indices = tf.range(
          0, num_results, dtype=int_dtype, name='sequence_indices')

    with tf.control_dependencies(control_deps):
      # shape: (dim)
      if digital_shift is None:
        digital_shift = tf.zeros(
            shape=(dim), dtype=int_dtype, name='digital_shift')
      else:
        digital_shift = tf.cast(digital_shift, int_dtype, name='digital_shift')

      if scrambling_matrices is not None:
        generating_matrices = scramble_generating_matrices(
            generating_matrices,
            scrambling_matrices,
            num_digits,
            validate_args=validate_args,
            dtype=int_dtype)

      # shape: (1, dim, log_num_results)
      generating_matrices = tf.expand_dims(generating_matrices, axis=0)

      def loop_predicate_fn(binary_points, log_index):
        del binary_points
        return log_index < log_num_results

      def loop_body_fn(binary_points, log_index):
        # shape: (num_samples, dim)
        updated_binary_points = tf.bitwise.bitwise_xor(
            binary_points,
            utils.filter_tensor(
                # shape: (1, dim)
                tf.gather(generating_matrices, log_index, axis=2),
                # shape: (num_samples, 1)
                tf.cast(tf.expand_dims(sequence_indices, axis=1), int_dtype),
                # shape: ()
                log_index))

        return (updated_binary_points, log_index + 1)

      binary_points, _ = tf.while_loop(
          loop_predicate_fn,
          loop_body_fn,
          loop_vars=(
              # shape: (num_samples, dim)
              tf.repeat(
                  tf.expand_dims(digital_shift, 0), tf.size(sequence_indices),
                  0),
              # shape: ()
              tf.constant(0, dtype=int_dtype)),
          maximum_iterations=tf.cast(log_num_results, tf.int32))

      # shape: ()
      max_binary_point = tf.bitwise.left_shift(
          tf.constant(1, dtype=int_dtype), num_digits)

      # shape: (num_samples, dim)
      points = tf.divide(
          tf.cast(binary_points, real_dtype),
          tf.cast(max_binary_point, real_dtype))

      # shape: (num_samples, dim)
      return utils.tent_transform(points) if apply_tent_transform else points
Example #13

def multilevel_crop_and_resize(features, boxes, output_size=7):
    """Crop and resize on multilevel feature pyramid.

  Generate the (output_size, output_size) set of pixels for each input box
  by first assigning the box to the correct feature level, and then cropping
  and resizing it using the corresponding feature map of that level.

  Args:
    features: A dictionary with key as pyramid level and value as features. The
      features are in shape of [batch_size, height_l, width_l, num_filters].
    boxes: A 3-D Tensor of shape [batch_size, num_boxes, 4]. Each row represents
      a box with [y1, x1, y2, x2] in un-normalized coordinates.
    output_size: A scalar to indicate the output crop size.

  Returns:
    A 5-D tensor representing feature crop of shape
    [batch_size, num_boxes, output_size, output_size, num_filters].
  """

    with tf.name_scope('multilevel_crop_and_resize'):
        levels = list(features.keys())
        min_level = min(levels)
        max_level = max(levels)
        batch_size, max_feature_height, max_feature_width, num_filters = (
            features[min_level].get_shape().as_list())
        _, num_boxes, _ = boxes.get_shape().as_list()

        # Stack feature pyramid into a features_all of shape
        # [batch_size, levels, height, width, num_filters].
        features_all = []
        feature_heights = []
        feature_widths = []
        for level in range(min_level, max_level + 1):
            shape = features[level].get_shape().as_list()
            feature_heights.append(shape[1])
            feature_widths.append(shape[2])
            # Append a tensor of [batch_size, height_l * width_l, num_filters]
            # for each level.
            features_all.append(
                tf.reshape(features[level], [batch_size, -1, num_filters]))

        # Concatenate all levels and flatten so flat indices can gather
        # from a single [batch_size * sum_l(h_l * w_l), num_filters] tensor.
        features_r2 = tf.reshape(tf.concat(features_all, 1),
                                 [-1, num_filters])

        # Calculate height_l * width_l for each level.
        level_dim_sizes = [
            feature_widths[i] * feature_heights[i]
            for i in range(len(feature_widths))
        ]
        # level_dim_offsets is accumulated sum of level_dim_size.
        level_dim_offsets = [0]
        for i in range(len(feature_widths) - 1):
            level_dim_offsets.append(level_dim_offsets[i] + level_dim_sizes[i])
        batch_dim_size = level_dim_offsets[-1] + level_dim_sizes[-1]
        level_dim_offsets = tf.constant(level_dim_offsets, tf.int32)
        height_dim_sizes = tf.constant(feature_widths, tf.int32)

        # Assigns boxes to the right level.
        box_width = boxes[:, :, 3] - boxes[:, :, 1]
        box_height = boxes[:, :, 2] - boxes[:, :, 0]
        areas_sqrt = tf.sqrt(box_height * box_width)
        levels = tf.cast(tf.math.floordiv(
            tf.math.log(tf.divide(areas_sqrt, 224.0)), tf.math.log(2.0)) + 4.0,
                         dtype=tf.int32)
        # Maps levels between [min_level, max_level].
        levels = tf.minimum(max_level, tf.maximum(levels, min_level))

        # Projects box location and sizes to corresponding feature levels.
        scale_to_level = tf.cast(tf.pow(tf.constant(2.0),
                                        tf.cast(levels, tf.float32)),
                                 dtype=boxes.dtype)
        boxes /= tf.expand_dims(scale_to_level, axis=2)
        box_width /= scale_to_level
        box_height /= scale_to_level
        boxes = tf.concat([
            boxes[:, :, 0:2],
            tf.expand_dims(box_height, -1),
            tf.expand_dims(box_width, -1)
        ],
                          axis=-1)

        # Maps levels to [0, max_level-min_level].
        levels -= min_level
        level_strides = tf.pow([[2.0]], tf.cast(levels, tf.float32))
        boundary = tf.cast(
            tf.concat([
                tf.expand_dims([[tf.cast(max_feature_height, tf.float32)]] /
                               level_strides - 1,
                               axis=-1),
                tf.expand_dims([[tf.cast(max_feature_width, tf.float32)]] /
                               level_strides - 1,
                               axis=-1),
            ],
                      axis=-1), boxes.dtype)

        # Compute grid positions.
        kernel_y, kernel_x, box_gridy0y1, box_gridx0x1 = compute_grid_positions(
            boxes, boundary, output_size, sample_offset=0.5)

        x_indices = tf.cast(tf.reshape(
            box_gridx0x1, [batch_size, num_boxes, output_size * 2]),
                            dtype=tf.int32)
        y_indices = tf.cast(tf.reshape(
            box_gridy0y1, [batch_size, num_boxes, output_size * 2]),
                            dtype=tf.int32)

        batch_size_offset = tf.tile(
            tf.reshape(
                tf.range(batch_size) * batch_dim_size, [batch_size, 1, 1, 1]),
            [1, num_boxes, output_size * 2, output_size * 2])
        # Get level offset for each box. Each box belongs to one level.
        levels_offset = tf.tile(
            tf.reshape(tf.gather(level_dim_offsets, levels),
                       [batch_size, num_boxes, 1, 1]),
            [1, 1, output_size * 2, output_size * 2])
        y_indices_offset = tf.tile(
            tf.reshape(
                y_indices *
                tf.expand_dims(tf.gather(height_dim_sizes, levels), -1),
                [batch_size, num_boxes, output_size * 2, 1]),
            [1, 1, 1, output_size * 2])
        x_indices_offset = tf.tile(
            tf.reshape(x_indices, [batch_size, num_boxes, 1, output_size * 2]),
            [1, 1, output_size * 2, 1])
        indices = tf.reshape(
            batch_size_offset + levels_offset + y_indices_offset +
            x_indices_offset, [-1])

        # TODO(wangtao): replace tf.gather with tf.gather_nd and try to get similar
        # performance.
        features_per_box = tf.reshape(tf.gather(features_r2, indices), [
            batch_size, num_boxes, output_size * 2, output_size * 2,
            num_filters
        ])

        # Bilinear interpolation.
        features_per_box = feature_bilinear_interpolation(
            features_per_box, kernel_y, kernel_x)
        return features_per_box
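The level-assignment heuristic above in isolation (the standard FPN rule, floor(log2(sqrt(area) / 224)) + 4): a box whose sqrt(area) is 224 lands on level 4, half that on level 3, twice that on level 5.

import tensorflow as tf

areas_sqrt = tf.constant([112.0, 224.0, 448.0])
levels = tf.cast(
    tf.math.floordiv(tf.math.log(areas_sqrt / 224.0), tf.math.log(2.0)) + 4.0,
    tf.int32)
print(levels.numpy())  # [3 4 5]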
Example #14
    def positive_fcn():
        res = tf.divide(tf.math.reduce_sum(tf.multiply(x_m, y_m), axis=0),
                        tf.multiply(tf.math.sqrt(x_p), tf.math.sqrt(y_p)))
        return res
Example #15
def lattice_rule_sample(generating_vectors: types.IntTensor,
                        dim: types.IntTensor,
                        num_results: types.IntTensor,
                        sequence_indices: types.IntTensor = None,
                        additive_shift: types.FloatTensor = None,
                        apply_tent_transform: bool = False,
                        validate_args: bool = False,
                        dtype: tf.DType = None,
                        name: str = None) -> types.RealTensor:
    r"""Constructs a lattice rule from a generating vector.

  #### Examples

  ```python
  import tensorflow as tf
  import tf_quant_finance as tff

  # Example: Sampling 1,000 points from 2D generating vectors.

  generating_vectors = tf.constant([1, 387275, 314993, 50301], dtype=tf.int32)

  dim = 2
  num_results = 1000

  tff.math.qmc.lattice_rule_sample(generating_vectors, dim, num_results)
  # ==> tf.Tensor([
  #             [0.,         0.        ],
  #             [0.001,      0.2749939 ],
  #             [0.002,      0.5499878 ],
  #             ...
  #             [0.99700004, 0.1689148 ],
  #             [0.998,      0.4439087 ],
  #             [0.9990001,  0.7189026 ],
  #         ], shape=(1000, 2), dtype=float32)
  ```

  Args:
    generating_vectors: Positive scalar `Tensor` of integers with rank 1
      representing the vector from which to sample points.
    dim: Positive scalar `Tensor` of integers with rank 0. The event size of the
      sampled points. Must not exceed the size of `generating_vectors`.
    num_results: Positive scalar `Tensor` of integers with rank 0. The maximum
      number of points to sample.
    sequence_indices: Optional positive scalar `Tensor` of integers with rank 1.
      The elements of the sequence to return specified by their position in the
      sequence.
      Default value: `None` which corresponds to the `[0, num_results)` range.
    additive_shift: Optional scalar `Tensor` of real values with the same
      `shape` as `generating_vectors`. The additive shift to add to all the
      points (modulo 1) before applying the tent transform.
      Default value: `None`.
    apply_tent_transform: Python `bool` indicating whether to apply a tent
      transform to the sampled points.
      Default value: `False`.
    validate_args: Python `bool` indicating whether to validate arguments.
      Default value: `False`.
    dtype: Optional `dtype`. The `dtype` of the output `Tensor` (either
      `float32` or `float64`).
      Default value: `None` which maps to `float32`.
    name: Python `str` name prefixed to ops created by this function.
      Default value: `None` which maps to `sample_lattice_rule`.

  Returns:
    A `Tensor` of samples from the lattice rule with `shape`
    `(num_samples, dim)` where `num_samples = min(num_results,
    size(sequence_indices))`.
  """

    with tf.name_scope(name or 'sample_lattice_rule'):
        # shape: (?,)
        generating_vectors = tf.convert_to_tensor(generating_vectors,
                                                  name='generating_vectors')

        int_dtype = generating_vectors.dtype
        real_dtype = dtype or tf.float32

        dim = tf.convert_to_tensor(dim, dtype=int_dtype, name='dim')
        num_results = tf.convert_to_tensor(num_results,
                                           dtype=int_dtype,
                                           name='num_results')

        control_deps = []
        if validate_args:
            control_deps.append(
                tf.debugging.assert_equal(
                    tf.rank(generating_vectors),
                    1,
                    message='generating_vectors must have rank 1'))
            control_deps.append(
                tf.debugging.assert_less_equal(
                    dim,
                    tf.size(generating_vectors, out_type=int_dtype),
                    message='dim must not exceed the size of generating_vectors'
                ))
            control_deps.append(
                tf.debugging.assert_positive(
                    num_results, message='num_results must be positive'))

        with tf.control_dependencies(control_deps):
            # shape: (num_samples,)
            if sequence_indices is None:
                sequence_indices = tf.range(0, num_results)
            sequence_indices = tf.cast(sequence_indices,
                                       int_dtype,
                                       name='sequence_indices')

            unit = tf.ones(shape=(), dtype=real_dtype)

            # shape: (dim,)
            scaled_vector = tf.divide(
                # shape: (dim,)
                tf.cast(generating_vectors[:dim], real_dtype),
                # shape: ()
                tf.cast(num_results, real_dtype))

            # shape: (num_samples, dim)
            points = tf.multiply(
                # shape: (num_samples, 1)
                tf.expand_dims(tf.cast(sequence_indices, real_dtype), axis=1),
                # shape: (1, dim)
                tf.expand_dims(tf.math.floormod(scaled_vector, unit), axis=0))

            if additive_shift is not None:
                # shape: (num_results,)
                additive_shift = tf.cast(additive_shift,
                                         real_dtype,
                                         name='additive_shift')
                # shape: (num_samples, dim)
                points += additive_shift[:dim]

            # shape: (num_samples, dim)
            points = tf.math.floormod(points, unit)

            # shape: (num_samples, dim)
            return utils.tent_transform(
                points) if apply_tent_transform else points
Example #16

def multilevel_crop_and_resize(features, boxes, output_size=7):
    """Crop and resize on multilevel feature pyramid.

  Generate the (output_size, output_size) set of pixels for each input box
  by first assigning the box to the correct feature level, and then cropping
  and resizing it using the corresponding feature map of that level.

  Args:
    features: A dictionary with key as pyramid level and value as features.
      The features are in shape of [batch_size, height_l, width_l, num_filters].
    boxes: A 3-D Tensor of shape [batch_size, num_boxes, 4]. Each row
      represents a box with [y1, x1, y2, x2] in un-normalized coordinates.
    output_size: A scalar to indicate the output crop size.

  Returns:
    A 5-D tensor representing feature crop of shape
    [batch_size, num_boxes, output_size, output_size, num_filters].
  """
    with tf.name_scope('multilevel_crop_and_resize'):
        levels = features.keys()
        min_level = min(levels)
        max_level = max(levels)
        _, max_feature_height, max_feature_width, _ = (
            features[min_level].get_shape().as_list())
        # Stacks feature pyramid into a features_all of shape
        # [batch_size, levels, height, width, num_filters].
        features_all = []
        for level in range(min_level, max_level + 1):
            features_all.append(
                tf.image.pad_to_bounding_box(features[level], 0, 0,
                                             max_feature_height,
                                             max_feature_width))
        features_all = tf.stack(features_all, axis=1)

        # Assigns boxes to the right level.
        box_width = boxes[:, :, 3] - boxes[:, :, 1]
        box_height = boxes[:, :, 2] - boxes[:, :, 0]
        areas_sqrt = tf.sqrt(box_height * box_width)
        levels = tf.cast(tf.math.floordiv(
            tf.math.log(tf.divide(areas_sqrt, 224.0)), tf.math.log(2.0)) + 4.0,
                         dtype=tf.int32)
        # Maps levels between [min_level, max_level].
        levels = tf.minimum(max_level, tf.maximum(levels, min_level))

        # Projects box location and sizes to corresponding feature levels.
        scale_to_level = tf.cast(tf.pow(tf.constant(2.0),
                                        tf.cast(levels, tf.float32)),
                                 dtype=boxes.dtype)
        boxes /= tf.expand_dims(scale_to_level, axis=2)
        box_width /= scale_to_level
        box_height /= scale_to_level
        boxes = tf.concat([
            boxes[:, :, 0:2],
            tf.expand_dims(box_height, -1),
            tf.expand_dims(box_width, -1)
        ],
                          axis=-1)

        # Maps levels to [0, max_level-min_level].
        levels -= min_level
        level_strides = tf.pow([[2.0]], tf.cast(levels, tf.float32))
        boundary = tf.cast(
            tf.concat([
                tf.expand_dims([[tf.cast(max_feature_height, tf.float32)]] /
                               level_strides - 1,
                               axis=-1),
                tf.expand_dims([[tf.cast(max_feature_width, tf.float32)]] /
                               level_strides - 1,
                               axis=-1),
            ],
                      axis=-1), boxes.dtype)

        return selective_crop_and_resize(features_all, boxes, levels, boundary,
                                         output_size)
Example #17
  def _head(self, neck_outputs):

    # <tf.float32>[time * batch_size, 1, hidden_dim]
    visual_feature = neck_outputs['visual_feature']
    # <tf.float32>[time * batch_size, num_tokens, hidden_dim]
    text_feature = neck_outputs['text_feature']

    # <tf.float32>[time, batch_size, 1, hidden_dim]
    visual_feature = tf.reshape(
        visual_feature,
        [self._current_num_timesteps, self._current_batch_size] +
        visual_feature.shape[1:].as_list())

    # <tf.float32>[batch_size, time, hidden_dim]
    visual_feature = tf.squeeze(visual_feature, axis=2)
    visual_feature = tf.transpose(visual_feature, [1, 0, 2])

    first_true = utils.get_first_true_column(
        tf.reshape(neck_outputs[constants.DISC_MASK],
                   [self._current_num_timesteps, self._current_batch_size]))

    # <tf.float32>[batch_size, num_tokens, hidden_dim]
    text_feature = tf.cond(
        tf.keras.backend.any(first_true),
        lambda: tf.boolean_mask(text_feature, tf.reshape(first_true, [-1])),
        lambda: tf.reshape(text_feature, [
            self._current_num_timesteps, self._current_batch_size
        ] + text_feature.shape[1:].as_list())[0, :, :, :])
    # visual_feature = tf.nn.l2_normalize(visual_feature, axis=2)
    # text_feature = tf.nn.l2_normalize(text_feature, axis=2)

    # <tf.float32>[batch_size, time, num_tokens]
    alpha_i_j = tf.matmul(visual_feature,
                          tf.transpose(text_feature, perm=[0, 2, 1]))
    # <tf.float32>[batch_size, time, num_tokens]
    ealpha_i_j = tf.exp(alpha_i_j)
    sum_i_j = tf.tile(
        tf.expand_dims(tf.reduce_sum(ealpha_i_j, 2), 2),
        [1, 1, tf.shape(ealpha_i_j)[2]])
    mask = tf.cast(
        tf.transpose(
            tf.reshape(neck_outputs[constants.DISC_MASK],
                       [self._current_num_timesteps, self._current_batch_size]),
            perm=[1, 0]), tf.float32)
    # <tf.float32>[batch, time, num_tokens]
    c_i_j = tf.divide(ealpha_i_j, sum_i_j)
    # <tf.float32>[batch, time]
    score = tf.reduce_sum(c_i_j * alpha_i_j, 2)

    escore = tf.exp(-1 * score) * mask
    sum_escore = tf.tile(
        tf.expand_dims(tf.reduce_sum(escore, 1), 1), [1, tf.shape(escore)[1]])
    score_weight = tf.divide(escore, sum_escore)
    similarities = tf.reduce_sum(mask * score * score_weight, 1)
    similarities = tf.expand_dims(similarities, axis=0)
    # [time_step, batch_size]
    similarities = tf.tile(similarities, [self._current_num_timesteps, 1])

    # Apply an affine transform.
    similarities = similarities * self.affine_a + self.affine_b

    output_a = tf.reshape(tf.convert_to_tensor(self.affine_a), [1, 1])
    output_b = tf.reshape(tf.convert_to_tensor(self.affine_b), [1, 1])

    output_a = tf.tile(output_a,
                       [self._current_num_timesteps, self._current_batch_size])
    output_b = tf.tile(output_b,
                       [self._current_num_timesteps, self._current_batch_size])

    return common.AgentOutput(
        policy_logits=similarities, baseline=(output_a, output_b))