Example 1
    def _apply_roll_biz_space(self, date_tensor, biz_days, is_bizday,
                              roll_convention):
        """Applies roll in business day space."""
        if roll_convention == constants.BusinessDayConvention.NONE:
            # If no business convention is specified, return the current business
            # day.
            return biz_days

        if roll_convention == constants.BusinessDayConvention.FOLLOWING:
            return tf.where(is_bizday, biz_days, biz_days + 1)

        if roll_convention == constants.BusinessDayConvention.PRECEDING:
            return biz_days

        if roll_convention == constants.BusinessDayConvention.MODIFIED_FOLLOWING:
            maybe_prev_biz_day = biz_days
            maybe_next_biz_day = tf.where(is_bizday, biz_days, biz_days + 1)
            maybe_next_biz_ordinal = self._from_biz_space(maybe_next_biz_day)
            take_previous = tf.not_equal(_get_month(maybe_next_biz_ordinal),
                                         date_tensor.month())
            return tf.where(take_previous, maybe_prev_biz_day,
                            maybe_next_biz_day)

        if roll_convention == constants.BusinessDayConvention.MODIFIED_PRECEDING:
            maybe_prev_biz_day = biz_days
            maybe_next_biz_day = tf.where(is_bizday, biz_days, biz_days + 1)
            maybe_prev_biz_ordinal = self._from_biz_space(maybe_prev_biz_day)
            take_next = tf.not_equal(_get_month(maybe_prev_biz_ordinal),
                                     date_tensor.month())
            return tf.where(take_next, maybe_next_biz_day, maybe_prev_biz_day)

        raise ValueError(
            'Unsupported roll convention: {}'.format(roll_convention))
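
A quick way to see why PRECEDING is the identity above: assuming `biz_days` maps each date to the index of the last business day on or before it, rolling backward changes nothing, and rolling forward adds one exactly on the non-business days. A minimal sketch with made-up values:

import tensorflow as tf

is_bizday = tf.constant([True, False, False, True])
biz_days = tf.constant([10, 10, 10, 11])  # hypothetical business-day indices

following = tf.where(is_bizday, biz_days, biz_days + 1)  # [10, 11, 11, 11]
preceding = biz_days                                     # [10, 10, 10, 11]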
Example 2
 def jointly_tied_pairs_body(first, t, i):
   not_equal = tf.math.logical_or(
       tf.not_equal(y_true[lexa[first]], y_true[lexa[i]]),
       tf.not_equal(y_pred[lexa[first]], y_pred[lexa[i]]))
   return (tf.where(not_equal, i, first),
           tf.where(not_equal, t + ((i - first) * (i - first - 1)) // 2,
                    t), i + 1)
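
This body is meant to be driven by tf.while_loop. A self-contained harness (the inputs, the `lexa` ordering, and the final flush are assumptions about how such a tie counter is typically wired up):

import tensorflow as tf

y_true = tf.constant([1., 1., 2., 2., 2.])
y_pred = tf.constant([3., 3., 5., 5., 7.])
n = tf.size(y_true)
lexa = tf.range(n)  # assume the inputs are already in lexicographic order

def jointly_tied_pairs_body(first, t, i):
  not_equal = tf.math.logical_or(
      tf.not_equal(y_true[lexa[first]], y_true[lexa[i]]),
      tf.not_equal(y_pred[lexa[first]], y_pred[lexa[i]]))
  return (tf.where(not_equal, i, first),
          tf.where(not_equal, t + ((i - first) * (i - first - 1)) // 2, t),
          i + 1)

first, t, i = tf.while_loop(
    lambda first, t, i: i < n,
    jointly_tied_pairs_body,
    (0, 0, 1))
t += ((n - first) * (n - first - 1)) // 2  # flush the final run of ties
print(t)  # 2: pairs (0, 1) and (2, 3) are tied in both arrays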
Example 3
  def test_sparse_weights_nonzero_prob_of_one_works(self):
    true_weights = tf.constant([0., 0., 2., 0., -2.])
    model, observed_time_series, _ = self._build_test_model(
        num_timesteps=20,
        num_features=5,
        missing_prob=0.,
        true_noise_scale=0.1,
        weights=true_weights,
        weights_prior_scale=None,  # Default g-prior.
        sparse_weights_nonzero_prob=1.)

    @tf.function(autograph=False)
    def do_sampling():
      return gibbs_sampler.fit_with_gibbs_sampling(
          model,
          observed_time_series,
          num_results=100,
          num_warmup_steps=100,
          seed=test_util.test_seed(sampler_type='stateless'))

    samples = self.evaluate(do_sampling())
    mean_weights = tf.reduce_mean(samples.weights, axis=-2)
    nonzero_probs = tf.reduce_mean(
        tf.cast(tf.not_equal(samples.weights, 0.), tf.float32),
        axis=-2)
    # Increasing `num_timesteps` relative to `num_features` would give more
    # precise weight estimates, at the cost of longer test runtime.
    # TODO(axch, cgs): Can we use assertAllMeansClose here too?  The
    # samples are presumably not IID across axis=0, so the
    # statistical assumptions are not satisfied.
    self.assertAllClose(mean_weights, true_weights, atol=0.3)
    self.assertAllClose(nonzero_probs, [1., 1., 1., 1., 1.])
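
The `nonzero_probs` statistic above is just the per-feature frequency of nonzero sampled weights; a toy version of the same reduction:

w = tf.constant([[0., 1.],
                 [0., 2.],
                 [3., 0.]])  # 3 posterior samples of 2 weights
tf.reduce_mean(tf.cast(tf.not_equal(w, 0.), tf.float32), axis=-2)
# => [0.333, 0.667]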
Example 4
    def scale_channel(im, c):
        """Scale the data in the channel to implement equalize."""
        im = tf.cast(im[:, :, c], tf.int32)
        # Compute the histogram of the image channel.
        histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)

        # For the purposes of computing the step, filter out the zeros.
        nonzero = tf.where(tf.not_equal(histo, 0))
        nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
        step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255

        def build_lut(histo, step):
            # Compute the cumulative sum, shifting by step // 2
            # and then normalizing by step.
            lut = (tf.cumsum(histo) + (step // 2)) // step
            # Shift lut, prepending with 0.
            lut = tf.concat([[0], lut[:-1]], 0)
            # Clip the counts to be in range.  This is done
            # in the C code for image.point.
            return tf.clip_by_value(lut, 0, 255)

        # If step is zero, return the original image.  Otherwise, build
        # lut from the full histogram and step and then index from it.
        result = tf.cond(tf.equal(step, 0), lambda: im,
                         lambda: tf.gather(build_lut(histo, step), im))

        return tf.cast(result, tf.uint8)
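
In the AutoAugment-style source this helper is nested inside an `equalize` op that maps it over the three channels; a sketch of that wrapper, assuming `scale_channel` is in scope and the image is a uint8 HxWx3 tensor:

def equalize(image):
    # Equalize each channel independently, then restack along the channel axis.
    return tf.stack([scale_channel(image, c) for c in range(3)], axis=2)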
Example 5
    def test_sparse_regression_recovers_plausible_weights(self):
        true_weights = tf.constant([0., 0., 2., 0., -2.])
        model, observed_time_series, _ = self._build_test_model(
            num_timesteps=20,
            num_features=5,
            missing_prob=0.,
            true_noise_scale=0.1,
            weights=true_weights,
            weights_prior_scale=None,  # Default g-prior.
            sparse_weights_nonzero_prob=0.4)

        @tf.function(autograph=False)
        def do_sampling():
            return gibbs_sampler.fit_with_gibbs_sampling(
                model,
                observed_time_series,
                num_results=100,
                num_warmup_steps=100,
                seed=test_util.test_seed(sampler_type='stateless'))

        samples = self.evaluate(do_sampling())
        mean_weights = tf.reduce_mean(samples.weights, axis=-2)
        nonzero_probs = tf.reduce_mean(tf.cast(
            tf.not_equal(samples.weights, 0.), tf.float32),
                                       axis=-2)
        # Increasing `num_timesteps` relative to `num_features` would give more
        # precise weight estimates, at the cost of longer test runtime.
        self.assertAllClose(mean_weights, true_weights, atol=0.3)
        self.assertAllClose(nonzero_probs, [0., 0., 1., 0., 1.], atol=0.2)
Example 6
 def call(self, inputs):
     boolean_mask = tf.reduce_any(tf.not_equal(inputs, self.mask_value),
                                  axis=-1,
                                  keepdims=True)
     outputs = inputs * tf.cast(boolean_mask, inputs.dtype)
     # Attach the computed mask to the outputs so downstream layers receive it.
     outputs._keras_mask = tf.squeeze(boolean_mask, axis=-1)
     return outputs
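
This `call` matches the behavior of `tf.keras.layers.Masking`: a timestep is masked only when every feature equals `mask_value`. A quick check against the built-in layer:

masking = tf.keras.layers.Masking(mask_value=0.)
x = tf.constant([[[1., 2.], [0., 0.], [3., 4.]]])  # middle timestep all zeros
y = masking(x)
print(y._keras_mask)  # [[ True False  True]]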
Example 7
def _calculate_spline_coeffs_clamped_or_first_derivative(
        dx,
        dd,
        superdiag,
        subdiag,
        diag_values,
        rhs,
        dtype,
        boundary_condition_type,
        left_boundary_value=None,
        right_boundary_value=None):
    """Calculates the coefficients for the spline interpolation if the boundary condition type is CLAMPED/FIXED_FIRST_DERIVATIVE."""
    zero = tf.zeros_like(dx[..., :1], dtype=dtype)
    one = tf.ones_like(dx[..., :1], dtype=dtype)
    diag_values = tf.concat([2.0 * dx[..., :1], diag_values, zero], axis=-1)
    superdiag = tf.concat([dx[..., :1], superdiag, zero], axis=-1)
    subdiag = tf.concat([zero, subdiag, dx[..., -1:]], axis=-1)

    # Locate the right boundary when duplicates exist.
    dx = tf.concat((one, dx, zero), axis=-1)
    dx_right = dx[..., 1:]
    dx_left = dx[..., :-1]
    right_boundary = tf.math.logical_and(tf.equal(dx_right, 0),
                                         tf.not_equal(dx_left, 0))

    # For diag_values, fill in 2.0 * dx_left at the right boundary.
    # For the padding beyond the right boundary, fill in the default value 1.0.
    # No need to touch superdiag/subdiag, since dx[..., -1:] is already zero.
    diag_values = tf.where(right_boundary, 2.0 * dx_left, diag_values)
    diag_values = tf.where(tf.equal(dx_left, 0), one, diag_values)

    # build diagonals
    diagonals = tf.stack([superdiag, diag_values, subdiag], axis=-2)

    # build rhs
    left_boundary_tensor = tf.zeros_like(dx[..., :1], dtype=dtype)
    right_boundary_tensor = tf.zeros_like(dx[..., :1], dtype=dtype)
    if boundary_condition_type == BoundaryConditionType.FIXED_FIRST_DERIVATIVE:
        left_boundary_tensor = tf.convert_to_tensor(left_boundary_value,
                                                    dtype=dtype,
                                                    name="left_boundary_value")
        right_boundary_tensor = tf.convert_to_tensor(
            right_boundary_value, dtype=dtype, name="right_boundary_value")
    top_rhs = 3.0 * (dd[..., :1] - left_boundary_tensor[..., :1])
    rhs = tf.concat([top_rhs, rhs, zero], axis=-1)
    # For rhs, fill in bottom_rhs at the right boundary.
    # For the padding beyond the right boundary, fill in the default value 0.0.
    dd_left = tf.concat((one, dd), axis=-1)
    bottom_rhs = -3.0 * (dd_left - right_boundary_tensor[..., :1])
    rhs = tf.where(right_boundary, bottom_rhs, rhs)
    rhs = tf.where(tf.equal(dd_left, 0), zero, rhs)

    spline_coeffs = tf.linalg.tridiagonal_solve(diagonals,
                                                rhs,
                                                partial_pivoting=False)
    return spline_coeffs
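
The stacked diagonals built above use the 'compact' format that `tf.linalg.tridiagonal_solve` expects: shape [..., 3, n] holding the superdiagonal, main diagonal, and subdiagonal. A tiny standalone solve of the system [[2, 1, 0], [1, 2, 1], [0, 1, 2]] x = [3, 4, 3]:

diagonals = tf.constant([[1., 1., 0.],   # superdiagonal (last entry ignored)
                         [2., 2., 2.],   # main diagonal
                         [0., 1., 1.]])  # subdiagonal (first entry ignored)
rhs = tf.constant([3., 4., 3.])
tf.linalg.tridiagonal_solve(diagonals, rhs, partial_pivoting=False)  # [1., 1., 1.]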
Example 8
 def box_loss(self, box_outputs, box_targets, num_positives):
   """Computes RetinaNet box regression loss."""
   # The delta is typically set around the mean value of the regression
   # targets. For instance, the regression targets of a 512x512 input with
   # 6 anchors on the P3-P7 pyramid are about [0.1, 0.1, 0.2, 0.2].
   normalizer = num_positives * 4.0
   mask = tf.not_equal(box_targets, 0.0)
   box_loss = self._huber_loss(box_targets, box_outputs, sample_weight=mask)
   box_loss /= normalizer
   return box_loss
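
`self._huber_loss` is presumably a `tf.keras.losses.Huber` instance; the mask works because passing it as `sample_weight` zeroes the loss wherever the target is exactly 0 (unmatched anchors). A hedged sketch of the same pattern:

huber = tf.keras.losses.Huber(delta=0.1,
                              reduction=tf.keras.losses.Reduction.SUM)
targets = tf.constant([[0.1], [0.0], [-0.2]])
outputs = tf.constant([[0.2], [0.3], [-0.1]])
mask = tf.cast(tf.not_equal(targets, 0.0), tf.float32)
loss = huber(targets, outputs, sample_weight=mask)  # middle row contributes 0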
Example 9
def copy_lc_weights_2_to_3(lc_layer_2_from, lc_layer_3_to):
  lc_2_kernel, lc_2_bias = lc_layer_2_from.weights
  lc_2_kernel_masked = lc_2_kernel * lc_layer_2_from.kernel_mask

  lc_2_kernel_masked = keras.layers.local.make_2d(
      lc_2_kernel_masked, split_dim=keras.backend.ndim(lc_2_kernel_masked) // 2)
  lc_2_kernel_masked = keras.backend.transpose(lc_2_kernel_masked)
  lc_2_kernel_mask = tf.not_equal(lc_2_kernel_masked, 0)
  lc_2_kernel_flat = tf.compat.v1.boolean_mask(
      lc_2_kernel_masked, lc_2_kernel_mask)

  lc_2_kernel_flat = keras.backend.get_value(lc_2_kernel_flat)
  lc_2_bias = keras.backend.get_value(lc_2_bias)

  lc_layer_3_to.set_weights([lc_2_kernel_flat, lc_2_bias])
Example 10
        def body_fn(i, partial, outputs):
            """Body function for while_loop.

      Args:
        i: integer scalar
        partial: dictionary of Tensor (partially-constructed example)
        outputs: dictionary of TensorArray

      Returns:
        A triple containing the new values of the inputs.
      """
            can_append = True
            one_example = {}
            for k in keys:
                val = tf.cast(x[k][i], tf.int32)
                # Strip trailing zero padding from the sequence.
                num_tokens = tf.reduce_sum(tf.cast(tf.not_equal(val, 0), tf.int32))
                val = val[:num_tokens]
                one_example[k] = val
            for k in keys:
                can_append = tf.logical_and(
                    can_append,
                    tf.less_equal(
                        tf.size(partial[k]) + tf.size(one_example[k]),
                        length[k]))

            def false_fn():
                return write_packed_example(partial, outputs)

            def true_fn():
                return partial, outputs

            partial, outputs = tf.cond(can_append, true_fn, false_fn)
            new_partial = {}
            for k in keys:
                new_seq = one_example[k][:length[k]]
                new_seq_len = tf.size(new_seq)
                new_partial[k] = tf.concat([partial[k], new_seq], 0)
                if _annotate_key(k):
                    new_partial[k + '_position'] = tf.concat([
                        partial[k + '_position'],
                        tf.range(new_seq_len, dtype=tf.int32)
                    ], 0)
            partial = new_partial
            return i + 1, partial, outputs
Example 11
    def body(left, right, lexicographic):
      # We make a single pass through the list using right and left, where right
      # advances and left chases it looking for spans that are equal in their
      # primary key to then institute a sort on the secondary key.
      not_equal = tf.not_equal(primary_ordered[left], primary_ordered[right])

      def secondary_sort():
        x = tf.concat([
            lexicographic[0:left],
            tf.gather(permutation[left:right],
                      tf.argsort(tf.gather(secondary, permutation[left:right]))),
            lexicographic[right:n],
        ], axis=0)
        tensorshape_util.set_shape(x, [n])
        return x

      return (tf.where(not_equal, right, left), right + 1,
              tf.cond(not_equal, secondary_sort, lambda: lexicographic))
Example 12
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
  """Calculate cross entropy loss while ignoring padding.
  Args:
    logits: Tensor of size [batch_size, length_logits, vocab_size]
    labels: Tensor of size [batch_size, length_labels]
    smoothing: Label smoothing constant, used to determine the on and off values
    vocab_size: int size of the vocabulary
  Returns:
    Returns the cross entropy loss and weight tensors: float32 tensors with
      shape [batch_size, max(length_logits, length_labels)]
  """
  with tf.name_scope("loss"):

    if labels is not None:
      # Calculate smoothing cross entropy
      with tf.name_scope("smoothing_cross_entropy"):
        confidence = 1.0 - smoothing
        vocab_float = tf.cast(vocab_size - 1, tf.float32)
        low_confidence = (1.0 - confidence) / vocab_float
        soft_targets = tf.one_hot(
            labels,
            depth=vocab_size,
            on_value=confidence,
            off_value=low_confidence)
        xentropy = tf.nn.softmax_cross_entropy_with_logits(
            logits=logits, labels=soft_targets)

        # Calculate the best (lowest) possible value of cross entropy, and
        # subtract from the cross entropy loss.
        normalizing_constant = -(
            confidence * tf.math.log(confidence) + vocab_float *
            low_confidence * tf.math.log(low_confidence + 1e-20))
        xentropy -= normalizing_constant

      weights = tf.cast(tf.not_equal(labels, 0), tf.float32)
      # Mask out the loss contributed by padding positions before averaging.
      loss = tf.reduce_sum(xentropy * weights) / tf.reduce_sum(weights)

    else:
      loss = tf.constant(0.0)

    return loss
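
A toy invocation (shapes made up; label id 0 is the padding token that the weights mask out):

logits = tf.random.normal([2, 5, 100])
labels = tf.constant([[3, 7, 0, 0, 0], [4, 4, 9, 0, 0]])
loss = padded_cross_entropy_loss(logits, labels, smoothing=0.1, vocab_size=100)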
Example 13
 def _rpn_box_loss(self,
                   box_outputs,
                   box_targets,
                   normalizer=1.0,
                   delta=1. / 9):
     """Computes box regression loss."""
     # The delta is typically set around the mean value of the regression
     # targets. For instance, the regression targets of a 512x512 input with
     # 6 anchors on the P2-P6 pyramid are about [0.1, 0.1, 0.2, 0.2].
     with tf.compat.v1.name_scope('rpn_box_loss'):
         mask = tf.not_equal(box_targets, 0.0)
         # The loss is normalized by the sum of non-zero weights before the
         # additional normalizer provided by the caller is applied.
         box_loss = tf.compat.v1.losses.huber_loss(
             box_targets,
             box_outputs,
             weights=mask,
             delta=delta,
             reduction=tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS)
         box_loss /= normalizer
         return box_loss
Example 14
def copy_lc_weights_2_to_1(lc_layer_2_from, lc_layer_1_to):
  lc_2_kernel, lc_2_bias = lc_layer_2_from.weights
  lc_2_kernel_masked = lc_2_kernel * lc_layer_2_from.kernel_mask

  data_format = lc_layer_2_from.data_format

  if data_format == 'channels_first':
    if isinstance(lc_layer_2_from, keras.layers.LocallyConnected1D):
      permutation = (3, 0, 1, 2)
    elif isinstance(lc_layer_2_from, keras.layers.LocallyConnected2D):
      permutation = (4, 5, 0, 1, 2, 3)
    else:
      raise NotImplementedError(lc_layer_2_from)

  elif data_format == 'channels_last':
    if isinstance(lc_layer_2_from, keras.layers.LocallyConnected1D):
      permutation = (2, 0, 1, 3)
    elif isinstance(lc_layer_2_from, keras.layers.LocallyConnected2D):
      permutation = (3, 4, 0, 1, 2, 5)
    else:
      raise NotImplementedError(lc_layer_2_from)

  else:
    raise NotImplementedError(data_format)

  lc_2_kernel_masked = keras.backend.permute_dimensions(
      lc_2_kernel_masked, permutation)

  lc_2_kernel_mask = tf.not_equal(
      lc_2_kernel_masked, 0)
  lc_2_kernel_flat = tf.compat.v1.boolean_mask(
      lc_2_kernel_masked, lc_2_kernel_mask)
  lc_2_kernel_reshaped = keras.backend.reshape(lc_2_kernel_flat,
                                               lc_layer_1_to.kernel.shape)

  lc_2_kernel_reshaped = keras.backend.get_value(lc_2_kernel_reshaped)
  lc_2_bias = keras.backend.get_value(lc_2_bias)

  lc_layer_1_to.set_weights([lc_2_kernel_reshaped, lc_2_bias])