Example #1
  def _apply(self, x1, x2, param_expansion_ndims=0):
    if self.shift is None:
      dot_prod = util.sum_rightmost_ndims_preserving_shape(
          x1 * x2, ndims=self.feature_ndims)
    else:
      dot_prod = util.sum_rightmost_ndims_preserving_shape(
          (x1 - self.shift) * (x2 - self.shift),
          ndims=self.feature_ndims)

    if self.exponent is not None:
      exponent = util.pad_shape_right_with_ones(
          self.exponent, param_expansion_ndims)
      dot_prod **= exponent

    if self.slope_variance is not None:
      slope_variance = util.pad_shape_right_with_ones(
          self.slope_variance, param_expansion_ndims)
      dot_prod *= slope_variance ** 2.

    if self.bias_variance is not None:
      bias_variance = util.pad_shape_right_with_ones(
          self.bias_variance, param_expansion_ndims)
      dot_prod += bias_variance ** 2.

    return dot_prod
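Every snippet on this page reduces over feature dimensions with `util.sum_rightmost_ndims_preserving_shape`. The library's own implementation is not shown here; the following is a minimal sketch of what such a helper could look like, assuming it sums over the rightmost `ndims` axes and keeps static shape information whenever the rank is statically known (compare the tests in Examples #3 through #6):

import tensorflow as tf

def sum_rightmost_ndims_preserving_shape(x, ndims):
  # Sketch only: not the actual TFP implementation.
  x = tf.convert_to_tensor(x)
  if x.shape.ndims is not None:
    # Static rank: Python-int axes keep the static shape of the result.
    axes = list(range(x.shape.ndims - ndims, x.shape.ndims))
  else:
    # Dynamic rank: tensor-valued axes; the static shape becomes unknown.
    axes = tf.range(tf.rank(x) - ndims, tf.rank(x))
  return tf.reduce_sum(x, axis=axes)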
Example #2
    def _apply(self, x1, x2, example_ndims=0):
        if self.shift is None:
            dot_prod = util.sum_rightmost_ndims_preserving_shape(
                x1 * x2, ndims=self.feature_ndims)
        else:
            shift = tf.convert_to_tensor(self.shift)
            dot_prod = util.sum_rightmost_ndims_preserving_shape(
                (x1 - shift) * (x2 - shift), ndims=self.feature_ndims)

        if self.exponent is not None:
            exponent = tf.convert_to_tensor(self.exponent)
            exponent = util.pad_shape_with_ones(exponent, example_ndims)
            dot_prod **= exponent

        if self.slope_variance is not None:
            slope_variance = tf.convert_to_tensor(self.slope_variance)
            slope_variance = util.pad_shape_with_ones(slope_variance,
                                                      example_ndims)
            dot_prod *= slope_variance**2.

        if self.bias_variance is not None:
            bias_variance = tf.convert_to_tensor(self.bias_variance)
            bias_variance = util.pad_shape_with_ones(bias_variance,
                                                     example_ndims)
            dot_prod += bias_variance**2.

        return dot_prod
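Example #2 is the same polynomial kernel as Example #1, updated to newer conventions: parameters are materialized with `tf.convert_to_tensor`, `param_expansion_ndims` is renamed `example_ndims`, and `util.pad_shape_right_with_ones` becomes `util.pad_shape_with_ones`. The padding helper reshapes a parameter so it broadcasts against example dimensions; a hedged sketch, assuming it appends `ndims` singleton axes on the right:

import tensorflow as tf

def pad_shape_with_ones(x, ndims):
  # Sketch only: append `ndims` size-1 axes to the right of x's shape.
  # E.g. an amplitude of shape [B] becomes [B, 1, 1] for ndims=2, and
  # then broadcasts against a [B, N1, N2] kernel matrix.
  x = tf.convert_to_tensor(x)
  new_shape = tf.concat(
      [tf.shape(x), tf.ones([ndims], dtype=tf.int32)], axis=0)
  return tf.reshape(x, new_shape)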
Example #3
 def testSumRightmostNdimsPreservingShapeDynamicRank(self):
     if tf.executing_eagerly(): return
     x = tf.placeholder_with_default(np.ones((5, 4, 3, 2)), shape=None)
     self.assertIsNone(
         util.sum_rightmost_ndims_preserving_shape(x, ndims=2).shape.ndims)
     self.assertAllEqual(
         self.evaluate(util.sum_rightmost_ndims_preserving_shape(
             x, ndims=2)).shape, [5, 4])
Example #4
 def testSumRightmostNdimsPreservingShapeDynamicRank(self):
   x = tf.placeholder_with_default(np.ones((5, 4, 3, 2)), shape=None)
   self.assertIsNone(
       util.sum_rightmost_ndims_preserving_shape(x, ndims=2).shape.ndims)
   self.assertAllEqual(
       self.evaluate(
           util.sum_rightmost_ndims_preserving_shape(x, ndims=2)).shape,
       [5, 4])
Example #5
  def testSumRightmostNdimsPreservingShapeStaticRank(self):
    x = np.ones((5, 4, 3, 2))
    self.assertAllEqual(
        util.sum_rightmost_ndims_preserving_shape(x, ndims=2).shape,
        [5, 4])

    x = tf.placeholder_with_default(np.ones((5, 4, 3, 2)),
                                    shape=[5, 4, None, None])
    self.assertAllEqual(
        util.sum_rightmost_ndims_preserving_shape(x, ndims=1).shape.as_list(),
        [5, 4, None])
Example #6
    def testSumRightmostNdimsPreservingShapeStaticRank(self):
        x = np.ones((5, 4, 3, 2))
        self.assertAllEqual(
            util.sum_rightmost_ndims_preserving_shape(x, ndims=2).shape,
            [5, 4])

        x = tf.placeholder_with_default(np.ones((5, 4, 3, 2)),
                                        shape=[5, 4, None, None])
        self.assertAllEqual(
            util.sum_rightmost_ndims_preserving_shape(x,
                                                      ndims=1).shape.as_list(),
            [5, 4, 3 if tf.executing_eagerly() else None])
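The behavior these tests pin down is easy to see with the sketch from Example #1's note: with a fully static input shape the reduced axes simply disappear, while a dynamically ranked input loses its static shape entirely.

x = tf.ones([5, 4, 3, 2])
y = sum_rightmost_ndims_preserving_shape(x, ndims=2)
print(y.shape)  # (5, 4): the two rightmost axes were summed away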
Example #7
    def _apply(self, x1, x2, example_ndims=0):
        difference = util.sum_rightmost_ndims_preserving_shape(
            tf.math.squared_difference(x1, x2), ndims=self.feature_ndims)
        difference /= 2

        if self.length_scale is not None:
            length_scale = tf.convert_to_tensor(self.length_scale)
            length_scale = util.pad_shape_with_ones(length_scale,
                                                    ndims=example_ndims)
            difference /= length_scale**2

        if self.scale_mixture_rate is None:
            power = 1.
        else:
            scale_mixture_rate = tf.convert_to_tensor(self.scale_mixture_rate)
            power = util.pad_shape_with_ones(scale_mixture_rate,
                                             ndims=example_ndims)
            difference /= power

        result = (1. + difference)**-power

        if self.amplitude is not None:
            amplitude = tf.convert_to_tensor(self.amplitude)
            amplitude = util.pad_shape_with_ones(amplitude,
                                                 ndims=example_ndims)
            result *= amplitude**2
        return result
Example #8
    def _apply(self, x1, x2, example_ndims=0):
        exponent = -0.5 * util.sum_rightmost_ndims_preserving_shape(
            tf.math.squared_difference(x1, x2), self.feature_ndims)
        if self.length_scale is not None:
            length_scale = util.pad_shape_with_ones(self.length_scale,
                                                    example_ndims)
            exponent /= length_scale**2

        if self.amplitude is not None:
            amplitude = util.pad_shape_with_ones(self.amplitude, example_ndims)
            exponent += 2. * tf.math.log(amplitude)

        return tf.exp(exponent)
Example #9
  def _apply(self, x1, x2, param_expansion_ndims=0):
    exponent = -0.5 * util.sum_rightmost_ndims_preserving_shape(
        tf.squared_difference(x1, x2), self.feature_ndims)
    if self.length_scale is not None:
      length_scale = util.pad_shape_right_with_ones(
          self.length_scale, param_expansion_ndims)
      exponent /= length_scale**2

    if self.amplitude is not None:
      amplitude = util.pad_shape_right_with_ones(
          self.amplitude, param_expansion_ndims)
      exponent += 2. * tf.log(amplitude)

    return tf.exp(exponent)
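Examples #8 and #9 are the exponentiated quadratic (RBF) kernel, k(x1, x2) = amplitude**2 * exp(-||x1 - x2||**2 / (2 * length_scale**2)), accumulated in log space. The same value computed directly, with made-up inputs and feature_ndims = 1:

import tensorflow as tf

x1 = tf.constant([[1., 2.], [3., 4.]])  # two examples with 2-d features
x2 = tf.constant([[1., 0.], [0., 4.]])
amplitude, length_scale = 1.5, 0.5

exponent = -0.5 * tf.reduce_sum(tf.math.squared_difference(x1, x2), axis=-1)
exponent /= length_scale**2
exponent += 2. * tf.math.log(amplitude)
k = tf.exp(exponent)  # amplitude**2 * exp(-||x1 - x2||**2 / (2 * length_scale**2))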
Example #10
  def _apply(self, x1, x2, param_expansion_ndims=0):
    norm = tf.sqrt(
        util.sum_rightmost_ndims_preserving_shape(
            tf.squared_difference(x1, x2), self.feature_ndims))
    if self.length_scale is not None:
      length_scale = util.pad_shape_right_with_ones(
          self.length_scale, ndims=param_expansion_ndims)
      norm /= length_scale
    result = tf.exp(-norm)

    if self.amplitude is not None:
      amplitude = util.pad_shape_right_with_ones(
          self.amplitude, ndims=param_expansion_ndims)
      result *= amplitude**2
    return result
Example #11
  def _apply(self, x1, x2, example_ndims=0):
    # Use util.sqrt_with_finite_grads to avoid NaN gradients when `x1 == x2`.
    norm = util.sqrt_with_finite_grads(
        util.sum_rightmost_ndims_preserving_shape(
            tf.math.squared_difference(x1, x2), self.feature_ndims))
    if self.length_scale is not None:
      length_scale = util.pad_shape_with_ones(
          self.length_scale, ndims=example_ndims)
      norm /= length_scale
    series_term = np.sqrt(5) * norm
    log_result = tf.math.log1p(series_term + series_term**2 / 3.) - series_term

    if self.amplitude is not None:
      amplitude = util.pad_shape_with_ones(self.amplitude, example_ndims)
      log_result += 2. * tf.math.log(amplitude)
    return tf.exp(log_result)
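Example #11 evaluates the Matérn 5/2 profile in log space as log1p(series_term + series_term**2 / 3.) - series_term, which is algebraically identical to the direct (1 + s + s**2 / 3.) * exp(-s) form used in Example #15 below, but composes cleanly with the 2 * log(amplitude) term and is friendlier for small s. A quick check of the identity:

import tensorflow as tf

s = tf.constant([1e-3, 0.5, 5.0])
from_log_space = tf.exp(tf.math.log1p(s + s**2 / 3.) - s)
direct = (1. + s + s**2 / 3.) * tf.exp(-s)
# Both evaluate the Matern-5/2 radial profile and agree elementwise.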
Example #12
    def _apply(self, x1, x2, param_expansion_ndims=0):
        # Use util.sqrt_with_finite_grads to avoid NaN gradients when `x1 == x2`.
        norm = util.sqrt_with_finite_grads(
            util.sum_rightmost_ndims_preserving_shape(
                tf.math.squared_difference(x1, x2), self.feature_ndims))
        if self.length_scale is not None:
            length_scale = util.pad_shape_right_with_ones(
                self.length_scale, ndims=param_expansion_ndims)
            norm /= length_scale
        log_result = -norm

        if self.amplitude is not None:
            amplitude = util.pad_shape_right_with_ones(
                self.amplitude, ndims=param_expansion_ndims)
            log_result += 2. * tf.math.log(amplitude)
        return tf.exp(log_result)
Example #13
  def _apply(self, x1, x2, param_expansion_ndims=0):
    # Use util.sqrt_with_finite_grads to avoid NaN gradients when `x1 == x2`.
    norm = util.sqrt_with_finite_grads(
        util.sum_rightmost_ndims_preserving_shape(
            tf.squared_difference(x1, x2), self.feature_ndims))
    if self.length_scale is not None:
      length_scale = util.pad_shape_right_with_ones(
          self.length_scale, ndims=param_expansion_ndims)
      norm /= length_scale
    log_result = -norm

    if self.amplitude is not None:
      amplitude = util.pad_shape_right_with_ones(
          self.amplitude, ndims=param_expansion_ndims)
      log_result += 2. * tf.log(amplitude)
    return tf.exp(log_result)
Example #14
    def _apply(self, x1, x2, param_expansion_ndims=0):
        x1 = tf.convert_to_tensor(x1)
        x2 = tf.convert_to_tensor(x2)

        exponent = -0.5 * util.sum_rightmost_ndims_preserving_shape(
            tf.squared_difference(x1, x2), self.feature_ndims)
        if self.length_scale is not None:
            length_scale = util.pad_shape_right_with_ones(
                self.length_scale, param_expansion_ndims)
            exponent /= length_scale**2

        if self.amplitude is not None:
            amplitude = util.pad_shape_right_with_ones(self.amplitude,
                                                       param_expansion_ndims)
            exponent += 2. * tf.log(amplitude)

        return tf.exp(exponent)
Example #15
  def _apply(self, x1, x2, param_expansion_ndims=0):
    # Use util.sqrt_with_finite_grads to avoid NaN gradients when `x1 == x2`.
    norm = util.sqrt_with_finite_grads(
        util.sum_rightmost_ndims_preserving_shape(
            tf.squared_difference(x1, x2), self.feature_ndims))
    if self.length_scale is not None:
      length_scale = util.pad_shape_right_with_ones(
          self.length_scale, ndims=param_expansion_ndims)
      norm /= length_scale
    series_term = np.sqrt(5) * norm
    result = (1. + series_term + series_term**2 / 3.) * tf.exp(-series_term)

    if self.amplitude is not None:
      amplitude = util.pad_shape_right_with_ones(self.amplitude,
                                                 param_expansion_ndims)
      result *= amplitude**2
    return result
Example #16
    def _apply(self, x1, x2, param_expansion_ndims=0):
        # Use util.sqrt_with_finite_grads to avoid NaN gradients when `x1 == x2`.
        # x1: [B, Np, D] -> [B, Np, 1, D]
        # x2: [B, N, D]  -> [B, 1, N, D]
        # norm: [B, Np, N]
        with tf.control_dependencies([
                tf.assert_equal(
                    tf.shape(self.heights)[-1] + 1,
                    tf.shape(self.edgescales)[-1])
        ]):
            norm = util.sqrt_with_finite_grads(
                util.sum_rightmost_ndims_preserving_shape(
                    tf.squared_difference(x1, x2), self.feature_ndims))
        #B(1),1,Np,N
        norm = tf.expand_dims(norm, -(param_expansion_ndims + 1))

        #B(1), H+1, 1, 1
        edgescales = util.pad_shape_right_with_ones(
            self.edgescales, ndims=param_expansion_ndims)
        norm *= edgescales
        norm *= 2 * np.pi

        zeros = tf.zeros(tf.shape(self.heights)[:-1],
                         dtype=self.heights.dtype)[..., None]
        # B(1),1+H+1
        heights = tf.concat([zeros, self.heights, zeros], axis=-1)
        # B(1), H+1
        dheights = heights[..., :-1] - heights[..., 1:]
        #B(1), H+1, 1, 1
        dheights = util.pad_shape_right_with_ones(dheights,
                                                  ndims=param_expansion_ndims)
        #B(1), H+1, 1, 1
        dheights *= edgescales

        def _sinc(x):
            return tf.sin(x) * tf.reciprocal(x)

        #B(1), H+1, N, Np
        sincs = tf.where(tf.less(norm, tf.constant(1e-15, dtype=norm.dtype)),
                         tf.ones_like(norm), _sinc(norm))
        #B(1), H+1, N, Np
        result = dheights * sincs
        #B(1), N,Np
        return tf.reduce_sum(result, axis=-3)
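Example #16 guards its sinc evaluation with `tf.where` because sin(x)/x is 0/0 at x == 0 even though the limit is 1. The pattern in isolation (TF1-style ops, matching the snippet):

x = tf.constant([0., 1e-16, 1.])
naive = tf.sin(x) / x  # NaN at x == 0
safe = tf.where(tf.less(x, 1e-15), tf.ones_like(x), tf.sin(x) / x)
# safe is [1., 1., sin(1.)]; note tf.where still evaluates both branches,
# which matters for gradients but not for these forward values.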
Example #17
  def _apply(self, x1, x2, param_expansion_ndims=0):
    difference = np.pi * tf.abs(x1 - x2)

    if self.period is not None:
      # period acts as a batch of periods, and hence we must additionally
      # pad the shape with self.feature_ndims number of ones.
      period = util.pad_shape_right_with_ones(
          self.period, ndims=(param_expansion_ndims + self.feature_ndims))
      difference /= period
    log_kernel = util.sum_rightmost_ndims_preserving_shape(
        -2 * tf.sin(difference) ** 2, ndims=self.feature_ndims)

    if self.length_scale is not None:
      length_scale = util.pad_shape_right_with_ones(
          self.length_scale, ndims=param_expansion_ndims)
      log_kernel /= length_scale ** 2

    if self.amplitude is not None:
      amplitude = util.pad_shape_right_with_ones(
          self.amplitude, ndims=param_expansion_ndims)
      log_kernel += 2. * tf.log(amplitude)
    return tf.exp(log_kernel)
Example #18
  def _apply(self, x1, x2, param_expansion_ndims=0):
    difference = util.sum_rightmost_ndims_preserving_shape(
        tf.squared_difference(x1, x2), ndims=self.feature_ndims)

    if self.length_scale is not None:
      length_scale = util.pad_shape_right_with_ones(
          self.length_scale, ndims=param_expansion_ndims)
      difference /= (length_scale ** 2)

    scale_mixture_rate = 1.
    if self.scale_mixture_rate is not None:
      scale_mixture_rate = util.pad_shape_right_with_ones(
          self.scale_mixture_rate, ndims=param_expansion_ndims)
      difference /= (2 * scale_mixture_rate)

    result = (1. + difference) ** -scale_mixture_rate

    if self.amplitude is not None:
      amplitude = util.pad_shape_right_with_ones(
          self.amplitude, ndims=param_expansion_ndims)
      result *= amplitude ** 2
    return result
Example #19
    def _apply(self, x1, x2, param_expansion_ndims=0):
        difference = util.sum_rightmost_ndims_preserving_shape(
            tf.squared_difference(x1, x2), ndims=self.feature_ndims)

        if self.length_scale is not None:
            length_scale = util.pad_shape_right_with_ones(
                self.length_scale, ndims=param_expansion_ndims)
            difference /= length_scale**2

        scale_mixture_rate = 1.
        if self.scale_mixture_rate is not None:
            scale_mixture_rate = util.pad_shape_right_with_ones(
                self.scale_mixture_rate, ndims=param_expansion_ndims)
            difference /= scale_mixture_rate

        result = (1. + difference)**-scale_mixture_rate

        if self.amplitude is not None:
            amplitude = util.pad_shape_right_with_ones(
                self.amplitude, ndims=param_expansion_ndims)
            result *= amplitude**2
        return result
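Examples #7, #18, and #19 are all rational quadratic kernels. Examples #7 and #18 fold a factor of 2 into the denominator, giving the standard amplitude**2 * (1 + d**2 / (2 * alpha * length_scale**2))**-alpha form, while this Example #19 variant omits it. Evaluating the Example #18 form directly, with made-up inputs and feature_ndims = 1:

import tensorflow as tf

x1 = tf.constant([[1., 2.]])
x2 = tf.constant([[0., 0.]])
amplitude, length_scale, alpha = 1., 2., 3.  # alpha == scale_mixture_rate

d2 = tf.reduce_sum(tf.math.squared_difference(x1, x2), axis=-1)
k = amplitude**2 * (1. + d2 / (2. * alpha * length_scale**2))**-alpha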
Example #20
  def _apply(self, x1, x2, example_ndims=0):
    difference = np.pi * tf.abs(x1 - x2)

    if self.period is not None:
      # period acts as a batch of periods, and hence we must additionally
      # pad the shape with self.feature_ndims number of ones.
      period = util.pad_shape_with_ones(
          self.period, ndims=(example_ndims + self.feature_ndims))
      difference /= period
    log_kernel = util.sum_rightmost_ndims_preserving_shape(
        -2 * tf.sin(difference) ** 2, ndims=self.feature_ndims)

    if self.length_scale is not None:
      length_scale = util.pad_shape_with_ones(
          self.length_scale, ndims=example_ndims)
      log_kernel /= length_scale ** 2

    if self.amplitude is not None:
      amplitude = util.pad_shape_with_ones(
          self.amplitude, ndims=example_ndims)
      log_kernel += 2. * tf.math.log(amplitude)
    return tf.exp(log_kernel)
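Examples #17 and #20 are the ExpSinSquared (periodic) kernel: k(x1, x2) = amplitude**2 * exp(-2 * sum_i sin(pi * |x1_i - x2_i| / period)**2 / length_scale**2). The same value computed directly, assuming scalar parameters and feature_ndims = 1:

import numpy as np
import tensorflow as tf

x1 = tf.constant([[0.0, 1.0]])
x2 = tf.constant([[0.5, 1.5]])
amplitude, length_scale, period = 1., 1., 2.

d = np.pi * tf.abs(x1 - x2) / period
log_k = tf.reduce_sum(-2. * tf.sin(d)**2, axis=-1) / length_scale**2
log_k += 2. * tf.math.log(amplitude)
k = tf.exp(log_k)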