Example #1
    def _apply(self, x1, x2, param_expansion_ndims=0):
        print('\nLinear._apply')
        print('x1: ', type(x1), x1.dtype, x1.shape)
        print('x2: ', type(x2), x2.dtype, x2.shape)
        if self.origin is not None:
            x1 = x1 - self.origin
            x2 = x2 - self.origin
        print('x1: ', type(x1), x1.dtype, x1.shape)
        print('x2: ', type(x2), x2.dtype, x2.shape)
        print('x2.T: ', type(tf.transpose(x2)),
              tf.transpose(x2).dtype,
              tf.transpose(x2).shape)
        dot_prod = tf.tensordot(x1, tf.transpose(x2), axes=1)
        dot_prod = tf.reshape(dot_prod, [x1.shape[0], x2.shape[1]])
        # dot_prod = tf.matmul(x1, x2, transpose_b=True)
        print('dot_prod: ', type(dot_prod), dot_prod.dtype, dot_prod.shape)
        if self.bias is not None:
            bias = util.pad_shape_right_with_ones(self.bias,
                                                  param_expansion_ndims)
            dot_prod += bias**2
            print('dot_prod: ', type(dot_prod), dot_prod.dtype, dot_prod.shape)

        if self.amplitude is not None:
            amplitude = util.pad_shape_right_with_ones(self.amplitude,
                                                       param_expansion_ndims)
            dot_prod *= amplitude**2
            print('dot_prod: ', type(dot_prod), dot_prod.dtype, dot_prod.shape)

        return dot_prod
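
In closed form, writing o for origin, b for bias and a for amplitude, and assuming the tensordot/reshape above yields the pairwise dot products (as the commented-out tf.matmul(x1, x2, transpose_b=True) suggests), this example evaluates the shifted linear kernel

    k(x_1, x_2) = a^2 \left( (x_1 - o) \cdot (x_2 - o) + b^2 \right)

with the print statements only tracing dtypes and shapes along the way.
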
Example #2
  def _apply(self, x1, x2, param_expansion_ndims=0):
    if self.shift is None:
      dot_prod = util.sum_rightmost_ndims_preserving_shape(
          x1 * x2, ndims=self.feature_ndims)
    else:
      dot_prod = util.sum_rightmost_ndims_preserving_shape(
          (x1 - self.shift) * (x2 - self.shift),
          ndims=self.feature_ndims)

    if self.exponent is not None:
      exponent = util.pad_shape_right_with_ones(
          self.exponent, param_expansion_ndims)
      dot_prod **= exponent

    if self.slope_variance is not None:
      slope_variance = util.pad_shape_right_with_ones(
          self.slope_variance, param_expansion_ndims)
      dot_prod *= slope_variance ** 2.

    if self.bias_variance is not None:
      bias_variance = util.pad_shape_right_with_ones(
          self.bias_variance, param_expansion_ndims)
      dot_prod += bias_variance ** 2.

    return dot_prod
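
Example #2 relies on util.sum_rightmost_ndims_preserving_shape, which is not shown in this listing; from its usage it evidently sums over the rightmost ndims (feature) dimensions while leaving the batch dimensions intact. A hypothetical minimal sketch, assuming ndims is a static Python integer (the real utility may also handle dynamic ranks), could look like:

import tensorflow as tf

def sum_rightmost_ndims_preserving_shape(x, ndims):
  # Sum over the trailing `ndims` axes, keeping all leading (batch) axes.
  # Sketch only -- the actual TFP helper is more general.
  x = tf.convert_to_tensor(x)
  if ndims == 0:
    return x
  return tf.reduce_sum(x, axis=[-(i + 1) for i in range(ndims)])
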
Example #3
  def testPadShapeRightWithOnesDynamicShape(self):
    # Test partially unknown shape
    x = tf.placeholder_with_default(np.ones([3], np.float32), [None])
    expanded = util.pad_shape_right_with_ones(x, 3)
    self.assertAllEqual(expanded.shape.as_list(), [None, 1, 1, 1])
    self.assertAllEqual(self.evaluate(expanded).shape, [3, 1, 1, 1])

    # Test totally unknown shape
    x = tf.placeholder_with_default(np.ones([3], np.float32), None)
    expanded = util.pad_shape_right_with_ones(x, 3)
    self.assertIsNone(expanded.shape.ndims)
    self.assertAllEqual(self.evaluate(expanded).shape, [3, 1, 1, 1])
Example #4
    def testPadShapeRightWithOnesDynamicShape(self):
        # Test partially unknown shape
        x = tf.placeholder_with_default(np.ones([3], np.float32), [None])
        expanded = util.pad_shape_right_with_ones(x, 3)
        self.assertAllEqual(expanded.shape.as_list(), [None, 1, 1, 1])
        self.assertAllEqual(self.evaluate(expanded).shape, [3, 1, 1, 1])

        # Test totally unknown shape
        x = tf.placeholder_with_default(np.ones([3], np.float32), None)
        expanded = util.pad_shape_right_with_ones(x, 3)
        self.assertIsNone(expanded.shape.ndims)
        self.assertAllEqual(self.evaluate(expanded).shape, [3, 1, 1, 1])
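
Examples #3 and #4 pin down the contract of util.pad_shape_right_with_ones: append ndims singleton dimensions on the right, preserve whatever static shape information exists, and add no ops at all when ndims is 0 (see Examples #13 and #15). A hypothetical sketch consistent with that contract, assuming ndims is a static Python integer, might be:

import tensorflow as tf

def pad_shape_right_with_ones(x, ndims):
  # Append `ndims` size-1 dimensions to the right of x's shape.
  # For ndims == 0 the loop body never runs, so the graph is left untouched.
  x = tf.convert_to_tensor(x)
  for _ in range(ndims):
    x = x[..., tf.newaxis]
  return x
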
Example #5
    def _apply(self, x1, x2, param_expansion_ndims=0):
        exponent = -0.5 * util.sum_rightmost_ndims_preserving_shape(
            tf.math.squared_difference(x1, x2), self.feature_ndims)
        if self.length_scale is not None:
            length_scale = util.pad_shape_right_with_ones(
                self.length_scale, param_expansion_ndims)
            exponent /= length_scale**2

        if self.amplitude is not None:
            amplitude = util.pad_shape_right_with_ones(self.amplitude,
                                                       param_expansion_ndims)
            exponent += 2. * tf.math.log(amplitude)

        return tf.exp(exponent)
Example #6
  def _apply(self, x1, x2, param_expansion_ndims=0):
    exponent = -0.5 * util.sum_rightmost_ndims_preserving_shape(
        tf.squared_difference(x1, x2), self.feature_ndims)
    if self.length_scale is not None:
      length_scale = util.pad_shape_right_with_ones(
          self.length_scale, param_expansion_ndims)
      exponent /= length_scale**2

    if self.amplitude is not None:
      amplitude = util.pad_shape_right_with_ones(
          self.amplitude, param_expansion_ndims)
      exponent += 2. * tf.log(amplitude)

    return tf.exp(exponent)
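
Examples #5 and #6 are the same exponentiated quadratic (RBF) kernel, built in log space and differing only in the tf.math.* vs. the older tf.* aliases. With amplitude a and length scale \ell the returned value is

    k(x_1, x_2) = a^2 \exp\!\left(-\frac{\lVert x_1 - x_2 \rVert^2}{2\ell^2}\right)
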
Example #7
  def _apply(self, x1, x2, param_expansion_ndims=0):
    norm = tf.sqrt(
        util.sum_rightmost_ndims_preserving_shape(
            tf.squared_difference(x1, x2), self.feature_ndims))
    if self.length_scale is not None:
      length_scale = util.pad_shape_right_with_ones(
          self.length_scale, ndims=param_expansion_ndims)
      norm /= length_scale
    result = tf.exp(-norm)

    if self.amplitude is not None:
      amplitude = util.pad_shape_right_with_ones(
          self.amplitude, ndims=param_expansion_ndims)
      result *= amplitude**2
    return result
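
With length scale \ell and amplitude a, Example #7 computes the exponential (Matérn-1/2) kernel

    k(x_1, x_2) = a^2 \exp\!\left(-\frac{\lVert x_1 - x_2 \rVert}{\ell}\right)

Examples #8 and #9 below compute the same value in log space, using a square root with finite gradients.
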
Example #8
  def _apply(self, x1, x2, param_expansion_ndims=0):
    # Use util.sqrt_with_finite_grads to avoid NaN gradients when `x1 == x2`.
    norm = util.sqrt_with_finite_grads(
        util.sum_rightmost_ndims_preserving_shape(
            tf.squared_difference(x1, x2), self.feature_ndims))
    if self.length_scale is not None:
      length_scale = util.pad_shape_right_with_ones(
          self.length_scale, ndims=param_expansion_ndims)
      norm /= length_scale
    log_result = -norm

    if self.amplitude is not None:
      amplitude = util.pad_shape_right_with_ones(
          self.amplitude, ndims=param_expansion_ndims)
      log_result += 2. * tf.log(amplitude)
    return tf.exp(log_result)
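
The comment in Example #8 (and in Example #9 below) explains why util.sqrt_with_finite_grads replaces tf.sqrt: the derivative of sqrt diverges at zero, which is hit whenever x1 == x2. A hypothetical sketch of such a helper, using tf.custom_gradient to clamp only the gradient (the forward value stays an ordinary square root), might be:

import numpy as np
import tensorflow as tf

@tf.custom_gradient
def sqrt_with_finite_grads(x):
  # Forward pass: plain square root.
  y = tf.sqrt(x)
  def grad(dy):
    # d/dx sqrt(x) = 1 / (2 sqrt(x)); clamp x away from zero so the
    # gradient stays finite when x == 0 (i.e. when x1 == x2).
    eps = np.finfo(x.dtype.as_numpy_dtype).eps
    return dy * 0.5 * tf.math.rsqrt(tf.maximum(x, eps))
  return y, grad
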
Example #9
    def _apply(self, x1, x2, param_expansion_ndims=0):
        # Use util.sqrt_with_finite_grads to avoid NaN gradients when `x1 == x2`.
        norm = util.sqrt_with_finite_grads(
            util.sum_rightmost_ndims_preserving_shape(
                tf.math.squared_difference(x1, x2), self.feature_ndims))
        if self.length_scale is not None:
            length_scale = util.pad_shape_right_with_ones(
                self.length_scale, ndims=param_expansion_ndims)
            norm /= length_scale
        log_result = -norm

        if self.amplitude is not None:
            amplitude = util.pad_shape_right_with_ones(
                self.amplitude, ndims=param_expansion_ndims)
            log_result += 2. * tf.math.log(amplitude)
        return tf.exp(log_result)
Example #10
  def _apply(self, x1, x2, param_expansion_ndims=0):
    x1 = tf.convert_to_tensor(x1)
    x2 = tf.convert_to_tensor(x2)

    multiplier = kernels_util.pad_shape_right_with_ones(
        self._multiplier, param_expansion_ndims)

    return multiplier * tf.reduce_sum(x1 + x2, axis=-1)
Example #11
    def _apply(self, x1, x2, param_expansion_ndims=0):
        x1 = tf.convert_to_tensor(value=x1)
        x2 = tf.convert_to_tensor(value=x2)

        multiplier = kernels_util.pad_shape_right_with_ones(
            self._multiplier, param_expansion_ndims)

        return multiplier * tf.reduce_sum(input_tensor=x1 + x2, axis=-1)
Example #12
  def _apply(self, x1, x2, param_expansion_ndims=0):
    # Use util.sqrt_with_finite_grads to avoid NaN gradients when `x1 == x2`.
    norm = util.sqrt_with_finite_grads(
        util.sum_rightmost_ndims_preserving_shape(
            tf.squared_difference(x1, x2), self.feature_ndims))
    if self.length_scale is not None:
      length_scale = util.pad_shape_right_with_ones(
          self.length_scale, ndims=param_expansion_ndims)
      norm /= length_scale
    series_term = np.sqrt(5) * norm
    result = (1. + series_term + series_term**2 / 3.) * tf.exp(-series_term)

    if self.amplitude is not None:
      amplitude = util.pad_shape_right_with_ones(self.amplitude,
                                                 param_expansion_ndims)
      result *= amplitude**2
    return result
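
Writing r = \lVert x_1 - x_2 \rVert, with length scale \ell and amplitude a, Example #12 assembles the Matérn-5/2 kernel

    k(x_1, x_2) = a^2 \left(1 + \frac{\sqrt{5}\,r}{\ell} + \frac{5 r^2}{3\ell^2}\right) \exp\!\left(-\frac{\sqrt{5}\,r}{\ell}\right)
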
Example #13
    def testPadShapeRightWithOnesCanBeGraphNoop(self):
        # First ensure graph actually *is* changed when we use non-trivial ndims.
        # Use an explicitly created graph, to make sure no whacky test fixture graph
        # reuse is going on in the background.
        g = tf.Graph()
        with g.as_default():
            x = tf.constant(np.ones([3], np.float32))
            graph_def = g.as_graph_def()
            x = util.pad_shape_right_with_ones(x, 3)
            self.assertNotEqual(graph_def, g.as_graph_def())

        # Now verify that graphdef is unchanged (no extra ops) when we pass ndims=0.
        g = tf.Graph()
        with g.as_default():
            x = tf.constant(np.ones([3], np.float32))
            graph_def = g.as_graph_def()
            x = util.pad_shape_right_with_ones(x, 0)
            self.assertEqual(graph_def, g.as_graph_def())
Example #14
    def _apply(self, x1, x2, param_expansion_ndims=0):
        x1 = tf.convert_to_tensor(x1)
        x2 = tf.convert_to_tensor(x2)

        exponent = -.5 * util.sum_rightmost_ndims_preserving_shape(
            tf.squared_difference(x1, x2), self.feature_ndims)
        if self.length_scale is not None:
            length_scale = util.pad_shape_right_with_ones(
                self.length_scale, param_expansion_ndims)
            exponent /= length_scale**2

        exponential = tf.exp(exponent)
        if self.amplitude is not None:
            amplitude = util.pad_shape_right_with_ones(self.amplitude,
                                                       param_expansion_ndims)
            return amplitude * exponential
        else:
            return exponential
Example #15
  def testPadShapeRightWithOnesCanBeGraphNoop(self):
    # First ensure graph actually *is* changed when we use non-trivial ndims.
    # Use an explicitly created graph, to make sure no whacky test fixture graph
    # reuse is going on in the background.
    g = tf.Graph()
    with g.as_default():
      x = tf.constant(np.ones([3], np.float32))
      graph_def = g.as_graph_def()
      x = util.pad_shape_right_with_ones(x, 3)
      self.assertNotEqual(graph_def, g.as_graph_def())

    # Now verify that graphdef is unchanged (no extra ops) when we pass ndims=0.
    g = tf.Graph()
    with g.as_default():
      x = tf.constant(np.ones([3], np.float32))
      graph_def = g.as_graph_def()
      x = util.pad_shape_right_with_ones(x, 0)
      self.assertEqual(graph_def, g.as_graph_def())
Example #16
    def _apply(self, x1, x2, param_expansion_ndims=0):
        # Use util.sqrt_with_finite_grads to avoid NaN gradients when `x1 == x2`.
        #x1 = B,Np,D -> B,Np,1,D
        #x2 = B,N,D -> B,1,N,D
        #B, Np,N
        with tf.control_dependencies([
                tf.assert_equal(
                    tf.shape(self.heights)[-1] + 1,
                    tf.shape(self.edgescales)[-1])
        ]):
            norm = util.sqrt_with_finite_grads(
                util.sum_rightmost_ndims_preserving_shape(
                    tf.squared_difference(x1, x2), self.feature_ndims))
        #B(1),1,Np,N
        norm = tf.expand_dims(norm, -(param_expansion_ndims + 1))

        #B(1), H+1, 1, 1
        edgescales = util.pad_shape_right_with_ones(
            self.edgescales, ndims=param_expansion_ndims)
        norm *= edgescales
        norm *= 2 * np.pi

        zeros = tf.zeros(tf.shape(self.heights)[:-1],
                         dtype=self.heights.dtype)[..., None]
        # B(1),1+H+1
        heights = tf.concat([zeros, self.heights, zeros], axis=-1)
        # B(1), H+1
        dheights = heights[..., :-1] - heights[..., 1:]
        #B(1), H+1, 1, 1
        dheights = util.pad_shape_right_with_ones(dheights,
                                                  ndims=param_expansion_ndims)
        #B(1), H+1, 1, 1
        dheights *= edgescales

        def _sinc(x):
            return tf.sin(x) * tf.reciprocal(x)

        #B(1), H+1, N, Np
        sincs = tf.where(tf.less(norm, tf.constant(1e-15, dtype=norm.dtype)),
                         tf.ones_like(norm), _sinc(norm))
        #B(1), H+1, N, Np
        result = dheights * sincs
        #B(1), N,Np
        return tf.reduce_sum(result, axis=-3)
Example #17
    def _apply(self, x1, x2, param_expansion_ndims=0):
        dot_prod = tf.tensordot(x1, tf.transpose(x2), axes=1)
        dot_prod = tf.reshape(dot_prod, [x1.shape[0], x2.shape[1]])
        if self.bias is not None:
            bias = util.pad_shape_right_with_ones(self.bias,
                                                  param_expansion_ndims)
            dot_prod += bias**2.

        if self.power is not None:
            power = util.pad_shape_right_with_ones(self.power,
                                                   param_expansion_ndims)
            dot_prod **= power

        if self.amplitude is not None:
            amplitude = util.pad_shape_right_with_ones(self.amplitude,
                                                       param_expansion_ndims)
            dot_prod *= amplitude**2.

        return dot_prod
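
Assuming dot_prod above ends up holding the pairwise dot products x_1 \cdot x_2 (the tensordot/reshape combination is the same one used in Example #1), the remaining steps of Example #17 give the polynomial kernel

    k(x_1, x_2) = a^2 \left( x_1 \cdot x_2 + b^2 \right)^{p}

with bias b, power p and amplitude a.
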
Example #18
  def _apply(self, x1, x2, param_expansion_ndims=0):
    difference = util.sum_rightmost_ndims_preserving_shape(
        tf.squared_difference(x1, x2), ndims=self.feature_ndims)

    if self.length_scale is not None:
      length_scale = util.pad_shape_right_with_ones(
          self.length_scale, ndims=param_expansion_ndims)
      difference /= (length_scale ** 2)

    scale_mixture_rate = 1.
    if self.scale_mixture_rate is not None:
      scale_mixture_rate = util.pad_shape_right_with_ones(
          self.scale_mixture_rate, ndims=param_expansion_ndims)
      difference /= (2 * scale_mixture_rate)

    result = (1. + difference) ** -scale_mixture_rate

    if self.amplitude is not None:
      amplitude = util.pad_shape_right_with_ones(
          self.amplitude, ndims=param_expansion_ndims)
      result *= amplitude ** 2
    return result
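
Writing \alpha for scale_mixture_rate, \ell for length_scale and a for amplitude, Example #18 evaluates the rational quadratic kernel

    k(x_1, x_2) = a^2 \left(1 + \frac{\lVert x_1 - x_2 \rVert^2}{2\alpha\ell^2}\right)^{-\alpha}

Example #19 below differs only in omitting the factor of 2 in the denominator.
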
Example #19
    def _apply(self, x1, x2, param_expansion_ndims=0):
        difference = util.sum_rightmost_ndims_preserving_shape(
            tf.squared_difference(x1, x2), ndims=self.feature_ndims)

        if self.length_scale is not None:
            length_scale = util.pad_shape_right_with_ones(
                self.length_scale, ndims=param_expansion_ndims)
            difference /= length_scale**2

        scale_mixture_rate = 1.
        if self.scale_mixture_rate is not None:
            scale_mixture_rate = util.pad_shape_right_with_ones(
                self.scale_mixture_rate, ndims=param_expansion_ndims)
            difference /= scale_mixture_rate

        result = (1. + difference)**-scale_mixture_rate

        if self.amplitude is not None:
            amplitude = util.pad_shape_right_with_ones(
                self.amplitude, ndims=param_expansion_ndims)
            result *= amplitude**2
        return result
Example #20
  def _apply(self, x1, x2, param_expansion_ndims=0):
    difference = np.pi * tf.abs(x1 - x2)

    if self.period is not None:
      # period acts as a batch of periods, and hence we must additionally
      # pad the shape with self.feature_ndims number of ones.
      period = util.pad_shape_right_with_ones(
          self.period, ndims=(param_expansion_ndims + self.feature_ndims))
      difference /= period
    log_kernel = util.sum_rightmost_ndims_preserving_shape(
        -2 * tf.sin(difference) ** 2, ndims=self.feature_ndims)

    if self.length_scale is not None:
      length_scale = util.pad_shape_right_with_ones(
          self.length_scale, ndims=param_expansion_ndims)
      log_kernel /= length_scale ** 2

    if self.amplitude is not None:
      amplitude = util.pad_shape_right_with_ones(
          self.amplitude, ndims=param_expansion_ndims)
      log_kernel += 2. * tf.math.log(amplitude)
    return tf.exp(log_kernel)
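
Examples #20 and #21 build the periodic (exp-sin-squared) kernel in log space; with period p, length scale \ell, amplitude a and the sum running over the feature dimensions, the result is

    k(x_1, x_2) = a^2 \exp\!\left(-\frac{2 \sum_i \sin^2\!\big(\pi\,\lvert x_{1,i} - x_{2,i} \rvert / p\big)}{\ell^2}\right)
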
Example #21
  def _apply(self, x1, x2, param_expansion_ndims=0):
    difference = np.pi * tf.abs(x1 - x2)

    if self.period is not None:
      # period acts as a batch of periods, and hence we must additionally
      # pad the shape with self.feature_ndims number of ones.
      period = util.pad_shape_right_with_ones(
          self.period, ndims=(param_expansion_ndims + self.feature_ndims))
      difference /= period
    log_kernel = util.sum_rightmost_ndims_preserving_shape(
        -2 * tf.sin(difference) ** 2, ndims=self.feature_ndims)

    if self.length_scale is not None:
      length_scale = util.pad_shape_right_with_ones(
          self.length_scale, ndims=param_expansion_ndims)
      log_kernel /= length_scale ** 2

    if self.amplitude is not None:
      amplitude = util.pad_shape_right_with_ones(
          self.amplitude, ndims=param_expansion_ndims)
      log_kernel += 2. * tf.log(amplitude)
    return tf.exp(log_kernel)
Example #22
    def testPadShapeRightWithOnes(self):
        # Test nominal behavior.
        x = np.ones([3], np.float32)
        self.assertAllEqual(
            self.evaluate(util.pad_shape_right_with_ones(x, 3)).shape,
            [3, 1, 1, 1])
Example #23
  def testPadShapeRightWithOnes(self):
    # Test nominal behavior.
    x = np.ones([3], np.float32)
    self.assertAllEqual(
        self.evaluate(util.pad_shape_right_with_ones(x, 3)).shape,
        [3, 1, 1, 1])