def __init__(self, kernels, name=None):
    """Create a kernel which is the sum of `kernels`.

    The input list is 'flattened' in the sense that any entries which are also
    of type `_SumKernel` will have their list of kernels appended to this
    instance's list of kernels. This will reduce the stack depth when actually
    evaluating the sum over kernel applications.

    Args:
      kernels: Python `list` of `PositiveSemidefiniteKernel` instances.
      name: Python `str` name prefixed to Ops created by this class.
    Raises:
      ValueError: `kernels` is an empty list, or `kernels` don't all have the
        same `feature_ndims`.
    """
    if not kernels:
      raise ValueError("Can't create _SumKernel over empty list.")
    if len(set([k.feature_ndims for k in kernels])) > 1:
      raise ValueError(
          "Can't sum kernels with different feature_ndims. Got:\n%s" %
          str([k.feature_ndims for k in kernels]))
    self._kernels = _flatten_summand_list(kernels)
    if name is None:
      name = 'SumKernel'
    # We have ensured the list is non-empty and all feature_ndims are the same.
    super(_SumKernel, self).__init__(
        feature_ndims=kernels[0].feature_ndims,
        dtype=util.maybe_get_common_dtype(
            [None if k.dtype is None else k for k in kernels]),
        name=name,
        validate_args=any([k.validate_args for k in kernels]))
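A minimal usage sketch (assuming these snippets are the TensorFlow Probability PSD kernels, with `tfp.math.psd_kernels` aliased as `tfpk`); a `_SumKernel` is normally produced by the `+` operator rather than constructed directly:

import tensorflow as tf
import tensorflow_probability as tfp

tfpk = tfp.math.psd_kernels

k1 = tfpk.ExponentiatedQuadratic(amplitude=1., length_scale=0.5)
k2 = tfpk.ExpSinSquared(period=2.)
k_sum = k1 + k2      # A sum kernel over [k1, k2].
k_sum3 = k_sum + k2  # Flattened to three summands, not a nested sum.

x = tf.random.normal([10, 3])  # 10 inputs with 3 features (feature_ndims=1).
k_sum.matrix(x, x)             # Shape [10, 10]: sum of the summands' matrices.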
Example n. 2
    def __init__(self,
                 kernels,
                 locs,
                 slopes,
                 weight_fn=util.sum_rightmost_ndims_preserving_shape,
                 validate_args=False,
                 name='ChangePoint'):
        """Construct a ChangePoint kernel instance.

    Args:
      kernels: List of size `[N]` of `PositiveSemidefiniteKernel` instances to
        interpolate between.
      locs: Ascending floating-point `Tensor` of shape broadcastable to
        `[..., N - 1]` that controls the regions for the interpolation.
        If `kernels` is a list of 1-D kernels with the default `weight_fn`,
        then between `locs[i - 1]` and `locs[i]`, this kernel acts like
        `kernels[i]`.
      slopes: Positive floating-point `Tensor` of shape broadcastable to
        `[..., N - 1]` that controls how smooth the interpolation between
        kernels is (larger `slopes` yield sharper, more step-like
        transitions).
      weight_fn: Python `callable` which takes an input `x` and a
        `feature_ndims` argument, and returns a `Tensor` where a scalar is
        returned for each right-most `feature_ndims` of the input (in other
        words, if `x` is a batch of inputs, `weight_fn` returns a batch of
        scalars with the same batch shape).
        Default value: Sums over the last `feature_ndims` of the input `x`.
      validate_args: If `True`, parameters are checked for validity despite
        possibly degrading runtime performance.
      name: Python `str` name prefixed to Ops created by this class.
    """

        parameters = dict(locals())
        with tf.name_scope(name):
            dtype = util.maybe_get_common_dtype([kernels, locs, slopes])

            if not callable(weight_fn):
                raise TypeError(f'weight_fn is not callable: {weight_fn}')
            if len(kernels) < 2:
                raise ValueError(
                    f'Expecting at least 2 kernels, got {len(kernels)}')
            if not all(k.feature_ndims == kernels[0].feature_ndims
                       for k in kernels):
                raise ValueError(
                    'Expected all `kernels` to have the same feature_ndims.')
            self._kernels = kernels
            self._locs = tensor_util.convert_nonref_to_tensor(locs,
                                                              name='locs',
                                                              dtype=dtype)
            self._slopes = tensor_util.convert_nonref_to_tensor(slopes,
                                                                name='slopes',
                                                                dtype=dtype)
            self._weight_fn = weight_fn
            super(ChangePoint, self).__init__(kernels[0].feature_ndims,
                                              dtype=dtype,
                                              name=name,
                                              validate_args=validate_args,
                                              parameters=parameters)
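A hedged construction sketch for `ChangePoint` (parameter values are illustrative; with N = 2 kernels there is a single change point):

import tensorflow as tf
import tensorflow_probability as tfp

tfpk = tfp.math.psd_kernels

kernel = tfpk.ChangePoint(
    kernels=[tfpk.ExponentiatedQuadratic(), tfpk.ExpSinSquared()],
    locs=[5.],     # N - 1 = 1 change point.
    slopes=[10.])  # Larger slope => sharper transition between kernels.

x = tf.linspace(0., 10., 50)[..., tf.newaxis]  # 50 1-D inputs.
kernel.matrix(x, x)  # Shape [50, 50]; roughly the first kernel below x = 5.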
Example n. 3
    def __init__(self,
                 amplitude=None,
                 length_scale=None,
                 inverse_length_scale=None,
                 feature_ndims=1,
                 validate_args=False,
                 name='ExponentiatedQuadratic'):
        """Construct an ExponentiatedQuadratic kernel instance.

    Args:
      amplitude: floating point `Tensor` that controls the maximum value
        of the kernel. Must be broadcastable with `length_scale` and inputs to
        `apply` and `matrix` methods. Must be greater than zero. A value of
        `None` is treated like 1.
        Default value: None
      length_scale: floating point `Tensor` that controls how sharp or wide the
        kernel shape is. This provides a characteristic "unit" of length against
        which `||x - y||` can be compared for scale. Must be broadcastable with
        `amplitude` and inputs to `apply` and `matrix` methods. A value of
        `None` is treated like 1. Only one of `length_scale` or
        `inverse_length_scale` should be provided.
        Default value: None
      inverse_length_scale: Non-negative floating point `Tensor` that is
        treated as `1 / length_scale`. Only one of `length_scale` or
        `inverse_length_scale` should be provided.
        Default value: None
      feature_ndims: Python `int` number of rightmost dims to include in the
        squared difference norm in the exponential.
      validate_args: If `True`, parameters are checked for validity despite
        possibly degrading runtime performance.
      name: Python `str` name prefixed to Ops created by this class.
    """
        parameters = dict(locals())
        if (length_scale is not None) and (inverse_length_scale is not None):
            raise ValueError('Must specify at most one of `length_scale` and '
                             '`inverse_length_scale`.')
        with tf.name_scope(name):
            dtype = util.maybe_get_common_dtype(
                [amplitude, length_scale, inverse_length_scale])
            self._amplitude = tensor_util.convert_nonref_to_tensor(
                amplitude, name='amplitude', dtype=dtype)
            self._length_scale = tensor_util.convert_nonref_to_tensor(
                length_scale, name='length_scale', dtype=dtype)
            self._inverse_length_scale = tensor_util.convert_nonref_to_tensor(
                inverse_length_scale, name='inverse_length_scale', dtype=dtype)
            super(ExponentiatedQuadratic,
                  self).__init__(feature_ndims,
                                 dtype=dtype,
                                 name=name,
                                 validate_args=validate_args,
                                 parameters=parameters)
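This is the standard squared-exponential kernel, k(x, y) = amplitude**2 * exp(-||x - y||**2 / (2 * length_scale**2)). A short usage sketch:

import tensorflow as tf
import tensorflow_probability as tfp

tfpk = tfp.math.psd_kernels

kernel = tfpk.ExponentiatedQuadratic(amplitude=1., length_scale=0.5)
x = tf.random.normal([6, 2])
y = tf.random.normal([6, 2])
kernel.apply(x, y)   # Pairwise values, shape [6].
kernel.matrix(x, y)  # Full Gram matrix, shape [6, 6].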
Example n. 4
  def __init__(
      self,
      amplitude=None,
      length_scale=None,
      scale_mixture_rate=None,
      feature_ndims=1,
      validate_args=False,
      name='RationalQuadratic'):
    """Construct a RationalQuadratic kernel instance.

    Args:
      amplitude: Positive floating point `Tensor` that controls the maximum
        value of the kernel. Must be broadcastable with `length_scale` and
        `scale_mixture_rate` and inputs to `apply` and `matrix` methods. A
        value of `None` is treated like 1.
        Default value: None
      length_scale: Positive floating point `Tensor` that controls how sharp or
        wide the kernel shape is. This provides a characteristic "unit" of
        length against which `||x - y||` can be compared for scale. Must be
        broadcastable with `amplitude`, `scale_mixture_rate` and inputs to
        `apply` and `matrix` methods. A value of `None` is treated like 1.
        Default value: None
      scale_mixture_rate: Positive floating point `Tensor` that controls how the
        ExponentiatedQuadratic kernels are mixed.  Must be broadcastable with
        `amplitude`, `length_scale` and inputs to `apply` and `matrix` methods.
        A value of `None` is treated like 1.
        Default value: None
      feature_ndims: Python `int` number of rightmost dims to include in the
        squared difference norm in the exponential.
      validate_args: If `True`, parameters are checked for validity despite
        possibly degrading runtime performance.
      name: Python `str` name prefixed to Ops created by this class.
    """
    parameters = dict(locals())
    with tf.name_scope(name) as name:
      dtype = util.maybe_get_common_dtype(
          [amplitude, scale_mixture_rate, length_scale])

      self._amplitude = tensor_util.convert_nonref_to_tensor(
          amplitude, name='amplitude', dtype=dtype)
      self._scale_mixture_rate = tensor_util.convert_nonref_to_tensor(
          scale_mixture_rate, name='scale_mixture_rate', dtype=dtype)
      self._length_scale = tensor_util.convert_nonref_to_tensor(
          length_scale, name='length_scale', dtype=dtype)

      super(RationalQuadratic, self).__init__(
          feature_ndims,
          dtype=dtype,
          name=name,
          validate_args=validate_args,
          parameters=parameters)
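The realized kernel is the rational quadratic form k(x, y) = amplitude**2 * (1 + ||x - y||**2 / (2 * scale_mixture_rate * length_scale**2))**(-scale_mixture_rate), an infinite scale mixture of ExponentiatedQuadratic kernels. A short sketch:

import tensorflow as tf
import tensorflow_probability as tfp

tfpk = tfp.math.psd_kernels

kernel = tfpk.RationalQuadratic(
    amplitude=1., length_scale=1., scale_mixture_rate=0.5)
x = tf.random.normal([8, 3])
kernel.matrix(x, x)  # Shape [8, 8].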
Example n. 5
    def _init_params(self, amplitude, length_scale):
        """Shared init logic for `amplitude` and `length_scale` params.

    Args:
      amplitude: `Tensor` (or convertible) or `None` to convert, validate.
      length_scale: `Tensor` (or convertible) or `None` to convert, validate.

    Returns:
      dtype: The common `DType` of the parameters.
    """
        dtype = util.maybe_get_common_dtype([amplitude, length_scale])
        self._amplitude = tensor_util.convert_nonref_to_tensor(
            amplitude, name='amplitude', dtype=dtype)
        self._length_scale = tensor_util.convert_nonref_to_tensor(
            length_scale, name='length_scale', dtype=dtype)
        return dtype
Example n. 6
    def __init__(self,
                 logits,
                 locs,
                 scales,
                 feature_ndims=1,
                 validate_args=False,
                 name='SpectralMixture'):
        """Construct a SpectralMixture kernel instance.

    Args:
      logits: Floating-point `Tensor` of shape `[..., M]`, whose softmax
        represents the mixture weights for the spectral density. Must
        be broadcastable with `locs` and `scales`.
      locs: Floating-point `Tensor` of shape `[..., M, F1, F2, ..., FN]`, which
        represents the location parameter of each of the `M` mixture components.
        `N` is `feature_ndims`. Must be broadcastable with `logits` and
        `scales`.
      scales: Positive floating-point `Tensor` of shape
        `[..., M, F1, F2, ..., FN]`, which represents the scale parameter of
        each of the `M` mixture components. `N` is `feature_ndims`. Must be
        broadcastable with `locs` and `logits`. These parameters act like
        inverse length scale parameters.
      feature_ndims: Python `int` number of rightmost dims to include in the
        squared difference norm in the exponential.
      validate_args: If `True`, parameters are checked for validity despite
        possibly degrading runtime performance.
      name: Python `str` name prefixed to Ops created by this class.
    """
        parameters = dict(locals())
        with tf.name_scope(name):
            dtype = util.maybe_get_common_dtype([logits, locs, scales])
            self._logits = tensor_util.convert_nonref_to_tensor(logits,
                                                                name='logits',
                                                                dtype=dtype)
            self._locs = tensor_util.convert_nonref_to_tensor(locs,
                                                              name='locs',
                                                              dtype=dtype)
            self._scales = tensor_util.convert_nonref_to_tensor(scales,
                                                                name='scales',
                                                                dtype=dtype)
            super(SpectralMixture, self).__init__(feature_ndims,
                                                  dtype=dtype,
                                                  name=name,
                                                  validate_args=validate_args,
                                                  parameters=parameters)
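A hedged sketch (assuming `SpectralMixture` is exported alongside the other kernels) with M = 2 mixture components over 1-D features, so `locs` and `scales` have trailing shape [M, F1] = [2, 1]:

import tensorflow as tf
import tensorflow_probability as tfp

tfpk = tfp.math.psd_kernels

kernel = tfpk.SpectralMixture(
    logits=[0.5, -0.5],    # Softmax-ed into mixture weights.
    locs=[[0.1], [2.]],    # Per-component locations, shape [2, 1].
    scales=[[0.5], [1.]])  # Act like inverse length scales, shape [2, 1].

x = tf.linspace(-3., 3., 20)[..., tf.newaxis]
kernel.matrix(x, x)  # Shape [20, 20].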
Example n. 7
  def __init__(self,
               amplitude=None,
               length_scale=None,
               period=None,
               feature_ndims=1,
               validate_args=False,
               name='ExpSinSquared'):
    """Construct a ExpSinSquared kernel instance.

    Args:
      amplitude: Positive floating point `Tensor` that controls the maximum
        value of the kernel. Must be broadcastable with `period`, `length_scale`
        and inputs to `apply` and `matrix` methods. A value of `None` is treated
        like 1.
      length_scale: Positive floating point `Tensor` that controls how sharp or
        wide the kernel shape is. This provides a characteristic "unit" of
        length against which `|x - y|` can be compared for scale. Must be
        broadcastable with `amplitude`, `period` and inputs to `apply` and
        `matrix` methods. A value of `None` is treated like 1.
      period: Positive floating point `Tensor` that controls the period of the
        kernel. Must be broadcastable with `amplitude`, `length_scale` and
        inputs to `apply` and `matrix` methods.  A value of `None` is treated
        like 1.
      feature_ndims: Python `int` number of rightmost dims to include in kernel
        computation.
      validate_args: If `True`, parameters are checked for validity despite
        possibly degrading runtime performance.
      name: Python `str` name prefixed to Ops created by this class.
    """
    parameters = dict(locals())
    with tf.name_scope(name):
      dtype = util.maybe_get_common_dtype(
          [amplitude, period, length_scale])
      self._amplitude = tensor_util.convert_nonref_to_tensor(
          amplitude, name='amplitude', dtype=dtype)
      self._period = tensor_util.convert_nonref_to_tensor(
          period, name='period', dtype=dtype)
      self._length_scale = tensor_util.convert_nonref_to_tensor(
          length_scale, name='length_scale', dtype=dtype)
      super(ExpSinSquared, self).__init__(
          feature_ndims,
          dtype=dtype,
          name=name,
          validate_args=validate_args,
          parameters=parameters)
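This is the standard periodic kernel form, k(x, y) = amplitude**2 * exp(-2 * sum_i sin(pi * (x_i - y_i) / period)**2 / length_scale**2). A short sketch:

import tensorflow as tf
import tensorflow_probability as tfp

tfpk = tfp.math.psd_kernels

kernel = tfpk.ExpSinSquared(amplitude=1., length_scale=1., period=3.)
x = tf.linspace(0., 6., 30)[..., tf.newaxis]
kernel.matrix(x, x)  # Shape [30, 30]; periodic in x - y with period 3.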
Example n. 8
 def __init__(self,
              noise=None,
              feature_ndims=1,
              validate_args=False,
              name="WhiteNoise"):
     parameters = dict(locals())
     with tf.name_scope(name):
         dtype = util.maybe_get_common_dtype([noise])
         self._noise = tensor_util.convert_nonref_to_tensor(noise,
                                                            dtype=dtype,
                                                            name="noise")
     super(_WhiteNoise, self).__init__(
         feature_ndims,
         dtype=dtype,
         name=name,
         validate_args=validate_args,
         parameters=parameters,
     )
Example n. 9
 def __init__(self,
              coef=None,
              feature_ndims=1,
              validate_args=False,
              name="Constant"):
     parameters = dict(locals())
     with tf.name_scope(name):
         dtype = util.maybe_get_common_dtype([coef])
         self._coef = tensor_util.convert_nonref_to_tensor(coef,
                                                           dtype=dtype,
                                                           name="coef")
     super(_Constant, self).__init__(
         feature_ndims,
         dtype=dtype,
         name=name,
         validate_args=validate_args,
         parameters=parameters,
     )
Example n. 10
 def __init__(self,
              amplitude=None,
              length_scale=None,
              feature_ndims=1,
              validate_args=False,
              name='ExponentiatedQuadratic'):
     parameters = dict(locals())
     with tf.name_scope(name):
         dtype = util.maybe_get_common_dtype([amplitude, length_scale])
         self._amplitude = tensor_util.convert_nonref_to_tensor(
             amplitude, name='amplitude', dtype=dtype)
         self._length_scale = tensor_util.convert_nonref_to_tensor(
             length_scale, name='length_scale', dtype=dtype)
     super(ExponentiatedQuadratic,
           self).__init__(feature_ndims,
                          dtype=dtype,
                          name=name,
                          validate_args=validate_args,
                          parameters=parameters)
Example n. 11
 def __init__(self,
              df,
              amplitude=None,
              length_scale=None,
              feature_ndims=1,
              validate_args=False,
              name='GeneralizedMatern'):
   parameters = dict(locals())
   with tf.name_scope(name) as name:
     super(GeneralizedMatern, self)._init_params(amplitude, length_scale)
     dtype = util.maybe_get_common_dtype([self._amplitude, df])
     self._df = tensor_util.convert_nonref_to_tensor(
         df, name='df', dtype=dtype)
     super(GeneralizedMatern, self).__init__(
         feature_ndims,
         dtype=dtype,
         name=name,
         validate_args=validate_args,
         parameters=parameters)
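A hedged usage sketch, assuming this is TensorFlow Probability's `GeneralizedMatern`, whose `df` parameter selects the Matern smoothness (e.g. `df=1.5` should match MaternThreeHalves):

import tensorflow as tf
import tensorflow_probability as tfp

tfpk = tfp.math.psd_kernels

kernel = tfpk.GeneralizedMatern(df=1.5, amplitude=1., length_scale=2.)
x = tf.random.normal([5, 1])
kernel.matrix(x, x)  # Shape [5, 5].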
Example n. 12
  def __init__(
      self,
      kernel,
      amplitudes,
      validate_args=False,
      name='AdditiveKernel'):
    """Instantiates an `AdditiveKernel`.

    Args:
      kernel: An instance of `PositiveSemidefiniteKernel` of the kind
        supported by this class (specifically, one that allows
        reinterpreting batch dimensions as feature dimensions), acting on
        inputs of the form `[B1, ..., Bk, D, 1]`; that is, `kernel` is a
        batch of D kernels, each acting on 1-dimensional inputs. We assume
        that the kernel has a batch dimension broadcastable with `[D]`.
        `kernel` must inherit from `tf.__internal__.CompositeTensor`.
      amplitudes: `Tensor` of shape `[B1, ..., Bk, M]`, where `M` is the
        order of the additive kernel. `M` must be statically identifiable.
      validate_args: Python `bool`, default `False`. When `True` kernel
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      name: Python `str` name prefixed to Ops created by this class. Default:
        subclass name.
    Raises:
      TypeError: if `kernel` is not an instance of
        `tf.__internal__.CompositeTensor`.
    """
    parameters = dict(locals())
    with tf.name_scope(name):
      if not isinstance(kernel, tf.__internal__.CompositeTensor):
        raise TypeError('`kernel` must inherit from '
                        '`tf.__internal__.CompositeTensor`.')
      dtype = util.maybe_get_common_dtype([kernel, amplitudes])
      self._kernel = kernel
      self._amplitudes = tensor_util.convert_nonref_to_tensor(
          amplitudes, name='amplitudes', dtype=dtype)
      super(AdditiveKernel, self).__init__(
          feature_ndims=self.kernel.feature_ndims,
          dtype=dtype,
          name=name,
          validate_args=validate_args,
          parameters=parameters)
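A construction-only sketch (assuming `AdditiveKernel` is importable from the module above; in TFP it lives in an experimental namespace). The inner kernel is a batch of D = 3 one-dimensional kernels, and each entry of `amplitudes` weights one interaction order:

import tensorflow as tf
import tensorflow_probability as tfp

tfpk = tfp.math.psd_kernels

base = tfpk.ExponentiatedQuadratic(
    length_scale=tf.ones([3]))  # Batch shape [3], broadcastable with [D].
additive = AdditiveKernel(kernel=base, amplitudes=[1., 0.5])  # Order M = 2.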
Example n. 13
 def __init__(self,
              concentration,
              rate,
              feature_ndims=1,
              validate_args=False,
              name='ExponentialCurve'):
     # These are the concentration and rate parameters of the Gamma distribution.
     # The kernel is defined as the integral of exp(-s (t + t')) over s drawn
     # from a gamma distribution.
     parameters = dict(locals())
     dtype = util.maybe_get_common_dtype([concentration, rate])
     self._concentration = tensor_util.convert_nonref_to_tensor(
         concentration, name='concentration', dtype=dtype)
     self._rate = tensor_util.convert_nonref_to_tensor(rate,
                                                       name='rate',
                                                       dtype=dtype)
     super(ExponentialCurve, self).__init__(feature_ndims=feature_ndims,
                                            dtype=dtype,
                                            name=name,
                                            validate_args=validate_args,
                                            parameters=parameters)
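Carrying the comment's integral through (a derivation sketch, using the standard Gamma Laplace transform E[exp(-u s)] = (rate / (rate + u))**concentration for s ~ Gamma(concentration, rate)):

def exponential_curve_closed_form(t1, t2, concentration, rate):
    # k(t, t') = E[exp(-s * (t + t'))] with s ~ Gamma(concentration, rate);
    # valid for t1 + t2 > -rate, and decays polynomially in t1 + t2.
    return (rate / (rate + t1 + t2))**concentration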
Example n. 14
 def __init__(
     self,
     length_scale=None,
     amplitude=None,
     feature_ndims=1,
     validate_args=False,
     name="Cosine",
 ):
      parameters = dict(locals())
     with tf.name_scope(name):
         dtype = util.maybe_get_common_dtype([length_scale, amplitude])
         self._length_scale = tensor_util.convert_nonref_to_tensor(
             length_scale, dtype=dtype)
         self._amplitude = tensor_util.convert_nonref_to_tensor(amplitude,
                                                                dtype=dtype)
     super(_Cosine, self).__init__(
         feature_ndims=feature_ndims,
         dtype=dtype,
         name=name,
         validate_args=validate_args,
         parameters=parameters,
     )
Example n. 15
    def _init_params(self, amplitude, length_scale, inverse_length_scale):
        """Shared init logic for `amplitude` and `length_scale` params.

    Args:
      amplitude: `Tensor` (or convertible) or `None` to convert, validate.
      length_scale: `Tensor` (or convertible) or `None` to convert, validate.
      inverse_length_scale: `Tensor` (or convertible) or `None` to convert,
        validate.

    Returns:
      dtype: The common `DType` of the parameters.
    """
        if (length_scale is not None) and (inverse_length_scale is not None):
            raise ValueError('Must specify at most one of `length_scale` and '
                             '`inverse_length_scale`.')
        dtype = util.maybe_get_common_dtype(
            [amplitude, length_scale, inverse_length_scale])
        self._amplitude = tensor_util.convert_nonref_to_tensor(
            amplitude, name='amplitude', dtype=dtype)
        self._length_scale = tensor_util.convert_nonref_to_tensor(
            length_scale, name='length_scale', dtype=dtype)
        self._inverse_length_scale = tensor_util.convert_nonref_to_tensor(
            inverse_length_scale, name='inverse_length_scale', dtype=dtype)
        return dtype
Example n. 16
    def __init__(self,
                 bias_variance=None,
                 slope_variance=None,
                 shift=None,
                 exponent=None,
                 feature_ndims=1,
                 validate_args=False,
                 name='Polynomial'):
        """Construct a Polynomial kernel instance.

    Args:
      bias_variance: Positive floating point `Tensor` that controls the
        variance from the origin. If bias = 0, there is no variance and the
        fitted function goes through the origin.  Must be broadcastable with
        `slope_variance`, `shift`, `exponent`, and inputs to `apply` and
        `matrix` methods. A value of `None` is treated like 0.
        Default Value: `None`
      slope_variance: Positive floating point `Tensor` that controls the
        variance of the regression line slope that is the basis for the
        polynomial. Must be broadcastable with `bias_variance`, `shift`,
        `exponent`, and inputs to `apply` and `matrix` methods. A value of
        `None` is treated like 1.
        Default Value: `None`
      shift: Floating point `Tensor` that controls the intercept with the
        x-axis of the linear function to be exponentiated to get this
        polynomial. Must be broadcastable with `bias_variance`,
        `slope_variance`, `exponent` and inputs to `apply` and `matrix`
        methods. A value of `None` is treated like 0, which results in having
        the intercept at the origin.
        Default Value: `None`
      exponent: Positive floating point `Tensor` that controls the exponent
        (also known as the degree) of the polynomial function. Must be
        broadcastable with `bias_variance`, `slope_variance`, `shift`,
        and inputs to `apply` and `matrix` methods. A value of `None` is
        treated like 1, which results in a linear kernel.
        Default Value: `None`
      feature_ndims: Python `int` number of rightmost dims to include in
        kernel computation.
        Default Value: 1
      validate_args: If `True`, parameters are checked for validity despite
        possibly degrading runtime performance.
        Default Value: `False`
      name: Python `str` name prefixed to Ops created by this class.
        Default Value: `'Polynomial'`
    """
        with tf.name_scope(name):
            dtype = util.maybe_get_common_dtype(
                [bias_variance, slope_variance, shift, exponent])
            self._bias_variance = tensor_util.convert_nonref_to_tensor(
                bias_variance, name='bias_variance', dtype=dtype)
            self._slope_variance = tensor_util.convert_nonref_to_tensor(
                slope_variance, name='slope_variance', dtype=dtype)
            self._shift = tensor_util.convert_nonref_to_tensor(shift,
                                                               name='shift',
                                                               dtype=dtype)
            self._exponent = tensor_util.convert_nonref_to_tensor(
                exponent, name='exponent', dtype=dtype)
            super(Polynomial, self).__init__(feature_ndims,
                                             dtype=dtype,
                                             name=name,
                                             validate_args=validate_args)
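A short usage sketch, using the parameter names from this snippet (newer releases may rename them); the realized kernel has the usual polynomial form k(x, y) = bias_variance**2 + slope_variance**2 * ((x - shift) . (y - shift))**exponent:

import tensorflow as tf
import tensorflow_probability as tfp

tfpk = tfp.math.psd_kernels

kernel = tfpk.Polynomial(
    bias_variance=1., slope_variance=1., shift=0., exponent=2.)
x = tf.random.normal([5, 2])
kernel.matrix(x, x)  # Shape [5, 5]; a degree-2 (quadratic) kernel.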