Code Example #1
    def __init__(self, diff_parameters: DiffParameters):
        super(Diffusion, self).__init__()

        def get_bijector():
            if gpflow.config.default_positive_bijector() == 'exp':
                return tfp.bijectors.Exp()
            elif gpflow.config.default_positive_bijector() == 'softplus':
                return tfp.bijectors.Softplus()
            else:
                raise ValueError(
                    "Unexpected value in default_positive_bijector()")

        assert len(diff_parameters.alphas) == len(
            diff_parameters.betas), "len(alphas) != len(betas)"
        self.dimension = len(diff_parameters.alphas)
        alphas = diff_parameters.alphas
        betas = diff_parameters.betas
        self._alphas = gpflow.Parameter(
            tf.ones_like(alphas, dtype=tf_floatx()),
            # alphas,
            transform=get_bijector(),
            name='alphas')
        self._betas = gpflow.Parameter(
            # TODO
            tf.ones_like(betas, dtype=tf_floatx()),
            # betas,
            transform=get_bijector(),
            name='betas')
        self.prior_distribution = tfd.Gamma(alphas, betas)
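A minimal usage sketch (assuming GPflow 2.x defaults): the hand-rolled get_bijector() switch above mirrors the library's own config-driven helper, so the same Parameter can be built with gpflow.utilities.positive().

import gpflow

# 'softplus' is the library default; get_bijector() above branches on this string.
print(gpflow.config.default_positive_bijector())  # e.g. 'softplus'

# The built-in helper resolves the same config setting to a bijector:
alphas = gpflow.Parameter([1.0, 1.0], transform=gpflow.utilities.positive(), name='alphas')
print(type(alphas.transform).__name__)  # Softplus (or Exp, per the config)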
Code Example #2
File: test_base.py Project: GPflow/GPflow
def test_construct_parameter_from_existing_parameter_override_trainable(
        trainable):
    initial_parameter = gpflow.Parameter([1.2, 1.1], trainable=trainable)
    new_parameter = gpflow.Parameter(initial_parameter,
                                     trainable=not trainable)

    assert new_parameter.trainable is not trainable
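For context, a short sketch of the behaviour this test pins down: constructing a Parameter from an existing one copies its value, while keyword arguments override the copied attributes.

import gpflow

p = gpflow.Parameter(1.0, trainable=False)
q = gpflow.Parameter(p, trainable=True)  # value copied, flag overridden
print(p.trainable, q.trainable)  # False True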
Code Example #3
    def __init__(self,
                 eigenpairs,
                 kappa=4,
                 sigma_f=1,
                 vertex_dim=0,
                 point_kernel=None,
                 active_dims=None,
                 dtype=tf.float64):

        self.eigenvectors, self.eigenvalues = eigenpairs
        self.num_verticies = tf.cast(tf.shape(self.eigenvectors)[0],
                                     dtype=dtype)
        self.vertex_dim = vertex_dim
        if vertex_dim != 0:
            self.point_kernel = point_kernel
        else:
            self.point_kernel = None
        self.dtype = dtype

        self.kappa = gpflow.Parameter(kappa,
                                      dtype=self.dtype,
                                      transform=gpflow.utilities.positive(),
                                      name='kappa')
        self.sigma_f = gpflow.Parameter(sigma_f,
                                        dtype=self.dtype,
                                        transform=gpflow.utilities.positive(),
                                        name='sigma_f')
        super().__init__(active_dims=active_dims)
Code Example #4
    def update(self, dataset: Dataset, *, jitter: float = DEFAULTS.JITTER) -> None:
        """
        Update the model given the specified ``dataset``. Does not train the model.

        :param dataset: The data with which to update the model.
        :param jitter: The size of the jitter to use when stabilizing the Cholesky decomposition of
            the covariance matrix.
        """
        model = self.model

        x, y = self.model.data[0].value(), self.model.data[1].value()
        assert_data_is_compatible(dataset, Dataset(x, y))

        f_mu, f_cov = self.model.predict_f(dataset.query_points, full_cov=True)  # [N, L], [L, N, N]

        # GPflow's VGP model is hard-coded to use the whitened representation, i.e.
        # q_mu and q_sqrt parametrise q(v), and u = f(X) = L v, where L = cholesky(K(X, X))
        # Hence we need to back-transform from f_mu and f_cov to obtain the updated
        # new_q_mu and new_q_sqrt:
        Knn = model.kernel(dataset.query_points, full_cov=True)  # [N, N]
        jitter_mat = jitter * tf.eye(len(dataset), dtype=Knn.dtype)
        Lnn = tf.linalg.cholesky(Knn + jitter_mat)  # [N, N]
        new_q_mu = tf.linalg.triangular_solve(Lnn, f_mu)  # [N, L]
        tmp = tf.linalg.triangular_solve(Lnn[None], f_cov)  # [L, N, N], L⁻¹ f_cov
        S_v = tf.linalg.triangular_solve(Lnn[None], tf.linalg.matrix_transpose(tmp))  # [L, N, N]
        new_q_sqrt = tf.linalg.cholesky(S_v + jitter_mat)  # [L, N, N]

        model.data[0].assign(dataset.query_points)
        model.data[1].assign(dataset.observations)
        model.num_data = len(dataset)
        model.q_mu = gpflow.Parameter(new_q_mu)
        model.q_sqrt = gpflow.Parameter(new_q_sqrt, transform=gpflow.utilities.triangular())
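The back-transform follows from the whitened parametrisation u = L v: the mean of q(v) is L⁻¹ f_mu and its covariance is L⁻¹ f_cov L⁻ᵀ. A tiny self-contained check of the mean step, with hypothetical N=3, L=1 shapes:

import numpy as np
import tensorflow as tf

K = tf.constant([[2.0, 0.5, 0.1],
                 [0.5, 2.0, 0.5],
                 [0.1, 0.5, 2.0]], dtype=tf.float64)  # a valid covariance [N, N]
L_chol = tf.linalg.cholesky(K)
f_mu = tf.constant([[0.3], [-1.2], [0.7]], dtype=tf.float64)  # [N, 1]
v_mu = tf.linalg.triangular_solve(L_chol, f_mu)  # as in new_q_mu above
# The forward map u = L v recovers f_mu:
np.testing.assert_allclose(tf.linalg.matmul(L_chol, v_mu).numpy(), f_mu.numpy())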
Code Example #5
File: test_base.py Project: GPflow/GPflow
def test_construct_parameter_from_existing_parameter_override_name():
    initial_parameter = gpflow.Parameter([1.2, 1.1])
    transform = tfp.bijectors.Sigmoid(tf.constant(0.0, dtype=tf.float64),
                                      tf.constant(2.0, dtype=tf.float64))
    new_parameter = gpflow.Parameter(initial_parameter, transform=transform)

    assert new_parameter.name == transform.name
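Presumably the same holds for any bijector when no explicit name is passed; a hedged sketch with Softplus instead of Sigmoid:

import tensorflow_probability as tfp
import gpflow

p = gpflow.Parameter(1.0)
bijector = tfp.bijectors.Softplus()
q = gpflow.Parameter(p, transform=bijector)
print(q.name == bijector.name)  # True, per the behaviour asserted above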
Code Example #6
    def __init__(self, variance=1.0, lengthscales=1.0, **kwargs):
        super().__init__(**kwargs)
        self.variance = gpflow.Parameter(variance,
                                         transform=gpflow.utilities.positive())
        self.lengthscales = gpflow.Parameter(
            lengthscales, transform=gpflow.utilities.positive())
        self._validate_ard_active_dims(self.lengthscales)
Code Example #7
File: test_base.py Project: GPflow/GPflow
def test_construct_parameter_from_existing_parameter_override_prior():
    initial_parameter = gpflow.Parameter([1.2, 1.1])

    prior = tfp.distributions.Normal(0.0, 1.0)
    new_parameter = gpflow.Parameter(initial_parameter, prior=prior)

    assert new_parameter.prior == prior
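A short sketch of what the prior buys you (assuming GPflow 2.x, where Parameter exposes log_prior_density()): the density feeds into the model's training objective.

import numpy as np
import tensorflow_probability as tfp
import gpflow

prior = tfp.distributions.Normal(np.float64(0.0), np.float64(1.0))
p = gpflow.Parameter(0.5, prior=prior)
print(p.log_prior_density())  # log N(0.5; 0, 1)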
Code Example #8
    def __init__(self, input_dim, Q=1, active_dims=None, name='sm'):
        """
        - Q (int): The number of mixtures.

        References:
        http://hips.seas.harvard.edu/files/wilson-extrapolation-icml-2013_0.pdf
        http://www.cs.cmu.edu/~andrewgw/typo.pdf
        """
        mixture_weights = np.random.random((Q))
        mixture_means = np.random.random((Q, input_dim))
        mixture_scales = np.random.random((input_dim, Q))

        super().__init__(active_dims, name=name)
        self.num_mixtures = int(Q)
        self.mixture_weights = gpflow.Parameter(
            mixture_weights,
            transform=gpflow.utilities.positive(),
            name='mixture_weights')
        self.mixture_scales = gpflow.Parameter(
            mixture_scales,
            transform=gpflow.utilities.positive(),
            name='mixture_scales')
        self.mixture_means = gpflow.Parameter(
            mixture_means,
            transform=gpflow.utilities.positive(),
            name='mixture_means')
Code Example #9
    def __init__(self, data, Y_var):
        super().__init__(active_dims=[0])
        self.Y_var = Y_var
        self.num_genes = data.m_obs.shape[1]
        #         l_affine = tfb.AffineScalar(shift=tf.cast(1., tf.float64),
        #                             scale=tf.cast(4-1., tf.float64))
        #         l_sigmoid = tfb.Sigmoid()
        #         l_logistic = tfb.Chain([l_affine, l_sigmoid])

        self.lengthscale = gpflow.Parameter(1.414, transform=positive())

        D_affine = tfb.AffineScalar(shift=tf.cast(0.1, tf.float64),
                                    scale=tf.cast(1.5 - 0.1, tf.float64))
        D_sigmoid = tfb.Sigmoid()
        D_logistic = tfb.Chain([D_affine, D_sigmoid])
        S_affine = tfb.AffineScalar(shift=tf.cast(0.1, tf.float64),
                                    scale=tf.cast(4. - 0.1, tf.float64))
        S_sigmoid = tfb.Sigmoid()
        S_logistic = tfb.Chain([S_affine, S_sigmoid])

        self.D = gpflow.Parameter(np.random.uniform(0.9, 1, self.num_genes),
                                  transform=positive(),
                                  dtype=tf.float64)
        #         self.D[3].trainable = False
        #         self.D[3].assign(0.8)
        self.S = gpflow.Parameter(np.random.uniform(1, 1, self.num_genes),
                                  transform=positive(),
                                  dtype=tf.float64)
        #         self.S[3].trainable = False
        #         self.S[3].assign(1)
        self.kervar = gpflow.Parameter(np.float64(1), transform=positive())
        self.noise_term = gpflow.Parameter(
            0.1353 * tf.ones(self.num_genes, dtype='float64'),
            transform=positive())
Code Example #10
File: test_base.py Project: GPflow/GPflow
def test_construct_parameter_from_existing_parameter_check_transform():
    transform = tfp.bijectors.Sigmoid(tf.constant(0.0, dtype=tf.float64),
                                      tf.constant(2.0, dtype=tf.float64))
    initial_parameter = gpflow.Parameter([1.2, 1.1], transform=transform)
    new_parameter = gpflow.Parameter(initial_parameter)

    assert new_parameter.transform == transform
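To see what the inherited transform does, a brief sketch: the optimised variable lives in unconstrained space, and the bijector maps it back into (0, 2).

import tensorflow as tf
import tensorflow_probability as tfp
import gpflow

transform = tfp.bijectors.Sigmoid(tf.constant(0.0, dtype=tf.float64),
                                  tf.constant(2.0, dtype=tf.float64))
p = gpflow.Parameter(1.2, transform=transform)
print(p.numpy())  # 1.2, the constrained value
print(transform.forward(p.unconstrained_variable).numpy())  # ~1.2 again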
Code Example #11
    def __init__(self, a, b, M):
        # [a, b] defining the interval of the Fourier representation:
        self.a = gpflow.Parameter(a, dtype=gpflow.default_float())
        self.b = gpflow.Parameter(b, dtype=gpflow.default_float())
        # integer array defining the frequencies, ω_m = 2π m / (b - a):
        self.ms = np.arange(M)
        self.omegas = 2.0 * np.pi * self.ms / (b - a)
Code Example #12
    def __init__(self, latent_dim: int,
                 input_dim: int,
                 network_dims: int,
                 activation_func: Optional[Callable] = None):
        """
        Encoder that uses GPflow params to encode the features.
        Creates an MLP with input dimensions `input_dim` and produces
        2 * `latent_dim` outputs. Unlike the standard encoder, this 
        expects an input of NR shape, and converts that to an output which is
        (N*R)L, where L is the latent dim.
        
        :param latent_dim: dimension of the latent variable, i.e L
        :param input_dim: the MLP acts on data of `input_dim` dimensions, i.e. R
        :param network_dims: dimensions of inner MLPs, e.g. [10, 20, 10]
        :param activation_func: TensorFlow operation that can be used
            as non-linearity between the layers (default: tanh).
        """
        super().__init__()
        self.latent_dim = tf.convert_to_tensor([latent_dim], tf.int32)
        self.activation_func = activation_func or tf.nn.tanh

        self.layer_dims = [input_dim, *network_dims, latent_dim * 2]

        Ws, bs = [], []

        for input_dim, output_dim in zip(self.layer_dims[:-1], self.layer_dims[1:]):
            xavier_std = (2. / (input_dim + output_dim)) ** 0.5
            W = np.random.randn(input_dim, output_dim) * xavier_std
            Ws.append(gpflow.Parameter(W, dtype=gpflow.config.default_float()))
            bs.append(gpflow.Parameter(np.zeros(output_dim), dtype=gpflow.config.default_float()))

        self.Ws, self.bs = Ws, bs
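As a sanity check of the layer construction, a minimal forward pass with hypothetical dimensions (R=5 inputs, one hidden layer of 10, L=2 latents, hence 2*L=4 outputs), applying tanh throughout for brevity:

import numpy as np
import tensorflow as tf

layer_dims = [5, 10, 4]  # [input_dim, *network_dims, latent_dim * 2]
Ws = [np.random.randn(d_in, d_out) * (2.0 / (d_in + d_out)) ** 0.5  # Xavier std
      for d_in, d_out in zip(layer_dims[:-1], layer_dims[1:])]
bs = [np.zeros(d_out) for d_out in layer_dims[1:]]

x = np.random.randn(3, 5)  # a batch of 3 inputs
for W, b in zip(Ws, bs):
    x = tf.nn.tanh(x @ W + b)
print(x.shape)  # (3, 4)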
Code Example #13
File: csm.py Project: zphilip/mogptk
    def __init__(self,
                 input_dim,
                 output_dim,
                 Rq,
                 active_dims=None,
                 name='csm'):
        """
        - input_dim (int): The number of input dimensions.
        - output_dim (int): The number of output dimensions.
        - Rq (int): The number of subcomponents.
        - active_dims (list of int): Apply kernel to specified dimensions only.
        """
        constant = np.random.random((Rq, output_dim))
        mean = np.random.random(input_dim)
        variance = np.random.random(input_dim)
        phase = np.zeros((Rq, output_dim))

        MultiKernel.__init__(self,
                             input_dim,
                             output_dim,
                             active_dims,
                             name=name)
        self.constant = gpflow.Parameter(constant,
                                         transform=gpflow.utilities.positive(),
                                         name="constant")
        self.mean = gpflow.Parameter(mean,
                                     transform=gpflow.utilities.positive(),
                                     name="mean")
        self.variance = gpflow.Parameter(variance,
                                         transform=gpflow.utilities.positive(),
                                         name="variance")
        self.phase = gpflow.Parameter(phase, name="phase")
Code Example #14
File: osgpr.py Project: aerubanov/Proj_Air_Quality
    def __init__(self,
                 data: RegressionData,
                 kernel: Kernel,
                 mu_old: Optional[tf.Tensor],
                 Su_old: Optional[tf.Tensor],
                 Kaa_old: Optional[tf.Tensor],
                 Z_old: Optional[tf.Tensor],
                 inducing_variable: Union[InducingPoints, np.ndarray],
                 mean_function=Zero()):
        """
        Z is a matrix of pseudo inputs, size M x D
        kern, mean_function are appropriate gpflow objects
        mu_old, Su_old are mean and covariance of old q(u)
        Z_old is the old inducing inputs
        This method only works with a Gaussian likelihood.
        """
        X, Y = data
        self.X = X
        self.Y = Y
        likelihood = Gaussian()
        self.inducing_variable = gpflow.models.util.inducingpoint_wrapper(inducing_variable)

        GPModel.__init__(self, kernel, likelihood, mean_function, inducing_variable.size)

        self.num_data = X.shape[0]
        self.num_latent = Y.shape[1]

        self.mu_old = gpflow.Parameter(mu_old, trainable=False)
        self.M_old = Z_old.shape[0]
        self.Su_old = gpflow.Parameter(Su_old, trainable=False)
        self.Kaa_old = gpflow.Parameter(Kaa_old, trainable=False)
        self.Z_old = gpflow.Parameter(Z_old, trainable=False)
Code Example #15
    def update(self, dataset: Dataset):
        model = self.model
        x, y = model.data
        assert dataset.query_points.shape[-1] == x.shape[-1]
        assert dataset.observations.shape[-1] == y.shape[-1]
        data = (dataset.query_points, dataset.observations)
        num_data = data[0].shape[0]

        f_mu, f_cov = self.model.predict_f(dataset.query_points, full_cov=True)  # [N, L], [L, N, N]
        assert self.model.q_sqrt.shape.ndims == 3

        # GPflow's VGP model is hard-coded to use the whitened representation, i.e.
        # q_mu and q_sqrt parametrise q(v), and u = f(X) = L v, where L = cholesky(K(X, X))
        # Hence we need to back-transform from f_mu and f_cov to obtain the updated
        # new_q_mu and new_q_sqrt:
        Knn = model.kernel(dataset.query_points, full_cov=True)  # [N, N]
        jitter_mat = gpflow.config.default_jitter() * tf.eye(num_data, dtype=Knn.dtype)
        Lnn = tf.linalg.cholesky(Knn + jitter_mat)  # [N, N]
        new_q_mu = tf.linalg.triangular_solve(Lnn, f_mu)  # [N, L]
        tmp = tf.linalg.triangular_solve(Lnn[None], f_cov)  # [L, N, N], L⁻¹ f_cov
        S_v = tf.linalg.triangular_solve(Lnn[None], tf.linalg.matrix_transpose(tmp))  # [L, N, N]
        new_q_sqrt = tf.linalg.cholesky(S_v + jitter_mat)  # [L, N, N]

        model.data = data
        model.num_data = num_data
        model.q_mu = gpflow.Parameter(new_q_mu)
        model.q_sqrt = gpflow.Parameter(new_q_sqrt, transform=gpflow.utilities.triangular())
Code Example #16
File: test_base.py Project: GPflow/GPflow
def test_construct_parameter_from_existing_parameter_value_becomes_invalid():
    initial_parameter = gpflow.Parameter(0.0)
    transform = tfp.bijectors.Reciprocal()

    with pytest.raises(tf.errors.InvalidArgumentError) as exc:
        gpflow.Parameter(initial_parameter, transform=transform)

    assert "gpflow.Parameter" in exc.value.message
Code Example #17
File: test_base.py Project: vatsalaggarwal/GPflow
def test_parameter_assign_validation():
    with pytest.raises(tf.errors.InvalidArgumentError):
        param = gpflow.Parameter(0.0, transform=positive())

    param = gpflow.Parameter(0.1, transform=positive())
    param.assign(0.2)
    with pytest.raises(tf.errors.InvalidArgumentError):
        param.assign(0.0)
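If a parameter must stay strictly positive, gpflow.utilities.positive accepts a lower bound, which sidesteps the invalid assignment above; a small sketch:

import gpflow

param = gpflow.Parameter(0.1, transform=gpflow.utilities.positive(lower=1e-6))
param.assign(0.2)  # fine
# param.assign(0.0) would raise tf.errors.InvalidArgumentError, as in the test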
Code Example #18
File: kernels.py Project: henrymoss/FlowMO
    def __init__(self,
                 active_dims=[0],
                 gap_decay=0.1,
                 match_decay=0.9,
                 max_subsequence_length=3,
                 alphabet=[],
                 maxlen=0,
                 batch_size=1000):
        super().__init__(active_dims=active_dims)

        # constrain decay kernel params to between 0 and 1
        self.logistic_gap = tfb.Chain([
            tfb.Shift(tf.cast(0, tf.float64))(tfb.Scale(tf.cast(1,
                                                                tf.float64))),
            tfb.Sigmoid()
        ])
        self.logistic_match = tfb.Chain([
            tfb.AffineScalar(shift=tf.cast(0, tf.float64),
                             scale=tf.cast(1, tf.float64)),
            tfb.Sigmoid()
        ])
        self.gap_decay_param = gpflow.Parameter(gap_decay,
                                                transform=self.logistic_gap,
                                                name="gap_decay")
        self.match_decay_param = gpflow.Parameter(
            match_decay, transform=self.logistic_match, name="match_decay")

        # We use copies of the kernel params to avoid building an expensive
        # computation graph; gradients are instead calculated efficiently with
        # dynamic programming. These copies are refreshed at every call to K
        # and K_diag (to check whether the parameters have been updated).
        self.match_decay = self.match_decay_param.numpy()
        self.gap_decay = self.gap_decay_param.numpy()
        self.match_decay_unconstrained = \
            self.match_decay_param.unconstrained_variable.numpy()
        self.gap_decay_unconstrained = \
            self.gap_decay_param.unconstrained_variable.numpy()

        self.order_coefs = tf.ones(max_subsequence_length, dtype=tf.float64)

        # store additional kernel parameters
        self.max_subsequence_length = tf.constant(max_subsequence_length)
        self.alphabet = tf.constant(alphabet)
        self.alphabet_size = tf.shape(self.alphabet)[0]
        self.maxlen = tf.constant(maxlen)
        self.batch_size = tf.constant(batch_size)

        # build a lookup table of the alphabet to encode input strings
        self.table = tf.lookup.StaticHashTable(
            initializer=tf.lookup.KeyValueTensorInitializer(
                keys=tf.constant(["PAD"] + alphabet),
                values=tf.constant(range(0,
                                         len(alphabet) + 1)),
            ),
            default_value=0)

        # initialize helpful construction matrices to be lazily computed once needed
        self.D = None
        self.dD_dgap = None
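A self-contained sketch of the lookup-table encoding above, with a hypothetical four-letter alphabet ("PAD" maps to 0, which is also the default for unknown keys):

import tensorflow as tf

alphabet = ["A", "C", "G", "T"]
table = tf.lookup.StaticHashTable(
    initializer=tf.lookup.KeyValueTensorInitializer(
        keys=tf.constant(["PAD"] + alphabet),
        values=tf.constant(range(0, len(alphabet) + 1)),
    ),
    default_value=0)
print(table.lookup(tf.constant(["A", "T", "PAD", "?"])).numpy())  # [1 4 0 0]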
Code Example #19
File: RVFF_IV_1D.py Project: AndreasICL/MSc_Project
    def __init__(self, a, b, M, jitter=None):
        self.length = M
        # [a, b] defining the interval of the Fourier representation:
        self.a = gpflow.Parameter(a, dtype=gpflow.default_float())
        self.b = gpflow.Parameter(b, dtype=gpflow.default_float())
        self.jitter = jitter

        self.phis = gpflow.Parameter(np.random.uniform(0, 2 * np.pi, size=M))
        self.omegas = gpflow.Parameter(np.random.uniform(0, 0.5 * M, size=M))
Code Example #20
    def __init__(self, a, b, M):
        """
        `a` and `b` define the interval [a, b] of the Fourier representation.
        `M` specifies the number of frequencies to use.
        """
        # [a, b] defining the interval of the Fourier representation:
        self.a = gpflow.Parameter(a, dtype=gpflow.default_float())
        self.b = gpflow.Parameter(b, dtype=gpflow.default_float())
        # integer array defining the frequencies, ω_m = 2π m / (b - a):
        self.ms = np.arange(M)
Code Example #21
    def __init__(self, args):
        super().__init__(active_dims=[0])
        self.var = gpflow.Parameter(10.0, transform=positive())
        self.mag = gpflow.Parameter(1.0, transform=positive())
        self.args = args
        self.re = REMatchKernel(metric="polynomial",
                                degree=3,
                                gamma=1,
                                coef0=0,
                                alpha=0.5,
                                threshold=1e-6,
                                normalize_kernel=True)
Code Example #22
    def __init__(self, X, Y, name=None):
        super().__init__(name=name)
        self.X = X.copy()  # Contains the covariates and decision point
        self.Y = Y.copy()  # Y is the minute-level step counts

        self.num_data, self.input_dim = X.shape
        _, self.num_classes = Y.shape

        # Parameters
        self.W = gpflow.Parameter(
            np.random.randn(self.input_dim, self.num_classes))
        self.b = gpflow.Parameter(np.random.rand(self.num_classes))
Code Example #23
File: rewards.py Project: roksikonja/rl
    def __init__(self, state_dim, W=None, t=None):
        Reward.__init__(self)
        self.state_dim = state_dim
        if W is not None:
            self.W = gpflow.Parameter(
                np.reshape(W, (state_dim, state_dim)), trainable=False
            )
        else:
            self.W = gpflow.Parameter(np.eye(state_dim), trainable=False)
        if t is not None:
            self.t = gpflow.Parameter(np.reshape(t, (1, state_dim)), trainable=False)
        else:
            self.t = gpflow.Parameter(np.zeros((1, state_dim)), trainable=False)
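A brief note on trainable=False here: W and t are fixed buffers, excluded from gradient-based training but still stored as Parameters. A sketch:

import numpy as np
import gpflow

W = gpflow.Parameter(np.eye(2), trainable=False)
print(W.trainable)  # False: no gradients flow into W during optimisation
gpflow.set_trainable(W, True)  # the flag can be flipped later if needed
print(W.trainable)  # True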
Code Example #24
    def __init__(self, input_dim, output_dim, active_dims=None, name='conv'):
        """
        - input_dim (int): The number of input dimensions.
        - output_dim (int): The number of output dimensions.
        - active_dims (list of int): Apply kernel to specified dimensions only.
        """

        constant = np.random.random((output_dim))
        variance = np.ones((input_dim, output_dim)) * 10

        MultiKernel.__init__(self, input_dim, output_dim, active_dims, name=name)
        self.constant = gpflow.Parameter(constant, transform=gpflow.utilities.positive(), name="constant")
        self.variance = gpflow.Parameter(variance, transform=gpflow.utilities.positive(), name="variance")
Code Example #25
File: model_interfaces.py Project: vdutor/trieste
    def update(self, dataset: Dataset):
        model = self.model
        x, y = model.data
        assert dataset.query_points.shape[-1] == x.shape[-1]
        assert dataset.observations.shape[-1] == y.shape[-1]
        data = (dataset.query_points, dataset.observations)
        num_data = data[0].shape[0]
        num_latent_gps = model.num_latent_gps
        model.data = data
        model.num_data = num_data
        model.q_mu = gpflow.Parameter(np.zeros((num_data, num_latent_gps)))
        q_sqrt = np.eye(num_data)
        q_sqrt = np.repeat(q_sqrt[None], num_latent_gps, axis=0)
        model.q_sqrt = gpflow.Parameter(q_sqrt, transform=gpflow.utilities.triangular())
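A short sketch of the triangular() transform used for q_sqrt: it keeps each [N, N] factor lower-triangular while the optimiser works on the packed free parameters.

import numpy as np
import gpflow

q_sqrt = np.repeat(np.eye(3)[None], 2, axis=0)  # [num_latent_gps=2, N=3, N=3]
p = gpflow.Parameter(q_sqrt, transform=gpflow.utilities.triangular())
print(p.shape)  # (2, 3, 3)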
Code Example #26
File: models.pct.py Project: r-ashwin/docs
    def __init__(self, X, Y, name=None):
        super().__init__(name=name)  # always call the parent constructor

        self.X = X.copy()  # X is a NumPy array of inputs
        self.Y = Y.copy()  # Y is a 1-of-k (one-hot) representation of the labels

        self.num_data, self.input_dim = X.shape
        _, self.num_classes = Y.shape

        # make some parameters
        self.W = gpflow.Parameter(
            np.random.randn(self.input_dim, self.num_classes))
        self.b = gpflow.Parameter(np.random.randn(self.num_classes))
Code Example #27
File: HGPLVM.py Project: michaelStettler/h-GPLVM
    def __init__(self, data, latent_data, x_data_mean, kernel):
        super().__init__()
        print("HGPLVM")
        self.iter = 0
        self.kernel0 = kernel[0]
        self.kernel1 = kernel[1]
        self.mean_function = Zero()
        self.likelihood0 = gpflow.likelihoods.Gaussian(1.0)
        self.likelihood1 = gpflow.likelihoods.Gaussian(1.0)

        # make some parameters
        self.data = (gpflow.Parameter(x_data_mean),
                     gpflow.Parameter(latent_data), data)
        print("gpr_data", np.shape(self.data[0]), np.shape(self.data[1]),
              np.shape(self.data[2]))
Code Example #28
File: test_base.py Project: vatsalaggarwal/GPflow
def test_cast_to_dtype_precision_issue():
    """
    TensorFlow's tf.cast(value, dtype) implicitly does a tf.convert_to_tensor(value)
    *before* the cast when the value is not a tensor already. When value is a python float,
    this results in the following behaviour:

    >>> tf.cast(0.2, tf.float64)
    <tf.Tensor: id=37, shape=(), dtype=float64, numpy=0.20000000298023224>
    
    instead of the expected expansion of 0.2 to float64 precision that you get when
    passing in an object that already carries dtype information, such as a numpy array
    (which has float64 precision by default):

    >>> tf.cast(np.array(0.2), tf.float64)
    <tf.Tensor: id=40, shape=(), dtype=float64, numpy=0.2>

    This affected *all* gpflow.Parameter objects, resulting in numerical discrepancies
    between GPflow 1 and 2, due to the pass through _cast_to_dtype, which is now fixed.
    This is the corresponding regression test.
    """
    p = gpflow.Parameter(0.2, dtype=np.float64)
    actual_value = p.numpy()
    assert actual_value.dtype == np.float64
    expected_value = np.float64(0.2)
    assert actual_value == expected_value
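The discrepancy the docstring describes is easy to reproduce directly:

import numpy as np
import tensorflow as tf

print(tf.cast(0.2, tf.float64).numpy())            # 0.20000000298023224 (via float32)
print(tf.cast(np.array(0.2), tf.float64).numpy())  # 0.2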
Code Example #29
File: test_models.py Project: uri-granta/trieste
def test_find_best_model_initialization_changes_params_with_sigmoid_bijectors(
    gpflow_interface_factory: ModelFactoryType, dim: int
) -> None:
    x = tf.constant(
        np.arange(1, 1 + 10 * dim).reshape(-1, dim), dtype=gpflow.default_float()
    )  # shape: [10, dim]
    model, _ = gpflow_interface_factory(x, fnc_3x_plus_10(x)[:, 0:1])
    model.model.kernel = gpflow.kernels.RBF(lengthscales=[0.2] * dim)

    if isinstance(model, (VariationalGaussianProcess, SparseVariational)):
        pytest.skip("find_best_model_initialization is only implemented for the GPR models.")

    upper = tf.cast([10.0] * dim, dtype=tf.float64)
    lower = upper / 100
    model.model.kernel.lengthscales = gpflow.Parameter(
        model.model.kernel.lengthscales, transform=tfp.bijectors.Sigmoid(low=lower, high=upper)
    )

    model.find_best_model_initialization(2)

    npt.assert_allclose(1.0, model.model.kernel.variance)
    npt.assert_array_equal(dim, model.model.kernel.lengthscales.shape)
    npt.assert_raises(
        AssertionError, npt.assert_allclose, [0.2, 0.2], model.model.kernel.lengthscales
    )
Code Example #30
File: test_models.py Project: uri-granta/trieste
def test_find_best_model_initialization_improves_likelihood(
    gpflow_interface_factory: ModelFactoryType, dim: int
) -> None:
    x = tf.constant(
        np.arange(1, 1 + 10 * dim).reshape(-1, dim), dtype=gpflow.default_float()
    )  # shape: [10, dim]
    model, _ = gpflow_interface_factory(x, fnc_3x_plus_10(x)[:, 0:1])
    model.model.kernel = gpflow.kernels.RBF(variance=1.0, lengthscales=[0.2] * dim)

    if isinstance(model, (VariationalGaussianProcess, SparseVariational)):
        pytest.skip("find_best_model_initialization is only implemented for the GPR models.")

    model.model.kernel.variance.prior = tfp.distributions.LogNormal(
        loc=np.float64(-2.0), scale=np.float64(1.0)
    )
    upper = tf.cast([10.0] * dim, dtype=tf.float64)
    lower = upper / 100
    model.model.kernel.lengthscales = gpflow.Parameter(
        model.model.kernel.lengthscales, transform=tfp.bijectors.Sigmoid(low=lower, high=upper)
    )

    pre_init_loss = model.model.training_loss()
    model.find_best_model_initialization(100)
    post_init_loss = model.model.training_loss()

    npt.assert_array_less(post_init_loss, pre_init_loss)
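Both tests rely on the same pattern: re-wrapping an existing lengthscales Parameter with a Sigmoid bijector so that sampled re-initialisations stay inside [lower, upper]. A stand-alone sketch:

import tensorflow as tf
import tensorflow_probability as tfp
import gpflow

dim = 2
upper = tf.cast([10.0] * dim, dtype=tf.float64)
lower = upper / 100
kernel = gpflow.kernels.RBF(lengthscales=[0.2] * dim)
kernel.lengthscales = gpflow.Parameter(
    kernel.lengthscales, transform=tfp.bijectors.Sigmoid(low=lower, high=upper)
)
print(kernel.lengthscales.numpy())  # [0.2 0.2], now constrained to (0.1, 10.0)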