Example #1
 def __init__(self, 
              data: Tuple[tf.Tensor, tf.Tensor], 
              m: int = 20, 
              alpha: float = 1./np.sqrt(2.),
              eps_sq: float = 1,
              sigma_n_sq: float = 1,
              sigma_f_sq: float = 1):
                             
     self.num_data = tf.cast(data[1].shape[0], default_float())
     self.data = (tf.cast(tf.squeeze(data[0]), default_float()), tf.cast(data[1], default_float()))
     self.const = tf.cast(0.5*data[1].size*np.log(2*np.pi), default_float())
     
     D = data[0].shape[1]        
     self.flag_1d = D == 1
     self.alpha = tf.cast(alpha, default_float())
     self.alpha_sq = tf.square(self.alpha)
     self.m = tf.cast(m, default_float())
     self.this_range = tf.constant(np.asarray(list(product(range(1, m + 1), repeat=D))).squeeze(), dtype=default_float())
     
     self.this_range_1 = self.this_range - 1.
     self.this_range_1_2 = self.this_range_1 if self.flag_1d else tf.range(m, dtype=default_float())
     self.this_range_1_int = tf.cast(self.this_range_1, tf.int32)
     self.tf_range_dnn_out = tf.range(D)
     self.this_range_1_ln2 = np.log(2.)*self.this_range_1
             
     self.vander_range = tf.range(m+1, dtype=default_float())
     self.eye_k = tf.eye(m**D, dtype=default_float())
     self.yTy = tf.reduce_sum(tf.math.square(self.data[1])) 
     self.coeff_n_tf = tf.constant(np.load(os.path.dirname(os.path.realpath(__file__)) + '/hermite_coeff.npy')[:m, :m], dtype=default_float())
     
     eps_sq = eps_sq*np.ones(D) if D > 1 else eps_sq       
     self.eps_sq = Parameter(eps_sq, transform=positive(), dtype=default_float())
     self.sigma_f_sq = Parameter(sigma_f_sq, transform=positive(), dtype=default_float())
     self.sigma_n_sq = Parameter(sigma_n_sq, transform=positive(), dtype=default_float())
Example #2
    def __init__(self, data, Y_var):
        super().__init__(active_dims=[0])
        self.Y_var = Y_var
        self.num_genes = data.m_obs.shape[1]
        #         l_affine = tfb.AffineScalar(shift=tf.cast(1., tf.float64),
        #                             scale=tf.cast(4-1., tf.float64))
        #         l_sigmoid = tfb.Sigmoid()
        #         l_logistic = tfb.Chain([l_affine, l_sigmoid])

        self.lengthscale = gpflow.Parameter(1.414, transform=positive())

        D_affine = tfb.AffineScalar(shift=tf.cast(0.1, tf.float64),
                                    scale=tf.cast(1.5 - 0.1, tf.float64))
        D_sigmoid = tfb.Sigmoid()
        D_logistic = tfb.Chain([D_affine, D_sigmoid])
        S_affine = tfb.AffineScalar(shift=tf.cast(0.1, tf.float64),
                                    scale=tf.cast(4. - 0.1, tf.float64))
        S_sigmoid = tfb.Sigmoid()
        S_logistic = tfb.Chain([S_affine, S_sigmoid])

        self.D = gpflow.Parameter(np.random.uniform(0.9, 1, self.num_genes),
                                  transform=positive(),
                                  dtype=tf.float64)
        #         self.D[3].trainable = False
        #         self.D[3].assign(0.8)
        self.S = gpflow.Parameter(np.random.uniform(1, 1, self.num_genes),
                                  transform=positive(),
                                  dtype=tf.float64)
        #         self.S[3].trainable = False
        #         self.S[3].assign(1)
        self.kervar = gpflow.Parameter(np.float64(1), transform=positive())
        self.noise_term = gpflow.Parameter(
            0.1353 * tf.ones(self.num_genes, dtype='float64'),
            transform=positive())
Example #3
def test_parameter_assign_validation():
    with pytest.raises(tf.errors.InvalidArgumentError):
        param = gpflow.Parameter(0.0, transform=positive())

    param = gpflow.Parameter(0.1, transform=positive())
    param.assign(0.2)
    with pytest.raises(tf.errors.InvalidArgumentError):
        param.assign(0.0)
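
For context, a minimal sketch of what this test exercises, assuming GPflow 2 defaults (a softplus bijector): a Parameter stores an unconstrained variable, and assign() takes values in the constrained (strictly positive) space, so non-positive values are rejected.

import gpflow
from gpflow.utilities import positive

param = gpflow.Parameter(0.1, transform=positive())
print(param.numpy())                         # 0.1, the constrained value
print(param.unconstrained_variable.numpy())  # softplus inverse of 0.1 (negative)
param.assign(0.2)                            # fine: 0.2 lies in the bijector's range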
Example #4
    def _init_variational_parameters(self, num_inducing, q_mu, q_sqrt, q_diag):
        """
        Constructs the mean and cholesky of the covariance of the variational Gaussian posterior.
        If a user passes values for `q_mu` and `q_sqrt`, the routine checks that they have
        consistent and correct shapes. If the user does not specify values for `q_mu` and
        `q_sqrt`, the routine initializes them; their shapes depend on `num_inducing` and `q_diag`.

        Note: the comments refer to the number of observations (= output dimensions) as P,
        the number of latent GPs as L, and the number of inducing points as M. Typically P equals L,
        but when certain multioutput kernels are used, this can change.

        Parameters
        ----------
        :param num_inducing: int
            Number of inducing variables, typically referred to as M.
        :param q_mu: np.array or None
            Mean of the variational Gaussian posterior. If None, the function will initialise
            the mean with zeros. If not None, the shape of `q_mu` is checked.
        :param q_sqrt: np.array or None
            Cholesky of the covariance of the variational Gaussian posterior.
            If None, the function will initialise `q_sqrt` with the identity matrix.
            If not None, the shape of `q_sqrt` is checked, depending on `q_diag`.
        :param q_diag: bool
            Used to check if `q_mu` and `q_sqrt` have the correct shape or to
            construct them with the correct shape. If `q_diag` is True,
            `q_sqrt` is two-dimensional and only holds the square roots of the
            covariance diagonal elements. If False, `q_sqrt` is three-dimensional.
        """
        q_mu = np.zeros(
            (num_inducing, self.num_latent_gps)) if q_mu is None else q_mu
        self.q_mu = Parameter(q_mu, dtype=default_float())  # [M, P]

        if q_sqrt is None:
            if self.q_diag:
                ones = np.ones((num_inducing, self.num_latent_gps),
                               dtype=default_float())
                self.q_sqrt = Parameter(ones, transform=positive())  # [M, P]
            else:
                q_sqrt = [
                    np.eye(num_inducing, dtype=default_float())
                    for _ in range(self.num_latent_gps)
                ]
                q_sqrt = np.array(q_sqrt)
                self.q_sqrt = Parameter(q_sqrt,
                                        transform=triangular())  # [P, M, M]
        else:
            if q_diag:
                assert q_sqrt.ndim == 2
                self.num_latent_gps = q_sqrt.shape[1]
                self.q_sqrt = Parameter(q_sqrt,
                                        transform=positive())  # [M, L|P]
            else:
                assert q_sqrt.ndim == 3
                self.num_latent_gps = q_sqrt.shape[0]
                num_inducing = q_sqrt.shape[1]
                self.q_sqrt = Parameter(q_sqrt,
                                        transform=triangular())  # [L|P, M, M]
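
A shape-only sketch of the default initialisation above, assuming M = 5 inducing points and L = 2 latent GPs:

import numpy as np

M, L = 5, 2
q_mu = np.zeros((M, L))                           # [M, L], zero mean
q_sqrt = np.array([np.eye(M) for _ in range(L)])  # [L, M, M], identity Cholesky factors
print(q_mu.shape, q_sqrt.shape)                   # (5, 2) (2, 5, 5)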
Example #5
    def __init__(self,
                 m=1,
                 active_dims=[0],
                 gap_decay=0.1,
                 match_decay=0.9,
                 max_subsequence_length=3,
                 alphabet=[],
                 maxlen=0):
        super().__init__(active_dims=active_dims)
        # constrain decay kernel params to between 0 and 1
        logistic_gap = tfb.Chain([
            tfb.Shift(tf.cast(0, tf.float64))(tfb.Scale(tf.cast(1,
                                                                tf.float64))),
            tfb.Sigmoid()
        ])
        logistic_match = tfb.Chain([
            tfb.AffineScalar(shift=tf.cast(0, tf.float64),
                             scale=tf.cast(1, tf.float64)),
            tfb.Sigmoid()
        ])
        self.gap_decay = Parameter(gap_decay,
                                   transform=logistic_gap,
                                   name="gap_decay")
        self.match_decay = Parameter(match_decay,
                                     transform=logistic_match,
                                     name="match_decay")

        # prepare order coefs params
        order_coefs = tf.ones(max_subsequence_length)
        self.order_coefs = Parameter(order_coefs,
                                     transform=positive(),
                                     name="order_coefs")

        # get split weights
        self.m = m
        split_weights = tf.ones(2 * self.m - 1)
        self.split_weights = Parameter(split_weights,
                                       transform=positive(),
                                       name="split_weights")

        # store additional kernel parameters
        self.max_subsequence_length = tf.constant(max_subsequence_length)
        self.alphabet = tf.constant(alphabet)
        self.alphabet_size = tf.shape(self.alphabet)[0]
        self.maxlen = tf.cast(tf.math.ceil(maxlen / self.m), dtype=tf.int32)
        self.full_maxlen = tf.constant(maxlen)
        # build a lookup table of the alphabet to encode input strings
        self.table = tf.lookup.StaticHashTable(
            initializer=tf.lookup.KeyValueTensorInitializer(
                keys=tf.constant(["PAD"] + alphabet),
                values=tf.constant(range(0,
                                         len(alphabet) + 1)),
            ),
            default_value=0)
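
For reference, a standalone sketch of the alphabet lookup table built above, assuming a toy alphabet ["A", "C", "G", "T"]; ids start at 1 and unknown symbols fall back to the PAD id 0.

import tensorflow as tf

alphabet = ["A", "C", "G", "T"]
table = tf.lookup.StaticHashTable(
    initializer=tf.lookup.KeyValueTensorInitializer(
        keys=tf.constant(["PAD"] + alphabet),
        values=tf.constant(list(range(0, len(alphabet) + 1)))),
    default_value=0)
print(table.lookup(tf.constant(["A", "T", "PAD", "?"])).numpy())  # [1 4 0 0]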
Example #6
    def __init__(self, variance=1.0, lengthscale=1.0, f_list=None):
        """
        :param variance: the (initial) value for the variance parameter
        :param lengthscale: the (initial) value for the lengthscale parameter(s);
            to induce ARD behaviour, initialise it as an array with the same
            length as the number of active dimensions, e.g. [1., 1., 1.]
        :param f_list: list with information of the functional inputs
        """

        self.variance = Parameter(variance, transform=positive())
        self.lengthscale = Parameter(lengthscale, transform=positive())
        self.f_list = f_list  # list with functional information
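
A hedged, shape-only sketch of the ARD convention the docstring describes, using gpflow's Parameter directly: a scalar gives one shared lengthscale, an array gives one lengthscale per active dimension.

from gpflow import Parameter
from gpflow.utilities import positive

iso = Parameter(1.0, transform=positive())           # isotropic, shape ()
ard = Parameter([1., 1., 1.], transform=positive())  # ARD over 3 dimensions, shape (3,)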
Example #7
 def __init__(self, args):
     super().__init__(active_dims=[0])
     self.var = gpflow.Parameter(10.0, transform=positive())
     self.mag = gpflow.Parameter(1.0, transform=positive())
     self.args = args
     self.re = REMatchKernel(metric="polynomial",
                             degree=3,
                             gamma=1,
                             coef0=0,
                             alpha=0.5,
                             threshold=1e-6,
                             normalize_kernel=True)
Example #8
    def __init__(self, 
                 data: Tuple[tf.Tensor, tf.Tensor], 
                 m: int = 20, 
                 d: int = 1,
                 alpha: float = 1./np.sqrt(2.),
                 eps_sq: float = 1,
                 sigma_n_sq: float = 1,
                 sigma_f_sq: float = 1,
                 dir_weights: str = None):
                    
        if data[1].dtype == np.float64:
            K_bd.set_floatx('float64')
        else:
            set_default_float(np.float32)

        self.num_data = tf.cast(data[1].shape[0], default_float())
        self.data = (tf.cast(data[0], default_float()), tf.cast(data[1], default_float()))
        self.const = tf.cast(0.5*data[1].size*np.log(2*np.pi), default_float())
               
        self.flag_1d = d == 1
        self.alpha = tf.cast(alpha, default_float())
        self.alpha_sq = tf.square(self.alpha)
        self.m = tf.cast(m, default_float())
        self.this_range = tf.constant(np.asarray(list(product(range(1, m + 1), repeat=d))).squeeze(), dtype=default_float())
        self.this_range_1 = self.this_range - 1.
        self.this_range_1_2 = self.this_range_1 if self.flag_1d else tf.range(m, dtype=default_float())
        self.this_range_1_int = tf.cast(self.this_range_1, tf.int32)
        self.tf_range_dnn_out = tf.range(d)
        self.this_range_1_ln2 = np.log(2.)*self.this_range_1

        self.vander_range = tf.range(m+1, dtype=default_float())
        self.eye_k = tf.eye(m**d, dtype=default_float())
        self.yTy = tf.reduce_sum(tf.math.square(self.data[1])) 
        self.coeff_n_tf = tf.constant(np.load(os.path.dirname(os.path.realpath(__file__)) + '/hermite_coeff.npy')[:m, :m], dtype=default_float())
        
        eps_sq = eps_sq*np.ones(d) if d > 1 else eps_sq       
        self.eps_sq = Parameter(eps_sq, transform=positive(), dtype=default_float())
        self.sigma_f_sq = Parameter(sigma_f_sq, transform=positive(), dtype=default_float())
        self.sigma_n_sq = Parameter(sigma_n_sq, transform=positive(), dtype=default_float())
       
        model = models.Sequential()
        model.add(layers.Dense(512, activation='tanh', input_dim=data[0].shape[1]))        
        model.add(layers.Dense(256, activation='tanh'))
        model.add(layers.Dense(64, activation='tanh'))
        model.add(layers.Dense(d))      
        
        if dir_weights is not None:
            model.load_weights(dir_weights)
        self.neural_net = model
Example #9
    def __init__(self, rank=1, active_dims=[0], gap_decay=0.1, match_decay=0.9,
                 max_subsequence_length=3, alphabet=[], maxlen=0):
        super().__init__(active_dims=active_dims)
        # constrain decay kernel params to between 0 and 1
        logistic_gap = tfb.Chain([tfb.Shift(tf.cast(0, tf.float64))(tfb.Scale(tf.cast(1, tf.float64))), tfb.Sigmoid()])
        logistic_match = tfb.Chain([tfb.AffineScalar(shift=tf.cast(0, tf.float64), scale=tf.cast(1, tf.float64)), tfb.Sigmoid()])
        self.gap_decay = Parameter(gap_decay, transform=logistic_gap, name="gap_decay")
        self.match_decay = Parameter(match_decay, transform=logistic_match, name="match_decay")

        # prepare similarity matrix parameters
        self.rank = rank
        W = 0.1 * tf.ones((len(alphabet), self.rank))
        kappa = tf.ones(len(alphabet))

        self.W = Parameter(W, name="W")
        self.kappa = Parameter(kappa, transform=positive(), name="kappa")
  
        # store additional kernel parameters
        self.max_subsequence_length = tf.constant(max_subsequence_length)
        self.alphabet = tf.constant(alphabet)
        self.alphabet_size = tf.shape(self.alphabet)[0]
        self.maxlen = tf.constant(maxlen)

        # build a lookup table of the alphabet to encode input strings
        self.table = tf.lookup.StaticHashTable(
            initializer=tf.lookup.KeyValueTensorInitializer(
                keys=tf.constant(["PAD"] + alphabet),
                values=tf.constant(range(0, len(alphabet) + 1)),
            ),
            default_value=0)
Example #10
    def __init__(self,
                 data: RegressionData,
                 kernel,
                 noise_variance: float = 1.0,
                 parallel=False,
                 max_parallel=10000):

        self.noise_variance = Parameter(noise_variance, transform=positive())
        ts, ys = data_input_to_tensor(data)
        super().__init__(kernel, None, None, num_latent_gps=ys.shape[-1])
        self.data = ts, ys
        filter_spec = kernel.get_spec(ts.shape[0])
        filter_ys_spec = tf.TensorSpec((ts.shape[0], 1),
                                       config.default_float())
        smoother_spec = kernel.get_spec(None)
        smoother_ys_spec = tf.TensorSpec((None, 1), config.default_float())

        if not parallel:
            self._kf = tf.function(
                partial(kf, return_loglikelihood=True, return_predicted=False),
                input_signature=[filter_spec, filter_ys_spec])
            self._kfs = tf.function(
                kfs, input_signature=[smoother_spec, smoother_ys_spec])
        else:
            self._kf = tf.function(
                partial(pkf,
                        return_loglikelihood=True,
                        max_parallel=ts.shape[0]),
                input_signature=[filter_spec, filter_ys_spec])
            self._kfs = tf.function(
                partial(pkfs, max_parallel=max_parallel),
                input_signature=[smoother_spec, smoother_ys_spec])
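
The pattern worth noting here is tf.function combined with functools.partial: partial pins the extra arguments while input_signature fixes shapes and dtypes so each wrapper traces once. A minimal sketch with a hypothetical function f (not part of the model above):

import tensorflow as tf
from functools import partial

def f(x, scale=1.0):
    return scale * tf.reduce_sum(x)

g = tf.function(partial(f, scale=2.0),
                input_signature=[tf.TensorSpec((None, 1), tf.float64)])
print(g(tf.zeros((3, 1), tf.float64)).numpy())  # 0.0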
Example #11
        def grad(dy, variables=None):
            # get gradients of unconstrained params
            grads = {}
            if self.symmetric:
                grads['gap_decay:0'] = tf.reduce_sum(
                    tf.multiply(
                        dy,
                        dk_dgap * tf.math.exp(
                            self.logistic_gap.forward_log_det_jacobian(
                                self.gap_decay_unconstrained, 0))))
                grads['match_decay:0'] = tf.reduce_sum(
                    tf.multiply(
                        dy,
                        dk_dmatch * tf.math.exp(
                            self.logistic_match.forward_log_det_jacobian(
                                self.match_decay_unconstrained, 0))))
                dy_tiled = tf.tile(tf.expand_dims(dy, 1),
                                   (1, self.max_subsequence_length))
                grads['order_coefs:0'] = tf.reduce_sum(
                    tf.multiply(
                        dy_tiled,
                        dk_dorder_coefs *
                        tf.math.exp(positive().forward_log_det_jacobian(
                            self.order_coefs_unconstrained, 0))), 0)

                gradient = [grads[v.name] for v in variables]
            else:
                gradient = [None for v in variables]
            return ((None, None), gradient)
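
The exp(forward_log_det_jacobian(...)) factors implement the chain rule from constrained to unconstrained parameters: for theta = b(theta_u) and a scalar bijector b, d theta / d theta_u = exp(b.forward_log_det_jacobian(theta_u, 0)). A small sketch using a plain Sigmoid as a stand-in bijector:

import tensorflow as tf
import tensorflow_probability as tfp

b = tfp.bijectors.Sigmoid()
theta_u = tf.constant(0.3, tf.float64)
jac = tf.math.exp(b.forward_log_det_jacobian(theta_u, 0))  # d theta / d theta_u
with tf.GradientTape() as tape:
    tape.watch(theta_u)
    theta = b.forward(theta_u)
print(jac.numpy(), tape.gradient(theta, theta_u).numpy())  # the two agree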
Example #12
def test_positive_lower(env_lower, override_lower):
    expected_lower = override_lower or env_lower
    with as_context(
            Config(positive_bijector="softplus", positive_minimum=env_lower)):
        bijector = positive(lower=override_lower)
        assert isinstance(bijector, tfp.bijectors.Chain)
        assert np.isclose(bijector.bijectors[0].shift, expected_lower)
Example #13
 def grad(dy, variables=None):
     # get gradients of unconstrained params
     grads = {}
     if self.symmetric:
         grads['gap_decay:0'] = tf.reduce_sum(
             tf.multiply(
                 dy,
                 dk_dgap * tf.math.exp(
                     self.logistic_gap.forward_log_det_jacobian(
                         self.gap_decay_unconstrained, 0))))
         grads['match_decay:0'] = tf.reduce_sum(
             tf.multiply(
                 dy,
                 dk_dmatch * tf.math.exp(
                     self.logistic_match.forward_log_det_jacobian(
                         self.match_decay_unconstrained, 0))))
         dy_tiled = tf.tile(tf.expand_dims(dy, 1),
                            (1, self.alphabet_size))
         grads['kappa:0'] = tf.reduce_sum(
             tf.multiply(
                 dy_tiled,
                 dk_dkappa *
                 tf.math.exp(positive().forward_log_det_jacobian(
                     self.kappa_unconstrained, 0))), 0)
         dy_tiled = tf.tile(tf.expand_dims(dy, 1),
                            (1, self.alphabet_size * self.rank))
         grads_temp = tf.reduce_sum(tf.multiply(dy_tiled, dk_dW), 0)
         grads['W:0'] = tf.reshape(grads_temp,
                                   (-1, self.alphabet_size, self.rank))
         gradient = [grads[v.name] for v in variables]
     else:
         gradient = [None for v in variables]
     return ((None, None), gradient)
Example #14
def test_positive_calculation_order():
    value, lower = -10.0, 10.0
    expected = np.exp(value) + lower
    with as_context(Config(positive_bijector="exp", positive_minimum=lower)):
        result = positive()(value).numpy()
    assert np.isclose(result, expected)
    assert result >= lower
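
Together with the previous test, this pins down the composition order when positive_minimum is set: the shift is applied after the base bijector, so outputs can never fall below the bound. A hand-rolled equivalent, assuming TensorFlow Probability (Chain applies its list right to left):

import tensorflow_probability as tfp

lower = 10.0
manual = tfp.bijectors.Chain([tfp.bijectors.Shift(lower), tfp.bijectors.Exp()])
print(manual.forward(-10.0).numpy())  # ~10.0000454, bounded below by `lower`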
Example #15
    def __init__(self,
                 variance,
                 lengthscales,
                 name='Kernel',
                 active_dims=None):
        """ Kernel Constructor.

        Args:
            variance: An (L,L) symmetric, positive definite matrix for the signal variance.
            lengthscales: An (L,M) matrix of positive lengthscales.
            name: The name of this kernel.
            active_dims: Which of the input dimensions are used. The default None means all of them.
        """
        super(AnisotropicStationary, self).__init__(
            active_dims=active_dims, name=name
        )  # Do not call gf.kernels.AnisotropicStationary.__init__()!
        self.variance = Variance(value=np.atleast_2d(variance),
                                 name=name + 'Variance')
        self._L = self.variance.shape[0]
        lengthscales = data_input_to_tensor(lengthscales)
        lengthscales_shape = tuple(tf.shape(lengthscales).numpy())
        self._M = 1 if lengthscales_shape in ((), (1,), (1, 1), (self._L,)) else lengthscales_shape[-1]
        lengthscales = tf.reshape(
            tf.broadcast_to(lengthscales, (self._L, self._M)),
            (self._L, 1, self._M))
        self.lengthscales = Parameter(lengthscales,
                                      transform=positive(),
                                      trainable=False,
                                      name=name + 'Lengthscales')
        self._validate_ard_active_dims(self.lengthscales[0, 0])
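
The lengthscales bookkeeping above broadcasts whatever shape the user supplies up to (L, 1, M). A shape-only sketch, assuming L = 2 outputs and M = 3 input dimensions:

import tensorflow as tf

L, M = 2, 3
lengthscales = tf.constant([1.0, 2.0, 3.0])  # shape (M,), shared across outputs
lengthscales = tf.reshape(tf.broadcast_to(lengthscales, (L, M)), (L, 1, M))
print(lengthscales.shape)  # (2, 1, 3)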
Example #16
    def __init__(self,
                 variance=1.0,
                 lengthscale=1.0,
                 f_list=None,
                 f_list2=None,
                 distances=None,
                 **kwargs):
        for kwarg in kwargs:
            if kwarg not in {'name', 'active_dims'}:
                raise TypeError('Unknown keyword argument:', kwarg)

        super().__init__(**kwargs)
        self.variance = gpflow.Parameter(variance, transform=positive())
        self.lengthscale = gpflow.Parameter(lengthscale, transform=positive())
        self.f_list = f_list  # list with functional information
        self.f_list2 = f_list2  # list with functional information
        self.distances = distances  # matrix with precomputed distances
Example #17
 def __init__(self,
              variance=1.0,
              lengthscale=1.0,
              alpha=1.0,
              active_dims=None):
     super().__init__(variance=variance,
                      lengthscale=lengthscale,
                      active_dims=active_dims)
     self.alpha = Parameter(alpha, transform=positive())
Example #18
    def __init__(self,
                 variance=1.0,
                 lengthscale_x=1.0,
                 lengthscale_f=1.0,
                 f_list=None,
                 f_list2=None,
                 **kwargs):
        for kwarg in kwargs:
            if kwarg not in {'name', 'active_dims'}:
                raise TypeError('Unknown keyword argument:', kwarg)

        super().__init__(**kwargs)
        #super().__init__(active_dims=[0])
        self.variance = gpflow.Parameter(variance, transform=positive())
        self.lengthscale_x = gpflow.Parameter(lengthscale_x,
                                              transform=positive())
        self.lengthscale_f = gpflow.Parameter(lengthscale_f,
                                              transform=positive())
        self.f_list = f_list  # list with functional information
        self.f_list2 = f_list2  # list with functional information
Example #19
 def __init__(self, **kwargs):
     """
     :param kwargs: accepts `name` and `active_dims`, which is a list or
         slice of indices which controls which columns of X are used (by
         default, all columns are used).
     """
     for kwarg in kwargs:
         if kwarg not in {"name", "active_dims"}:
             raise TypeError("Unknown keyword argument:", kwarg)
     super().__init__(**kwargs)
     self.variance = gpflow.Parameter(1.0, transform=positive())
Example #20
 def __init__(self, input_dim=1, variance=1.0, lengthscales=1.0,
              num_qual=0, dist='manhattan', **kwargs):
     """
      :param variance: the (initial) value for the variance parameter
      :param lengthscales: the (initial) value for the lengthscale parameter(s);
          to induce ARD behaviour, initialise it as an array with the same
          length as the number of active dimensions, e.g. [1., 1., 1.]
      :param num_qual: the number of qualitative variables to optimise; these
          must be in the end columns of the input array
     :param kwargs: accepts `name` and `active_dims`, which is a list of
         length input_dim which controls which columns of X are used
     """
     for kwarg in kwargs:
         if kwarg not in {'name', 'active_dims'}:
             raise TypeError('Unknown keyword argument:', kwarg)
     super().__init__(**kwargs)
     self.variance = gpf.Parameter(variance, transform=positive())
     self.lengthscales = gpf.Parameter(lengthscales, transform=positive())
     self._validate_ard_active_dims(self.lengthscales)
     self.num_qual = num_qual
     self.dist = dist
Example #21
 def __init__(self, 
              data: Tuple[tf.Tensor, tf.Tensor],  
              m: int = 100, 
              lengthscales = None,
              sigma_n_sq: float = 1,
              sigma_f_sq: float = 1,
              randn = None):
                             
     self.num_data = tf.cast(data[1].size, default_float())
     self.data = (tf.cast(data[0], default_float()), tf.cast(data[1], default_float()))
     self.const = tf.cast(0.5*data[1].size*np.log(2*np.pi), default_float())
                    
     self.eye_2m = tf.eye(2*m, dtype=default_float())
     self.yTy = tf.reduce_sum(tf.math.square(self.data[1])) 
     self.m_float = tf.cast(m, default_float())
     self.randn = tf.random.normal(shape=[m, data[0].shape[1]], dtype=default_float()) if randn is None else tf.cast(randn[:, None], default_float())
     
     lengthscales0 = np.ones(data[0].shape[1]) if lengthscales is None else lengthscales
     self.lengthscales = Parameter(lengthscales0, transform=positive(), dtype=default_float())
     self.sigma_f_sq = Parameter(sigma_f_sq, transform=positive(), dtype=default_float())
     self.sigma_n_sq = Parameter(sigma_n_sq, transform=positive(), dtype=default_float())
Example #22
 def __init__(self, 
              data: Tuple[tf.Tensor, tf.Tensor],  
              m: int = 100, 
              d: int = 4,
              lengthscales = None,
              sigma_n_sq: float = 1,
              sigma_f_sq: float = 1,
              dir_weights: str = None):
                 
     if data[1].dtype == np.float64:
         K_bd.set_floatx('float64')
     else:
         set_default_float(np.float32)
         
     self.num_data = tf.cast(data[1].shape[0], default_float())
     self.data = (tf.cast(data[0], default_float()), tf.cast(data[1], default_float()))
     self.const = tf.cast(0.5*data[1].size*np.log(2*np.pi), default_float())
                    
     self.eye_2m = tf.eye(2*m, dtype=default_float())
     self.yTy = tf.reduce_sum(tf.math.square(self.data[1])) 
     self.m_float = tf.cast(m, default_float())
     self.randn = tf.random.normal(shape=[m, d], dtype=default_float())
     
     lengthscales0 = np.ones(d) if lengthscales is None else lengthscales
     self.lengthscales = Parameter(lengthscales0, transform=positive(), dtype=default_float())
     self.sigma_f_sq = Parameter(sigma_f_sq, transform=positive(), dtype=default_float())
     self.sigma_n_sq = Parameter(sigma_n_sq, transform=positive(), dtype=default_float())
    
     model = models.Sequential()
     model.add(layers.Dense(512, activation='tanh', input_dim=data[0].shape[1]))        
     model.add(layers.Dense(256, activation='tanh'))
     model.add(layers.Dense(64, activation='tanh'))
     model.add(layers.Dense(d))      
     
     if dir_weights is not None:
         model.load_weights(dir_weights)
     self.neural_net = model
Example #23
 def __init__(self,
              kernels,
              lengthscale_f,
              f_list=None,
              f_list2=None,
              distances=None,
              W=None,
              name=None):
     Combination.__init__(self, kernels, name)
     self.lengthscale_f = gpflow.Parameter(lengthscale_f,
                                           transform=positive())
     self.f_list = f_list  # list with functional information
     self.f_list2 = f_list2  # list with functional information
     self.distances = distances  # matrix with precomputed distances
     self.W = W
Example #24
File: kernels.py  Project: roksikonja/tsipy
    def __init__(
        self,
        labels: Tuple[int, ...],
        variances: Optional[np.ndarray] = None,
        active_dims: Optional[List[int]] = None,
    ):
        super().__init__(active_dims=active_dims)
        self.labels = np.array(labels)
        self.n = len(labels)

        if variances is None:
            variances = tf.ones((self.n,))

        self.variances = gpf.Parameter(
            variances, transform=positive(), dtype=gpf.default_float()
        )
Example #25
 def create_models(self, data):
     self.models = []
     for i in range(self.num_outputs):
         kernel = gpflow.kernels.SquaredExponential(
             lengthscales=tf.ones([
                 data[0].shape[1],
             ], dtype=float_type))
         transformed_lengthscales = Parameter(
             kernel.lengthscales, transform=positive(lower=1e-3))
         kernel.lengthscales = transformed_lengthscales
         kernel.lengthscales.prior = tfd.Gamma(f64(1.1), f64(1 / 10.0))
         if i == 0:
             self.models.append(
                 FakeGPR((data[0], data[1][:, i:i + 1]), kernel))
         else:
             self.models.append(
                 FakeGPR((data[0], data[1][:, i:i + 1]), kernel,
                         self.models[-1].X))
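
A condensed sketch of the re-parameterisation trick used above, assuming GPflow 2: rebuild the kernel's lengthscales as a Parameter with a lower-bounded positive transform, then attach a Gamma prior.

import gpflow
import tensorflow_probability as tfp
from gpflow import Parameter
from gpflow.utilities import positive, to_default_float

kernel = gpflow.kernels.SquaredExponential(lengthscales=[1.0, 1.0])
kernel.lengthscales = Parameter(kernel.lengthscales,
                                transform=positive(lower=1e-3))
kernel.lengthscales.prior = tfp.distributions.Gamma(to_default_float(1.1),
                                                    to_default_float(0.1))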
Example #26
    def __init__(
            self,
            value,
            name: str = 'Variance',
            cholesky_diagonal_lower_bound: float = CHOLESKY_DIAGONAL_LOWER_BOUND
    ):
        """ Construct a non-diagonal covariance matrix. Mutable only through it's properties cholesky_diagonal and cholesky_lower_triangle.

        Args:
            value: A symmetric, positive definite matrix, expressed in tensorflow or numpy.
            cholesky_diagonal_lower_bound: Lower bound on the diagonal of the Cholesky decomposition.
        """
        super().__init__(name=name)
        value = data_input_to_tensor(value)
        self._shape = (value.shape[-1], value.shape[-1])
        self._broadcast_shape = (value.shape[-1], 1, value.shape[-1], 1)
        if value.shape != self._shape:
            raise ValueError('Variance must have shape (L,L).')

        cholesky = tf.linalg.cholesky(value)

        self._cholesky_diagonal = tf.linalg.diag_part(cholesky)
        if min(self._cholesky_diagonal) <= cholesky_diagonal_lower_bound:
            raise ValueError(
                f'The Cholesky diagonal of {name} must be strictly greater than {cholesky_diagonal_lower_bound}.'
            )
        self._cholesky_diagonal = Parameter(
            self._cholesky_diagonal,
            transform=positive(lower=cholesky_diagonal_lower_bound),
            name=name + '.cholesky_diagonal')

        mask = sum([
            list(range(i * self._shape[0], i * (self._shape[0] + 1)))
            for i in range(1, self._shape[0])
        ], start=[])
        self._cholesky_lower_triangle = Parameter(
            tf.gather(tf.reshape(cholesky, [-1]), mask),
            name=name + '.cholesky_lower_triangle')

        self._row_lengths = tuple(range(self._shape[0]))
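
The mask arithmetic above gathers the flat indices of the strictly lower triangle of the Cholesky factor. A worked check for a 3x3 matrix:

L = 3
mask = sum([list(range(i * L, i * (L + 1))) for i in range(1, L)], start=[])
print(mask)  # [3, 6, 7]: flat indices of the entries below the diagonal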
Example #27
 def __init__(self):
     super().__init__(active_dims=[0])
     self.var = gpflow.Parameter(1.0, transform=positive())
     self.mag = gpflow.Parameter(1.0, transform=positive())
Example #28
    def __init__(
        self,
        inducing_variable: gpflow.inducing_variables.InducingVariables,
        kernel: gpflow.kernels.Kernel,
        domain: np.ndarray,
        q_mu: np.ndarray,
        q_S: np.ndarray,
        *,
        beta0: float = 1e-6,
        num_observations: int = 1,
        num_events: Optional[int] = None,
    ):
        """
        D = number of dimensions
        M = size of inducing variables (number of inducing points)

        :param inducing_variable: inducing variables (here only implemented for a gpflow
            .inducing_variables.InducingPoints instance, with Z of shape M x D)
        :param kernel: the kernel (here only implemented for a gpflow.kernels
            .SquaredExponential instance)
        :param domain: lower and upper bounds of (hyper-rectangular) domain
            (D x 2)

        :param q_mu: initial mean vector of the variational distribution q(u)
            (length M)
        :param q_S: how to initialise the covariance matrix of the variational
            distribution q(u)  (M x M)

        :param beta0: a constant offset, corresponding to initial value of the
            prior mean of the GP (but trainable); should be sufficiently large
            so that the GP does not go negative...

        :param num_observations: number of observations of sets of events
            under the distribution

        :param num_events: total number of events, defaults to events.shape[0]
            (relevant when feeding in minibatches)
        """
        super().__init__(kernel, likelihood=None)  # custom likelihood

        # observation domain  (D x 2)
        self.domain = domain
        if domain.ndim != 2 or domain.shape[1] != 2:
            raise ValueError("domain must be of shape D x 2")

        self.num_observations = num_observations
        self.num_events = num_events

        if not (isinstance(kernel, gpflow.kernels.SquaredExponential)
                and isinstance(inducing_variable,
                               gpflow.inducing_variables.InducingPoints)):
            raise NotImplementedError(
                "This VBPP implementation can only handle real-space "
                "inducing points together with the SquaredExponential "
                "kernel.")
        self.kernel = kernel
        self.inducing_variable = inducing_variable

        self.beta0 = Parameter(beta0, transform=positive(),
                               name="beta0")  # constant mean offset

        # variational approximate Gaussian posterior q(u) = N(u; m, S)
        self.q_mu = Parameter(q_mu, name="q_mu")  # mean vector  (length M)

        # covariance:
        L = np.linalg.cholesky(
            q_S)  # S = L L^T, with L lower-triangular  (M x M)
        self.q_sqrt = Parameter(L, transform=triangular(), name="q_sqrt")

        self.psi_jitter = 0.0
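
A minimal sketch of the covariance parameterisation used above, assuming gpflow's triangular() transform: q_sqrt stores the lower-triangular Cholesky factor of q_S, which keeps the implied covariance positive semi-definite.

import numpy as np
from gpflow import Parameter
from gpflow.utilities import triangular

M = 4
S = np.eye(M)                   # any symmetric positive definite (M x M)
L_chol = np.linalg.cholesky(S)  # S = L L^T, L lower-triangular
q_sqrt = Parameter(L_chol, transform=triangular(), name="q_sqrt")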
Example #29
    def __init__(self,
                 active_dims=[0],
                 gap_decay=0.1,
                 match_decay=0.9,
                 max_subsequence_length=3,
                 alphabet=[],
                 maxlen=0,
                 batch_size=100):
        super().__init__(active_dims=active_dims)
        # constrain decay kernel params to between 0 and 1
        self.logistic_gap = tfb.Chain([
            tfb.Shift(tf.cast(0, tf.float64))(tfb.Scale(tf.cast(1,
                                                                tf.float64))),
            tfb.Sigmoid()
        ])
        self.logistic_match = tfb.Chain([
            tfb.AffineScalar(shift=tf.cast(0, tf.float64),
                             scale=tf.cast(1, tf.float64)),
            tfb.Sigmoid()
        ])
        self.gap_decay_param = Parameter(gap_decay,
                                         transform=self.logistic_gap,
                                         name="gap_decay")
        self.match_decay_param = Parameter(match_decay,
                                           transform=self.logistic_match,
                                           name="match_decay")
        self.order_coefs_param = Parameter(tf.ones(max_subsequence_length,
                                                   dtype=tf.float64),
                                           transform=positive(),
                                           name="order_coefs")

        # we use copies of the kernel params to stop building an expensive computation graph;
        # instead we calculate gradients efficiently using dynamic programming.
        # These copies are refreshed at every call to K and K_diag (to check whether parameters have been updated).
        self.match_decay = self.match_decay_param.numpy()
        self.gap_decay = self.gap_decay_param.numpy()
        self.order_coefs = self.order_coefs_param.numpy()
        self.match_decay_unconstrained = self.match_decay_param.unconstrained_variable.numpy()
        self.gap_decay_unconstrained = self.gap_decay_param.unconstrained_variable.numpy()
        self.order_coefs_unconstrained = self.order_coefs_param.unconstrained_variable.numpy()

        # store additional kernel parameters
        self.max_subsequence_length = tf.constant(max_subsequence_length)
        self.alphabet = tf.constant(alphabet)
        self.alphabet_size = tf.shape(self.alphabet)[0]
        self.maxlen = tf.constant(maxlen)
        self.batch_size = tf.constant(batch_size)

        # build a lookup table of the alphabet to encode input strings
        self.table = tf.lookup.StaticHashTable(
            initializer=tf.lookup.KeyValueTensorInitializer(
                keys=tf.constant(["PAD"] + alphabet),
                values=tf.constant(range(0,
                                         len(alphabet) + 1)),
            ),
            default_value=0)

        # initialize helpful construction matrices to be lazily computed once needed
        self.D = None
        self.dD_dgap = None
Example #30
def test_positive_bijector(env_bijector, override_bijector, expected_class):
    with as_context(
            Config(positive_bijector=env_bijector, positive_minimum=None)):
        bijector = positive(base=override_bijector)
        assert isinstance(bijector, expected_class)