def __init__(self, kern, Z, num_outputs, mean_function):
        """
        A sparse variational GP layer in whitened representation. This layer holds the kernel,
        variational parameters, inducing points and mean function.

        The underlying model at inputs X is
        f = Lv + mean_function(X), where v \sim N(0, I) and LL^T = kern.K(X)

        The variational distribution over the inducing points is
        q(v) = N(q_mu, q_sqrt q_sqrt^T)

        The layer holds D_out independent GPs with the same kernel and inducing points.

        :param kern: The kernel for the layer (input_dim = D_in)
        :param Z: Inducing points (M, D_in)
        :param num_outputs: The number of GP outputs (q_mu is shape (M, num_outputs),
            q_sqrt is shape (num_outputs, M, M))
        :param mean_function: The mean function
        :return:
        """
        Parameterized.__init__(self)
        M = Z.shape[0]

        q_mu = np.zeros((M, num_outputs))
        self.q_mu = Parameter(q_mu)

        q_sqrt = np.tile(np.eye(M)[None, :, :], [num_outputs, 1, 1])
        transform = transforms.LowerTriangular(M, num_matrices=num_outputs)
        self.q_sqrt = Parameter(q_sqrt, transform=transform)

        self.feature = InducingPoints(Z)
        self.kern = kern
        self.mean_function = mean_function
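
Aside: a minimal NumPy sketch of the whitened parameterization described in the docstring above, with illustrative shapes only (the kernel matrix and mean are stand-ins, and the sketch assumes the inducing points coincide with the inputs X):

import numpy as np

# Sketch only: assumes the inducing points coincide with the inputs X (M = N),
# so L below plays the role of the Cholesky factor of kern.K(X) in the docstring.
M, D_out = 5, 3
rng = np.random.default_rng(0)

q_mu = np.zeros((M, D_out))                             # (M, D_out)
q_sqrt = np.tile(np.eye(M)[None, :, :], [D_out, 1, 1])  # (D_out, M, M)

# stand-in Gram matrix for kern.K(X) on five 1-D points
K = np.exp(-0.5 * (np.arange(M)[:, None] - np.arange(M)[None, :]) ** 2)
L = np.linalg.cholesky(K + 1e-6 * np.eye(M))

# one sample per output: v ~ N(q_mu, q_sqrt q_sqrt^T), then f = L v + mean_function(X)
eps = rng.standard_normal((D_out, M, 1))
v = q_mu.T[:, :, None] + q_sqrt @ eps                   # (D_out, M, 1)
mean_X = np.zeros((M, D_out))                           # stand-in for mean_function(X)
f = (L @ v)[:, :, 0].T + mean_X                         # (M, D_out)
print(f.shape)                                          # (5, 3)
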
Example No. 2
    def _init_variational_parameters(self, q_mu, q_sqrt):
        q_mu = np.zeros(
            (self.num_inducing, self.num_latent)) if q_mu is None else q_mu
        self.q_mu = Parameter(q_mu, dtype=settings.float_type)  # M x K

        if q_sqrt is None:
            if self.q_diag:
                self.q_sqrt = Parameter(np.ones(
                    (self.num_inducing, self.num_latent),
                    dtype=settings.float_type),
                                        transform=transforms.positive)  # M x K
            else:
                q_sqrt = np.array([
                    np.eye(self.num_inducing, dtype=settings.float_type)
                    for _ in range(self.num_latent)
                ])
                self.q_sqrt = Parameter(q_sqrt,
                                        transform=transforms.LowerTriangular(
                                            self.num_inducing,
                                            self.num_latent))  # K x M x M
        else:
            if self.q_diag:
                assert q_sqrt.ndim == 2
                self.q_sqrt = Parameter(q_sqrt,
                                        transform=transforms.positive)  # M x K
            else:
                assert q_sqrt.ndim == 3
                self.q_sqrt = Parameter(q_sqrt,
                                        transform=transforms.LowerTriangular(
                                            self.num_inducing,
                                            self.num_latent))  # K x M x M
Example No. 3
    def __init__(self, input_dim,
                 order=0,
                 variance=1.0, weight_variances=1., bias_variance=1.,
                 active_dims=None, ARD=None, name=None):
        """
        - input_dim is the dimension of the input to the kernel
        - order specifies the activation function of the neural network:
          the activation is a rectified monomial of the chosen order.
        - variance is the initial value for the variance parameter
        - weight_variances is the initial value for the weight_variances parameter
          defaults to 1.0 (ARD=False) or np.ones(input_dim) (ARD=True).
        - bias_variance is the initial value for the bias_variance parameter
          defaults to 1.0.
        - active_dims is a list of length input_dim which controls which
          columns of X are used.
        - ARD specifies whether the kernel has one weight_variance per dimension
          (ARD=True) or a single weight_variance (ARD=False).
        """
        super().__init__(input_dim, active_dims, name=name)

        if order not in self.implemented_orders:
            raise ValueError('Requested kernel order is not implemented.')
        self.order = order

        self.variance = Parameter(variance, transform=transforms.positive,
                                  dtype=settings.float_type)
        self.bias_variance = Parameter(bias_variance, transform=transforms.positive,
                                       dtype=settings.float_type)
        weight_variances, self.ARD = self._validate_ard_shape("weight_variances", weight_variances, ARD)
        self.weight_variances = Parameter(weight_variances, transform=transforms.positive,
                                          dtype=settings.float_type)
Example No. 4
    def __init__(self, input_dim, output_dim, num_inducing, kernel,
                 mean_function=None, multitask=False, name=None):
        """
        input_dim is an integer
        output_dim is an integer
        num_inducing is the number of inducing inputs
        kernel is a kernel object (or list of kernel objects)
        """

        super(Layer, self).__init__(name=name)

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.num_inducing = num_inducing
        if multitask:
            Z = np.zeros((self.num_inducing, self.input_dim + 1))
        else:
            Z = np.zeros((self.num_inducing, self.input_dim))

        self.feature = inducingpoint_wrapper(None, Z)

        if isinstance(kernel, list):
            self.kernel = ParamList(kernel)
        else:
            self.kernel = kernel

        self.mean_function = mean_function or Zero(output_dim=self.output_dim)

        shape = (self.num_inducing, self.output_dim)

        self.q_mu = Parameter(np.zeros(shape))

        q_sqrt = np.vstack([np.expand_dims(np.eye(self.num_inducing), 0)
                            for _ in range(self.output_dim)])
        self.q_sqrt = Parameter(q_sqrt)
Example No. 5
    def __init__(self, input_dim, output_dim, rank, active_dims=None, name=None):
        """
        A Coregionalization kernel. The inputs to this kernel are _integers_
        (we cast them from floats as needed) which usually specify the
        *outputs* of a Coregionalization model.

        The parameters of this kernel, W, kappa, specify a positive-definite
        matrix B.

          B = W W^T + diag(kappa) .

        The kernel function is then an indexing of this matrix, so

          K(x, y) = B[x, y] .

        We refer to the size of B as "num_outputs x num_outputs", since this is
        the number of outputs in a coregionalization model. We refer to the
        number of columns of W as 'rank': it is the number of degrees of
        correlation between the outputs.

        NB. There is a symmetry between the elements of W, which creates a
        local minimum at W=0. To avoid this, it's recommended to initialize the
        optimization (or MCMC chain) using a random W.
        """
        assert input_dim == 1, "Coregion kernel in 1D only"
        super().__init__(input_dim, active_dims, name=name)

        self.output_dim = output_dim
        self.rank = rank
        self.W = Parameter(np.zeros((self.output_dim, self.rank), dtype=settings.float_type))
        self.kappa = Parameter(np.ones(self.output_dim, dtype=settings.float_type), transform=transforms.positive)
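
Aside: the indexing described in the docstring, sketched in plain NumPy with illustrative values (a random W is used, as the docstring recommends, to avoid the W=0 symmetry):

import numpy as np

num_outputs, rank = 3, 2
rng = np.random.default_rng(1)

W = rng.standard_normal((num_outputs, rank))  # random init, as recommended above
kappa = np.ones(num_outputs)

B = W @ W.T + np.diag(kappa)                  # positive definite, num_outputs x num_outputs

x, y = 0, 2                                   # integer inputs index the outputs
print(B[x, y])                                # K(x, y) = B[x, y]
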
Example No. 6
	def __init__(self, dims, activation=None, name='fc'):
		Layer.__init__(self)
		self.dims = dims
		self.activation = activation
		self._name = name
		self.weights = Parameter(utils.truncated_normal(scale=0.1, size=tuple(self.dims), dtype=settings.float_type), name='weights')
		self.bias = Parameter(0.1*np.ones((self.dims[1]), dtype=settings.float_type), name='bias')
Example No. 7
 def __init__(self, input_dim, output_dim, 
              spectral_constant = None, spectral_mean = None, 
              spectral_variance = None, spectral_delay = None, 
              spectral_phase = None, active_dims = None):
     
     # input_dim: input dimension (integer)
     # output_dim: output dimension (integer)

     # spectral_constant: rank-1 tensor of length output_dim
     # spectral_mean: rank-2 tensor of shape (input_dim, output_dim)
     # spectral_variance: rank-2 tensor of shape (input_dim, output_dim)
     # spectral_delay: rank-2 tensor of shape (input_dim, output_dim)
     # spectral_phase: rank-1 tensor of length output_dim

     # TODO: add automatic initialization
     if spectral_constant is None:
         spectral_constant = np.random.randn(output_dim)
     if spectral_mean is None:
         spectral_mean = np.ones([input_dim, output_dim])
     if spectral_variance is None:
         spectral_variance = np.ones([input_dim, output_dim])
     if spectral_delay is None:
         spectral_delay = np.zeros([input_dim, output_dim])
     if spectral_phase is None:
         spectral_phase = np.zeros(output_dim)
         
     
     MultiKern.__init__(self, input_dim, output_dim, active_dims)
     self.constant = Parameter(spectral_constant)
     self.mean = Parameter(spectral_mean)
     self.variance = Parameter(spectral_variance, transforms.positive)
     self.delay = Parameter(spectral_delay, FixDelay(input_dim, output_dim))
     self.phase = Parameter(spectral_phase, FixPhase())
     self.kerns = [[self._kernel_factory(i,j) for j in range(output_dim)] for i in range(output_dim)]
Example No. 8
    def _init_variational_parameters(self, num_inducing, q_mu, q_sqrt, q_diag):
        """
        Constructs the mean and cholesky of the covariance of the variational Gaussian posterior.
        If a user passes values for `q_mu` and `q_sqrt` the routine checks if they have consistent
        and correct shapes. If a user does not specify any values for `q_mu` and `q_sqrt`, the routine
        initializes them; their shapes depend on `num_inducing` and `q_diag`.
        Note: most often the comments refer to the number of observations (=output dimensions) with P,
        number of latent GPs with L, and number of inducing points M. Typically P equals L,
        but when certain multi-output kernels are used, this can change.
        Parameters
        ----------
        :param num_inducing: int
            Number of inducing variables, typically referred to as M.
        :param q_mu: np.array or None
            Mean of the variational Gaussian posterior. If None the function will initialise
            the mean with zeros. If not None, the shape of `q_mu` is checked.
        :param q_sqrt: np.array or None
            Cholesky of the covariance of the variational Gaussian posterior.
            If None the function will initialise `q_sqrt` with identity matrix.
            If not None, the shape of `q_sqrt` is checked, depending on `q_diag`.
        :param q_diag: bool
            Used to check if `q_mu` and `q_sqrt` have the correct shape or to
            construct them with the correct shape. If `q_diag` is true,
            `q_sqrt` is two dimensional and only holds the square root of the
            covariance diagonal elements. If False, `q_sqrt` is three dimensional.
        """
        q_mu = np.zeros(
            (num_inducing, self.num_latent)) if q_mu is None else q_mu
        self.q_mu = Parameter(q_mu, dtype=settings.float_type)  # M x P

        if q_sqrt is None:
            if self.q_diag:
                self.q_sqrt = Parameter(np.ones(
                    (num_inducing, self.num_latent),
                    dtype=settings.float_type),
                                        transform=transforms.positive)  # M x P
            else:
                q_sqrt = np.array([
                    np.eye(num_inducing, dtype=settings.float_type)
                    for _ in range(self.num_latent)
                ])
                self.q_sqrt = Parameter(q_sqrt,
                                        transform=transforms.LowerTriangular(
                                            num_inducing,
                                            self.num_latent))  # P x M x M
        else:
            if q_diag:
                assert q_sqrt.ndim == 2
                self.num_latent = q_sqrt.shape[1]
                self.q_sqrt = Parameter(
                    q_sqrt, transform=transforms.positive)  # M x L/P
            else:
                assert q_sqrt.ndim == 3
                self.num_latent = q_sqrt.shape[0]
                num_inducing = q_sqrt.shape[1]
                self.q_sqrt = Parameter(q_sqrt,
                                        transform=transforms.LowerTriangular(
                                            num_inducing,
                                            self.num_latent))  # L/P x M x M
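
Aside: the default shapes this routine constructs, sketched in NumPy for M inducing points and `num_latent` latent GPs (the values are the ones/identity initialisations described in the docstring):

import numpy as np

num_inducing, num_latent = 4, 2  # M and L/P in the docstring

# q_diag=True: only the square roots of the diagonal are stored, shape M x P
q_sqrt_diag = np.ones((num_inducing, num_latent))

# q_diag=False: one full lower-triangular factor per latent GP, shape P x M x M
q_sqrt_full = np.array([np.eye(num_inducing) for _ in range(num_latent)])

print(q_sqrt_diag.shape)  # (4, 2)
print(q_sqrt_full.shape)  # (2, 4, 4)
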
Example No. 9
    def setup_variational_parameters(self):
        self.Z = Parameter(self.inducing_locations) # M x D

        self.q_mu = Parameter(np.zeros((self.num_inducing, 1))) # M x 1

        q_sqrt = np.tile(np.eye(self.num_inducing)[None, :, :], [1, 1, 1])
        transform = transforms.LowerTriangular(self.num_inducing, num_matrices=1)
        self.q_sqrt = Parameter(q_sqrt, transform=transform) # 1 x M x M
Example No. 10
	def __init__(self, kernel_shape, strides=(1, 1, 1, 1), padding='SAME', activation=None, name=''):
		Layer.__init__(self)
		self.kernel_shape = kernel_shape
		self.strides = strides
		self.padding = padding
		self.activation = activation
		self._name = name
		self.kernel = Parameter(utils.truncated_normal(scale=0.1, size=self.kernel_shape, dtype=settings.float_type), name='kernel')
		self.bias = Parameter(0.1*np.ones((kernel_shape[-1]), dtype=settings.float_type), name='bias')
Example No. 11
 def __init__(self, p=1.0, alpha=1.0, K=100, **kwargs):
     super().__init__(**kwargs)
     self.alpha = Parameter(alpha,
                            transform=transforms.positive,
                            dtype=settings.float_type)
     self.p = Parameter(p,
                        transform=transforms.positive,
                        dtype=settings.float_type)
     self.K = K
Example No. 12
    def __init__(self,
                 layer_id,
                 kern,
                 U,
                 Z,
                 num_outputs,
                 mean_function,
                 white=False,
                 **kwargs):
        """
        A sparse variational GP layer in whitened representation. This layer holds the kernel,
        variational parameters, inducing points and mean function.
        The underlying model at inputs X is
        f = Lv + mean_function(X), where v \sim N(0, I) and LL^T = kern.K(X)
        The variational distribution over the inducing points is
        q(v) = N(q_mu, q_sqrt q_sqrt^T)
        The layer holds D_out independent GPs with the same kernel and inducing points.
        :param kern: The kernel for the layer (input_dim = D_in)
        :param Z: Inducing points (M, D_in)
        :param num_outputs: The number of GP outputs (q_mu is shape (M, num_outputs))
        :param mean_function: The mean function
        :return:
        """
        Layer.__init__(self, layer_id, U, num_outputs, **kwargs)

        # Initialize inducing points randomly if Z is not provided

        self.dim_in = U[0].shape[1] if layer_id == 0 else num_outputs
        self.Z = Z if Z is not None else np.random.normal(
            0, 0.01, (100, self.dim_in))

        self.num_inducing = self.Z.shape[0]

        q_mu = np.zeros((self.num_inducing, num_outputs))
        self.q_mu = Parameter(q_mu)

        q_sqrt = np.tile(
            np.eye(self.num_inducing)[None, :, :], [num_outputs, 1, 1])
        transform = transforms.LowerTriangular(self.num_inducing,
                                               num_matrices=num_outputs)
        self.q_sqrt = Parameter(q_sqrt, transform=transform)

        self.feature = InducingPoints(self.Z)
        self.kern = kern
        self.mean_function = mean_function

        self.num_outputs = num_outputs
        self.white = white

        if not self.white:  # initialize to prior
            Ku = self.kern.compute_K_symm(self.Z)
            Lu = np.linalg.cholesky(Ku +
                                    np.eye(self.Z.shape[0]) * settings.jitter)
            self.q_sqrt = np.tile(Lu[None, :, :], [num_outputs, 1, 1])

        self.needs_build_cholesky = True
Example No. 13
    def __init__(self, kern, num_outputs, mean_function,
                Z=None,
                feature=None,
                white=False, input_prop_dim=None,
                q_mu=None,
                q_sqrt=None, **kwargs):
        """
        A sparse variational GP layer in whitened representation. This layer holds the kernel,
        variational parameters, inducing points and mean function.

        The underlying model at inputs X is
        f = Lv + mean_function(X), where v \sim N(0, I) and LL^T = kern.K(X)

        The variational distribution over the inducing points is
        q(v) = N(q_mu, q_sqrt q_sqrt^T)

        The layer holds D_out independent GPs with the same kernel and inducing points.

        :param kern: The kernel for the layer (input_dim = D_in)
        :param Z: Inducing points (M, D_in)
        :param num_outputs: The number of GP outputs (q_mu is shape (M, num_outputs))
        :param mean_function: The mean function
        :return:
        """
        Layer.__init__(self, input_prop_dim, **kwargs)
        if feature is None:
            feature = InducingPoints(Z)

        self.num_inducing = len(feature)

        self.feature = feature
        self.kern = kern
        self.mean_function = mean_function

        self.num_outputs = num_outputs
        self.white = white

        if q_mu is None:
            q_mu = np.zeros((self.num_inducing, num_outputs), dtype=settings.float_type)
        self.q_mu = Parameter(q_mu)

        if q_sqrt is None:
            if not self.white:  # initialize to prior
                with gpflow.params_as_tensors_for(feature):
                    Ku = conditionals.Kuu(feature, self.kern, jitter=settings.jitter)
                    Lu = tf.linalg.cholesky(Ku)
                    Lu = self.enquire_session().run(Lu)
                    q_sqrt = np.tile(Lu[None, :, :], [num_outputs, 1, 1])
            else:
                q_sqrt = np.tile(np.eye(self.num_inducing, dtype=settings.float_type)[None, :, :], [num_outputs, 1, 1])

        transform = transforms.LowerTriangular(self.num_inducing, num_matrices=num_outputs)
        self.q_sqrt = Parameter(q_sqrt, transform=transform)

        self.needs_build_cholesky = True
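
Aside: a NumPy sketch of the non-whitened initialisation above, where q_sqrt starts at the prior Cholesky of Kuu tiled over the outputs (the kernel matrix here is an illustrative stand-in for `conditionals.Kuu`):

import numpy as np

M, num_outputs, jitter = 6, 3, 1e-6
Z = np.random.randn(M, 2)

# stand-in for Kuu(feature, kern): a squared-exponential Gram matrix on Z
sqdist = ((Z[:, None, :] - Z[None, :, :]) ** 2).sum(-1)
Ku = np.exp(-0.5 * sqdist)

Lu = np.linalg.cholesky(Ku + jitter * np.eye(M))
q_sqrt = np.tile(Lu[None, :, :], [num_outputs, 1, 1])  # (num_outputs, M, M)
print(q_sqrt.shape)
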
Example No. 14
    def initialize_(self, train_x, train_y):
        '''
        TODO: how to use autoflow here.
        Explicitly converting to tensors for debugging purposes.
        '''
#        train_x = tf.convert_to_tensor(train_x,dtype=tf.float32)
#        train_y = tf.convert_to_tensor(train_y,dtype=tf.float32)
        
        if np.size(train_x.shape) == 1:
            train_x = np.expand_dims(train_x, -1)

        self.input_dim = np.shape(train_x)[1]

        if np.size(train_x.shape) == 2:
            train_x = np.expand_dims(train_x, 0)
        
        train_x_sort = np.copy(train_x)
        train_x_sort.sort(axis=1)

        max_dist = np.squeeze(train_x_sort[:,-1, :] - train_x_sort[:,0, :])
        
        min_dist_sort = np.squeeze(np.abs(train_x_sort[:,1:, :] - train_x_sort[:,:-1, :]))
        min_dist = np.zeros([self.input_dim],dtype=float)

        # min of each data column could be zero. Hence, picking minimum which is not zero
        for ind in np.arange(self.input_dim):
            min_dist[ind] = min_dist_sort[np.amin(np.where(min_dist_sort[:,ind]>0),axis=1),ind]
        
        # for random restarts during batch processing. We need to initialize at every 
        # batch. Lock the seed here.
        seed = np.random.randint(low=1, high=2**31 - 1)
        np.random.seed(seed)
        
        #Inverse of lengthscales should be drawn from truncated Gaussian |N(0, max_dist^2)|
        # dim: Q x D
        #self.mixture_scales = tf.multiply(,tf.cast(max_dist,dtype=tf.float32)**(-1)
        
        self.mixture_scales = (np.abs(np.random.randn(self.num_mixtures, self.input_dim))
                               * np.expand_dims(max_dist, axis=0)) ** (-1)
        self.mixture_scales = Parameter(self.mixture_scales,
                                        transform=transforms.positive)
        
        # Draw means from Unif(0, 0.5 / minimum distance between two points), dim: Q x D
        # the Nyquist frequency is half the maximum frequency. TODO
        nyquist = np.divide(0.5, min_dist)
        self.mixture_means = np.multiply(np.random.rand(self.num_mixtures, self.input_dim),
                                         np.expand_dims(nyquist, 0))
        self.mixture_means = Parameter(self.mixture_means)
        
        # Mixture weights should be roughly the std of the y values divided by
        # the number of mixtures
        # dim: 1 x Q
        self.mixture_weights = np.divide(np.std(train_y, axis=0), self.num_mixtures) * np.ones(self.num_mixtures)
        self.mixture_weights = Parameter(self.mixture_weights)
        return None
Example No. 15
 def __init__(self, delta=1e-3, a=0., **kwargs):
     super().__init__(**kwargs)
     self.delta = Parameter(delta,
                            transforms.Logistic(),
                            trainable=False,
                            dtype=settings.float_type,
                            prior=priors.Beta(0.2, 5.))
     self.a = Parameter(a,
                        transforms.positive,
                        trainable=False,
                        dtype=settings.float_type)
Example No. 16
 def __init__(self,
              input_dim,
              active_dims=None,
              name=None,
              v_w=1.0,
              v_b=1.0,
              depth=1):
     super().__init__(input_dim, active_dims, name=name)
     self.v_b = Parameter(v_b, transform=gpflow.transforms.positive)
     self.v_w = Parameter(v_w, transform=gpflow.transforms.positive)
     self.depth = depth
Example No. 17
 def __init__(self, input_dim, period=1.0, variance=1.0,
              lengthscales=1.0, active_dims=None, name=None):
     # No ARD support for lengthscale or period yet
     super().__init__(input_dim, active_dims, name=name)
     self.variance = Parameter(variance, transform=transforms.positive,
                               dtype=settings.float_type)
     self.lengthscales = Parameter(lengthscales, transform=transforms.positive,
                                   dtype=settings.float_type)
     self.ARD = False
     self.period = Parameter(period, transform=transforms.positive,
                             dtype=settings.float_type)
Example No. 18
    def __init__(self, A=None, b=None, **kwargs):
        """
        A is a matrix which maps each element of X to Y, b is an additive
        constant.
        If X has N rows and D columns, and Y is intended to have Q columns,
        then A must be D x Q, b must be a vector of length Q.
        """
        A = np.ones((1, 1)) if A is None else A
        b = np.zeros(1) if b is None else b

        MeanFunction.__init__(self, **kwargs)
        self.A = Parameter(np.atleast_2d(A), dtype=settings.float_type)
        self.b = Parameter(b, dtype=settings.float_type)
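
Aside: a quick NumPy check of the affine map and shapes described in the docstring (illustrative values only):

import numpy as np

N, D, Q = 5, 3, 2
X = np.random.randn(N, D)
A = np.ones((D, Q))  # D x Q
b = np.zeros(Q)      # length-Q additive constant

Y = X @ A + b        # N x Q
print(Y.shape)
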
Example No. 19
    def __init__(self,
                 input_shape: List[int],
                 filter_sizes: List[List[int]],
                 recurse_kern: ElementwiseExKern,
                 pooling_layers: List[int],
                 var_weight: float = 1.0,
                 var_bias: float = 1.0,
                 padding: List[str] = "SAME",
                 strides: List[List[int]] = None,
                 data_format: str = "NCHW",
                 active_dims: slice = None,
                 skip_freq: int = -1,
                 name: str = None):
        input_dim = np.prod(input_shape)
        super(DeepKernel, self).__init__(input_dim, active_dims, name=name)

        self.filter_sizes = np.copy(filter_sizes).astype(np.int32)
        self.n_layers = len(filter_sizes)
        self.input_shape = list(np.copy(input_shape))
        self.recurse_kern = recurse_kern
        self.skip_freq = skip_freq

        inferred_data_format = "NC" + "DHW"[4-len(input_shape):]
        if inferred_data_format != data_format:
            raise ValueError(("Inferred and supplied data formats "
                              "inconsistent: {} vs {}")
                             .format(data_format, inferred_data_format))
        self.data_format = data_format

        if not isinstance(padding, list):
            self.padding = [padding] * len(self.filter_sizes)
        else:
            self.padding = padding
        if len(self.padding) != len(self.filter_sizes):
            raise ValueError(("Mismatching number of layers in `padding` vs "
                              "`filter_sizes`: {} vs {}").format(
                                  len(self.padding), len(self.filter_sizes)))

        if strides is None:
            self.strides = np.ones([self.n_layers, len(input_shape)-1],
                                   dtype=np.int32)
        else:
            self.strides = np.copy(strides).astype(np.int32)
        if len(self.strides) != self.n_layers:
            raise ValueError(("Mismatching number of layers in `strides`: "
                              "{} vs {}").format(
                                  len(self.strides), self.n_layers))

        self.var_weight = Parameter(var_weight, gpflow.transforms.positive)
        self.var_bias = Parameter(var_bias, gpflow.transforms.positive)
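
Aside: the data-format inference used in the constructor, shown on two illustrative input shapes:

# "NC" plus the trailing spatial letters of "DHW"
for input_shape in ([1, 28, 28], [3, 16, 16, 16]):
    inferred = "NC" + "DHW"[4 - len(input_shape):]
    print(len(input_shape), inferred)  # 3 -> NCHW, 4 -> NCDHW
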
Example No. 20
    def __init__(self, input_dim, L_p=None, active_dims=None, name=None):
        """
        - input_dim is the dimension of the input to the kernel
        - L_p is the (initial) value for the L_p matrix parameter;
          if None, it defaults to the identity matrix of size input_dim
        - active_dims is a list of length input_dim which controls
          which columns of X are used.
        """
        super().__init__(input_dim, active_dims, name=name)

        if L_p is None:
            L_p = np.eye(input_dim, dtype=settings.float_type)
        self.L_p = Parameter(L_p, dtype=settings.float_type)
Example No. 21
 def __init__(self, antenna1, antenna2, Na, Nd, Nt, Nf, freqs, Vmod, var=1., dropout=0.2, name=None):
     super(VisibilityLikelihood, self).__init__(name)
     assert len(antenna1) == len(antenna2)
     self.antenna1 = antenna1
     self.antenna2 = antenna2
     self.freqs = freqs
     self.Vmod = Vmod
     self.Na = Na
     self.Nt = Nt
     self.Nd = Nd
     self.Nf = Nf
     self.c = Parameter(np.zeros([Na, Nt, 1, 1]), prior=priors.Gaussian(0., 1.))
     self.variance_V = var
     self.var_scale = Parameter(1., transform=positive)
     self.dropout = dropout
Example No. 22
    def __init__(self,
                 dim_in,
                 dim_out,
                 kern,
                 likelihood,
                 dim_h,
                 num_h,
                 feat=None,
                 mean_function=None,
                 q_diag=False,
                 whiten=True,
                 Z=None,
                 num_data=None,
                 observed_config_space_dim=0,
                 latent_to_conf_space_kernel=None,
                 latent_to_conf_space_likelihood=None,
                 **kwargs):
        super(MLSVGP, self).__init__(dim_in=dim_in,
                                     dim_out=dim_out,
                                     kern=kern,
                                     likelihood=likelihood,
                                     feat=feat,
                                     mean_function=mean_function,
                                     q_diag=q_diag,
                                     whiten=whiten,
                                     Z=Z,
                                     num_data=num_data,
                                     **kwargs)

        self.dim_h = dim_h
        self.num_h = num_h
        self.observed_config_space = observed_config_space_dim
        self.mean_psi = Zero(observed_config_space_dim)
        self.configuration_kernel = latent_to_conf_space_kernel
        self.configuration_likelihood = latent_to_conf_space_likelihood

        # Initialize task variables
        H_mu = np.random.randn(num_h, dim_h)
        H_var = np.log(np.ones_like(H_mu) * 0.1)
        H_init = np.hstack([H_mu, H_var])
        self.H = Parameter(H_init, dtype=settings.float_type, name="H")

        # Create placeholders
        self.H_ids_ph = tf.placeholder(tf.int32, [None])
        self.H_unique_ph = tf.placeholder(tf.int32, [None])
        self.H_scale = tf.placeholder(settings.float_type, [])
        self.psi_ph = tf.placeholder(dtype=settings.float_type,
                                     shape=[None, self.observed_config_space])
Example No. 23
    def __init__(self, model, cost, n_agents, dim_states, dim_actions,
                 dim_angles, episode_length, planning_horizon, **kwargs):

        super(MultiAgentMPC, self).__init__(**kwargs)

        self.model = model
        self.cost = cost
        self.n_agents = n_agents
        self.dim_states = dim_states
        self.dim_actions = dim_actions
        self.dim_angles = dim_angles
        self.dim_states_tf = dim_states + dim_angles
        self.episode_length = episode_length
        self.planning_horizon = planning_horizon

        init_policy = np.random.randn(n_agents,
                                      episode_length + planning_horizon,
                                      dim_actions)
        self.policy = Parameter(init_policy, dtype=settings.float_type)

        # Create placeholders
        self.state_mu_ph = tf.placeholder(settings.float_type, [1, dim_states])
        self.state_var_ph = tf.placeholder(settings.float_type,
                                           [1, dim_states, dim_states])
        self.current_step_ph = tf.placeholder(tf.int32, [])
        self.agent_id_ph = tf.placeholder(tf.int32, [])
        self.policy_ph = tf.placeholder(
            settings.float_type,
            [1, episode_length + planning_horizon, dim_actions])

        self.inp_mu = tf.placeholder(settings.float_type, [1, model.dim_in])
        self.inp_std = tf.placeholder(settings.float_type, [1, model.dim_in])
        self.out_mu = tf.placeholder(settings.float_type, [1, model.dim_out])
        self.out_std = tf.placeholder(settings.float_type, [1, model.dim_out])
Example No. 24
    def __init__(self,
                 dim_in,
                 dim_out,
                 kern,
                 likelihood,
                 dim_h,
                 num_h,
                 feat=None,
                 mean_function=None,
                 num_latent=None,
                 q_diag=False,
                 whiten=True,
                 minibatch_size=None,
                 Z=None,
                 num_data=None,
                 max_lik_h=False,
                 **kwargs):

        # Only used to initialize the SVGP class
        X_init = np.zeros(shape=(1, dim_in))
        Y_init = np.zeros(shape=(1, dim_out))

        SVGP.__init__(self,
                      X=X_init,
                      Y=Y_init,
                      kern=kern,
                      likelihood=likelihood,
                      feat=feat,
                      mean_function=mean_function,
                      num_latent=num_latent,
                      q_diag=q_diag,
                      whiten=whiten,
                      minibatch_size=minibatch_size,
                      Z=Z,
                      num_data=num_data,
                      **kwargs)

        self.dim_in = dim_in
        self.dim_out = dim_out
        self.num_latent = dim_out
        self.dim_h = dim_h
        self.num_h = num_h
        self.max_lik_h = max_lik_h

        # Initialize task variables
        h_mu = np.zeros((1, dim_h))
        h_var = np.log(np.ones_like(h_mu) * 0.1)
        H_init = np.hstack([h_mu, h_var])
        for h in range(num_h):
            setattr(self, "H_{}".format(h),
                    Parameter(H_init, dtype=settings.float_type))

        # Create placeholders
        self.X_ph = tf.placeholder(settings.float_type, [None, dim_in])
        self.X_var_ph = tf.placeholder(settings.float_type, [None, dim_in])
        self.Y_ph = tf.placeholder(settings.float_type, [None, dim_out])
        self.H_ids_ph = tf.placeholder(tf.int32, [None])
        self.num_steps = tf.placeholder(tf.int32, [])
        self.data_scale = tf.placeholder(settings.float_type, [])
        self.task_scale = tf.placeholder(settings.float_type, [])
Example No. 25
    def __init__(self, *args, priors=None, extra_factors=None, **kwargs):
        super().__init__(*args, **kwargs)

        self._advi_values = {}
        self.factors = {}

        # use additional posterior factors, create corresponding initial values
        if extra_factors:
            self.factors.update(extra_factors)
            for name, factor in extra_factors.items():
                factor_shape = factor.sample.shape
                self._advi_values[name] = (np.zeros(factor_shape),
                                           np.full(factor_shape, -5))

        # transform hyperparameters to use approximate posterior samples
        for parameter, prior in priors.items():
            factor = build_factor(parameter.pathname, prior, parameter.shape)
            self.factors[parameter.pathname] = factor
            self._advi_values[parameter.pathname] = (np.zeros(
                parameter.shape), np.full(parameter.shape, -5))

            parent = parameter.parent
            name = next(key for key, value in parent.children.items()
                        if value is parameter)
            parent.unset_child(name, parameter)

            new_parameter = Parameter(factor.sample, trainable=False)
            parent.set_child(name, new_parameter)

        self._advi_initializables = [(tensor,
                                      tf.is_variable_initialized(tensor))
                                     for tensor in self.advi_tensors]
Example No. 26
    def __init__(self, n_outputs):
        super(MultiOutputMeanFunction, self).__init__()

        c = np.zeros(n_outputs)
        c = np.reshape(c, (1, -1))

        self.c = Parameter(c)
Example No. 27
    def __init__(self,
                 X,
                 Y,
                 W,
                 kern,
                 feat=None,
                 mean_function=None,
                 Z=None,
                 **kwargs):
        """
        X is a data matrix, size N x D
        Y is a data matrix, size N x R
        Z is a matrix of pseudo inputs, size M x D
        kern, mean_function are appropriate GPflow objects
        This method only works with a Gaussian likelihood.
        """
        X = DataHolder(X)
        Y = DataHolder(Y)
        likelihood = likelihoods.Gaussian()
        GPModel.__init__(self, X, Y, kern, likelihood, mean_function, **kwargs)
        self.feature = features.inducingpoint_wrapper(feat, Z)
        self.num_data = X.shape[0]

        self.W_prior = tf.ones(W.shape, dtype=settings.float_type) / W.shape[1]
        self.W = Parameter(W)
        self.num_inducing = Z.shape[0] * W.shape[1]
Example No. 28
    def __init__(self,
                 x0,
                 a=400.,
                 b=20.,
                 l=10.,
                 tec_scale=1e-3,
                 active_dims=None,
                 name=None):
        super().__init__(4, active_dims, name=name)
        self.tec_scale = tec_scale
        # b**2 exp(2 g) sec1 sec2 K(f(x),f(x'))

        self.x0 = Parameter(x0, dtype=settings.float_type, trainable=False)

        #        g_prior = log_normal_solve(1.,np.log(100.))
        #        self.expg = Parameter(1.,
        #                transforms.Exp(),
        #                dtype=settings.float_type,
        #                prior=LogNormal(g_prior[0],g_prior[1]**2),
        #                name='thinlayer_expg')#per 10^10

        kern_sigma = 0.005 / tec_scale
        v_prior = log_normal_solve(kern_sigma**2, 0.1 * kern_sigma**2)

        #        v_prior = log_normal_solve(,0.5)
        self.variance = Parameter(np.exp(v_prior[0]),
                                  transform=transforms.positive,
                                  dtype=settings.float_type,
                                  prior=LogNormal(v_prior[0], v_prior[1]**2),
                                  name='thinlayer_var')

        l_prior = log_normal_solve(10., 20.)
        self.lengthscales = Parameter(
            l,
            transform=transforms.Rescale(10.)(transforms.positive),
            dtype=settings.float_type,
            prior=LogNormal(l_prior[0], l_prior[1]**2),
            name='thinlayer_l')

        a_scale = 400.  # 300 km scale
        a_prior = log_normal_solve(400., 200.)
        self.a = Parameter(a,
                           transform=transforms.Rescale(a_scale)(
                               transforms.positive),
                           dtype=settings.float_type,
                           prior=LogNormal(a_prior[0], a_prior[1]**2),
                           name='thinlayer_a')
Example No. 29
    def __init__(self,
                 X,
                 Y,
                 W1,
                 W1_index,
                 W2,
                 W2_index,
                 kern,
                 feat=None,
                 mean_function=None,
                 Z=None,
                 **kwargs):
        """
        X is a data matrix, size N x D
        Y is a data matrix, size N x R
        Z is a matrix of pseudo inputs, size M x D
        W1, size NxK
        W1_index PxL

        W2, size NxL
        W2_index PxL

        kern, mean_function are appropriate GPflow objects
        This method only works with a Gaussian likelihood.
        """
        X = DataHolder(X)
        Y = DataHolder(Y, fix_shape=True)
        likelihood = likelihoods.Gaussian()
        GPModel.__init__(self, X, Y, kern, likelihood, mean_function, **kwargs)
        self.feature = features.inducingpoint_wrapper(feat, Z)
        self.num_data = X.shape[0]

        self.W1_prior = Parameter(np.log(
            np.ones(W1.shape[1], dtype=settings.float_type) / W1.shape[1]),
                                  trainable=False)
        self.W1 = Parameter(W1)
        self.W1_index = DataHolder(W1_index, dtype=np.int32, fix_shape=True)
        self.K = W1.shape[1]

        self.W2_prior = Parameter(np.log(
            np.ones(W2.shape[1], dtype=settings.float_type) / W2.shape[1]),
                                  trainable=False)
        self.W2 = Parameter(W2)
        self.W2_index = DataHolder(W2_index, dtype=np.int32, fix_shape=True)
        self.L = W2.shape[1]

        self.num_inducing = Z.shape[0]
Example No. 30
 def __init__(self, dispersion=1.0, **kwargs):
     """
      :param dispersion: positive dispersion parameter, float
     """
     super().__init__(**kwargs)
     self.dispersion = Parameter(
         dispersion, transform=transforms.positive, dtype=settings.float_type)