Example #1
 def __init__(self,input_dim,variance=1.,lengthscale=1.,mu=None,v=None,active_dims=None):
     super(MFRBF, self).__init__(input_dim, active_dims, 'MFRBF')
     self.mu = mu
     self.v = v
     self.variance = Param('variance', variance)
     self.lengthscale = Param('lengthscale', lengthscale)
     self.link_parameters(self.variance, self.lengthscale)
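All of the examples on this page are __init__ methods of GPy Kern subclasses. As a point of reference, below is a minimal self-contained sketch (not from the original source) of the class context Example #1 assumes: the covariance in K is a plain RBF stand-in for the unshown MFRBF form, the role of mu and v is not shown, and the gradient methods GPy needs for optimization are omitted.

import numpy as np
from GPy.kern import Kern
from GPy.core.parameterization import Param

class MFRBF(Kern):
    def __init__(self, input_dim, variance=1., lengthscale=1.,
                 mu=None, v=None, active_dims=None):
        super(MFRBF, self).__init__(input_dim, active_dims, 'MFRBF')
        self.mu = mu
        self.v = v
        self.variance = Param('variance', variance)
        self.lengthscale = Param('lengthscale', lengthscale)
        self.link_parameters(self.variance, self.lengthscale)

    def K(self, X, X2=None):
        # Stand-in RBF covariance; the real MFRBF form is not shown in the snippet.
        if X2 is None:
            X2 = X
        r2 = np.square(X[:, None, :] - X2[None, :, :]).sum(-1)
        return self.variance * np.exp(-0.5 * r2 / self.lengthscale ** 2)

    def Kdiag(self, X):
        # Diagonal of K(X, X): constant at the signal variance.
        return self.variance * np.ones(X.shape[0])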
Example #2
 def __init__(self,input_dim,variance=1.,lengthscale=1.,mu=None,v=None,active_dims=None):
     super(MFCosine, self).__init__(input_dim, active_dims, 'MFCosine')
     #assert input_dim == 1, "For this kernel we assume input_dim=1"
     self.mu = mu
     self.v = v
     self.variance = Param('variance', variance)
     self.lengthscale = Param('lengthscale', lengthscale)
     self.link_parameters(self.variance, self.lengthscale)
Example #3
 def __init__(self,
              input_dim,
              variance1=1.,
              lengthscale=1.,
              variance2=1.,
              active_dims=None):
     super(deepRBF, self).__init__(input_dim, active_dims, 'deep_rbf')
     self.variance1 = Param('variance1', variance1)
     self.lengthscale = Param('lengthscale', lengthscale)
     self.variance2 = Param('variance2', variance2)
     self.link_parameters(self.variance1, self.lengthscale, self.variance2)
Example #4
 def __init__(self,
              input_dim,
              variance=1.0,
              lengthscale=1.0,
              epsilon=0.0,
              active_dims=None):
     super().__init__(input_dim, active_dims, "time_se")
     self.variance = Param("variance", variance)
     self.lengthscale = Param("lengthscale", lengthscale)
     self.epsilon = Param("epsilon", epsilon)
     self.link_parameters(self.variance, self.lengthscale, self.epsilon)
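A hypothetical usage sketch for a kernel defined this way; the class name TimeSE is an assumption (the snippet only fixes the kernel name string "time_se"). Once linked, the Params show up in the kernel's parameter table and support GPy's usual assignment and constraint methods.

# Hypothetical: assumes the __init__ above belongs to a Kern subclass named TimeSE.
k = TimeSE(input_dim=1, variance=2.0, lengthscale=0.5, epsilon=0.1)
print(k)              # the three linked Params appear in the parameter table
k.lengthscale = 1.5   # a linked Param accepts direct value assignment
k.epsilon.fix()       # and the usual paramz constraint/fixing methods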
Example #5
 def __init__(self,
              input_dim,
              variance1=1.,
              lengthscale=1.,
              variance2=1.,
              active_dims=None):
     super(deepCosine, self).__init__(input_dim, active_dims, 'deep_cosine')
     #assert input_dim == 1, "For this kernel we assume input_dim=1"
     self.variance1 = Param('variance1', variance1)
     self.lengthscale = Param('lengthscale', lengthscale)
     self.variance2 = Param('variance2', variance2)
     #self.lengthscale2 = Param('lengthscale2', lengthscale2)
     self.link_parameters(self.variance1, self.lengthscale, self.variance2)
Example #6
    def __init__(self,
                 layer,
                 direction='bottom_up',
                 encoder='mlp',
                 encoder_dims=None,
                 mpi_comm=None,
                 mpi_root=0,
                 name='encoder'):
        super(EncoderLayer, self).__init__(name=name)
        self.mpi_comm, self.mpi_root = mpi_comm, mpi_root
        self.layer = layer
        self.direction = direction
        if direction == 'bottom_up':
            self.bottom_up = True
            self.X, self.Y = layer.Y, layer.X
        elif direction == 'top_down':
            self.bottom_up = False
            self.X, self.Y = layer.X, layer.Y
        else:
            raise ValueError(
                'the "direction" argument must be either "bottom_up" or "top_down"'
            )
        self.uncertain_input = isinstance(self.X, VariationalPosterior)
        assert isinstance(
            self.Y, VariationalPosterior
        ), "No need for an encoder layer when the output is certain (non-variational)!"

        if encoder == 'mlp':
            dim_in, dim_out = self.X.shape[1], self.Y.shape[1]
            from copy import deepcopy
            from deepgp.encoder.mlp import MLP
            # Default: two hidden layers whose sizes interpolate between dim_in and dim_out.
            if encoder_dims is None:
                dims = [dim_in, int((dim_in + dim_out) * 2. / 3.),
                        int((dim_in + dim_out) / 3.), dim_out]
            else:
                dims = [dim_in] + deepcopy(encoder_dims) + [dim_out]
            self.encoder = MLP(dims)
        else:
            raise ValueError('Unsupported encoder type: ' + encoder)
        self.Y_var_common = Param('Y_var', self.Y.variance.values[1].copy(),
                                  Logexp())

        # Synchronize across MPI nodes
        if self.mpi_comm is not None:
            from ..util.parallel import broadcastArrays
            broadcastArrays([self.encoder.param_array, self.Y_var_common],
                            self.mpi_comm, self.mpi_root)
        self.link_parameters(self.encoder, self.Y_var_common)
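For illustration (the sizes below are chosen here, not taken from the source), the default encoder architecture inserts two hidden layers whose widths interpolate between dim_in and dim_out:

# Worked example of the default encoder_dims heuristic above:
dim_in, dim_out = 10, 3
dims = [dim_in, int((dim_in + dim_out) * 2. / 3.),
        int((dim_in + dim_out) / 3.), dim_out]
print(dims)  # [10, 8, 4, 3]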
Example #7
    def __init__(self,
                 X_list,
                 Y_list,
                 W,
                 kernel=None,
                 likelihoods_list=None,
                 name='GPCR',
                 W_rank=1,
                 kernel_name='coreg'):

        # Input and output
        X, Y, self.output_index = util.multioutput.build_XY(X_list, Y_list)
        Ny = len(Y_list)
        self.opt_trajectory = []
        self.PEHE_trajectory = []
        self.MSE_trajectory = []
        self.treatment_assign = W

        self.logdetK = 0

        # Kernel
        if kernel is None:
            kernel = kern.RBF(X.shape[1] - 1)

            kernel = util.multioutput.ICM(input_dim=X.shape[1] - 1,
                                          num_outputs=Ny,
                                          kernel=kernel,
                                          W_rank=W_rank,
                                          name=kernel_name)

        # Likelihood
        likelihood = util.multioutput.build_likelihood(Y_list,
                                                       self.output_index,
                                                       likelihoods_list)

        super(CMGP,
              self).__init__(X,
                             Y,
                             kernel,
                             likelihood,
                             inference_method=RiskEmpiricalBayes(),
                             Y_metadata={'output_index': self.output_index})

        self.X = Param("input", X)
Example #8
    def __init__(self, Y, dim_down, dim_up, likelihood, MLP_dims=None,
                 X=None, X_variance=None, init='rand', Z=None, num_inducing=10,
                 kernel=None, inference_method=None, uncertain_inputs=True,
                 mpi_comm=None, mpi_root=0, back_constraint=True, name='mrd-view'):

        self.uncertain_inputs = uncertain_inputs
        self.layer_lower = None
        self.scale = 1.

        if back_constraint:
            from .mlp import MLP
            from copy import deepcopy
            # Default: two hidden layers whose sizes interpolate between dim_down and dim_up.
            if MLP_dims is None:
                dims = [dim_down, int((dim_down + dim_up) * 2. / 3.),
                        int((dim_down + dim_up) / 3.), dim_up]
            else:
                dims = [dim_down] + deepcopy(MLP_dims) + [dim_up]
            self.encoder = MLP(dims)
            # Initialize X from the encoder's prediction on (the mean of) the data.
            X = self.encoder.predict(Y.mean.values if isinstance(Y, VariationalPosterior) else Y)
            X_variance = 0.0001 * np.ones(X.shape)
            self.back_constraint = True
        else:
            self.back_constraint = False

        if Z is None:
            Z = np.random.rand(num_inducing, dim_up)*2-1. #np.random.permutation(X.copy())[:num_inducing]
        assert Z.shape[1] == X.shape[1]
        
        if likelihood is None:
            likelihood = likelihoods.Gaussian(variance=Y.var() * 0.01)

        if uncertain_inputs:
            X = NormalPosterior(X, X_variance)
        if kernel is None:
            kernel = kern.RBF(dim_up, ARD=True)
        
        # The command below will also give the field self.X to the view.
        super(MRDView, self).__init__(X, Y, Z, kernel, likelihood, inference_method=inference_method, mpi_comm=mpi_comm, mpi_root=mpi_root, name=name)
        if back_constraint: self.link_parameter(self.encoder)

        if self.uncertain_inputs and self.back_constraint:
            from GPy import Param
            from GPy.core.parameterization.transformations import Logexp
            self.X_var_common = Param('X_var', X_variance[0].copy(), Logexp())
            self.link_parameters(self.X_var_common)
        # There is some redundancy between self.Xv and self.X. Currently self.X is used
        # for the likelihood and all other calculations; self.Xv is only used for its
        # gradient. This is redundant, but it is kept in case we want to implement the
        # product-of-experts MRD model.
        self.Xv = self.X
Example #9
 def setLengthScale(self, lengthscale):
     # self.libKern = GPy.kern.RBF(self.input_dim, self.func, self.isTran, lengthscale=lengthscale, ARD=False)
     # Replace the wrapped kernel's lengthscale with a fresh positive-constrained Param.
     self.libKern.lengthscale = Param('lengthscale', lengthscale, Logexp())
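For comparison, the stock GPy idiom for changing a lengthscale assigns to the existing Param instead of creating a new one. A sketch with a plain GPy.kern.RBF (a stand-in, not the wrapped libKern above):

import GPy

rbf = GPy.kern.RBF(input_dim=1)
rbf.lengthscale = 0.7                         # assign to the existing Param
rbf.lengthscale.constrain_bounded(0.1, 10.0)  # optionally bound the value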