    def __init__(self,
            numberOfInducingPoints, # Number of inducing points in sparse GP
            batchSize,              # Size of mini batch
            dimX,                   # Dimensionality of the latent co-ordinates
            dimZ,                   # Dimensionality of the latent variables
            data,                   # [NxP] matrix of observations
            kernelType='ARD',
            encoderType_qX='FreeForm2',  # 'FreeForm2', 'MLP', 'Kernel'.
            encoderType_rX='FreeForm2',  # 'FreeForm2', 'MLP', 'Kernel'.
            Xu_optimise=False,
            numHiddenUnits_encoder=10,
            numHiddenUnits_decoder=10,
            numHiddenLayers_decoder=2,
            continuous=True
        ):

        SGPDV.__init__(self,
            numberOfInducingPoints, # Number of inducing points in sparse GP
            batchSize,              # Size of mini batch
            dimX,                   # Dimensionality of the latent co-ordinates
            dimZ,                   # Dimensionality of the latent variables
            data,                   # [NxP] matrix of observations
            kernelType=kernelType,
            encoderType_qX=encoderType_qX,
            encoderType_rX=encoderType_rX,
            Xu_optimise=Xu_optimise,
            numberOfEncoderHiddenUnits=numHiddenUnits_encoder
        )

        self.HU_decoder = numHiddenUnits_decoder
        self.numHiddenLayers_decoder = numHiddenLayers_decoder
        self.continuous = continuous

        # Construct appropriately sized matrices to initialise Theano shared variables

        self.W_zh  = sharedZeroMatrix(self.HU_decoder, self.Q, 'W_zh')
        self.W_hy1 = sharedZeroMatrix(self.P, self.HU_decoder, 'W_hy1')
        self.b_zh  = sharedZeroVector(self.HU_decoder, 'b_zh', broadcastable=(False,True))
        self.b_hy1 = sharedZeroVector(self.P, 'b_hy1', broadcastable=(False,True))

        self.likelihoodVariables = [self.W_zh, self.W_hy1, self.b_zh, self.b_hy1]

        if self.numHiddenLayers_decoder == 2:
            self.W_hh = sharedZeroMatrix(self.HU_decoder, self.HU_decoder, 'W_hh')
            self.b_hh = sharedZeroVector(self.HU_decoder, 'b_hh', broadcastable=(False,True))

            self.likelihoodVariables.extend([self.W_hh, self.b_hh])
        if self.continuous:
            self.W_hy2 = sharedZeroMatrix(self.P, self.HU_decoder, 'W_hy2')
            self.b_hy2 = sharedZeroVector(self.P, 'b_hy2', broadcastable=(False,True))

            self.likelihoodVariables.extend([self.W_hy2, self.b_hy2])

        self.gradientVariables.extend(self.likelihoodVariables)

        # Keep track of bounds and gradients for post analysis
        self.all_bounds = []
        self.all_gradients = []
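
The shared matrices created above are the parameters of a small decoder MLP that maps latent variables back to observation space: W_zh/b_zh form the first hidden layer, W_hh/b_hh an optional second hidden layer, and W_hy1/b_hy1 (plus W_hy2/b_hy2 in the continuous case) the output layer. The biases are column vectors with broadcastable=(False, True), which suggests minibatch points are stored column-wise. A minimal sketch of the forward pass these shapes imply is given below; the method name, the activation functions, and the reading of W_hy2 as producing a log standard deviation are assumptions, not taken from this constructor.

import theano.tensor as T

def decodeZ_sketch(self, z):
    # Hypothetical decoder pass implied by the parameter shapes above.
    # z: [Q x B] latent samples, one minibatch point per column.
    h = T.nnet.softplus(T.dot(self.W_zh, z) + self.b_zh)            # first hidden layer
    if self.numHiddenLayers_decoder == 2:
        h = T.nnet.softplus(T.dot(self.W_hh, h) + self.b_hh)        # optional second hidden layer
    if self.continuous:
        mu_y = T.dot(self.W_hy1, h) + self.b_hy1                    # [P x B] predicted mean
        log_sigma_y = 0.5 * (T.dot(self.W_hy2, h) + self.b_hy2)     # [P x B] predicted log std (assumed)
        return mu_y, log_sigma_y
    else:
        return T.nnet.sigmoid(T.dot(self.W_hy1, h) + self.b_hy1)    # Bernoulli probabilities for binary data
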
Example #2
    def __init__(self,
            numberOfInducingPoints, # Number of inducing points in sparse GP
            batchSize,              # Size of mini batch
            dimX,                   # Dimensionality of the latent co-ordinates
            dimZ,                   # Dimensionality of the latent variables
            data,                   # [NxP] matrix of observations
            kernelType='RBF',
            encoderType_qX='FreeForm',  # 'FreeForm', 'MLP', 'Kernel'.
            encoderType_rX='FreeForm',  # 'FreeForm', 'MLP', 'Kernel', 'NoEncoding'.
            encoderType_ru='FreeForm',  # 'FreeForm', 'MLP', 'NoEncoding'
            z_optimise=False,
            numHiddenUnits_encoder=0,
            numHiddenUnits_decoder=10,
            continuous=True

        ):
        SGPDV.__init__(self,
            numberOfInducingPoints,
            batchSize,
            dimX,
            dimZ,
            data,
            kernelType,
            encoderType_qX,  # 'FreeForm', 'MLP', 'Kernel'.
            encoderType_rX,  # 'FreeForm', 'MLP', 'Kernel', 'NoEncoding'.
            encoderType_ru,  # 'FreeForm', 'MLP', 'NoEncoding'
            z_optimise,
            numHiddenUnits_encoder
            )

        self.K = dimZ # max number of features
        self.D = data.shape[1] # dimensionality of features
        self.continuous = continuous

        # Suitably sized zero matrices
        K_D_mat   = np.zeros((self.K,self.D), dtype=floatX)
        K_D_D_ten = np.zeros((self.K,self.D,self.D), dtype=floatX)
        K_2_mat   = np.zeros((self.K,2), dtype=floatX)
        N_K_mat   = np.zeros((self.N,self.K), dtype=floatX)
        scalar    = np.zeros(1, dtype=floatX)

        self.A       = th.shared( K_D_mat,   name='A')  # referenced in gradientVariables below
        self.Phi_IBP = th.shared( K_D_D_ten, name='Phi_IBP')
        self.phi_IBP = th.shared( K_D_mat,   name='phi_IBP')
        self.tau_IBP = th.shared( K_2_mat,   name='tau_IBP')
        self.mu_IBP  = th.shared( N_K_mat,   name='mu_IBP')
        self.log_alpha_IBP = th.shared(scalar, name='log_alpha_IBP')
        self.log_sigma_y = th.shared(scalar, name='log_sigma_y')
        self.log_sigma_A = th.shared(scalar, name='log_sigma_A')

        self.alpha_IBP   = T.exp(self.log_alpha_IBP)
        self.sigma_y = T.exp(self.log_sigma_y)
        self.sigma_A = T.exp(self.log_sigma_A)

        # Optimise the underlying shared variables (log_alpha_IBP, not the derived alpha_IBP)
        self.gradientVariables.extend([self.A, self.Phi_IBP, self.phi_IBP, self.tau_IBP, self.log_alpha_IBP])

        self.z_IBP_samp = T.nnet.sigmoid(self.z)
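
The shapes set up here point to a variational Indian Buffet Process (IBP) feature model on top of the GP latent variables: phi_IBP and Phi_IBP look like per-feature posterior means ([K x D]) and covariances ([K x D x D]) over a latent feature matrix A, tau_IBP like [K x 2] Beta (stick-breaking) parameters, mu_IBP like per-point feature activation parameters, and the log_* scalars keep the noise and prior scales positive under unconstrained optimisation. As an illustration only, a linear-Gaussian reconstruction term that such a model could use with these variables is sketched below; the exact likelihood, the function name, and the assumption that z_IBP_samp is [B x K] with observations stored row-wise are not taken from this constructor.

import numpy as np
import theano.tensor as T

def ibp_log_likelihood_sketch(self, y_miniBatch):
    # Hypothetical linear-Gaussian IBP reconstruction term.
    # y_miniBatch: [B x D] rows of observed data (assumed layout)
    # z_IBP_samp:  [B x K] relaxed binary feature indicators
    # phi_IBP:     [K x D] posterior mean of the feature matrix A
    y_hat = T.dot(self.z_IBP_samp, self.phi_IBP)                  # [B x D] reconstruction
    sigma2_y = T.sqr(self.sigma_y.sum())                          # scalar noise variance (shared var has shape (1,))
    B = y_miniBatch.shape[0]
    return (-0.5 * T.sum(T.sqr(y_miniBatch - y_hat)) / sigma2_y
            - 0.5 * B * self.D * T.log(2 * np.pi * sigma2_y))     # iid Gaussian log-likelihood
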
Example #3
    def __init__(
            self,
            numberOfInducingPoints,  # Number of inducing points in sparse GP
            batchSize,  # Size of mini batch
            dimX,  # Dimensionality of the latent co-ordinates
            dimZ,  # Dimensionality of the latent variables
            data,  # [NxP] matrix of observations
            kernelType='ARD',
            encoderType_qX='FreeForm2',  # 'FreeForm2', 'MLP', 'Kernel'.
            encoderType_rX='FreeForm2',  # 'FreeForm2', 'MLP', 'Kernel'.
            Xu_optimise=False,
            numHiddenUnits_encoder=10,
            numHiddenUnits_decoder=10,
            numHiddenLayers_decoder=2,
            continuous=True):

        SGPDV.__init__(
            self,
            numberOfInducingPoints,  # Number of inducing points in sparse GP
            batchSize,  # Size of mini batch
            dimX,  # Dimensionality of the latent co-ordinates
            dimZ,  # Dimensionality of the latent variables
            data,  # [NxP] matrix of observations
            kernelType=kernelType,
            encoderType_qX=encoderType_qX,
            encoderType_rX=encoderType_rX,
            Xu_optimise=Xu_optimise,
            numberOfEncoderHiddenUnits=numHiddenUnits_encoder)

        self.HU_decoder = numHiddenUnits_decoder
        self.numHiddenLayers_decoder = numHiddenLayers_decoder
        self.continuous = continuous

        # Construct appropriately sized matrices to initialise Theano shared variables

        self.W_zh = sharedZeroMatrix(self.HU_decoder, self.Q, 'W_zh')
        self.W_hy1 = sharedZeroMatrix(self.P, self.HU_decoder, 'W_hy1')
        self.b_zh = sharedZeroVector(self.HU_decoder,
                                     'b_zh',
                                     broadcastable=(False, True))
        self.b_hy1 = sharedZeroVector(self.P,
                                      'b_hy1',
                                      broadcastable=(False, True))

        self.likelihoodVariables = [
            self.W_zh, self.W_hy1, self.b_zh, self.b_hy1
        ]

        if self.numHiddenLayers_decoder == 2:
            self.W_hh = sharedZeroMatrix(self.HU_decoder, self.HU_decoder,
                                         'W_hh')
            self.b_hh = sharedZeroVector(self.HU_decoder,
                                         'b_hh',
                                         broadcastable=(False, True))

            self.likelihoodVariables.extend([self.W_hh, self.b_hh])
        if self.continuous:
            self.W_hy2 = sharedZeroMatrix(self.P, self.HU_decoder, 'W_hy2')
            self.b_hy2 = sharedZeroVector(self.P,
                                          'b_hy2',
                                          broadcastable=(False, True))

            self.likelihoodVariables.extend([self.W_hy2, self.b_hy2])

        self.gradientVariables.extend(self.likelihoodVariables)

        # Keep track of bounds and gradients for post analysis
        self.all_bounds = []
        self.all_gradients = []
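
Both constructors rely on sharedZeroMatrix and sharedZeroVector helpers that are not defined in this section. A minimal sketch consistent with how they are called here (a size, a name for the Theano shared variable, and an optional broadcastable pattern for the column-vector biases) is given below; the project's real helpers may differ, for example by using random rather than zero initialisation.

import numpy as np
import theano as th

floatX = th.config.floatX

def sharedZeroMatrix(rows, cols, name):
    # [rows x cols] zero matrix wrapped as a named Theano shared variable
    return th.shared(np.zeros((rows, cols), dtype=floatX), name=name)

def sharedZeroVector(rows, name, broadcastable=(False, False)):
    # [rows x 1] zero column vector; broadcastable=(False, True) lets the bias
    # broadcast over the minibatch dimension when added to a [rows x B] matrix
    return th.shared(np.zeros((rows, 1), dtype=floatX),
                     name=name, broadcastable=broadcastable)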