def logLik(self, x, z):
    '''
    model-specific likelihood
    :return: log P(x|z)
    '''
    # x | z ~ N([z1*z2, z1*z2], varn): both coordinates of the mean
    # are the product of the latent coordinates
    logPxz = mathT.gaussInit(x, self.varn)
    z1z2 = T.prod(z, axis=1, keepdims=True)
    return logPxz(T.concatenate([z1z2, z1z2], axis=1))
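# For intuition, data from this model can be simulated directly. The sketch
# below is illustrative only (plain NumPy, not part of the repo) and assumes
# the observation-noise stds [.5, .5] set in the constructor.
import numpy as np

def sample_banana(n, stdn=(.5, .5), seed=0):
    rng = np.random.default_rng(seed)
    z = rng.standard_normal((n, 2))                       # z ~ N(0, I)
    mu = np.repeat(np.prod(z, axis=1, keepdims=True), 2, axis=1)
    return mu + rng.standard_normal((n, 2)) * np.asarray(stdn)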
def __init__(self):
    '''
    standard-normal prior over z; fixed diagonal observation noise
    '''
    super(banana, self).__init__()
    self.priormus = utils.sharedf(np.zeros(2))
    self.priorvar = utils.sharedf(np.eye(2))
    self.stdn = utils.sharedf([.5, .5])                  # observation-noise stds
    self.varn = nlinalg.diag(T.sqr(self.stdn))           # diagonal covariance
    self.logPz = mathT.gaussInit(self.priormus, self.priorvar)
def initLogPrior(self):
    priorvar = np.eye(self.dim)
    # Theano log-density of the prior; mathZ.gaussInit additionally
    # returns a NumPy sampler, kept here as priorGen
    self.logPrior = mathT.gaussInit(np.zeros(self.dim), priorvar)
    _, self.priorGen = mathZ.gaussInit(np.zeros(self.dim), priorvar)
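# The gaussInit helpers themselves are not shown in this section. A minimal
# reconstruction consistent with how mathT.gaussInit is called here -- it
# returns a callable evaluating log N(x; mu, cov) row-wise -- might look as
# follows. This is a sketch only; the real helper also accepts options such
# as mean=True, whose semantics are not shown.
import numpy as np
import theano.tensor as T
from theano.tensor import nlinalg

def gaussInit(mu, cov):
    mu = T.as_tensor_variable(mu)
    cov = T.as_tensor_variable(cov)
    prec = nlinalg.matrix_inverse(cov)
    logdet = T.log(nlinalg.det(cov))
    k = cov.shape[0]
    def logp(x):
        d = x - mu
        maha = T.sum(T.dot(d, prec) * d, axis=-1)        # Mahalanobis term
        return -0.5 * (maha + logdet + k * np.log(2.0 * np.pi))
    return logp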
import numpy as np
import theano
import theano.tensor as T

import nodes
import nodes.layers as layers
import nodes.iaf as iaf
import utils.mathT as mathT
import utils.mathZ as mathZ
import utils.plotZ as plotZ

f32 = theano.config.floatX
DIM = 2
SAMPLING_E = int(1e+5)
LR = 0.1

# defining target distribution
logPZ = mathT.gaussInit([0., 0.], np.eye(2) / 2., mean=True)

# defining normalising flow model
iafmodel = iaf.IafStack('IAF_simple', dim=DIM)
layer1 = iaf.IafLinear({
    'name': 'iaf-1',
    'dim': DIM,
    'bias': True,
    'initmethod': 'identity',
    'lr': LR
})
layer1.setWeight(weights=np.array([[0.3, 0.2], [0, 0.2]]), bias=[0, 0])
layer2 = iaf.IafTanh({'name': 'tanh'})
layer3 = iaf.IafPermute({'name': 'per', 'dim': DIM})
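# The script stops after constructing the layers; the training step is not
# shown. The self-contained sketch below (plain Theano, independent of the
# nodes/iaf modules, with a single affine map standing in for the stack)
# illustrates the reverse-KL objective such a script minimises:
#   E_q[log q(z) - log p(z)] = E[log q0(e)] - log|det dz/de| - E[log p(z)],
# where the first term is constant in the flow parameters.
rng = np.random.RandomState(0)
W = theano.shared(np.eye(2).astype(f32), name='W')
b = theano.shared(np.zeros(2, dtype=f32), name='b')

from theano.tensor import nlinalg
e = T.matrix('e')                        # base noise, e ~ N(0, I)
z = T.dot(e, W.T) + b                    # one affine flow step
logdet = T.log(abs(nlinalg.det(W)))      # change-of-variables correction
logp = -T.sum(z * z, axis=1)             # log N(0, I/2) up to a constant
loss = -logdet - T.mean(logp)            # reverse KL up to constants

grads = T.grad(loss, [W, b])
updates = [(p, p - LR * g) for p, g in zip([W, b], grads)]
train = theano.function([e], loss, updates=updates)

for _ in range(200):
    train(rng.randn(500, 2).astype(f32))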
def __init__(self, name, dim):
    self.name = name
    self.dim = dim
    self.layers = list()
    # standard-normal prior over the stack's input noise
    self.logPrior = mathT.gaussInit(np.zeros(self.dim), np.eye(self.dim))
def initLogPrior(self):
    # prior noise: N(0, priorstd^2 * I)
    noisevar = N.sharedf(np.eye(self.dim)) * T.sqr(self.priorstd)
    noisemu = N.sharedf(np.zeros(self.dim))
    self.logPrior = mathT.gaussInit(noisemu, noisevar)