Example 1
    def __init__(self, params, lr=0.01, momentum=0.0, decay=0.0):
        self.LR0 = lr
        self.mom = momentum
        self.dcy = decay

        # shared variables, updated during training
        self.lr = utilsT.sharedf(lr)
        self.it = utilsT.sharedf(0)
Example 2
    def __init__(self, dim, samplingsize, batchsize, name=None):
        super(PermuteLayer, self).__init__(dim, samplingsize, batchsize, name)

        # all batch members share the same permutation matrix
        weights = list()
        for i in range(batchsize):
            weights.append(mathZ.permutMat(dim, enforcing=True, dtype=floatX))
        weights = np.asarray(weights, dtype=floatX)
        jacon = np.zeros((self.batchsize, self.splsize), dtype=floatX)

        self.w = utilsT.sharedf(weights)  # B x d x d
        self.logjaco = utilsT.sharedf(jacon)  # B x N, zero since a permutation has |det J| = 1
Example 3
    def __init__(self, dim, name=None):
        self.name = name
        self.dim = dim

        # learnable mean and per-dimension variance, shared so they can be updated
        self.means = utilsT.sharedf(np.zeros(dim))
        self.vars = utilsT.sharedf(np.ones(dim))

        self.varmat = tlin.diag(self.vars)             # diagonal covariance matrix
        self.rmat = tlin.diag(T.sqrt(self.vars))       # its matrix square root
        self.means_ = self.means.dimshuffle(['x', 0])  # 1 x d row vector
        self.qzft = mathT.multiNormInit_sharedParams(self.means, self.varmat,
                                                     self.dim)
        self.qzfn = None
        self.params = [self.means, self.vars]
Example 4
    def __init__(self, name=None):
        super(Banana, self).__init__(name)
        self.dimx, self.dimz = 1, 2

        self.stdx_ztrue = 0.7

        self.var_xz = utilsT.sharedf(1.)
        self.params = []

        # prior over z: won't change
        self.logP_z = mathT.multiNormInit(mean=np.zeros(self.dimz),
                                          varmat=np.eye(self.dimz))
        self.logP_uninorm = mathT.normInit_sharedParams(mean=utilsT.sharedf(0),
                                                        var=self.var_xz,
                                                        offset=None)

        self.normal = mathZ.normInit(0, self.var_xz.get_value())
        self.nprior = mathZ.multiNormInit(np.zeros(self.dimz),
                                          np.eye(self.dimz))
Example 5
    def __init__(self, name=None):
        super(Banana, self).__init__(name)

        self.dimx, self.dimz = 1, 2

        # true parameter of the model
        self.stdx_ztrue = 0.7

        # shared params, to be updated (learnt)
        self.stdx_z = utilsT.sharedf(1.)
        self.params = [self.stdx_z]

        # prior over z: won't change
        self.logP_z = mathT.multiNormInit(mean=np.zeros(self.dimz),
                                          varmat=np.eye(self.dimz))
        # (x - z1*z2) ~ N(0, sigma^2)
        self.logP_uninorm = mathT.normInit_sharedParams(mean=utilsT.sharedf(0),
                                                        var=T.sqr(self.stdx_z),
                                                        offset=None)
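
Spelling out the comment above: conditioned on z, the model assumes x - z1*z2 ~ N(0, sigma^2) with sigma = stdx_z, so the density that logP_uninorm is presumably applied to is the standard univariate normal log-pdf (stated here only for reference):

\log p(x \mid z) = -\tfrac{1}{2}\log\bigl(2\pi\sigma^{2}\bigr) - \frac{(x - z_{1} z_{2})^{2}}{2\sigma^{2}}

Here sigma^2 corresponds to T.sqr(self.stdx_z), the learnable variance, while stdx_ztrue = 0.7 records its true value.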
Example 6
    def __init__(self, dim, name=None, scale=None):
        super(LinLayer, self).__init__(dim, name)

        # define weight mask and weight
        # default weight scale unless one is given explicitly
        self.scale = (.0002 / self.dim)**.5
        if scale is not None:
            self.scale = scale
        mask = np.triu(np.ones((dim, dim)))
        weight = mathZ.weightsInit(dim, dim, scale=self.scale,
                                   normalise=True)  # TODO scaling

        self.mask = utilsT.sharedf(mask)
        self.w = utilsT.sharedf(weight * mask)
        self.b = utilsT.sharedf(np.zeros(dim))
        self.u = utilsT.sharedf(
            mathZ.biasInit(dim, mean=0, scale=self.scale) / 2)

        self.wmked = self.mask * self.w  # masked weight
        self.wdiag = tlin.extract_diag(self.wmked)
        self.params = [self.w, self.b, self.u]
        self.paramshapes = [(dim, dim), (dim, ), (dim, )]
Example 7
def multiNormInit(mean, varmat):
    '''
    Multivariate normal distribution.
    :param mean: numpy.ndarray, shape (d,)
    :param varmat: numpy.ndarray, shape (d, d), covariance matrix
    :return: log-pdf function operating on theano tensors of shape (N, d)
    '''
    d = mean.shape[0]
    const = -d / 2. * np.log(2 * PI) - 0.5 * np.log(np.abs(nlin.det(varmat)))
    varinv = nlin.inv(varmat)

    mean_ = utilsT.sharedf(mean)
    const_ = utilsT.sharedf(const)
    varinv_ = utilsT.sharedf(varinv)

    def loglik(x):
        subx = x - mean_                # N x d, centred at the mean
        subxcvt = T.dot(subx, varinv_)  # N x d, multiplied by the inverse covariance
        subxsqr = subx * subxcvt
        return -T.sum(subxsqr, axis=1) / 2. + const_  # N-vector of log-densities

    return loglik
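
For reference, the closure returned above evaluates the standard multivariate normal log-density -d/2 * log(2*pi) - 1/2 * log|Sigma| - 1/2 * (x - mu)^T Sigma^{-1} (x - mu), one row of x at a time. A minimal usage sketch, assuming numpy, theano and the multiNormInit defined above (together with its module-level imports) are available:

import numpy as np
import theano
import theano.tensor as T

# standard bivariate normal: every point at the mean scores -log(2*pi) ~ -1.8379
logp = multiNormInit(np.zeros(2), np.eye(2))
x = T.matrix('x')  # N x d batch of evaluation points
f = theano.function([x], logp(x))
print(f(np.zeros((3, 2), dtype=theano.config.floatX)))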
Example 8
    def __init__(self, dim, samplingsize, batchsize, name=None):
        super(LinLayer, self).__init__(dim, samplingsize, batchsize, name)

        # define weight mask and weight
        self.scale = (.0002 / self.dim)**.5

        # values setups
        mask = np.triu(np.ones((dim, dim)))
        wn = npr.randn(batchsize, dim, dim) * self.scale / (dim + dim)
        bn = np.zeros((batchsize, dim))
        un = npr.randn(batchsize, dim) * self.scale

        self.mask = utilsT.sharedf(mask)
        self.w = utilsT.sharedf(wn * mask)  # B x d x d
        self.b = utilsT.sharedf(bn)  # B x d
        self.u = utilsT.sharedf(un)  # B x d

        self.wmked = self.w * self.mask  # masked weight
        self.iwdiag = theano.shared(np.arange(dim))  # integer index vector 0..dim-1
        self.wdiag = self.wmked[:, self.iwdiag, self.iwdiag]  # per-batch diagonal of the masked weight

        self.params = [self.w, self.b, self.u]
        self.paramshapes = [(batchsize, dim, dim), (batchsize, dim),
                            (batchsize, dim)]
Example 9
def sharedConst(v, offset=0.):
    # shared array with the same shape as shared variable v, filled with `offset`
    shp = v.shape.eval()
    return utilsT.sharedf(np.ones(shp) * offset)
Example 10
__author__ = 'andy17'

import theano
import network
import network.normFlow as nf
import config
import utils
import utils.mathT as mathT
import utils.theanoGeneral as utilsT
import numpy as np

DIM = 2
SAMPLINGNUM = 10

# defining target model
mean = utilsT.sharedf([2.0, 1.0])
varmat = utilsT.sharedf(np.eye(DIM) / 2)
logTarget = mathT.multiNormInit(mean, varmat)
Example 11
    def __init__(self, dim, name=None):
        super(PermuteLayer, self).__init__(dim, name)
        self.w = utilsT.sharedf(mathZ.permutMat(dim, enforcing=True))
        self.logjaco = utilsT.sharedf(0.)
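
All of the snippets above funnel initial values through utilsT.sharedf. Its definition is not shown in these examples; judging from the usage (scalars, lists and numpy arrays are all accepted, while the integer index vector in Example 8 bypasses it with a plain theano.shared), it presumably amounts to a floatX-casting wrapper around theano.shared. A hypothetical sketch of such a helper:

import numpy as np
import theano

def sharedf(x, name=None):
    # hypothetical helper: cast the value to theano's configured float dtype
    # and wrap it in a shared variable so it can be updated in-place later
    return theano.shared(np.asarray(x, dtype=theano.config.floatX), name=name)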