Example #1
# Langevin dynamics sampler: theta <- theta + eps/2 * score(theta) + sqrt(eps) * N(0, I),
# with a polynomially decaying step size eps_i = lr * (i + 1)^(-0.55).
# The imports and the `t_rng` / `floatX` helpers below are assumptions about the
# surrounding module; the original project defines its own equivalents.
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from tqdm import tqdm

t_rng = RandomStreams(seed=42)

def floatX(x):
    return np.asarray(x, dtype=theano.config.floatX)

def langevin(x0, score_q, lr=1e-2, max_iter=500, progressbar=True, trace=False, **model_params):

    theta = theano.shared(x0)
    i = theano.shared(floatX(0))

    # Decaying step size (Robbins-Monro style schedule).
    stepsize = T.cast(lr * (i + 1)**(-0.55), theano.config.floatX)
    grad = score_q(theta, **model_params)
    # One Langevin update: drift along the score plus Gaussian exploration noise.
    update = stepsize * grad / 2. + T.sqrt(stepsize) * t_rng.normal(size=theta.shape)

    cov_grad = T.sum(update**2, axis=1).mean()  # mean squared update magnitude (unused)

    langevin_step = theano.function([], [], updates=[(theta, theta + update), (i, i + 1)])

    if progressbar:
        progress = tqdm(np.arange(max_iter))
    else:
        progress = np.arange(max_iter)

    xx = []  # optional trace of intermediate samples
    for _ in progress:
        langevin_step()
        if trace:
            xx.append(theta.get_value())

    theta_val = theta.get_value()
    return theta_val, xx
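
A hypothetical usage sketch (not from the source project): drawing samples from a standard 2-D Gaussian, whose score is simply -theta. It reuses the `np` and `floatX` helpers assumed above; `gaussian_score` is made up for illustration.

def gaussian_score(theta):
    # Score of a standard Gaussian: d/dtheta log N(theta; 0, I) = -theta.
    return -theta

x0 = floatX(np.random.randn(1000, 2))   # 1000 parallel chains in 2 dimensions
samples, _ = langevin(x0, gaussian_score, lr=1e-2, max_iter=500, progressbar=False)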
Example #2
File: ops.py Project: drmingle/Foxhound
def op(self, state):
    # Diagonal-Gaussian latent layer: project the input to a mean and a log
    # standard deviation, then produce a latent code Z.
    X = self.l_in.op(state=state)
    self.mu = T.dot(X, self.wmu)
    self.log_sigma = 0.5 * T.dot(X, self.wsigma)
    if state['sample']:
        # A latent code was supplied in the state; pass it through unchanged.
        Z = state['sample']
        # Z = t_rng.normal(self.log_sigma.shape)
    else:
        # Reparameterization trick: Z = mu + sigma * eps with eps ~ N(0, I).
        Z = self.mu + T.exp(self.log_sigma) * t_rng.normal(self.log_sigma.shape)
    return Z
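
A minimal standalone sketch of the reparameterization step used above. The local `t_rng` (a Theano RandomStreams) stands in for the project's module-level generator, which is an assumption here.

import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

t_rng = RandomStreams(seed=42)

mu = T.matrix('mu')
log_sigma = T.matrix('log_sigma')
# Z ~ N(mu, exp(log_sigma)^2), elementwise.
Z = mu + T.exp(log_sigma) * t_rng.normal(mu.shape)
sample = theano.function([mu, log_sigma], Z)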
Example #3
File: ops.py Project: jellchou/Foxhound
def op(self, state):
    # Additive Gaussian noise layer: when state['dropout'] is truthy (training
    # mode), add zero-mean noise with standard deviation `self.scale` to the input.
    X = self.l_in.op(state=state)
    if state['dropout']:
        X += t_rng.normal(X.shape, std=self.scale, dtype=theano.config.floatX)
    return X
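
A standalone sketch of the same additive-noise op, again with a local RandomStreams instance and a noise scale of 0.1 standing in for the layer's `t_rng` and `self.scale` (both assumptions).

import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

t_rng = RandomStreams(seed=0)

X = T.matrix('X')
X_noisy = X + t_rng.normal(X.shape, std=0.1, dtype=theano.config.floatX)
add_noise = theano.function([X], X_noisy)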