import numpy as np
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from tqdm import tqdm

t_rng = RandomStreams(seed=42)

def floatX(x):
    return np.asarray(x, dtype=theano.config.floatX)

def langevin(x0, score_q, lr=1e-2, max_iter=500, progressbar=True, trace=False, **model_params):
    theta = theano.shared(x0)
    i = theano.shared(floatX(0))
    # Polynomially decaying step size, as in stochastic-gradient Langevin dynamics.
    stepsize = T.cast(lr * (i + 1) ** (-0.55), theano.config.floatX)
    grad = score_q(theta, **model_params)
    # Langevin update: half-step drift along the score plus Gaussian noise
    # scaled by sqrt(stepsize).
    update = stepsize * grad / 2. + T.sqrt(stepsize) * t_rng.normal(size=theta.shape)
    cov_grad = T.sum(update ** 2, axis=1).mean()  # diagnostic; not used below
    langevin_step = theano.function([], [], updates=[(theta, theta + update), (i, i + 1)])

    if progressbar:
        progress = tqdm(np.arange(max_iter))
    else:
        progress = np.arange(max_iter)

    xx = []
    for _ in progress:
        langevin_step()
        if trace:
            xx.append(theta.get_value())
    return theta.get_value(), xx
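# A minimal usage sketch, assuming the definitions above: Langevin sampling from
# a standard 2-D Gaussian, whose score is score(x) = -x. `score_gauss` is a
# hypothetical score function introduced here only for illustration.
def score_gauss(theta):
    return -theta  # gradient of log N(0, I) at theta

x0 = floatX(np.random.randn(1000, 2))  # 1000 parallel chains in 2-D
samples, _ = langevin(x0, score_gauss, lr=1e-2, max_iter=500, progressbar=False)
print(samples.mean(axis=0), samples.std(axis=0))  # should approach 0 and 1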
def op(self, state):
    X = self.l_in.op(state=state)
    self.mu = T.dot(X, self.wmu)
    # The linear map predicts the log-variance; halving gives log sigma.
    self.log_sigma = 0.5 * T.dot(X, self.wsigma)
    if state['sample']:
        # An explicit latent sample was passed in through the state dict; use it
        # directly (e.g. when generating rather than training).
        Z = state['sample']
        # Z = t_rng.normal(self.log_sigma.shape)
    else:
        # Reparameterization trick: Z = mu + sigma * eps with eps ~ N(0, I),
        # keeping Z differentiable w.r.t. mu and log_sigma.
        Z = self.mu + T.exp(self.log_sigma) * t_rng.normal(self.log_sigma.shape)
    return Z
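# A self-contained sketch of the reparameterization trick used above, with
# made-up shapes; all names below are illustrative, not part of the layer code.
mu = T.matrix('mu')
log_sigma = T.matrix('log_sigma')
eps = t_rng.normal(log_sigma.shape)  # eps ~ N(0, I)
z = mu + T.exp(log_sigma) * eps      # z ~ N(mu, sigma^2), differentiable in mu and log_sigma
sample_z = theano.function([mu, log_sigma], z)
z_val = sample_z(floatX(np.zeros((4, 2))), floatX(np.zeros((4, 2))))  # draws from N(0, I)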
def op(self, state):
    X = self.l_in.op(state=state)
    if state['dropout']:
        # Additive Gaussian noise with standard deviation self.scale, applied
        # only when the 'dropout' flag is set (i.e. at training time).
        X += t_rng.normal(X.shape, std=self.scale, dtype=theano.config.floatX)
    return X
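# A quick standalone check of the additive-noise op (a sketch; the input shape
# and the `scale` value are assumptions for illustration):
X_in = T.matrix('X_in')
scale = 0.1
X_noisy = X_in + t_rng.normal(X_in.shape, std=scale, dtype=theano.config.floatX)
add_noise = theano.function([X_in], X_noisy)
out = add_noise(floatX(np.zeros((3, 5))))  # zeros perturbed by N(0, scale^2) noise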