def log_likelihood_lower_bound(self, x):
    """Build the variational lower bound on log p(x).

    The bound is the Bernoulli reconstruction log-likelihood of *x*
    under the final canvas state minus the KL divergence between the
    approximate posterior q(z|x) and the Gaussian prior, accumulated
    over all T recurrent steps.

    Parameters
    ----------
    x : tensor
        Input batch; presumably shaped (batch, features) — confirm
        against callers.

    Returns
    -------
    tensor
        Per-example lower bound, annotated with auxiliary variables
        for the KL and reconstruction terms.
    """
    # Replicate the input across the T recurrent steps expected by apply().
    replicated_x = tensor.tile(x.dimshuffle('x', 0, 1), (self.T, 1, 1))
    outputs = self.apply(replicated_x)
    canvas_states = outputs[0]
    mu_phi, log_sigma_phi = outputs[-2], outputs[-1]

    # Broadcast the (per-latent-dim) prior parameters over time and batch.
    mu_theta = self.prior_mu.dimshuffle('x', 'x', 0)
    log_sigma_theta = self.prior_log_sigma.dimshuffle('x', 'x', 0)

    # Closed-form KL between diagonal Gaussians, summed over latent
    # dimensions (axis 2) and time steps (axis 0).
    kl_term = (
        log_sigma_theta - log_sigma_phi
        + 0.5 * (tensor.exp(2 * log_sigma_phi) + (mu_phi - mu_theta) ** 2)
        / tensor.exp(2 * log_sigma_theta)
        - 0.5
    ).sum(axis=2).sum(axis=0)
    kl_term.name = 'kl_term'

    # Bernoulli log-likelihood of x given the final canvas logits,
    # expressed with softplus for numerical stability.
    final_canvas = canvas_states[-1]
    reconstruction_term = -(
        x * tensor.nnet.softplus(-final_canvas)
        + (1 - x) * tensor.nnet.softplus(final_canvas)
    ).sum(axis=1)
    reconstruction_term.name = 'reconstruction_term'

    log_likelihood_lower_bound = reconstruction_term - kl_term
    log_likelihood_lower_bound.name = 'log_likelihood_lower_bound'

    # Expose both terms as auxiliary variables for monitoring.
    annotation = Annotation()
    annotation.add_auxiliary_variable(kl_term, name='kl_term')
    annotation.add_auxiliary_variable(-reconstruction_term,
                                      name='reconstruction_term')
    add_annotation(log_likelihood_lower_bound, annotation)

    return log_likelihood_lower_bound
def annotate_update(self, update, tag_to):
    """Record a set of updates on *tag_to* via a fresh annotation.

    Parameters
    ----------
    update : iterable of (variable, expression) pairs
        Updates to register; each pair maps a shared variable to its
        new value.
    tag_to : tensor
        Variable onto which the annotation is attached.
    """
    annotation = Annotation()
    for variable, new_value in update:
        annotation.updates[variable] = new_value
    add_annotation(tag_to, annotation)