def log_prob(self, value):
    """Return the Multinomial log probability mass evaluated at ``value``.

    :param value: array of per-category counts; the last dimension indexes
        categories and is summed out (the event dimension).
    :return: log pmf with the event dimension reduced.
    """
    if self._validate_args:
        self._validate_sample(value)
    # log normalizer: n * log(sum_k exp(logit_k)) - log(n!)
    log_normalizer = self.total_count * logsumexp(self.logits, axis=-1) \
        - gammaln(self.total_count + 1)
    # unnormalized term: sum_k (x_k * logit_k - log(x_k!))
    unnormalized = np.sum(value * self.logits - gammaln(value + 1), axis=-1)
    return unnormalized - log_normalizer
def semi_supervised_hmm(transition_prior, emission_prior, supervised_categories,
                        supervised_words, unsupervised_words):
    """NumPyro model for a semi-supervised hidden Markov model.

    Places Dirichlet priors on the transition and emission rows, conditions on
    the labeled (category, word) sequences, and scores the unlabeled word
    sequence by marginalizing over hidden states with the forward algorithm.
    """
    n_categories = transition_prior.shape[0]
    n_words = emission_prior.shape[0]
    transition_prob = numpyro.sample(
        'transition_prob',
        dist.Dirichlet(np.broadcast_to(transition_prior,
                                       (n_categories, n_categories))))
    emission_prob = numpyro.sample(
        'emission_prob',
        dist.Dirichlet(np.broadcast_to(emission_prior,
                                       (n_categories, n_words))))

    # Supervised data: each category transitions from its predecessor, so no
    # assumption is made about the first supervised category — effectively a
    # flat/uniform prior on it.
    numpyro.sample('supervised_categories',
                   dist.Categorical(transition_prob[supervised_categories[:-1]]),
                   obs=supervised_categories[1:])
    numpyro.sample('supervised_words',
                   dist.Categorical(emission_prob[supervised_categories]),
                   obs=supervised_words)

    # Unsupervised data: run the forward algorithm in log space.
    trans_lp = np.log(transition_prob)
    emis_lp = np.log(emission_prob)
    first_word_lp = emis_lp[:, unsupervised_words[0]]
    forward_lp = forward_log_prob(first_word_lp, unsupervised_words[1:],
                                  trans_lp, emis_lp)
    marginal_lp = logsumexp(forward_lp, axis=0, keepdims=True)
    # Inject the marginal log likelihood into the potential function.
    numpyro.factor('forward_log_prob', marginal_lp)
def log_prob(self, value):
    """Return the Categorical log pmf at ``value`` (integer category indices).

    :param value: integer array of category indices.
    :return: log probability of each index, gathered from the normalized logits.
    """
    if self._validate_args:
        self._validate_sample(value)
    # Normalize logits into log-probabilities along the category axis.
    log_pmf = self.logits - logsumexp(self.logits, axis=-1, keepdims=True)
    idx = np.expand_dims(value, -1)
    idx, log_pmf = promote_shapes(idx, log_pmf)
    # After broadcasting, keep a single gather index per batch element.
    idx = idx[..., :1]
    return np.take_along_axis(log_pmf, idx, -1)[..., 0]
def predict(model, at_bats, hits, z, rng, player_names, train=True):
    """Sample predictions from the model conditioned on posterior draws ``z``
    and print them; on held-out data also print the posterior log density.

    :param model: numpyro model function taking (at_bats[, hits]).
    :param z: dict of posterior samples substituted into the model.
    :param rng: PRNG key used to seed the model.
    :param train: selects the header label and whether the posterior log
        density of the held-out data is computed.
    """
    header = model.__name__ + (' - TRAIN' if train else ' - TEST')
    conditioned = substitute(seed(model, rng), z)
    tr = trace(conditioned).get_trace(at_bats)
    predictions = tr['obs']['value']
    print_results('=' * 30 + header + '=' * 30,
                  predictions, player_names, at_bats, hits)
    if not train:
        # Re-substitute z and trace with the observed hits to score them.
        conditioned = substitute(conditioned, z)
        tr = trace(conditioned).get_trace(at_bats, hits)
        log_joint = 0.
        for site in tr.values():
            site_lp = site['fn'].log_prob(site['value'])
            # Flatten all but the sample dimension and sum per draw.
            log_joint = log_joint + np.sum(
                site_lp.reshape(site_lp.shape[:1] + (-1,)), -1)
        # Monte Carlo estimate: log mean_s exp(log_joint_s).
        log_post_density = logsumexp(log_joint) - np.log(np.shape(log_joint)[0])
        print('\nPosterior log density: {:.2f}\n'.format(log_post_density))
def dual_moon_pe(x):
    """Potential energy (negative unnormalized log density) of the
    'dual moon' distribution: a ring of radius 2 crossed with a mixture of
    two Gaussian lobes at x0 = +/- 2.

    :param x: points with the coordinate dimension last.
    :return: potential energy, one scalar per batch element.
    """
    ring_term = 0.5 * ((np.linalg.norm(x, axis=-1) - 2) / 0.4) ** 2
    lobe_terms = -0.5 * ((x[..., :1] + np.array([-2., 2.])) / 0.6) ** 2
    return ring_term - logsumexp(lobe_terms, axis=-1)
def forward_one_step(prev_log_prob, curr_word, transition_log_prob,
                     emission_log_prob):
    """One step of the HMM forward algorithm in log space.

    :param prev_log_prob: (K,) forward log probabilities of the previous step.
    :param curr_word: index of the currently observed word.
    :param transition_log_prob: (K, K) log transition matrix, rows = prev state.
    :param emission_log_prob: (K, W) log emission matrix.
    :return: (K,) forward log probabilities after absorbing ``curr_word``.
    """
    # joint[i, j] = prev[i] + log P(state j | state i) + log P(word | state j)
    joint = np.expand_dims(prev_log_prob, axis=1) + transition_log_prob
    joint = joint + emission_log_prob[:, curr_word]
    # Marginalize out the previous state i.
    return logsumexp(joint, axis=0)
def log_prob(self, x):
    """Unnormalized log density of the dual moon target, i.e. the negated
    potential energy: a ring of radius 2 times a two-lobe Gaussian mixture
    along the first coordinate.

    :param x: points with the coordinate dimension last.
    :return: unnormalized log density per batch element.
    """
    ring_term = 0.5 * ((np.linalg.norm(x, axis=-1) - 2) / 0.4) ** 2
    lobe_terms = -0.5 * ((x[..., :1] + np.array([-2., 2.])) / 0.6) ** 2
    # log density = -(ring - logsumexp(lobes)); rewritten as a difference.
    return logsumexp(lobe_terms, axis=-1) - ring_term