Example #1
def _get_entropy(self, input_values={}):
    # Use the distribution's closed-form entropy when it is available,
    # reducing the per-event entries from dimension 2 onward; otherwise
    # fall back to the negative log-probability as an entropy estimate.
    if self.distribution.has_analytic_entropy:
        entropy_array = self._get_statistic(
            query=lambda dist, parameters: dist.get_entropy(**parameters),
            input_values=input_values)
        return sum_from_dim(entropy_array, 2)
    else:
        return -self.calculate_log_probability(input_values, include_parents=False)
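
This example (and the ones below) relies on sum_from_dim from brancher's utilities module. Based on how it is called here, it appears to sum a tensor over every dimension from the given index onward, keeping the leading (sample/batch) dimensions. A minimal sketch of that assumed behavior in PyTorch:

import torch

def sum_from_dim_sketch(tensor, dim_index):
    # Assumed semantics: collapse every dimension from dim_index to the last,
    # leaving the leading dimensions (e.g. samples and batch) untouched.
    return tensor.sum(dim=tuple(range(dim_index, tensor.dim())))

entropy_array = torch.ones(30, 4, 5)                 # (samples, batch, event)
print(sum_from_dim_sketch(entropy_array, 2).shape)   # torch.Size([30, 4])
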
Example #2
    def __init__(
            self,
            variational_samplers,
            particles,
            cost_function=None,
            deviation_statistics=None,
            biased=False,
            number_post_samples=20000,
            gradient_estimator=gradient_estimators.PathwiseDerivativeEstimator
    ):
        self.gradient_estimator = gradient_estimator
        self.learnable_posterior = True
        self.learnable_model = False  # TODO: implement later
        self.needs_sampler = True
        self.learnable_sampler = True
        self.biased = biased
        self.number_post_samples = number_post_samples
        if cost_function:
            self.cost_function = cost_function
        else:
            self.cost_function = lambda x, y: sum_from_dim(
                (x - y)**2, dim_index=1)
        if deviation_statistics:
            self.deviation_statistics = deviation_statistics
        else:
            self.deviation_statistics = lambda lst: sum(lst)

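        # Deviation statistic between a dictionary of sampled values and each
        # particle: reassign the particle's samples to the sampler's variables,
        # apply the cost function variable by variable, and aggregate.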
        def model_statistics(dic):
            num_samples = list(dic.values())[0].shape[0]
            reassigned_particles = [
                reassign_samples(p._get_sample(num_samples),
                                 source_model=p,
                                 target_model=dic) for p in particles
            ]

            statistics = [
                self.deviation_statistics([
                    self.cost_function(value_pair[0].detach().numpy(),
                                       value_pair[1].detach().numpy())
                    for var, value_pair in zip_dict(dic, p).items()
                ]) for p in reassigned_particles
            ]
            return np.array(statistics).transpose()

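        # One truncation rule per particle: rule k accepts a sample only when
        # particle k is the closest one (argmin of the deviation statistics).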
        truncation_rules = [
            lambda a, idx=index: True if (idx == np.argmin(a)) else False
            for index in range(len(particles))
        ]

        self.sampler_model = [
            truncate_model(model=sampler,
                           truncation_rule=rule,
                           model_statistics=model_statistics)
            for sampler, rule in zip(variational_samplers, truncation_rules)
        ]
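
A detail worth noting above: each lambda in truncation_rules binds the loop index through the default argument idx=index. Without that, Python's late binding would make every rule compare against the last index. A small, self-contained illustration (names are illustrative, not part of the library):

import numpy as np

scores = np.array([0.3, 0.1, 0.9])   # rule k should fire only when index k holds the minimum

# Late binding: every lambda closes over i and sees its final value (2).
late = [lambda a: i == np.argmin(a) for i in range(3)]
print([f(scores) for f in late])     # [False, False, False]

# Default-argument binding, as used above: each lambda keeps its own index.
bound = [lambda a, idx=i: idx == np.argmin(a) for i in range(3)]
print([f(scores) for f in bound])    # [False, True, False]
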
Example #3
def _get_entropy(self, input_values={}, for_gradient=True):
    # For an untransformed model, the entropy is the sum of the per-variable
    # entropies, each reduced from dimension 2 onward; otherwise fall back to
    # the negative log-probability.
    if not self.is_transformed:
        entropy_array = {
            var: var._get_entropy(input_values)
            for var in self.variables
        }
        return sum([
            sum_from_dim(var_ent, 2) for var_ent in entropy_array.values()
        ])
    else:
        return -self.calculate_log_probability(input_values,
                                               for_gradient=for_gradient)
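
In this example the built-in sum adds the per-variable entropy tensors elementwise (Python's sum simply folds + over the list, starting from 0, and 0 + tensor is a valid torch operation), so the result is a single tensor of total entropies. A quick illustration:

import torch

per_variable = [torch.ones(10, 3), 2 * torch.ones(10, 3)]
total = sum(per_variable)        # elementwise: 0 + t1 + t2
print(total.shape)               # torch.Size([10, 3])
print(total[0])                  # tensor([3., 3., 3.])
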
Example #4
import numpy as np
import torch
import chainer

# `utilities` is the module under test (assumed import path); it provides the
# torch helpers together with their *_chainer counterparts.
from brancher import utilities

# `x` and `ns` (the input array and the number of samples) are defined in the
# part of the original script that precedes this excerpt.
xt = utilities.tile_parameter(torch.tensor(x), number_samples=ns)
xc = utilities.tile_parameter_chainer(chainer.Variable(x), number_samples=ns)

equal_tensor_variable(xt, xc)

## get_diagonal: torch reshape
x = np.random.normal(size=(1, 20, 10, 20))
xt = utilities.get_diagonal(torch.tensor(x))
xc = utilities.get_diagonal_chainer(chainer.Variable(x))

equal_tensor_variable(xt, xc)

## sum_from_dim: torch sum
x = np.random.normal(size=(20, 5, 4, 2))
dim_index = 1
xt = utilities.sum_from_dim(torch.tensor(x), dim_index)
xc = utilities.sum_from_dim_chainer(chainer.Variable(x), dim_index)

equal_tensor_variable(xt, xc)

## partial_broadcast
xl = []
for i in range(1, 3):
    xl.append(np.random.normal(size=(20, i, 10, 3)))

xt = utilities.partial_broadcast(*[torch.tensor(x) for x in xl])
xc = utilities.partial_broadcast_chainer(*[chainer.Variable(x) for x in xl])

print([i.shape for i in xl])
print([i.numpy().shape for i in xt])
print([i.shape for i in xc])
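
The helper equal_tensor_variable used throughout this script is not shown in the excerpt. A hypothetical stand-in that matches how it is called (checking that the torch result and the chainer result agree numerically) might look like this:

import numpy as np

def equal_tensor_variable(tensor, variable, tol=1e-7):
    # Hypothetical helper: assert that a torch tensor and a chainer Variable
    # hold (approximately) the same values.
    assert np.allclose(tensor.detach().numpy(), variable.data, atol=tol)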