Example no. 1
def utility_term_Jensen(us, data_mask):
    # Collapse the two leading sample dimensions into one.
    us = flatten_first_two_dims(us)
    # Average log-utilities over all samples (EPS_JENSEN guards against log(0)).
    point_utility_term = (us + EPS_JENSEN).log().mean(0)
    assert point_utility_term.shape == data_mask.shape, "point_utility_term.shape=%s does not match data_mask.shape=%s" % (
        point_utility_term.shape, data_mask.shape)
    # Sum the per-point terms only over entries selected by data_mask.
    return torch.masked_select(point_utility_term, data_mask).sum()
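All of the examples on this page call the shared helper flatten_first_two_dims, whose definition is not shown here. The sketch below is an assumed minimal implementation (the real helper may differ): it merges the first two sample dimensions of a PyTorch tensor so that a single reduction such as .mean(0) runs over all samples at once.

import torch

def flatten_first_two_dims(t):
    # Assumed sketch: merge the first two dimensions, (A, B, ...) -> (A*B, ...).
    return t.reshape(t.shape[0] * t.shape[1], *t.shape[2:])

# For example, a (nsamples_y, nsamples_theta, n_points) tensor becomes
# (nsamples_y * nsamples_theta, n_points), so .mean(0) averages over both
# sampling dimensions, as in utility_term_Jensen above.
x = torch.randn(4, 3, 5)
assert flatten_first_two_dims(x).shape == (12, 5)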
Example no. 2
def sample_predictive_y(qw, qz, nsamples_theta, nsamples_y):
    """ Returns a tensor with samples (nsamples_y x nsamples_theta).
        Flattens the first two dimensions
        (samples of y for different thetas) from sample_predictive_y0.
    """
    return flatten_first_two_dims(
        sample_predictive_y0(qw, qz, nsamples_theta, nsamples_y))
Example no. 3
        def optimize_h_with_bayes_estimator(*args):
            """Assumes that optimal_h_bayes_estimator calculates the gain-optimal value analytically."""
            # Sample predictive ys and collapse the (nsamples_y, nsamples_theta) dimensions into one.
            ys = flatten_first_two_dims(
                self.sample_predictive_y0(
                    *args,
                    nsamples_theta=self.H_NSAMPLES_UTILITY_TERM_THETA,
                    nsamples_y=self.H_NSAMPLES_UTILITY_TERM_Y))
            h = self.optimal_h_bayes_estimator(ys)

            # Warn at most three times if the estimator returns invalid decisions.
            if optimize_h_with_bayes_estimator.counter < 3 and not is_valid(h):
                print(
                    "WARNING: your optimal_h_bayes_estimator=%s returns invalid decisions!"
                    % self.optimal_h_bayes_estimator.__name__)
            optimize_h_with_bayes_estimator.counter += 1
            return h

        # The warning counter must be initialized before the first call.
        optimize_h_with_bayes_estimator.counter = 0
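Storing the counter as an attribute on the function object itself is a lightweight way to rate-limit the warning without adding state to the enclosing class; the attribute just has to be set once (e.g. to 0) right after the inner function is defined, as shown above.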