def forward(self, t):
        # t, yt: (n,)
        n = len(t)
        dt = t[1] - t[0]
        nzero = torch.zeros(n)

        # sample the prior
        a = pyro.sample("a", Uniform(*self.a_bounds))
        log_lscale = pyro.sample("log_lscale",
                                 Uniform(*self.log_lscale_bounds))
        lscale = torch.exp(log_lscale)
        log_sigma = pyro.sample("log_sigma", Uniform(*self.logs_bounds))
        tdist = t.unsqueeze(-1) - t  # (n,n)
        b_sigma = pyro.sample("b_sigma", Uniform(*self.b_bounds))
        b_cov = b_sigma * b_sigma * torch.exp(
            -tdist * tdist / (2 * lscale * lscale)) + torch.eye(n) * 1e-5
        b = pyro.sample("b", MultivariateNormal(nzero, b_cov))

        # calculate the rate
        int_bdt = torch.cumsum(b, dim=0) * dt
        mu = a + int_bdt  # (n,)

        # simulate the observation
        logysim = pyro.sample("logyt", Normal(mu, torch.exp(log_sigma)))

        return logysim
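The covariance assembled above is a standard squared-exponential (RBF) Gaussian-process kernel plus a small diagonal jitter. A minimal standalone sketch of that construction, with made-up values for `b_sigma` and `lscale` (in the model both are drawn from their priors):

import torch
from torch.distributions import MultivariateNormal

t = torch.linspace(0.0, 1.0, 50)
b_sigma, lscale = 1.0, 0.2
tdist = t.unsqueeze(-1) - t                                        # (n, n) pairwise time differences
b_cov = b_sigma ** 2 * torch.exp(-tdist ** 2 / (2 * lscale ** 2))  # RBF kernel
b_cov = b_cov + torch.eye(len(t)) * 1e-5                           # jitter keeps the Cholesky stable
b = MultivariateNormal(torch.zeros(len(t)), b_cov).sample()        # one draw of the GP drift b(t)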
Example #2
def test_add():
    X = Uniform(0, 1).rv  # (0, 1)
    X = X + 1  # (1, 2)
    X = 1 + X  # (2, 3)
    X += 1  # (3, 4)
    x = X.dist.sample([N_SAMPLES])
    assert ((3 <= x) & (x <= 4)).all().item()
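This and the following `test_*` snippets exercise Pyro's random-variable arithmetic (`pyro.contrib.randomvariable`): the `.rv` property wraps a distribution so ordinary Python operators build transformed distributions, recovered via `.dist`. A minimal sketch of the same idea (assuming `N_SAMPLES` as in the tests):

from pyro.distributions import Uniform

Y = (2 * Uniform(0, 1).rv + 1).dist  # a transformed distribution on (1, 3)
y = Y.sample([N_SAMPLES])
assert ((1 <= y) & (y <= 3)).all().item()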
Example #3
def test_subtract():
    X = Uniform(0, 1).rv  # (0, 1)
    X = 1 - X  # (0, 1)
    X = X - 1  # (-1, 0)
    X -= 1  # (-2, -1)
    x = X.dist.sample([N_SAMPLES])
    assert ((-2 <= x) & (x <= -1)).all().item()
Example #4
    def _model(x_data, y_data):
        # weight and bias priors
        w_prior = Normal(torch.zeros(1, 1), torch.ones(1, 1)).to_event(1)
        b_prior = Normal(10 * torch.ones(1, 1),
                         10 * torch.ones(1, 1)).to_event(1)

        priors = {'linear.weight': w_prior, 'linear.bias': b_prior}

        scale = pyro.sample('sigma', Uniform(0, 2000))

        lifted_module = pyro.random_module("module", regression_model, priors)
        # sample a nn (which also samples w and b)
        lifted_reg_model = lifted_module()

        with pyro.plate("map", len(x_data)):
            # run the nn forward on data
            prediction_mean = lifted_reg_model(x_data).squeeze(
                -1)  # shape: (256,)

            # condition on the observed data
            res = pyro.sample("obs",
                              Normal(prediction_mean, scale),
                              obs=y_data)  # shape (256, 1)

            return prediction_mean
Example #5
    def _model(x_data, y_data):

        fc1w_prior = Normal(torch.zeros(2, 3), torch.ones(2, 3)).to_event()
        fc1b_prior = Normal(0.25 * torch.ones(2),
                            0.25 * torch.ones(2)).to_event()

        outw_prior = Normal(torch.zeros(1, 2), torch.ones(1, 2)).to_event()
        outb_prior = Normal(0.25 * torch.ones(1),
                            0.25 * torch.ones(1)).to_event()

        priors = {
            'fc1.weight': fc1w_prior,
            'fc1.bias': fc1b_prior,
            'out.weight': outw_prior,
            'out.bias': outb_prior
        }

        scale = pyro.sample('sigma', Uniform(0, 20))

        # lift module parameters to random variables sampled from the priors
        lifted_module = pyro.random_module("module", nn_model, priors)
        # sample a regressor (which also samples w and b)
        lifted_reg_model = lifted_module()

        with pyro.plate("map", len(x_data)):

            # run the nn forward on data
            prediction_mean = lifted_reg_model(x_data).squeeze(-1)

            # condition on the observed data
            pyro.sample("obs", Normal(prediction_mean, scale), obs=y_data)

            return prediction_mean
Example #6
def pyromodel(x, y):
    priors = {}
    for name, par in model.named_parameters():
        priors[name] = dist.Normal(torch.zeros(*par.shape),
                                   50 * torch.ones(*par.shape)).independent(
                                       par.dim())

        #print("batch shape:", priors[name].batch_shape)
        #print("event shape:", priors[name].event_shape)
        #print("event dim:", priors[name].event_dim)

    bayesian_model = pyro.random_module('bayesian_model', model, priors)
    sampled_model = bayesian_model()
    sigma = pyro.sample('sigma', Uniform(0, 50))
    with pyro.iarange("map", len(x)):
        prediction_mean = sampled_model(x)
        logging.debug(f"prediction_mean: {prediction_mean.shape}")

        if y is not None:
            logging.debug(f"y_data: {y.shape}")

        d_dist = Normal(prediction_mean, sigma).to_event(1)

        logging.debug(f"batch shape: {d_dist.batch_shape}")
        logging.debug(f"event shape: {d_dist.event_shape}")
        logging.debug(f"event dim: {d_dist.event_dim}")

        pyro.sample("obs", d_dist, obs=y)

        return prediction_mean
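A note on the API: `pyro.iarange` is the pre-1.0 spelling of `pyro.plate`; on current Pyro releases this snippet (and Example #30 below, which mirrors it) needs `pyro.plate` instead.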
Example #7
def guide_t0(data):
    # T-1 alpha params for beta sampling
    kappa = pyro.param('kappa',
                       lambda: Uniform(0, 2).sample([T - 1]),
                       constraint=constraints.positive)

    # concentration params for q_theta #[T,C]
    tau = pyro.param('tau',
                     lambda: MultivariateNormal(0.5 * torch.ones(C), 0.25 *
                                                torch.eye(C)).sample([T]),
                     constraint=constraints.unit_interval)

    # N params for categorical dist; topic weights; symmetric prior
    phi = pyro.param('phi',
                     lambda: Dirichlet(1 / T * torch.ones(T)).sample([N]),
                     constraint=constraints.simplex)

    with pyro.plate("beta_plate", T - 1):
        q_beta = 0
        q_beta += pyro.sample("beta", Beta(torch.ones(T - 1), kappa))
        # q_beta *= 1

    # sample probs for multinomial distributions
    with pyro.plate("theta_plate", T):
        # outputs multinomial probabilities for each topic
        q_theta = 0
        q_theta += pyro.sample("theta", Dirichlet(tau))
        # q_theta *= 1

    with pyro.plate("data", N):
        z = 0
        z += pyro.sample("z", Categorical(phi))
Example #8
    def _traces(self, *args, **kwargs):
        """
        Yield (trace, log-weight) pairs from a Metropolis-Hastings chain.
        """
        # initialize traces with a draw from the prior
        old_model_trace = poutine.trace(self.model)(*args, **kwargs)
        traces = []
        t = 0
        i = 0
        while t < self.burn + self.lag * self.samples:
            i += 1
            # q(z' | z)
            new_guide_trace = poutine.block(poutine.trace(self.model))(
                old_model_trace, *args, **kwargs)
            # p(x, z')
            new_model_trace = poutine.trace(
                poutine.replay(self.model, new_guide_trace))(*args, **kwargs)
            # q(z | z')
            old_guide_trace = poutine.block(
                poutine.trace(poutine.replay(self.model, old_model_trace)))(
                    new_model_trace, *args, **kwargs)
            # p(x, z') q(z' | z) / p(x, z) q(z | z')
            logr = new_model_trace.log_pdf() + new_guide_trace.log_pdf() - \
                old_model_trace.log_pdf() - old_guide_trace.log_pdf()
            rnd = pyro.sample("mh_step_{}".format(i),
                              Uniform(torch.zeros(1), torch.ones(1)))

            if torch.log(rnd).data[0] < logr.data[0]:
                # accept
                t += 1
                old_model_trace = new_model_trace
                if t <= self.burn or (t > self.burn and t % self.lag == 0):
                    yield (new_model_trace, new_model_trace.log_pdf())
Example #9
def model(x_data, y_data):

    fc1w_prior = Normal(loc=torch.zeros_like(net.fc1.weight), scale=torch.ones_like(net.fc1.weight))
    fc1b_prior = Normal(loc=torch.zeros_like(net.fc1.bias), scale=torch.ones_like(net.fc1.bias))

    outw_prior = Normal(loc=torch.zeros_like(net.out.weight), scale=torch.ones_like(net.out.weight))
    outb_prior = Normal(loc=torch.zeros_like(net.out.bias), scale=torch.ones_like(net.out.bias))

    priors = {'fc1.weight': fc1w_prior, 'fc1.bias': fc1b_prior, 'out.weight': outw_prior, 'out.bias': outb_prior}

    scale = pyro.sample("sigma", Uniform(0., 10.))

    # lift module parameters to random variables sampled from the priors
    lifted_module = pyro.random_module("module", net, priors)
    # sample a regressor (which also samples w and b)
    lifted_reg_model = lifted_module()

    # classification alternative:
    # lhat = log_softmax(lifted_reg_model(x_data))
    # pyro.sample("obs", Categorical(logits=lhat), obs=y_data)

    # run the regressor forward conditioned on inputs
    prediction_mean = lifted_reg_model(x_data).squeeze(-1)
    pyro.sample("obs", Normal(prediction_mean, scale),
                obs=y_data)
    return prediction_mean
Example #10
def model(x_data, y_data):
    # weight and bias priors
    fc1w_prior = Normal(torch.zeros(1, 2), torch.ones(1, 2)).to_event(1)
    fc1b_prior = Normal(torch.tensor([[8.]]),
                        torch.tensor([[1000.]])).to_event(1)
    outw_prior = Normal(loc=torch.zeros_like(net.out.weight),
                        scale=torch.ones_like(net.out.weight))
    outb_prior = Normal(loc=torch.zeros_like(net.out.bias),
                        scale=torch.ones_like(net.out.bias))
    #outw_prior = Normal(loc=outw_mu_param, scale=outw_sigma_param).independent(1)
    #f_prior = Normal(0., 1.)
    #priors = {'linear.weight': w_prior, 'linear.bias': b_prior, 'factor': f_prior}
    priors = {
        'fc1.weight': fc1w_prior,
        'fc1.bias': fc1b_prior,
        'out.weight': outw_prior,
        'out.bias': outb_prior
    }
    scale = pyro.sample("sigma", Uniform(0., 10.))
    # lift module parameters to random variables sampled from the priors
    lifted_module = pyro.random_module("module", net, priors)
    # sample a nn (which also samples w and b)
    lifted_reg_model = lifted_module()

    with pyro.plate("map", len(x_data)):
        # run the nn forward on data
        prediction_mean = lifted_reg_model(x_data).squeeze(-1)
        # condition on the observed data
        pyro.sample("obs", Normal(prediction_mean, scale), obs=y_data)
        return prediction_mean
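These `model` functions are typically paired with a guide and fit by stochastic variational inference. A hypothetical training loop for the model above; the `AutoDiagonalNormal` guide, learning rate, and step count are assumptions, not part of the original example:

import pyro
from pyro.infer import SVI, Trace_ELBO
from pyro.infer.autoguide import AutoDiagonalNormal
from pyro.optim import Adam

pyro.clear_param_store()
guide = AutoDiagonalNormal(model)
svi = SVI(model, guide, Adam({"lr": 0.01}), loss=Trace_ELBO())
for step in range(1000):
    loss = svi.step(x_data, y_data)  # one gradient step on the ELBO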
Example #11
def model(x_data, y_data, regression_model):
    p = x_data.shape[1]
    # weight and bias priors
    # w_prior = Normal(torch.zeros(1, 2), torch.ones(1, 2)).to_event(1)
    # b_prior = Normal(torch.tensor([[8.]]), torch.tensor([[1000.]])).to_event(1)
    w_prior = Normal(torch.zeros(1, p), torch.ones(1, p)).to_event(1)
    b_prior = Normal(torch.tensor([[1.]]), torch.tensor([[10.]])).to_event(1)

    f_prior = Normal(0., 1.)

    priors = {
        'linear.weight': w_prior,
        'linear.bias': b_prior,
        'factor': f_prior
    }

    scale = pyro.sample("sigma", Uniform(0., 10.))

    # lift module parameters to random variables sampled from the priors
    lifted_module = pyro.random_module("module", regression_model, priors)
    # sample a nn (which also samples w and b)
    lifted_reg_model = lifted_module()
    with pyro.plate("map", len(x_data)):
        # run the nn forward on data
        prediction_mean = lifted_reg_model(x_data).squeeze(-1)
        # condition on the observed data
        pyro.sample("obs", Normal(prediction_mean, scale), obs=y_data)
        return prediction_mean
Example #12
    def model(self, features, target):
        def normal_prior(x):
            return Normal(torch.zeros_like(x),
                          torch.ones_like(x)).to_event(x.dim())

        self.priors = {}

        for i in range(len(self.net.hidden_sizes)):
            self.priors['h' + str(i) + '.weight'] = normal_prior(
                getattr(self.net, 'h' + str(i)).weight)
            self.priors['h' + str(i) + '.bias'] = normal_prior(
                getattr(self.net, 'h' + str(i)).bias)

        self.priors['out' + '.weight'] = normal_prior(self.net.out.weight)
        self.priors['out' + '.bias'] = normal_prior(self.net.out.bias)

        # lift module parameters to random variables sampled from the priors
        lifted_module = pyro.random_module("module", self.net, self.priors)
        # sample a regressor (which also samples w and b)
        model_sample = lifted_module()

        out_sigma = pyro.sample("sigma", Uniform(0., 10.))

        # precision = pyro.sample("precision", Uniform(0., 10.))
        # out_sigma = 1 / precision

        with pyro.plate("data", len(target)):

            target_mean = model_sample(features).squeeze(-1)
            # target is not one-hot encoded
            pyro.sample("obs", Normal(target_mean, out_sigma), obs=target)

            return target_mean
Example #13
def model(is_cont_africa, ruggedness, log_gdp):
    a = pyro.sample("a", Normal(8., 1000.))
    b_a = pyro.sample("bA", Normal(0., 1.))
    b_r = pyro.sample("bR", Normal(0., 1.))
    b_ar = pyro.sample("bAR", Normal(0., 1.))
    sigma = pyro.sample("sigma", Uniform(0., 10.))
    mean = a + b_a * is_cont_africa + b_r * ruggedness + b_ar * is_cont_africa * ruggedness 
    with pyro.plate("data", 170):
        pyro.sample("obs", Normal(mean, sigma), obs=log_gdp)
Example #14
    def sample(self):
        # TODO: write a better implementation
        # draw one Uniform(low, high) sample per entry of condition_matrix,
        # where condition_matrix[i, j] holds the (low, high) pair
        f = lambda a: Uniform(a[0], a[1]).sample()
        rows, cols = self.condition_matrix.size()[:2]
        sample = torch.zeros((rows, cols))
        for i in range(rows):
            for j in range(cols):
                sample[i, j] = f(self.condition_matrix[i, j])
        return sample
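The TODO above can be addressed with a single batched draw, since `Uniform` broadcasts over tensor arguments. A sketch, assuming `condition_matrix` has shape (rows, cols, 2) with each (low, high) pair in the last dimension:

def sample(self):
    low = self.condition_matrix[..., 0]
    high = self.condition_matrix[..., 1]
    return Uniform(low, high).sample()  # one draw per entry, shape (rows, cols)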
Example #15
def test_tensor_ops():
    pi = 3.141592654
    X = Uniform(0, 1).expand([5, 5]).rv
    a = tt([[1, 2, 3, 4, 5]])
    b = a.T
    X = abs(pi*(-X + a - 3*b))
    x = X.dist.sample()
    assert x.shape == (5, 5)
    assert (x >= 0).all().item()
Example #16
def test_clippedadam_lrd(lrd):
    x1 = torch.tensor(0.0, requires_grad=True)
    orig_lr = 1.0
    opt_ca = optim.clipped_adam.ClippedAdam(params=[x1], lr=orig_lr, lrd=lrd)
    for step in range(3):
        g = Uniform(-5.0, 5.0).sample()
        x1.backward(g)
        opt_ca.step()
        assert opt_ca.param_groups[0]["lr"] == orig_lr * lrd**(step + 1)
Example #17
    def survives(self, t, λ, μ, ρ):
        # lineage dies at rate μ; if it reaches time 0, it is sampled with probability ρ
        t_end = t - Exponential(μ).sample()
        if t_end <= 0:
            if Bernoulli(ρ).sample():
                return True
            t_end = 0
        # each birth (rate λ over the lifespan) spawns an independent lineage at a uniform time
        for i in range(int(Poisson(λ * (t - t_end)).sample())):
            τ = Uniform(t_end, t).sample()
            if self.survives(τ, λ, μ, ρ):
                return True
        return False
Example #18
def fully_pooled(at_bats):
    """
    Number of hits in $K$ at bats for each player has a Binomial
    distribution with a common probability of success, $\phi$.

    :param (torch.Tensor) at_bats: Number of at bats for each player.
    :return: Number of hits predicted by the model.
    """
    phi_prior = Uniform(at_bats.new_tensor(0), at_bats.new_tensor(1))
    phi = pyro.sample("phi", phi_prior)
    return pyro.sample("obs", Binomial(at_bats, phi))
Example #19
def not_pooled(at_bats):
    """
    Number of hits in $K$ at bats for each player has a Binomial
    distribution with independent probability of success, $\phi_i$.

    :param (torch.Tensor) at_bats: Number of at bats for each player.
    :return: Number of hits predicted by the model.
    """
    num_players = at_bats.shape[0]
    phi_prior = Uniform(at_bats.new_tensor(0), at_bats.new_tensor(1)).expand_by([num_players]).independent(1)
    phi = pyro.sample("phi", phi_prior)
    return pyro.sample("obs", Binomial(at_bats, phi))
Example #20
def test_chaining():
    X = (
        Uniform(0, 1).rv  # (0, 1)
        .add(1)  # (1, 2)
        .pow(2)  # (1, 4)
        .mul(2)  # (2, 8)
        .sub(5)  # (-3, 3)
        .tanh()  # (-1, 1); more like (-0.995, +0.995)
        .exp()  # (1/e, e)
    )
    x = X.dist.sample([N_SAMPLES])
    assert ((1/math.e <= x) & (x <= math.e)).all().item()
Example #21
def test_clippedadam_clip(clip_norm):
    x1 = torch.tensor(0., requires_grad=True)
    x2 = torch.tensor(0., requires_grad=True)
    opt_ca = optim.clipped_adam.ClippedAdam(params=[x1], lr=1., lrd=1., clip_norm=clip_norm)
    opt_a = torch.optim.Adam(params=[x2], lr=1.)
    for step in range(3):
        opt_ca.zero_grad()
        opt_a.zero_grad()
        x1.backward(Uniform(clip_norm, clip_norm + 3.).sample())
        x2.backward(torch.tensor(clip_norm))
        opt_ca.step()
        opt_a.step()
        assert_equal(x1, x2)
Example #22
def model(observed_data):
    mu_prior = Uniform(0.0, 1.0)
    mu = pyro.sample("mu", mu_prior)

    def observe_T(T_obs, obs_name):
        T_simulated = simulate(mu)
        T_obs_dist = Normal(T_simulated, torch.tensor(time_measurement_sigma))
        pyro.sample(obs_name, T_obs_dist, obs=T_obs)

    for i, T_obs in enumerate(observed_data):
        observe_T(T_obs, "obs_%d" % i)

    return mu
Example #23
    def pgm_model(self):
        sex_dist = Bernoulli(logits=self.sex_logits).to_event(1)
        # pseudo call to register with pyro
        _ = self.sex_logits
        sex = pyro.sample('sex', sex_dist, infer=dict(baseline={'use_decaying_avg_baseline': True}))

        slice_number_dist = Uniform(self.slice_number_min, self.slice_number_max).to_event(1)
        slice_number = pyro.sample('slice_number', slice_number_dist)

        age_base_dist = Normal(self.age_base_loc, self.age_base_scale).to_event(1)
        age_dist = TransformedDistribution(age_base_dist, self.age_flow_transforms)
        _ = self.age_flow_components
        age = pyro.sample('age', age_dist)
        age_ = self.age_flow_constraint_transforms.inv(age)

        duration_context = torch.cat([sex, age_], 1)
        duration_base_dist = Normal(self.duration_base_loc, self.duration_base_scale).to_event(1)
        duration_dist = ConditionalTransformedDistribution(duration_base_dist, self.duration_flow_transforms).condition(duration_context)  # noqa: E501
        duration = pyro.sample('duration', duration_dist)
        _ = self.duration_flow_components
        duration_ = self.duration_flow_constraint_transforms.inv(duration)

        edss_context = torch.cat([sex, duration_], 1)
        edss_base_dist = Normal(self.edss_base_loc, self.edss_base_scale).to_event(1)
        edss_dist = ConditionalTransformedDistribution(edss_base_dist, self.edss_flow_transforms).condition(edss_context)  # noqa: E501
        edss = pyro.sample('edss', edss_dist)
        _ = self.edss_flow_components
        edss_ = self.edss_flow_constraint_transforms.inv(edss)

        brain_context = torch.cat([sex, age_], 1)
        brain_volume_base_dist = Normal(self.brain_volume_base_loc, self.brain_volume_base_scale).to_event(1)
        brain_volume_dist = ConditionalTransformedDistribution(brain_volume_base_dist, self.brain_volume_flow_transforms).condition(brain_context)
        _ = self.brain_volume_flow_components
        brain_volume = pyro.sample('brain_volume', brain_volume_dist)
        brain_volume_ = self.brain_volume_flow_constraint_transforms.inv(brain_volume)

        ventricle_context = torch.cat([age_, brain_volume_, duration_], 1)
        ventricle_volume_base_dist = Normal(self.ventricle_volume_base_loc, self.ventricle_volume_base_scale).to_event(1)
        ventricle_volume_dist = ConditionalTransformedDistribution(ventricle_volume_base_dist, self.ventricle_volume_flow_transforms).condition(ventricle_context)  # noqa: E501
        ventricle_volume = pyro.sample('ventricle_volume', ventricle_volume_dist)
        _ = self.ventricle_volume_flow_components
        ventricle_volume_ = self.ventricle_volume_flow_constraint_transforms.inv(ventricle_volume)

        lesion_context = torch.cat([brain_volume_, ventricle_volume_, duration_, edss_], 1)
        lesion_volume_base_dist = Normal(self.lesion_volume_base_loc, self.lesion_volume_base_scale).to_event(1)
        lesion_volume_dist = ConditionalTransformedDistribution(lesion_volume_base_dist, self.lesion_volume_flow_transforms).condition(lesion_context)
        lesion_volume = pyro.sample('lesion_volume', lesion_volume_dist)
        _ = self.lesion_volume_flow_components

        return dict(age=age, sex=sex, ventricle_volume=ventricle_volume, brain_volume=brain_volume,
                    lesion_volume=lesion_volume, duration=duration, edss=edss, slice_number=slice_number)
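In this model every variable is drawn from a (conditionally) transformed Normal base distribution: the flow transforms give each marginal its shape, and the causal parents of each variable are concatenated into the conditioning context (e.g. `duration` depends on `sex` and `age`).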
Example #24
def model(observed_data):
    mu_prior = Uniform(Variable(torch.zeros(1)), Variable(torch.ones(1)))
    mu = pyro.sample("mu", mu_prior)

    def observe_T(T_obs, obs_name):
        T_simulated = simulate(mu)
        T_obs_dist = Normal(T_simulated,
                            Variable(torch.Tensor([time_measurement_sigma])))
        pyro.observe(obs_name, T_obs_dist, T_obs)

    for i, T_obs in enumerate(observed_data):
        observe_T(T_obs, "obs_%d" % i)

    return mu
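This is the same simulator-conditioning model as Example #22, written against the pre-0.3 API: `Variable` wrappers and `pyro.observe(name, fn, obs)` instead of `pyro.sample(name, fn, obs=obs)`.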
Example #25
def not_pooled(at_bats, hits):
    r"""
    Number of hits in $K$ at bats for each player has a Binomial
    distribution with independent probability of success, $\phi_i$.

    :param (torch.Tensor) at_bats: Number of at bats for each player.
    :param (torch.Tensor) hits: Number of hits for the given at bats.
    :return: Number of hits predicted by the model.
    """
    num_players = at_bats.shape[0]
    with pyro.plate("num_players", num_players):
        phi_prior = Uniform(scalar_like(at_bats, 0), scalar_like(at_bats, 1))
        phi = pyro.sample("phi", phi_prior)
        return pyro.sample("obs", Binomial(at_bats, phi), obs=hits)
Example #26
def test_clip_norm(pyro_optim, clip, value):
    x1 = torch.tensor(0.0, requires_grad=True)
    x2 = torch.tensor(0.0, requires_grad=True)
    opt_c = pyro_optim({"lr": 1.0}, {clip: value})
    opt = pyro_optim({"lr": 1.0})
    for step in range(3):
        x1.backward(Uniform(value, value + 3.0).sample())
        x2.backward(torch.tensor(value))
        opt_c([x1])
        opt([x2])
        assert_equal(x1.grad, torch.tensor(value))
        assert_equal(x2.grad, torch.tensor(value))
        assert_equal(x1, x2)
        opt_c.optim_objs[x1].zero_grad()
        opt.optim_objs[x2].zero_grad()
Example #27
    def get_new_kernel(self, thetas: Tensor) -> Distribution:
        """Return new kernel distribution for a given set of paramters."""

        if self.kernel == "gaussian":
            assert self.kernel_variance.ndim == 2
            return MultivariateNormal(loc=thetas,
                                      covariance_matrix=self.kernel_variance)

        elif self.kernel == "uniform":
            low = thetas - self.kernel_variance
            high = thetas + self.kernel_variance
            # Move batch shape to event shape to get Uniform that is multivariate in
            # parameter dimension.
            return Uniform(low=low, high=high).to_event(1)
        else:
            raise ValueError(f"Kernel, '{self.kernel}' not supported.")
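The comment above is the key point: `.to_event(1)` reinterprets the rightmost batch dimension of the batched `Uniform` as an event dimension, yielding a single multivariate kernel. A quick shape check (illustrative):

import torch
from pyro.distributions import Uniform

d = Uniform(torch.zeros(3), torch.ones(3))
assert d.batch_shape == (3,) and d.event_shape == ()
d2 = d.to_event(1)
assert d2.batch_shape == () and d2.event_shape == (3,)  # one 3-dimensional event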
Example #28
def _skewness(event_shape):
    skewness = torch.zeros(event_shape.numel())
    done = False
    while not done:
        for i in range(event_shape.numel()):
            max_ = 1.0 - skewness.abs().sum(-1)
            if torch.any(max_ < 1e-15):
                break
            skewness[i] = Uniform(-max_, max_).sample()
        done = not torch.any(max_ < 1e-15)

    if event_shape == tuple():
        skewness = skewness.reshape(event_shape)
    else:
        skewness = skewness.view(event_shape)
    return skewness
Example #29
def test_clippedadam_pass(clip_norm):
    x1 = torch.tensor(0.0, requires_grad=True)
    x2 = torch.tensor(0.0, requires_grad=True)
    opt_ca = optim.clipped_adam.ClippedAdam(params=[x1],
                                            lr=1.0,
                                            lrd=1.0,
                                            clip_norm=clip_norm)
    opt_a = torch.optim.Adam(params=[x2], lr=1.0)
    for step in range(3):
        g = Uniform(-clip_norm, clip_norm).sample()
        opt_ca.zero_grad()
        opt_a.zero_grad()
        x1.backward(g)
        x2.backward(g)
        opt_ca.step()
        opt_a.step()
        assert_equal(x1, x2)
Example #30
def pyromodel(x, y):
    priors = {}
    for name, par in model.named_parameters():
        priors[name] = dist.Normal(torch.zeros(*par.shape), 10 * torch.ones(*par.shape)).independent(par.dim())
        
        #print("batch shape:", priors[name].batch_shape)
        #print("event shape:", priors[name].event_shape)
        #print("event dim:", priors[name].event_dim)    
        
    bayesian_model = pyro.random_module('bayesian_model', model, priors)
    sampled_model = bayesian_model()
    sigma = pyro.sample('sigma', Uniform(0, 10))
    with pyro.iarange("map", len(x)):
        prediction_mean = sampled_model(x).squeeze(-1)
        pyro.sample("obs",
                    dist.Normal(prediction_mean,
                                0.05 * sigma),
                    obs=y)