Example #1
def model(x_data, y_data):
    # weight and bias priors
    fc1w_prior = Normal(torch.zeros(1, 2), torch.ones(1, 2)).to_event(1)
    fc1b_prior = Normal(torch.tensor([[8.]]),
                        torch.tensor([[1000.]])).to_event(1)
    outw_prior = Normal(loc=torch.zeros_like(net.out.weight),
                        scale=torch.ones_like(net.out.weight))
    outb_prior = Normal(loc=torch.zeros_like(net.out.bias),
                        scale=torch.ones_like(net.out.bias))
    #outw_prior = Normal(loc=outw_mu_param, scale=outw_sigma_param).independent(1)
    #f_prior = Normal(0., 1.)
    #priors = {'linear.weight': w_prior, 'linear.bias': b_prior, 'factor': f_prior}
    priors = {
        'fc1.weight': fc1w_prior,
        'fc1.bias': fc1b_prior,
        'out.weight': outw_prior,
        'out.bias': outb_prior
    }
    scale = pyro.sample("sigma", Uniform(0., 10.))
    # lift module parameters to random variables sampled from the priors
    lifted_module = pyro.random_module("module", net, priors)
    # sample a nn (which also samples w and b)
    lifted_reg_model = lifted_module()

    with pyro.plate("map", len(x_data)):
        # run the nn forward on data
        prediction_mean = lifted_reg_model(x_data).squeeze(-1)
        # condition on the observed data
        pyro.sample("obs", Normal(prediction_mean, scale), obs=y_data)
        return prediction_mean
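A minimal sketch of training this model with SVI and an automatic guide, assuming `model`, `x_data`, and `y_data` are defined as above (the optimizer settings are illustrative):

import pyro
from pyro.infer import SVI, Trace_ELBO
from pyro.infer.autoguide import AutoDiagonalNormal
from pyro.optim import Adam

guide = AutoDiagonalNormal(model)  # mean-field guide over all latent sites
svi = SVI(model, guide, Adam({"lr": 1e-3}), loss=Trace_ELBO())

pyro.clear_param_store()
for step in range(2000):
    loss = svi.step(x_data, y_data)  # one gradient step on the ELBO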
Example #2
    def model(self, seq):
        mu0 = torch.zeros(self.emb_dim).to(self.device)
        tri0 = self.tri0  # create this when initializing. (takes 4ms each time!)

        muV = pyro.sample("muV",
                          dist.MultivariateNormal(loc=mu0, scale_tril=tri0))

        with pyro.plate("item_loop", self.num_items):
            V = pyro.sample("V", dist.MultivariateNormal(muV,
                                                         scale_tril=tri0))

        # LIFT MODULE:
        prior = {
            'linear.bias': dist.Normal(0, 1),
            'V.weight': Deterministic_distr(V)
        }
        lifted_module = pyro.random_module("net", self, prior=prior)

        lifted_reg_model = lifted_module()
        lifted_reg_model.lstm.flatten_parameters()

        with pyro.plate("data", len(seq),
                        subsample_size=self.batch_size) as ind:
            batch_seq = seq[ind, ]
            x = batch_seq[:, :-1]
            y = batch_seq[:, 1:]
            batch_mask = (y != 0).float()

            lprobs = lifted_reg_model(x)
            data = pyro.sample(
                "obs_x",
                dist.Categorical(logits=lprobs).mask(batch_mask).to_event(2),
                obs=y)
        return lifted_reg_model
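`Deterministic_distr` is not defined in this snippet; presumably it behaves like a Delta distribution that always returns the already-sampled V, pinning the lifted module's 'V.weight' to that value. A minimal sketch under that assumption:

import pyro.distributions as dist

def Deterministic_distr(value):
    # a point-mass "distribution": sampling always yields `value`
    return dist.Delta(value).to_event(value.dim())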
Example #3
    def model(self, x_data: torch.Tensor, y_data: torch.Tensor):
        fc1w_prior = Normal(
            loc=torch.zeros_like(self.fc1.weight),
            scale=torch.ones_like(self.fc1.weight),
        )
        fc1b_prior = Normal(loc=torch.zeros_like(self.fc1.bias),
                            scale=torch.ones_like(self.fc1.bias))

        outw_prior = Normal(
            loc=torch.zeros_like(self.out.weight),
            scale=torch.ones_like(self.out.weight),
        )
        outb_prior = Normal(loc=torch.zeros_like(self.out.bias),
                            scale=torch.ones_like(self.out.bias))

        priors = {
            "fc1.weight": fc1w_prior,
            "fc1.bias": fc1b_prior,
            "out.weight": outw_prior,
            "out.bias": outb_prior,
        }

        # lift module parameters to random variables sampled from the priors
        lifted_module = pyro.random_module("module", self, priors)
        # sample a regressor (which also samples w and b)
        lifted_reg_model = lifted_module()

        lhat = self.log_softmax(lifted_reg_model(x_data))

        pyro.sample("obs", Categorical(logits=lhat), obs=y_data)
Example #4
def old_model(x_data, y_data):

    scale = pyro.sample("sigma", Uniform(0., 10.))
    fc1w_prior = Normal(loc=torch.zeros_like(net.fc1.weight),
                        scale=torch.ones_like(net.fc1.weight))
    fc1b_prior = Normal(loc=torch.zeros_like(net.fc1.bias),
                        scale=torch.ones_like(net.fc1.bias))

    outw_prior = Normal(loc=torch.zeros_like(net.out.weight),
                        scale=torch.ones_like(net.out.weight))
    outb_prior = Normal(loc=torch.zeros_like(net.out.bias),
                        scale=torch.ones_like(net.out.bias))

    priors = {
        'fc1.weight': fc1w_prior,
        'fc1.bias': fc1b_prior,
        'out.weight': outw_prior,
        'out.bias': outb_prior
    }
    # lift module parameters to random variables sampled from the priors
    lifted_module = pyro.random_module("module", net, priors)
    # sample a regressor (which also samples w and b)
    lifted_reg_model = lifted_module()

    prediction_mean = lifted_reg_model(x_data)

    pyro.sample("obs", Normal(prediction_mean, Scale2), obs=y_data)
Example #5
    def model(self, seq):
        bias = dist.Normal(0., 1.)
        mu0 = torch.zeros(self.emb_dim).to(self.device)
        var0 = torch.diag(torch.ones(self.emb_dim).to(self.device) * 2)

        muV = pyro.sample("muV", dist.MultivariateNormal(loc=mu0, covariance_matrix=var0))

        with pyro.plate("item_loop", self.num_items):
            V = pyro.sample("V", dist.MultivariateNormal(muV, var0))

        # LIFT MODULE:
        prior = {'linear.bias': bias,
                 'V.weight': Deterministic_distr(V)}
        lifted_module = pyro.random_module("net", self, prior=prior)

        lifted_reg_model = lifted_module()
        lifted_reg_model.lstm.flatten_parameters()

        with pyro.plate("data", len(seq), subsample_size=self.batch_size) as ind:
            batch_seq = seq[ind]
            batch_mask = (batch_seq != 0).float()

            lprobs = lifted_reg_model(batch_seq)
            data = pyro.sample("obs_x",
                               dist.Categorical(logits=lprobs).mask(batch_mask).to_event(2),
                               obs=batch_seq)
        return lifted_reg_model
Example #6
def model(x_data, y_data):
    # weight and bias priors
    mu = torch.zeros(second_layer, first_layer).type_as(x_data)
    sigma = torch.ones(second_layer, first_layer).type_as(x_data)
    bias_mu = torch.zeros(second_layer).type_as(x_data)
    bias_sigma = torch.ones(second_layer).type_as(x_data)
    w_prior, b_prior = Normal(mu, sigma), Normal(bias_mu, bias_sigma)

    mu2 = torch.zeros(1, second_layer).type_as(x_data)
    sigma2 = torch.ones(1, second_layer).type_as(x_data)
    bias_mu2 = torch.zeros(1).type_as(x_data)
    bias_sigma2 = torch.ones(1).type_as(x_data)
    w_prior2, b_prior2 = Normal(mu2, sigma2), Normal(bias_mu2, bias_sigma2)

    priors = {'hidden.weight': w_prior,
              'hidden.bias': b_prior,
              'predict.weight': w_prior2,
              'predict.bias': b_prior2}
    scale = torch.ones(x_data.size(0)).type_as(x_data)
    # lift module parameters to random variables sampled from the priors
    lifted_module = pyro.random_module("module", regression_model, priors)
    # sample a nn (which also samples w and b)
    lifted_reg_model = lifted_module()
    with pyro.plate("map", len(x_data)):
        # run the nn forward on data
        prediction_mean = lifted_reg_model(x_data).squeeze(-1)
        # condition on the observed data
        pyro.sample("obs",
                    Normal(prediction_mean, scale),
                    obs=y_data)
        return prediction_mean
Example #7
    def model(self, features, target):
        def normal_prior(x):
            return Normal(torch.zeros_like(x),
                          torch.ones_like(x)).to_event(x.dim())

        self.priors = {}

        for i in range(len(self.net.hidden_sizes)):
            self.priors['h' + str(i) + '.weight'] = normal_prior(
                getattr(self.net, 'h' + str(i)).weight)
            self.priors['h' + str(i) + '.bias'] = normal_prior(
                getattr(self.net, 'h' + str(i)).bias)

        self.priors['out.weight'] = normal_prior(self.net.out.weight)
        self.priors['out.bias'] = normal_prior(self.net.out.bias)

        # lift module parameters to random variables sampled from the priors
        lifted_module = pyro.random_module("module", self.net, self.priors)
        # sample a regressor (which also samples w and b)
        model_sample = lifted_module()
        # print(model_sample)

        with pyro.plate("data", len(target)):

            # yhat = self.log_softmax(model_sample(features))
            # target is not one-hot encoded
            # pyro.sample("obs",
            #             Categorical(logits=yhat), obs=target)

            yhat = self.softmax(model_sample(features))

            # target is not one-hot encoded
            pyro.sample("obs", Categorical(probs=yhat), obs=target)
            return yhat
Example #8
    def model(self, features, target):
        def normal_prior(x):
            return Normal(torch.zeros_like(x),
                          torch.ones_like(x)).to_event(x.dim())

        self.priors = {}

        for i in range(len(self.net.hidden_sizes)):
            self.priors['h' + str(i) + '.weight'] = normal_prior(
                getattr(self.net, 'h' + str(i)).weight)
            self.priors['h' + str(i) + '.bias'] = normal_prior(
                getattr(self.net, 'h' + str(i)).bias)

        self.priors['out.weight'] = normal_prior(self.net.out.weight)
        self.priors['out.bias'] = normal_prior(self.net.out.bias)

        # lift module parameters to random variables sampled from the priors
        lifted_module = pyro.random_module("module", self.net, self.priors)
        # sample a regressor (which also samples w and b)
        model_sample = lifted_module()

        out_sigma = pyro.sample("sigma", Uniform(0., 10.))

        # precision = pyro.sample("precision", Uniform(0., 10.))
        # out_sigma = 1 / precision

        with pyro.plate("data", len(target)):

            target_mean = model_sample(features).squeeze(-1)
            # target is not one-hot encoded
            pyro.sample("obs", Normal(target_mean, out_sigma), obs=target)

            return target_mean
Example #9
    def guide(self, inputs, targets):
        dists = {}
        for param, data in self.encoder.named_parameters():
            # if 'weight' in param or 'bias' in param:
            # variable_normal (defined elsewhere) presumably builds a Normal
            # with learnable loc/scale parameters registered in the param store
            dists[param] = variable_normal(param, data.shape)
        lifted_module = pyro.random_module("encoder", self.encoder, dists)
        return lifted_module()
Example #10
def model(x, y):
    fc1_mean_weight_prior = pyro.distributions.Normal(loc=torch.zeros_like(det_net.fc1_mean.weight), scale=torch.ones_like(det_net.fc1_mean.weight))
    fc1_mean_bias_prior = pyro.distributions.Normal(loc=torch.zeros_like(det_net.fc1_mean.bias), scale=torch.ones_like(det_net.fc1_mean.bias))

    fc2_mean_weight_prior = pyro.distributions.Normal(loc=torch.zeros_like(det_net.fc2_mean.weight), scale=torch.ones_like(det_net.fc2_mean.weight))
    fc2_mean_bias_prior = pyro.distributions.Normal(loc=torch.zeros_like(det_net.fc2_mean.bias), scale=torch.ones_like(det_net.fc2_mean.bias))

    fc3_mean_weight_prior = pyro.distributions.Normal(loc=torch.zeros_like(det_net.fc3_mean.weight), scale=torch.ones_like(det_net.fc3_mean.weight))
    fc3_mean_bias_prior = pyro.distributions.Normal(loc=torch.zeros_like(det_net.fc3_mean.bias), scale=torch.ones_like(det_net.fc3_mean.bias))

    fc1_var_weight_prior = pyro.distributions.Normal(loc=torch.zeros_like(det_net.fc1_var.weight), scale=torch.ones_like(det_net.fc1_var.weight))
    fc1_var_bias_prior = pyro.distributions.Normal(loc=torch.zeros_like(det_net.fc1_var.bias), scale=torch.ones_like(det_net.fc1_var.bias))

    fc2_var_weight_prior = pyro.distributions.Normal(loc=torch.zeros_like(det_net.fc2_var.weight), scale=torch.ones_like(det_net.fc2_var.weight))
    fc2_var_bias_prior = pyro.distributions.Normal(loc=torch.zeros_like(det_net.fc2_var.bias), scale=torch.ones_like(det_net.fc2_var.bias))

    fc3_var_weight_prior = pyro.distributions.Normal(loc=torch.zeros_like(det_net.fc3_var.weight), scale=torch.ones_like(det_net.fc3_var.weight))
    fc3_var_bias_prior = pyro.distributions.Normal(loc=torch.zeros_like(det_net.fc3_var.bias), scale=torch.ones_like(det_net.fc3_var.bias))

    priors = {"fc1_mean.weight": fc1_mean_weight_prior, "fc1_mean.bias": fc1_mean_bias_prior,
              "fc2_mean.weight": fc2_mean_weight_prior, "fc2_mean.bias": fc2_mean_bias_prior,
              "fc3_mean.weight": fc3_mean_weight_prior, "fc3_mean.bias": fc3_mean_bias_prior,
              "fc1_var.weight": fc1_var_weight_prior, "fc1_var.bias": fc1_var_bias_prior,
              "fc2_var.weight": fc2_var_weight_prior, "fc2_var.bias": fc2_var_bias_prior,
              "fc3_var.weight": fc3_var_weight_prior, "fc3_var.bias": fc3_var_bias_prior}

    lifted_module = pyro.random_module("module", det_net, priors)

    sampled_reg_model = lifted_module()

    mu, log_sigma_2 = sampled_reg_model(x)

    sigma = torch.sqrt(torch.exp(log_sigma_2))

    return pyro.sample("obs", pyro.distributions.Normal(mu, sigma), obs=y)
Example #11
    def _model(x_data, y_data):
        # weight and bias priors
        w_prior = Normal(torch.zeros(1, 1), torch.ones(1, 1)).to_event(1)
        b_prior = Normal(10 * torch.ones(1, 1),
                         10 * torch.ones(1, 1)).to_event(1)

        priors = {'linear.weight': w_prior, 'linear.bias': b_prior}

        scale = pyro.sample('sigma', Uniform(0, 2000))

        lifted_module = pyro.random_module("module", regression_model, priors)
        # sample a nn (which also samples w and b)
        lifted_reg_model = lifted_module()

        with pyro.plate("map", len(x_data)):
            # run the nn forward on data
            prediction_mean = lifted_reg_model(x_data).squeeze(
                -1)  # shape: (256,)

            # condition on the observed data
            res = pyro.sample("obs",
                              Normal(prediction_mean, scale),
                              obs=y_data)  # shape (256, 1)

            return prediction_mean
Example #12
    def test_random_module(self):
        pyro.clear_param_store()
        lifted_tr = poutine.trace(pyro.random_module("name", self.model, prior=self.prior)).get_trace()
        for name in lifted_tr.nodes.keys():
            # after lifting, no "param" sites should remain; if one does,
            # the first assertion below fails by construction
            if lifted_tr.nodes[name]["type"] == "param":
                assert lifted_tr.nodes[name]["type"] == "sample"
                assert not lifted_tr.nodes[name]["is_observed"]
Example #13
    def _model(x_data, y_data):

        fc1w_prior = Normal(torch.zeros(2, 3), torch.ones(2, 3)).to_event()
        fc1b_prior = Normal(0.25 * torch.ones(2),
                            0.25 * torch.ones(2)).to_event()

        outw_prior = Normal(torch.zeros(1, 2), torch.ones(1, 2)).to_event()
        outb_prior = Normal(0.25 * torch.ones(1),
                            0.25 * torch.ones(1)).to_event()

        priors = {
            'fc1.weight': fc1w_prior,
            'fc1.bias': fc1b_prior,
            'out.weight': outw_prior,
            'out.bias': outb_prior
        }

        scale = pyro.sample('sigma', Uniform(0, 20))

        # lift module parameters to random variables sampled from the priors
        lifted_module = pyro.random_module("module", nn_model, priors)
        # sample a regressor (which also samples w and b)
        lifted_reg_model = lifted_module()

        with pyro.plate("map", len(x_data)):

            # run the nn forward on data
            prediction_mean = lifted_reg_model(x_data).squeeze(-1)

            # condition on the observed data
            pyro.sample("obs", Normal(prediction_mean, scale), obs=y_data)

            return prediction_mean
Example #14
def model(x_data, y_data):

    fc1w_prior = Normal(loc=torch.zeros_like(net.fc1.weight),
                        scale=torch.ones_like(net.fc1.weight))
    fc1b_prior = Normal(loc=torch.zeros_like(net.fc1.bias),
                        scale=torch.ones_like(net.fc1.bias))

    outw_prior = Normal(loc=torch.zeros_like(net.out.weight),
                        scale=torch.ones_like(net.out.weight))
    outb_prior = Normal(loc=torch.zeros_like(net.out.bias),
                        scale=torch.ones_like(net.out.bias))

    priors = {
        'fc1.weight': fc1w_prior,
        'fc1.bias': fc1b_prior,
        'out.weight': outw_prior,
        'out.bias': outb_prior
    }
    # lift module parameters to random variables sampled from the priors
    lifted_module = pyro.random_module("module", net, priors)
    # sample a regressor (which also samples w and b)
    lifted_reg_model = lifted_module()
    """
    
    lhat = log_softmax(lifted_reg_model(x_data))
    
    pyro.sample("obs", Categorical(logits=lhat), obs=y_data)
    """
    # run the regressor forward conditioned on inputs
    prediction_mean = lifted_reg_model(x_data).squeeze(-1)
    pyro.sample("obs", Normal(prediction_mean, 1), obs=y_data)
    return prediction_mean
Example #15
def guide_2(x_data, y_data):

    conv1w_mu = torch.randn_like(net.conv1.weight)
    conv1w_sigma = torch.randn_like(net.conv1.weight)
    conv1w_mu_param = pyro.param("conv1w_mu", conv1w_mu)
    conv1w_sigma_param = softplus(pyro.param("conv1w_sigma", conv1w_sigma))
    conv1w_prior = Normal(loc=conv1w_mu_param, scale=conv1w_sigma_param)
    
    conv1b_mu = torch.randn_like(net.conv1.bias)
    conv1b_sigma = torch.randn_like(net.conv1.bias)
    conv1b_mu_param = pyro.param("conv1b_mu", conv1b_mu)
    conv1b_sigma_param = softplus(pyro.param("conv1b_sigma", conv1b_sigma))
    conv1b_prior = Normal(loc=conv1b_mu_param, scale=conv1b_sigma_param)  

    conv2w_mu = torch.randn_like(net.conv2.weight)
    conv2w_sigma = torch.randn_like(net.conv2.weight)
    conv2w_mu_param = pyro.param("conv2w_mu", conv2w_mu)
    conv2w_sigma_param = softplus(pyro.param("conv2w_sigma", conv2w_sigma))
    conv2w_prior = Normal(loc=conv2w_mu_param, scale=conv2w_sigma_param)
    
    conv2b_mu = torch.randn_like(net.conv2.bias)
    conv2b_sigma = torch.randn_like(net.conv2.bias)
    conv2b_mu_param = pyro.param("conv2b_mu", conv2b_mu)
    conv2b_sigma_param = softplus(pyro.param("conv2b_sigma", conv2b_sigma))
    conv2b_prior = Normal(loc=conv2b_mu_param, scale=conv2b_sigma_param)

    # First layer weight distribution priors
    fc1w_mu = torch.randn_like(net.fc1.weight)
    fc1w_sigma = torch.randn_like(net.fc1.weight)
    fc1w_mu_param = pyro.param("fc1w_mu", fc1w_mu)
    fc1w_sigma_param = softplus(pyro.param("fc1w_sigma", fc1w_sigma))
    fc1w_prior = Normal(loc=fc1w_mu_param, scale=fc1w_sigma_param)
    
    fc1b_mu = torch.randn_like(net.fc1.bias)
    fc1b_sigma = torch.randn_like(net.fc1.bias)
    fc1b_mu_param = pyro.param("fc1b_mu", fc1b_mu)
    fc1b_sigma_param = softplus(pyro.param("fc1b_sigma", fc1b_sigma))
    fc1b_prior = Normal(loc=fc1b_mu_param, scale=fc1b_sigma_param)

    fc2w_mu = torch.randn_like(net.fc2.weight)
    fc2w_sigma = torch.randn_like(net.fc2.weight)
    fc2w_mu_param = pyro.param("fc2w_mu", fc2w_mu)
    fc2w_sigma_param = softplus(pyro.param("fc2w_sigma", fc2w_sigma))
    fc2w_prior = Normal(loc=fc2w_mu_param, scale=fc2w_sigma_param)
    
    fc2b_mu = torch.randn_like(net.fc2.bias)
    fc2b_sigma = torch.randn_like(net.fc2.bias)
    fc2b_mu_param = pyro.param("fc2b_mu", fc2b_mu)
    fc2b_sigma_param = softplus(pyro.param("fc2b_sigma", fc2b_sigma))
    fc2b_prior = Normal(loc=fc2b_mu_param, scale=fc2b_sigma_param)

    priors = {'conv1.weight': conv1w_prior, 'conv1.bias': conv1b_prior, 'conv2.weight': conv2w_prior, 'conv2.bias': conv2b_prior,
              'fc1.weight': fc1w_prior, 'fc1.bias': fc1b_prior, 'fc2.weight': fc2w_prior, 'fc2.bias': fc2b_prior}
    
    lifted_module = pyro.random_module("module", net, priors)
    
    return lifted_module()
Example #16
def model_4(x_data, y_data):

    conv1w_prior = Normal(loc=torch.zeros_like(net.conv1.weight), scale=torch.ones_like(net.conv1.weight))
    conv1b_prior = Normal(loc=torch.zeros_like(net.conv1.bias), scale=torch.ones_like(net.conv1.bias))

    conv2w_prior = Normal(loc=torch.zeros_like(net.conv2.weight), scale=torch.ones_like(net.conv2.weight))
    conv2b_prior = Normal(loc=torch.zeros_like(net.conv2.bias), scale=torch.ones_like(net.conv2.bias))

    conv3w_prior = Normal(loc=torch.zeros_like(net.conv3.weight), scale=torch.ones_like(net.conv3.weight))
    conv3b_prior = Normal(loc=torch.zeros_like(net.conv3.bias), scale=torch.ones_like(net.conv3.bias))

    conv4w_prior = Normal(loc=torch.zeros_like(net.conv4.weight), scale=torch.ones_like(net.conv4.weight))
    conv4b_prior = Normal(loc=torch.zeros_like(net.conv4.bias), scale=torch.ones_like(net.conv4.bias))

    fc1w_prior = Normal(loc=torch.zeros_like(net.fc1.weight), scale=torch.ones_like(net.fc1.weight))
    fc1b_prior = Normal(loc=torch.zeros_like(net.fc1.bias), scale=torch.ones_like(net.fc1.bias))

    fc2w_prior = Normal(loc=torch.zeros_like(net.fc2.weight), scale=torch.ones_like(net.fc2.weight))
    fc2b_prior = Normal(loc=torch.zeros_like(net.fc2.bias), scale=torch.ones_like(net.fc2.bias))

    priors = {'conv1.weight': conv1w_prior, 'conv1.bias': conv1b_prior,
              'conv2.weight': conv2w_prior, 'conv2.bias': conv2b_prior,
              'conv3.weight': conv3w_prior, 'conv3.bias': conv3b_prior,
              'conv4.weight': conv4w_prior, 'conv4.bias': conv4b_prior,
              'fc1.weight': fc1w_prior, 'fc1.bias': fc1b_prior,
              'fc2.weight': fc2w_prior, 'fc2.bias': fc2b_prior}

    lifted_module = pyro.random_module("module", net, priors)

    lifted_reg_model = lifted_module()

    lhat = log_softmax(lifted_reg_model(x_data))

    pyro.sample("obs", Categorical(logits=lhat), obs=y_data)
Example #17
def pyromodel(x, y):
    priors = {}
    for name, par in model.named_parameters():
        priors[name] = dist.Normal(torch.zeros(*par.shape),
                                   50 * torch.ones(*par.shape)).independent(
                                       par.dim())

        #print("batch shape:", priors[name].batch_shape)
        #print("event shape:", priors[name].event_shape)
        #print("event dim:", priors[name].event_dim)

    bayesian_model = pyro.random_module('bayesian_model', model, priors)
    sampled_model = bayesian_model()
    sigma = pyro.sample('sigma', Uniform(0, 50))
    with pyro.iarange("map", len(x)):
        prediction_mean = sampled_model(x)
        logging.debug(f"prediction_mean: {prediction_mean.shape}")

        if y is not None:
            logging.debug(f"y_data: {y.shape}")

        d_dist = Normal(prediction_mean, sigma).to_event(1)

        logging.debug(f"batch shape: {d_dist.batch_shape}")
        logging.debug(f"event shape: {d_dist.event_shape}")
        logging.debug(f"event dim: {d_dist.event_dim}")

        pyro.sample("obs", d_dist, obs=y)

        return prediction_mean
Example #18
def model(x_data, y_data):

    fc1w_prior = Normal(loc=torch.zeros_like(net.fc1.weight),
                        scale=torch.ones_like(net.fc1.weight))
    fc1b_prior = Normal(loc=torch.zeros_like(net.fc1.bias),
                        scale=torch.ones_like(net.fc1.bias))

    outw_prior = Normal(loc=torch.zeros_like(net.out.weight),
                        scale=torch.ones_like(net.out.weight))
    outb_prior = Normal(loc=torch.zeros_like(net.out.bias),
                        scale=torch.ones_like(net.out.bias))

    priors = {
        'fc1.weight': fc1w_prior,
        'fc1.bias': fc1b_prior,
        'out.weight': outw_prior,
        'out.bias': outb_prior
    }

    # prior on the observation noise
    scale = pyro.sample("sigma", Uniform(0., 10.))

    # lift module parameters to random variables sampled from the priors
    lifted_module = pyro.random_module("module", net, priors)
    # sample a regressor (which also samples w and b)
    lifted_reg_model = lifted_module()

    with pyro.plate("map", len(x_data)):
        # run the nn forward on data
        prediction_mean = lifted_reg_model(x_data).squeeze(-1)
        # condition on the observed data
        pyro.sample("obs", Normal(prediction_mean, scale), obs=y_data)
        return prediction_mean
Example #19
File: BNN2.py | Project: CheChem/BNN
def guide(x_data, y_data):
    # First layer weight distribution priors
    fc1w_mu_param = pyro.param("fc1w_mu_param",
                               torch.randn_like(net.fc1.weight))
    fc1w_sigma_param = softplus(
        pyro.param("fc1w_sigma", torch.randn_like(net.fc1.weight)))
    fc1w_prior = Normal(loc=fc1w_mu_param, scale=fc1w_sigma_param)
    # First layer bias distribution priors
    fc1b_mu_param = pyro.param("fc1b_mu_param", torch.randn_like(net.fc1.bias))
    fc1b_sigma_param = softplus(
        pyro.param("fc1b_sigma_param", torch.randn_like(net.fc1.bias)))
    fc1b_prior = Normal(loc=fc1b_mu_param, scale=fc1b_sigma_param)
    # Output layer weight distribution priors
    outw_mu = torch.randn_like(net.out.weight)
    outw_sigma = torch.randn_like(net.out.weight)
    outw_mu_param = pyro.param("outw_mu", outw_mu)
    outw_sigma_param = softplus(pyro.param("outw_sigma", outw_sigma))
    outw_prior = Normal(loc=outw_mu_param,
                        scale=outw_sigma_param).independent(1)
    # Output layer bias distribution priors
    outb_mu = torch.randn_like(net.out.bias)
    outb_sigma = torch.randn_like(net.out.bias)
    outb_mu_param = pyro.param("outb_mu", outb_mu)
    outb_sigma_param = softplus(pyro.param("outb_sigma", outb_sigma))
    outb_prior = Normal(loc=outb_mu_param, scale=outb_sigma_param)
    priors = {
        'fc1.weight': fc1w_prior,
        'fc1.bias': fc1b_prior,
        'out.weight': outw_prior,
        'out.bias': outb_prior
    }

    lifted_module = pyro.random_module("module", net, priors)

    return lifted_module()
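This hand-written guide mirrors the model from the same BNN2.py file (Example #29 below). A minimal sketch of wiring the two into SVI, assuming `x_data` and `y_data` are defined:

from pyro.infer import SVI, Trace_ELBO
from pyro.optim import Adam

svi = SVI(model, guide, Adam({"lr": 0.01}), loss=Trace_ELBO())
for step in range(1000):
    loss = svi.step(x_data, y_data)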
Example #20
    def model(self, x_data, y_data):
        # weight and bias priors
        w1_prior = Normal(loc=torch.zeros_like(self.fc1.weight),
                          scale=torch.ones_like(self.fc1.weight)).independent(2)
        b1_prior = Normal(loc=torch.zeros_like(self.fc1.bias),
                          scale=torch.ones_like(self.fc1.bias)).independent(1)

        wout_prior = Normal(loc=torch.zeros_like(self.out.weight),
                            scale=torch.ones_like(self.out.weight)).independent(2)
        bout_prior = Normal(loc=torch.zeros_like(self.out.bias),
                            scale=torch.ones_like(self.out.bias)).independent(1)

        priors = {'fc1.weight': w1_prior, 'fc1.bias': b1_prior,
                  'out.weight': wout_prior, 'out.bias': bout_prior}
        # lift module parameters from neural net
        lifted_module = pyro.random_module("module", self, priors)
        lifted_reg_model = lifted_module()
        with pyro.plate("map", len(x_data)):
            # run forward on regression_model
            prediction = lifted_reg_model(x_data)
            prediction_mean = prediction[:, 0]
            softplus = torch.nn.Softplus()
            prediction_var = softplus(prediction[:, 1])
            prediction_std = torch.pow(prediction_var, 0.5)
            # condition on the observed data
            pyro.sample("obs", Normal(prediction_mean, prediction_std), obs=y_data)
            return prediction_mean
Example #21
def guide(data):

    # define our variational parameters
    w_loc = torch.randn(1, p)
    # note that we initialize our scales to be pretty narrow
    # (softplus(-3.0) ≈ 0.049)
    w_log_sig = -3.0 * torch.ones(1, p) + 0.05 * torch.randn(1, p)
    b_loc = torch.randn(1)
    b_log_sig = -3.0 * torch.ones(1) + 0.05 * torch.randn(1)

    # register learnable params in the param store
    mw_param = pyro.param("guide_mean_weight", w_loc)
    sw_param = softplus(pyro.param("guide_log_scale_weight", w_log_sig))
    mb_param = pyro.param("guide_mean_bias", b_loc)
    sb_param = softplus(pyro.param("guide_log_scale_bias", b_log_sig))

    # guide distributions for w and b
    w_dist = Normal(mw_param, sw_param).independent(1)
    b_dist = Normal(mb_param, sb_param).independent(1)
    dists = {'linear.weight': w_dist, 'linear.bias': b_dist}

    # overload the parameters in the module with random samples
    # from the guide distributions
    lifted_module = pyro.random_module("module_pyro", regression_model, dists)

    return lifted_module()
Example #22
def model(x, y):
    # set prior on weights of `linear_1` and `linear_2`
    w1 = pdist.Normal(loc=torch.zeros_like(net.linear1.weight),
                      scale=torch.ones_like(net.linear1.weight))
    b1 = pdist.Normal(loc=torch.zeros_like(net.linear1.bias),
                      scale=torch.ones_like(net.linear1.bias))
    w2 = pdist.Normal(loc=torch.zeros_like(net.linear2.weight),
                      scale=torch.ones_like(net.linear2.weight))
    b2 = pdist.Normal(loc=torch.zeros_like(net.linear2.bias),
                      scale=torch.ones_like(net.linear2.bias))

    # a dictionary of priors
    priors = {
        'linear1.weight': w1,
        'linear1.bias': b1,
        'linear2.weight': w2,
        'linear2.bias': b2
    }

    # lift neural net module
    lifted_net = pyro.random_module("module", net, priors)

    # sample a net
    nn_model = lifted_net()

    # run the sampled model
    # y_hat = torch.log_softmax(nn_model(x))
    y_hat = nn_model(x)

    with pyro.plate('data'):
        pyro.sample('obs', pdist.Categorical(logits=y_hat), obs=y)
Example #23
def model(x_data, y_data, regression_model):
    p = x_data.shape[1]
    # weight and bias priors
    # w_prior = Normal(torch.zeros(1, 2), torch.ones(1, 2)).to_event(1)
    # b_prior = Normal(torch.tensor([[8.]]), torch.tensor([[1000.]])).to_event(1)
    w_prior = Normal(torch.zeros(1, p), torch.ones(1, p)).to_event(1)
    b_prior = Normal(torch.tensor([[1.]]), torch.tensor([[10.]])).to_event(1)

    f_prior = Normal(0., 1.)

    priors = {
        'linear.weight': w_prior,
        'linear.bias': b_prior,
        'factor': f_prior
    }

    scale = pyro.sample("sigma", Uniform(0., 10.))

    # lift module parameters to random variables sampled from the priors
    lifted_module = pyro.random_module("module", regression_model, priors)
    # sample a nn (which also samples w and b)
    lifted_reg_model = lifted_module()
    with pyro.plate("map", len(x_data)):
        # run the nn forward on data
        prediction_mean = lifted_reg_model(x_data).squeeze(-1)
        # condition on the observed data
        pyro.sample("obs", Normal(prediction_mean, scale), obs=y_data)
        return prediction_mean
Example #24
def model(data):

    # Create normal priors over the parameters
    loc, scale = torch.zeros(1, p), 10 * torch.ones(1, p)
    bias_loc, bias_scale = 3 * torch.ones(1), 10 * torch.ones(1)
    w_prior = Normal(loc, scale).independent(1)
    b_prior = Normal(bias_loc, bias_scale).independent(1)
    priors = {'linear.weight': w_prior, 'linear.bias': b_prior}

    # lift module parameters to random variables sampled from the priors
    lifted_module = pyro.random_module("module_pyro", regression_model, priors)

    # sample a regressor (which also samples w and b)
    lifted_reg_model = lifted_module()

    with pyro.iarange("map", N, subsample=data):

        x_data = data[:, :-1]
        y_data = data[:, -1]

        # run the regressor forward conditioned on data
        prediction_mean = lifted_reg_model(x_data).squeeze(-1)

        # condition on the observed data
        pyro.sample("obs",
                    Normal(prediction_mean, .5 * torch.ones(data.size(0))),
                    obs=y_data)
Example #25
def get_random_module(name, nn_module, all_priors):
    """Puts priors on LensingSystem's parameters. Extends `pyro.random_module()`.
    Notes
    -----
    Might be possible to do this more cleanly...

    Parameters
    ----------
    name : `str`
        Base name for the lifted module.
    nn_module : `nn.Module`
        An `nn.Module`.
    all_priors : `dict`
        Must contain a pyro distribution or `torch.tensor`
        for each of `nn_module`'s named parameters that
        has `requires_grad == True`.

    Returns
    -------
    A lifted version of the lensing instance, with parameters
    sampled from the priors. If a prior is `dist.Delta`, the
    parameter is replaced by the value from that distribution
    to ensure compatibility with MCMC functions.
    """
    non_delta_priors = {}
    for p, val in nn_module.named_parameters():
        if val.requires_grad:
            if isinstance(all_priors[p], dist.Delta):
                val.data = all_priors[p].v
            elif isinstance(all_priors[p], torch.Tensor):
                val.data = all_priors[p]
            else:
                non_delta_priors[p] = all_priors[p]

    return pyro.random_module(name, nn_module, non_delta_priors)
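A hypothetical usage sketch (the module `lensing_system` and the parameter names 'x_src' and 'theta_E' are illustrative, not from the source): Delta and plain-tensor priors are baked into the module, while the rest are lifted.

all_priors = {
    "x_src": dist.Normal(0., 1.),               # lifted to a sample site
    "theta_E": dist.Delta(torch.tensor(1.5)),   # fixed in place, not lifted
}
lifted_lens = get_random_module("lens", lensing_system, all_priors)
sampled_lens = lifted_lens()  # parameters drawn from the non-Delta priors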
Example #26
    def pyro_model(x, y):

        fn = pyro.random_module("model", model, prior=priors)
        sampled_model = fn()

        output = sampled_model.likelihood(sampled_model(x))
        pyro.sample("obs", output, obs=y)
Example #27
def model(fc_network: BNN, x_data, y_data):
    # create prior for weight and bias per layer, p(w) [q(z) // p(w)]
    priors = {}
    for i, layer in enumerate(fc_network.fc):
        if not hasattr(layer, 'weight'):
            continue
        # print("model: ",i,layer)
        priors["model.{}.weight".format(str(i))] = \
            Normal(Variable(torch.zeros_like(layer.weight)), Variable(torch.ones_like(layer.weight)))
        priors["model.{}.bias".format(str(i))] = \
            Normal(Variable(torch.zeros_like(layer.bias)), Variable(torch.ones_like(layer.bias)))
    # print('model: ',priors)
    # exit(0)
    # print('model_shapes',layer.weight.shape, layer.bias.shape)

    # lift module parameters to random variables sampled from the priors --> Sample a NN from the priors!
    lifted_module = pyro.random_module("module", fc_network, priors)
    # sample a regressor (which also samples w and b)
    lifted_reg_model = lifted_module()

    with pyro.plate("map", len(x_train), subsample=data):
        x_data = data[:, :-1]
        y_data = data[:, -1]
        # run the regressor forward conditioned on inputs
        prediction_mean = lifted_reg_model(x_data).squeeze()
        pyro.sample("obs", Bernoulli(prediction_mean), obs=y_data.squeeze())
Example #28
    def pyro_model(self, input_data, output_data):

        inw_prior = pyro.distributions.Normal(
            loc=torch.zeros_like(self.net.net[0].weight,
                                 device=self.net.device),
            scale=torch.ones_like(self.net.net[0].weight))
        inb_prior = pyro.distributions.Normal(
            loc=torch.zeros_like(self.net.net[0].bias, device=self.net.device),
            scale=torch.ones_like(self.net.net[0].bias))

        outw_prior = pyro.distributions.Normal(
            loc=torch.zeros_like(self.net.net[-1].weight,
                                 device=self.net.device),
            scale=torch.ones_like(self.net.net[-1].weight))
        outb_prior = pyro.distributions.Normal(
            loc=torch.zeros_like(self.net.net[-1].bias,
                                 device=self.net.device),
            scale=torch.ones_like(self.net.net[-1].bias))

        # keys must match self.net.named_parameters() exactly; for an
        # nn.Sequential stored at self.net.net the names are dotted
        # ('net.0.weight', ...), so derive the output layer's index
        last = len(self.net.net) - 1
        priors = {
            'net.0.weight': inw_prior,
            'net.0.bias': inb_prior,
            'net.{}.weight'.format(last): outw_prior,
            'net.{}.bias'.format(last): outb_prior
        }
        # lift module parameters to random variables sampled from the priors
        lifted_module = pyro.random_module("module", self.net, priors)
        # sample a regressor (which also samples w and b)
        lifted_reg_model = lifted_module()

        lhat = lifted_reg_model(input_data)

        pyro.sample("obs",
                    pyro.distributions.Categorical(logits=lhat),
                    obs=output_data)
Example #29
File: BNN2.py | Project: CheChem/BNN
def model(x_data, y_data):
    fc1w_prior = Normal(loc=torch.zeros_like(net.fc1.weight),
                        scale=torch.ones_like(net.fc1.weight))
    fc1b_prior = Normal(loc=torch.zeros_like(net.fc1.bias),
                        scale=torch.ones_like(net.fc1.bias))

    outw_prior = Normal(loc=torch.zeros_like(net.out.weight),
                        scale=torch.ones_like(net.out.weight))
    outb_prior = Normal(loc=torch.zeros_like(net.out.bias),
                        scale=torch.ones_like(net.out.bias))

    priors = {
        'fc1.weight': fc1w_prior,
        'fc1.bias': fc1b_prior,
        'out.weight': outw_prior,
        'out.bias': outb_prior
    }
    # lift module parameters to random variables sampled from the priors
    lifted_module = pyro.random_module("module", net, priors)
    # sample a regressor (which also samples w and b)
    lifted_reg_model = lifted_module()

    lhat = lifted_reg_model(x_data)

    pyro.sample("obs", Categorical(logits=lhat), obs=y_data)
Example #30
def model(data):
    x_data = data[0]
    y_data = data[1]
    '''
    mu, sigma = Variable(torch.zeros(10, p)), Variable(10 * torch.ones(10, p))
    bias_mu, bias_sigma = Variable(torch.zeros(10)), Variable(10 * torch.ones(10))

    w_prior, b_prior = Normal(mu, sigma), Normal(bias_mu, bias_sigma)
    priors = {'linear.weight': w_prior, 'linear.bias': b_prior}
    '''

    w_prior1, b_prior1 = Normal(mu1, sigma1), Normal(bias_mu1, bias_sigma1)
    w_prior2, b_prior2 = Normal(mu2, sigma2), Normal(bias_mu2, bias_sigma2)
    w_prior3, b_prior3 = Normal(mu3, sigma3), Normal(bias_mu3, bias_sigma3)

    priors = {
        'linear.weight': w_prior1,
        'linear.bias': b_prior1,
        'linear2.weight': w_prior2,
        'linear2.bias': b_prior2,
        'linear3.weight': w_prior3,
        'linear3.bias': b_prior3
    }
    lifted_module = pyro.random_module("module", bnn, priors)
    lifted_bnn_model = lifted_module()

    # run regressor forward conditioned on data
    prediction = lifted_bnn_model(x_data).squeeze()
    pyro.sample("obs", Categorical(ps=prediction), obs=y_data)
Example #31
def probabilistic_model(inputs, labels):
    '''
    pyro.random_module() converts the network's weights and biases into
    random variables with the prior distributions given by
    dense_weight_prior and dense_bias_prior (normal distributions here);
    this "overloads" the parameters of the module with samples from the prior.
    '''
    resnet = ResNet18()
    dense_weight_prior = Normal(loc=torch.zeros_like(resnet.dense.weight),
                                scale=torch.ones_like(resnet.dense.weight))
    dense_bias_prior = Normal(loc=torch.zeros_like(resnet.dense.bias),
                              scale=torch.ones_like(resnet.dense.bias))

    priors = {
        'dense.weight': dense_weight_prior,
        'dense.bias': dense_bias_prior
    }

    lifted_module = pyro.random_module("module", resnet, priors)

    # This samples a neural network (which also samples weights and biases)
    # we wrap the nn model with random_module and sample and instance
    # of the nn
    sampled_nn_model = lifted_module()

    # runs the sampled nn on the input data
    lhat = F.log_softmax(sampled_nn_model(inputs), dim=-1)

    # this shows the output of the network will be categorical
    pyro.sample("obs", Categorical(logits=lhat), obs=labels)
Example #32
def test_random_module(nn_module):
    pyro.clear_param_store()
    nn_module = nn_module()
    p = torch.ones(2, 2)
    # Bernoulli(1.0) always samples 1, so every lifted parameter is all ones
    prior = dist.Bernoulli(p)
    lifted_mod = pyro.random_module("module", nn_module, prior)
    nn_module = lifted_mod()
    for name, parameter in nn_module.named_parameters():
        assert torch.equal(torch.ones(2, 2), parameter.data)
Example #33
    def test_random_module_prior_dict(self):
        pyro.clear_param_store()
        lifted_nn = pyro.random_module("name", self.model, prior=self.nn_prior)
        lifted_tr = poutine.trace(lifted_nn).get_trace()
        for key_name in lifted_tr.nodes.keys():
            name = pyro.params.user_param_name(key_name)
            if name in {'fc.weight', 'fc.bias'}:
                dist_name = name[3:]  # 'weight' or 'bias'
                assert dist_name + "_prior" == lifted_tr.nodes[key_name]['fn'].__name__
                assert lifted_tr.nodes[key_name]["type"] == "sample"
                assert not lifted_tr.nodes[key_name]["is_observed"]
Example #34
def guide(data):
    w_loc = data.new_tensor(torch.randn(1, p))
    w_log_sig = data.new_tensor(-3.0 * torch.ones(1, p) + 0.05 * torch.randn(1, p))
    b_loc = data.new_tensor(torch.randn(1))
    b_log_sig = data.new_tensor(-3.0 * torch.ones(1) + 0.05 * torch.randn(1))
    # register learnable params in the param store
    mw_param = pyro.param("guide_mean_weight", w_loc)
    sw_param = softplus(pyro.param("guide_log_scale_weight", w_log_sig))
    mb_param = pyro.param("guide_mean_bias", b_loc)
    sb_param = softplus(pyro.param("guide_log_scale_bias", b_log_sig))
    # gaussian guide distributions for w and b
    w_dist = Normal(mw_param, sw_param).independent(1)
    b_dist = Normal(mb_param, sb_param).independent(1)
    dists = {'linear.weight': w_dist, 'linear.bias': b_dist}
    # overloading the parameters in the module with random samples from the guide distributions
    lifted_module = pyro.random_module("module", regression_model, dists)
    # sample a regressor
    return lifted_module()
Example #35
def guide(data):
    w_mu = torch.randn(p, 1).type_as(data)
    w_log_sig = (-3.0 * torch.ones(p, 1) + 0.05 * torch.randn(p, 1)).type_as(data)
    b_mu = torch.randn(1).type_as(data)
    b_log_sig = (-3.0 * torch.ones(1) + 0.05 * torch.randn(1)).type_as(data)
    # register learnable params in the param store
    mw_param = pyro.param("guide_mean_weight", w_mu)
    sw_param = softplus(pyro.param("guide_log_sigma_weight", w_log_sig))
    mb_param = pyro.param("guide_mean_bias", b_mu)
    sb_param = softplus(pyro.param("guide_log_sigma_bias", b_log_sig))
    # gaussian guide distributions for w and b
    w_dist = Normal(mw_param, sw_param)
    b_dist = Normal(mb_param, sb_param)
    dists = {'linear.weight': w_dist, 'linear.bias': b_dist}
    # overloading the parameters in the module with random samples from the guide distributions
    lifted_module = pyro.random_module("module", regression_model, dists)
    # sample a regressor
    return lifted_module()
Example #36
def model(data):
    # Create unit normal priors over the parameters
    mu = torch.zeros(p, 1).type_as(data)
    sigma = torch.ones(p, 1).type_as(data)
    bias_mu = torch.zeros(1).type_as(data)
    bias_sigma = torch.ones(1).type_as(data)
    w_prior, b_prior = Normal(mu, sigma), Normal(bias_mu, bias_sigma)
    priors = {'linear.weight': w_prior, 'linear.bias': b_prior}
    # lift module parameters to random variables sampled from the priors
    lifted_module = pyro.random_module("module", regression_model, priors)
    # sample a regressor (which also samples w and b)
    lifted_reg_model = lifted_module()

    with pyro.iarange("map", N, subsample=data):
        x_data = data[:, :-1]
        y_data = data[:, -1]
        # run the regressor forward conditioned on inputs
        prediction_mean = lifted_reg_model(x_data).squeeze()
        pyro.sample("obs",
                    Normal(prediction_mean, torch.ones(data.size(0)).type_as(data)),
                    obs=y_data.squeeze())
Example #37
def model(data):
    # Create unit normal priors over the parameters
    loc = data.new_zeros(torch.Size((1, p)))
    scale = 2 * data.new_ones(torch.Size((1, p)))
    bias_loc = data.new_zeros(torch.Size((1,)))
    bias_scale = 2 * data.new_ones(torch.Size((1,)))
    w_prior = Normal(loc, scale).independent(1)
    b_prior = Normal(bias_loc, bias_scale).independent(1)
    priors = {'linear.weight': w_prior, 'linear.bias': b_prior}
    # lift module parameters to random variables sampled from the priors
    lifted_module = pyro.random_module("module", regression_model, priors)
    # sample a regressor (which also samples w and b)
    lifted_reg_model = lifted_module()

    with pyro.iarange("map", N, subsample=data):
        x_data = data[:, :-1]
        y_data = data[:, -1]
        # run the regressor forward conditioned on inputs
        prediction_mean = lifted_reg_model(x_data).squeeze(-1)
        pyro.sample("obs", Normal(prediction_mean, 1),
                    obs=y_data)
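For reference, `pyro.random_module` was deprecated in Pyro 1.x in favor of `pyro.nn.PyroModule`. A minimal sketch of the same lifted linear regression in the newer style (shapes and prior scales follow the example above):

import torch
import pyro
import pyro.distributions as dist
from pyro.nn import PyroModule, PyroSample

class BayesianRegression(PyroModule):
    def __init__(self, p):
        super().__init__()
        self.linear = PyroModule[torch.nn.Linear](p, 1)
        self.linear.weight = PyroSample(
            dist.Normal(0., 2.).expand([1, p]).to_event(2))
        self.linear.bias = PyroSample(
            dist.Normal(0., 2.).expand([1]).to_event(1))

    def forward(self, x, y=None):
        mean = self.linear(x).squeeze(-1)
        with pyro.plate("map", x.shape[0]):
            return pyro.sample("obs", dist.Normal(mean, 1.), obs=y)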