Example #1
File: test_multi.py  Project: zyxue/pyro
def test_optimizers(factory):
    optim = factory()

    def model(loc, cov):
        x = pyro.param("x", torch.randn(2))
        y = pyro.param("y", torch.randn(3, 2))
        z = pyro.param("z",
                       torch.randn(4, 2).abs(),
                       constraint=constraints.greater_than(-1))
        pyro.sample("obs_x", dist.MultivariateNormal(loc, cov), obs=x)
        with pyro.plate("y_plate", 3):
            pyro.sample("obs_y", dist.MultivariateNormal(loc, cov), obs=y)
        with pyro.plate("z_plate", 4):
            pyro.sample("obs_z", dist.MultivariateNormal(loc, cov), obs=z)

    loc = torch.tensor([-0.5, 0.5])
    cov = torch.tensor([[1.0, 0.09], [0.09, 0.1]])
    for step in range(200):
        tr = poutine.trace(model).get_trace(loc, cov)
        loss = -tr.log_prob_sum()
        params = {
            name: site['value'].unconstrained()
            for name, site in tr.nodes.items() if site['type'] == 'param'
        }
        optim.step(loss, params)

    for name in ["x", "y", "z"]:
        actual = pyro.param(name)
        expected = loc.expand(actual.shape)
        assert_equal(actual,
                     expected,
                     prec=1e-2,
                     msg='{} incorrect: {} vs {}'.format(
                         name, actual, expected))
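The `factory` argument is expected to return one of Pyro's multi-optimizers, whose `step(loss, params)` signature takes the loss tensor and a dict of unconstrained parameters. As a minimal sketch of a compatible factory (assuming `pyro.optim.multi.PyroMultiOptimizer`, which wraps an ordinary Pyro optimizer):

import pyro.optim
from pyro.optim.multi import PyroMultiOptimizer

def adam_factory():
    # wraps per-parameter Adam so that step(loss, params) differentiates
    # the loss and updates every parameter in the dict
    return PyroMultiOptimizer(pyro.optim.Adam({"lr": 0.05}))

test_optimizers(adam_factory)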
Example #2
def test_optimizers(factory):
    optim = factory()

    def model(loc, cov):
        x = pyro.param("x", torch.randn(2))
        y = pyro.param("y", torch.randn(3, 2))
        z = pyro.param("z", torch.randn(4, 2).abs(), constraint=constraints.greater_than(-1))
        pyro.sample("obs_x", dist.MultivariateNormal(loc, cov), obs=x)
        with pyro.iarange("y_iarange", 3):
            pyro.sample("obs_y", dist.MultivariateNormal(loc, cov), obs=y)
        with pyro.iarange("z_iarange", 4):
            pyro.sample("obs_z", dist.MultivariateNormal(loc, cov), obs=z)

    loc = torch.tensor([-0.5, 0.5])
    cov = torch.tensor([[1.0, 0.09], [0.09, 0.1]])
    for step in range(100):
        tr = poutine.trace(model).get_trace(loc, cov)
        loss = -tr.log_prob_sum()
        params = {name: pyro.param(name).unconstrained() for name in ["x", "y", "z"]}
        optim.step(loss, params)

    for name in ["x", "y", "z"]:
        actual = pyro.param(name)
        expected = loc.expand(actual.shape)
        assert_equal(actual, expected, prec=1e-2,
                     msg='{} incorrect: {} vs {}'.format(name, actual, expected))
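Note: `pyro.iarange` is the pre-0.3 spelling of `pyro.plate`; apart from that rename, the step count, and how the param dict is built, this example matches Example #1.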
Example #3
def main(x_data, y_data, num_iterations, model, is_logit=True):

    if is_logit:
        optim = torch.optim.SGD(model.parameters(),
                                lr=0.01)  # lr=0.01 reaches ~70% accuracy; lr=0.001 only ~50%
        criterion = torch.nn.CrossEntropyLoss()  # computes softmax internally
    else:
        optim = torch.optim.Adam(model.parameters(), lr=0.01)
        # criterion = torch.nn.NLLLoss()  # size_average=True
        # criterion = torch.nn.MSELoss(reduction='sum')

    for j in range(num_iterations):
        # run the model forward on the data
        if is_logit:
            y_pred = model(x_data).squeeze(-1)
            # calculate the cross-entropy loss on the raw logits
            loss = criterion(y_pred, y_data)  # (outputs, labels)
        else:
            y_pred = model(x_data)
            # loss = criterion(input=y_pred, target=y_data)
            loss = torch.nn.functional.binary_cross_entropy(input=y_pred.squeeze(-1),
                                                            target=y_data)

        # initialize gradients to zero
        optim.zero_grad()
        # backpropagate
        loss.backward()
        # take a gradient step
        optim.step()
        if (j + 1) % 50 == 0:
            print("\n [iteration %04d] loss: %.4f  " % (j + 1, loss.item()))
            if is_logit:
                outputs = model(x_data)
                _, predicted = torch.max(outputs.data, 1)
                correct = (predicted == y_data).sum().item()
                print('Accuracy of logistic model: {} %'.format(
                    100 * correct / y_data.size(0)))
            else:
                y_pred = model(x_data).squeeze(-1)
                error_mean = ((y_pred > 0.5).float() -
                              y_data.float()).abs().mean()
                print('Accuracy of regression model: {} %'.format(100. - 100. *
                                                                  error_mean))

    # Inspect learned parameters
    print("Learned parameters:")
    for name, param in model.named_parameters():
        print(name, param.data.numpy())
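The model and data are free variables here. A hypothetical call that satisfies the `is_logit=True` branch (raw logits for `CrossEntropyLoss`, integer class labels) might be:

import torch

model = torch.nn.Linear(2, 2)          # raw logits; CrossEntropyLoss applies softmax itself
x_data = torch.randn(100, 2)
y_data = torch.randint(0, 2, (100,))   # class indices, as CrossEntropyLoss expects
main(x_data, y_data, num_iterations=200, model=model, is_logit=True)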
Example #4
def main():
    x_data = data[:, :-1]
    y_data = data[:, -1]
    for j in range(num_iterations):
        # run the model forward on the data
        y_pred = net(x_data).squeeze(-1)
        # calculate the mse loss
        loss = loss_fn(y_pred, y_data)
        # initialize gradients to zero
        optim.zero_grad()
        # backpropagate
        loss.backward()
        # take a gradient step
        optim.step()
        if (j + 1) % 50 == 0:
            print("[iteration %04d] loss: %.4f" % (j + 1, loss.item()))
    # Inspect learned parameters
    print("Learned parameters:")
    for name, param in net.named_parameters():
        print(name, param.data.numpy())
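This loop references `data`, `net`, `loss_fn`, `optim`, and `num_iterations` defined elsewhere in the source file; a hypothetical setup consistent with it:

import torch

data = torch.randn(100, 3)                          # last column is the regression target
net = torch.nn.Linear(2, 1)
loss_fn = torch.nn.MSELoss()
optim = torch.optim.SGD(net.parameters(), lr=0.01)
num_iterations = 500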
Example #5
def main():

    # split each supervised row of `train` into n_lag lagged inputs (X)
    # and the remaining columns as targets (y)
    X, y = train[:, 0:n_lag], train[:, n_lag:]
    X = X.reshape(X.shape[0], 1, X.shape[1])
    x_data, y_data = np.array(X), np.array(y)
    x_data = torch.tensor(x_data, dtype=torch.float)
    y_data = torch.tensor(y_data, dtype=torch.float)
    print(x_data.shape)
    print(y_data.shape)

    for j in range(num_iterations):
        y_pred = regression_model(x_data).squeeze(-1)
        loss = loss_fn(y_pred, y_data)
        optim.zero_grad()
        loss.backward()
        optim.step()
        if (j + 1) % 100 == 0:
            print("[iteration %04d] loss: %.4f" % (j + 1, loss.item()))
    print("Learned parameters:")
    for name, param in regression_model.named_parameters():
        print(name, param.data.numpy())
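`train` and `n_lag` are assumed to form a supervised lag matrix: each row holds `n_lag` past values followed by the future values to predict. A hypothetical construction:

import numpy as np

n_lag, n_seq = 3, 1
series = np.arange(50, dtype=np.float32)
train = np.stack([series[i:i + n_lag + n_seq]
                  for i in range(len(series) - n_lag - n_seq + 1)])
# train.shape == (47, 4): columns 0..2 are the lagged inputs, column 3 the target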
Example #6
def __call__(cls, i, loss, locs):
    optim = locs['svi'].optim
    if isinstance(optim, pyro.optim.PyroLRScheduler):
        optim.step(*cls._get_args(i, loss, locs))
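In this hook, `optim.step(...)` advances a learning-rate schedule rather than applying gradients. A sketch of constructing such a scheduler, assuming Pyro's `pyro.optim.StepLR` wrapper (which returns a `PyroLRScheduler`):

import torch
import pyro.optim

scheduler = pyro.optim.StepLR({
    "optimizer": torch.optim.SGD,
    "optim_args": {"lr": 0.1},
    "step_size": 100,   # decay the learning rate every 100 steps
    "gamma": 0.5,       # multiply lr by 0.5 at each decay
})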