示例#1
0
from training import trainer

# Set up the trainer, which initializes the train/test sets.
# FIX: bind the instance to a new name (`tuner`) instead of rebinding
# `trainer`, which previously shadowed the imported `trainer` module.
tuner = trainer.Trainer()
model = ElasticNet(random_state=10)

# Hyperparameter grid for the search.
# NOTE(review): alpha=0 reduces ElasticNet to ordinary least squares and is
# discouraged by scikit-learn for numerical reasons; kept here so the search
# space matches the original run.
param_grid = [{
    'alpha': [0, 0.001, 0.01, 0.1, 1, 10],
    'fit_intercept': [False, True],
    'l1_ratio': [0.1, 0.5, 0.9],
    'max_iter': [100, 300, 750, 1000, 2000],
    'positive': [True, False],
    'selection': ['cyclic', 'random']
}]

# Run the grid search and pull out the winning estimator and its stats.
trained_grid = tuner.train(model, param_grid)
predictor = trained_grid.best_estimator_
best_params = trained_grid.best_params_
best_score = trained_grid.best_score_
print("The best parameters found are", best_params)
print("The best RMSE score was", best_score)

# Evaluate the best estimator on the held-out test set.
tuner.test(predictor)

# Persist the fitted model plus its parameters and score.
params_file = 'elasticnet.txt'
pickle_file = 'elasticnet.pkl'
tuner.save(predictor, best_params, best_score, params_file, pickle_file)
示例#2
0
    # Running metrics reported during training iterations.
    train_metrics = {
        'Loss': RunningAverage(
            output_transform=lambda x: x['cross_entropy']),
        'Accuracy': RunningAverage(
            Accuracy(output_transform=lambda x: (x['preds'], x['targets']))),
    }

    # Metrics accumulated over a full evaluation pass.
    eval_metrics = {
        'Loss': Average(output_transform=lambda x: x['cross_entropy']),
        'Accuracy': Accuracy(
            output_transform=lambda x: (x['preds'], x['targets'])),
    }

    train(args.run_name, model, train_set, test_set, train_step, eval_step,
          train_metrics, eval_metrics, args.n_iterations, args.batch_size)

    # Gather predictions and ground-truth labels over the whole test set.
    test_loader = DataLoader(test_set,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=4)
    predictions = []
    truths = []
    model.eval()
    for images, targets in test_loader:
        images = images.to(device)
        targets = targets.to(device)
        # Only the forward pass runs without gradient tracking, as before.
        with torch.no_grad():
            predictions.append(model(images).argmax(dim=1))
        truths.append(targets)
    predictions = torch.cat(predictions, dim=0)
    truths = torch.cat(truths, dim=0)
示例#3
0
        # Randomly choose how many views to use as context; bounds come
        # from the CLI (args.min_obs / args.max_obs), inclusive.
        n_obs = random.randint(args.min_obs, args.max_obs)
        # Context = first n_obs views; query = the last view.
        # NOTE: the tuple RHS is evaluated before rebinding, so x_q and v_q
        # are sliced from the FULL tensors, not the truncated ones.
        x, v, x_q, v_q = x[:, :n_obs], v[:, :n_obs], x[:, -1], v[:, -1]

        # Forward pass; the middle output is discarded. `kl` is presumably
        # a KL-divergence term — named so by the model, verify in GQN.
        x_mu, _, kl = model(x, v, x_q, v_q)
        return x_mu, x_q, kl

    def sample_func(model, batch):
        """Sample a reconstruction of the query view from random context.

        Splits `batch` into a randomly-sized context (first `n_ctx` views)
        and a query (the last view), then draws a sample from the model.
        Returns (sampled image, ground-truth query image, representation).
        """
        frames, views = batch

        # Randomly choose how many observations to condition on (inclusive).
        n_ctx = random.randint(args.min_obs, args.max_obs)

        # Slice context and query from the full tensors before truncation.
        ctx_frames = frames[:, :n_ctx]
        ctx_views = views[:, :n_ctx]
        query_frame = frames[:, -1]
        query_view = views[:, -1]

        x_mu, r = model.sample(ctx_frames, ctx_views, query_view)
        return x_mu, query_frame, r

    # Datasets: each sample provides args.max_obs context views plus one
    # extra view used as the query; validation uses a single batch.
    train_set = MultiViewDataset(n_views=args.max_obs + 1,
                                 n_samples=args.samples_per_epoch)
    val_set = MultiViewDataset(n_views=args.max_obs + 1,
                               n_samples=args.batch_size)

    # Assemble the model from CLI-configured dimensions.
    gqn_kwargs = dict(
        c_dim=3,  # 3 input channels (presumably RGB — confirm in GQN)
        v_dim=train_set.v_dim,
        r_dim=args.r_dim,
        h_dim=args.h_dim,
        z_dim=args.z_dim,
        l=args.l,
    )
    model = GQN(**gqn_kwargs)

    # Launch training with the learning-rate / sigma schedules from the CLI.
    train(args.run_name, forward_func, sample_func, model, train_set, val_set,
          args.n_epochs, args.batch_size, args.lr_i, args.lr_f, args.lr_n,
          args.sig_i, args.sig_f, args.sig_n)