Example no. 1
0
def experiment(args):
    """Run one training experiment driven by command-line arguments.

    Parses CLI args into a params object, builds the data pipeline,
    network, loss and optimizer, runs the training loop, and (optionally)
    saves the trained model.

    Args:
        args: Raw command-line argument sequence, consumed by CliArgs.
    """
    cli_args = CliArgs(args)  # avoid shadowing the `args` parameter
    params = cli_args.get_params()
    pipe = DataPipe(params, mode="train", preprocess=False)
    train_loader, val_loader = pipe.build()

    net = Net(params, pipe.width)

    # Only pass class weights when they are configured: the original code
    # called torch.FloatTensor(None) when `loss_weights` was absent, which
    # raises TypeError.
    loss_cls = getattr(nn, params.loss)
    loss_wt = getattr(params, "loss_weights", None)
    if loss_wt is not None:
        loss_fn = loss_cls(torch.FloatTensor(loss_wt))
    else:
        loss_fn = loss_cls()

    # NOTE(review): optimizer is hard-coded to Adam even though params
    # appears to carry an optimizer name (see the generic version below).
    # optimizer = getattr(optim, params.optimizer)(net.parameters(), lr=params.learning_rate, momentum=params.momentum)
    optimizer = torch.optim.Adam(net.parameters(),
                                 lr=params.learning_rate,
                                 weight_decay=params.weight_decay)

    # Log roughly twice per epoch; max(1, ...) guards against a zero
    # interval (and modulo-by-zero in the loop) on small datasets.
    print_every = max(1, int(pipe.length / (2 * params.batch_size)))

    train = TrainLoop(net,
                      train_loader,
                      optimizer,
                      loss_fn,
                      val_loader=val_loader,
                      print_every=print_every)
    train.fit(params.num_epochs)

    # Persist the whole model only when an output path was configured.
    if hasattr(params, "out_path"):
        torch.save(net, params.out_path)
Example no. 2
0
# Training script: wires pre-loaded datasets into loaders, builds the
# network/loss/optimizer, and runs the training loop.
# train_dataset = torch.load('./data/preprocessed/train_dataset.pt', map_location=device)
# val_dataset = torch.load('./data/preprocessed/val_dataset.pt', map_location=device)
# test_dataset = torch.load('./data/preprocessed/test_dataset.pt', map_location=device)

# Peek at one sample to size the network's input layer from its
# feature vector (assumes samples are dicts with an 'x' tensor —
# TODO confirm against the dataset class).
sample = train_dataset[1]
train_loader = DataLoader(train_dataset,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=0)
# Validation is not shuffled so evaluation order stays deterministic.
val_loader = DataLoader(val_dataset,
                        batch_size=batch_size,
                        shuffle=False,
                        num_workers=0)

# 6 output classes; input width taken from the sample feature dimension.
net = Net(sample['x'].shape[0], 6, n_layers=n_layers, n_units=n_units)
net.to(device)
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=lr, momentum=momentum)

# Log roughly ten times per epoch; max(1, ...) prevents a zero interval
# (which would break modulo-based logging) when the dataset holds fewer
# than 10 * batch_size samples.
print_every = max(1, int(len(train_dataset) / (10 * batch_size)))
train = TrainLoop(net,
                  train_loader,
                  optimizer,
                  loss_fn,
                  device,
                  writer,
                  val_loader=val_loader,
                  print_every=print_every)
train.fit(max_epochs)