예제 #1
0
def trainAndEvaluate(model, path_to_file):
    """Train *model* on the data at *path_to_file*, then print test losses.

    :param model: model to train on
    :type model: LogisticRegressionModel or MultinomialRegressionModel
    :param path_to_file: Path to data
    :type path_to_file: String
    :return: None
    """
    # NOTE(review): ``epochs`` is read from an enclosing/global scope — confirm
    # it is defined before this function is called.
    train_loader, val_loader, test_loader = get_data_loaders(path_to_file)

    optimizer = optim.Adam(model.parameters(), lr=0.01)

    model.train()
    print('Training model...')
    for _epoch in range(epochs):
        for batch_x, batch_y in train_loader:
            batch_preds = model(batch_x)
            batch_loss = model.loss_fn(batch_preds, batch_y)
            batch_loss.backward()
            optimizer.step()
            optimizer.zero_grad()

    print('Training Completed')

    model.eval()
    print('Evaluating model...')
    for batch_x, batch_y in test_loader:
        loss = model.loss_fn(model(batch_x), batch_y)
        print(f"Loss: {loss.detach()}")
예제 #2
0
def main():
    """Entry point: load args, build data/model/optimizer, train, test, plot.

    :raises ValueError: if ``args.optimizer`` is not ``'sgdm'`` or ``'adam'``.
    :return: None
    """
    args = load_args()
    init_random_seeds(args.seed)

    # Export args as JSON, then read them back so the printed copy reflects
    # exactly what was written to disk.
    json_path = export_args(args)  # write to file
    json_args = load_json_args(json_path)  # read from file
    fprint("RUNNING ARGS:\n{}\n".format(json.dumps(json_args, indent=4)), args)

    fprint("Python Version: {}".format(platform.python_version()), args)
    fprint("PyTorch Version: {}".format(torch.__version__), args)
    fprint(
        "Torchvision Version: {}".format(
            torchvision.__version__.split('a')[0]), args)

    # Get data loaders
    data_loaders = get_data_loaders(args)

    # Initialize model
    model, params_to_update = initialize_model(is_pretrained=args.pretrained)

    fprint("\nARCHITECTURE:\n{}\n".format(model), args)

    # Log which parameters will actually be updated by the optimizer.
    for name, param in model.named_parameters():
        fprint("{:25} requires_grad = {}".format(name, param.requires_grad),
               args)

    # Send the model to CPU or GPU
    model = model.to(torch.device(args.device))

    # Setup the optimizer
    if args.optimizer == 'sgdm':
        optimizer = optim.SGD(params_to_update,
                              lr=args.lr,
                              weight_decay=args.weight_decay,
                              momentum=0.9)
    elif args.optimizer == 'adam':
        # NOTE(review): 'adam' maps to AdamW (decoupled weight decay) —
        # confirm this is intentional.
        optimizer = optim.AdamW(params_to_update,
                                lr=args.lr,
                                weight_decay=args.weight_decay)
    else:
        # Previously an unknown optimizer left ``optimizer`` unbound and the
        # run crashed later with a NameError; fail fast with a clear message.
        raise ValueError(
            "Unknown optimizer {!r}; expected 'sgdm' or 'adam'".format(
                args.optimizer))

    # Setup the loss function
    criterion = torch.nn.CrossEntropyLoss()

    # Train and evaluate
    model, optimizer = train_model(model, data_loaders, criterion, optimizer,
                                   args)

    # Test
    test_model(model, data_loaders, args)

    # Generate plots:
    generate_plots(json_path)
예제 #3
0
def trainAndEvaluate(model, path_to_pkl, path_to_label, epochs):
    """Train *model*, then report its accuracy on the test split.

    :param model: model to train on
    :type model: CNN Model or NN Model
    :param path_to_pkl: Path to the pickled input data
    :type path_to_pkl: String
    :param path_to_label: Path to the label data
    :type path_to_label: String
    :param epochs: number of passes over the training set
    :type epochs: int
    :return: None
    """
    train_loader, val_loader, test_loader = get_data_loaders(
        path_to_pkl, path_to_label)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    model.train()
    print('Training model...')
    for t in range(epochs):
        for i, (input_t, y) in enumerate(train_loader):
            preds = model(input_t)
            loss = criterion(preds, y.long())
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

    print('Training Completed')

    model.eval()
    print('Evaluating model...')
    correct = 0
    total = 0  # count samples instead of assuming a fixed test-set size
    for batch_index, (input_t, y) in enumerate(test_loader):
        preds = model(input_t)
        for i in range(len(preds)):
            y_hat = np.argmax(preds[i].detach().numpy())
            if y_hat == y[i]:
                correct += 1
        total += len(y)

    # BUG FIX: the denominator was hard-coded to 12000, which silently
    # reported a wrong accuracy for any other test-set size.
    print("Accuracy={}".format(correct / total if total else 0.0))
예제 #4
0
# Run configuration.
max_batch_size = 256

n_epochs = 10

loud = True

# Make sure every rank reaches this point before setup continues.
MPI.COMM_WORLD.Barrier()

parameters = list(lenet5_distributed.parameters())
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(parameters, lr=1e-3)

# Only rank 0 gets real data loaders; every other rank gets dummy ones.
training_loader, test_loader = get_data_loaders(max_batch_size,
                                                download=False,
                                                dummy=P_base.rank != 0)

# Adapted from https://github.com/activatedgeek/LeNet-5/blob/master/run.py
loss_list, batch_list = [], []

tt = MPI.Wtime()
# NOTE(review): this span is broken as-is. The inner ``for`` below has no
# statement body (only comments follow it), which is a SyntaxError, and the
# code from the ``get_data_loaders("data/DS2.csv", ...)`` line onward appears
# to belong to a different snippet — an example separator seems to be missing
# between the two. Do not run this block without restoring the loop body.
for epoch in range(n_epochs):
    tte = MPI.Wtime()  # per-epoch wall-clock start (MPI timer)

    for i, (images, labels) in enumerate(training_loader):
    ## Don't worry about loss.backward() for now. Think of it as calculating gradients.

    ## And voila, your model is trained. Now, use something similar to run your model on
    ## the validation and test data loaders:
    # Eg:
    # model.eval()
    # for batch_index, (input_t, y) in enumerate(val/test_loader):
    #
    #   preds = Feed the input to the model
    #
    #   loss = loss_fn(preds, y)
    #
    ## You don't need to do loss.backward() or optimizer.step() here since you are no
    ## longer training.

    # NOTE(review): everything below looks like a separate linear-regression
    # training script that was fused into this loop by the missing separator.
    train_loader, val_loader, test_loader = get_data_loaders(
        "data/DS2.csv", transform_fn=data_transform)

    model = LinearRegressionModel(2, mae_loss)

    optimizer = optim.Adam(model.parameters(), lr=0.01)

    model.train()
    for t in range(100):

        for i, (input_t, y) in enumerate(train_loader):
            preds = model(input_t)
            loss = model.loss_f(
                preds, y)  # You might have to change the shape of things here.
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
예제 #6
0
if __name__ == '__main__':
    # NOTE(review): this snippet is truncated at an example boundary — the
    # training loop below only runs forward passes and zero_grad; the
    # loss/backward/step lines are not visible here.

    # Configure these
    path_to_csv = 'window.csv'
    transform_fn = data_transform
    train_test_split = [0.7, 0.3]
    batch_size = 32
    lr = 0.001
    loss_fn = nn.MSELoss()  # NOTE(review): unused in the visible span
    num_param = 3  # can be obtained from loaders
    TOTAL_TIME_STEPS = 100

    # Build train/test loaders from the CSV with the configured split.
    train_loader, test_loader =\
        dl.get_data_loaders(
            path_to_csv,
            transform_fn=transform_fn,
            train_test_split=train_test_split,
            batch_size=batch_size)

    model = Model(num_param)
    optimizer = optim.Adam(model.parameters(), lr=lr)

    model.train()
    for t in range(TOTAL_TIME_STEPS):
        for batch_index, (input_t, y) in enumerate(train_loader):

            optimizer.zero_grad()
            # print(model.thetas)
            preds = model(input_t)
            # print('HELLLLOOOOOO')
            # print(preds)
예제 #7
0
    :return: torch.Tensor
    :rtype: torch.Tensor
    """
    ## TODO 4: Implement L1 loss. Use PyTorch operations.
    # Use PyTorch operations to return a PyTorch tensor.
    # BUG(review): ``nn.L1loss`` is misspelled — torch.nn exposes ``L1Loss``
    # (capital second L), so this line raises AttributeError at runtime.
    loss = nn.L1loss()
    return loss(output, target)


if __name__ == "__main__":
    # NOTE(review): this snippet is truncated at an example boundary — the
    # final inner ``for`` has no body in the visible span.
    ## Here you will want to create the relevant dataloaders for the csv files for which
    ## you think you should use Linear Regression. The syntax for doing this is something like:
    # Eg:
    # NOTE(review): train_val_test=[0.8, 0.2, 0.2] sums to 1.2 — confirm the
    # loader's expected split convention before relying on these fractions.
    train_loader, val_loader, test_loader =\
      get_data_loaders('data/DS1.csv',
                       transform_fn=None, #data_transform  # Can also pass in None here
                       train_val_test=[0.8, 0.2, 0.2],
                       batch_size=32)

    ## Now you will want to initialise your Linear Regression model, using something like
    # Eg:
    model = LinearRegressionModel(2)

    ## Then, you will want to define your optimizer (the thing that updates your model weights)
    # Eg:
    optimizer = optim.SGD(model.parameters(), lr=0.01)

    ## Now, you can start your training loop:
    # Eg:
    model.train()
    for t in range(100):
        for batch_index, (input_t, y) in enumerate(train_loader):
예제 #8
0
                    help='path to saved model if loading.')

opt = parser.parse_args()

# Instantiate the model (optionally on GPU, optionally from a checkpoint):
net = MLP(input_size=784, width=opt.netWidth)
if opt.cuda:
    net = net.cuda()
if opt.model_path != '':
    # strict=False tolerates missing/unexpected keys in the saved state dict.
    net.load_state_dict(torch.load(opt.model_path), strict=False)

# Instantiate optimizer:
optimizer = get_optimizer(net=net, lr=opt.lr, opt_str=opt.optim)

# Getting data loaders:
train_loader, test_loader = get_data_loaders(BS=opt.batchSize)

# Train model only when no pre-trained checkpoint path was supplied:
if opt.model_path == '':
    net, stats = train(net, opt.epochs, opt.cuda, optimizer, train_loader,
                       test_loader)
# net, stats = train(torch.nn.Sequential(AddNoise(mean=0,std=np.sqrt(0.25)),net), opt.epochs, opt.cuda, optimizer, train_loader, test_loader)

# Gaussian noise moments:
# ``var`` sweeps variances from 0 to max_var inclusive in N_var steps (the
# stop is max_var plus one step so np.arange includes the endpoint).
max_var = opt.max_var
N_var = 10.0
var = np.arange(0, max_var + max_var / N_var, max_var / N_var)
mean = 100  # NOTE(review): mean=100 looks large for a noise mean — confirm units.

t1 = time.time()