Example 1
import torch
import supervised_convnet


# NOTE: the default arguments below, and the free variables used in the body
# (criterion, X_train, y_train, num_workers, validate_size), are expected to
# be defined at module scope before this function is created.
def trainer(model=model,
            batch_size=batch_size,
            train_size=train_size,
            n_epochs=n_epochs,
            lr=lr,
            weight_decay=weight_decay,
            adjust_learning_rate=adjust_learning_rate,
            amsgrad=amsgrad,
            betas0=betas0,
            betas1=betas1,
            use_cuda=use_cuda):
    print("Testing out: ")
    print("batch_size: ", batch_size)
    print("train_size: ", train_size)
    print("n_epochs: ", n_epochs)
    print("lr: ", lr)
    print("weight_decay: ", weight_decay)
    print("betas0: ", betas0)
    print("betas1: ", betas1)
    print("hidden_size: ", model.hidden_size)

    best_validate_accuracy = 0

    # specify optimizer
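    # Adam hyperparameters: betas0/betas1 are the exponential decay rates for
    # the first- and second-moment estimates (PyTorch's defaults are 0.9 and
    # 0.999), and amsgrad=True enables the AMSGrad variant of Adam.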
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=lr,
                                 weight_decay=weight_decay,
                                 amsgrad=amsgrad,
                                 betas=(betas0, betas1))

    # prepare data loaders
    train_isingdataset = supervised_convnet.IsingDataset(
        X_train[:train_size], y_train[:train_size])
    train_loader = torch.utils.data.DataLoader(train_isingdataset,
                                               batch_size=batch_size,
                                               num_workers=num_workers,
                                               shuffle=True)

    validate_isingdataset = supervised_convnet.IsingDataset(
        X_train[-validate_size:], y_train[-validate_size:])
    validate_loader = torch.utils.data.DataLoader(validate_isingdataset,
                                                  batch_size=batch_size,
                                                  num_workers=num_workers,
                                                  shuffle=True)
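    # note: training reads the first train_size samples of X_train and
    # validation the last validate_size samples, so the two splits stay
    # disjoint only while train_size + validate_size <= len(X_train)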
    # supervised_convnet.print_model_parameters(model)

    global_step = 0
    for epoch in range(1, n_epochs + 1):
        print("epoch", epoch)
        # monitor training loss
        accuracy = 0.0
        train_loss = 0.0
        # adjust learning rate
        if adjust_learning_rate:
            supervised_convnet.adjust_learning_rate(optimizer, epoch, lr)

        ###################
        # train the model #
        ###################
        for batch_idx, (data, target) in enumerate(train_loader):
            # Variable is a no-op since PyTorch 0.4; plain float tensors suffice
            data = data.unsqueeze(1).float()
            target = target.float()

            if use_cuda and torch.cuda.is_available():
                data = data.cuda()
                target = target.cuda()

            optimizer.zero_grad()
            output = model(data).squeeze(1)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            global_step += 1
            # update running training statistics
            accuracy += (torch.abs(target - output) <
                         0.5).sum().item() / batch_size
            train_loss += loss.item()

        ######################
        # validate the model #
        ######################
        validate_accuracy = 0
        with torch.no_grad():  # no gradients needed for evaluation
            for batch_idx, (data, target) in enumerate(validate_loader):
                data = data.unsqueeze(1).float()
                target = target.float()

                if use_cuda and torch.cuda.is_available():
                    data = data.cuda()
                    target = target.cuda()

                output = model(data).squeeze(1)
                validate_accuracy += (torch.abs(target - output) <
                                      0.5).sum().item()
        print('Epoch: {} \t Train Loss: {} \t Validate Accuracy: {}'.format(
            epoch,
            train_loss / len(train_loader),
            validate_accuracy / validate_size,
        ))

        # Optional bookkeeping and TensorBoard logging, e.g.:
        # if validate_accuracy / validate_size > best_validate_accuracy:
        #     best_validate_accuracy = validate_accuracy / validate_size
        # writer.add_scalar("training_accuracy", accuracy / len(train_loader), global_step)
        # writer.add_scalar("validation_accuracy", validate_accuracy / validate_size, global_step)
        # writer.add_histogram("parameter_histogram", supervised_convnet.get_param_histogram(model), global_step)
        # writer.add_histogram("parameter_grad_histogram", supervised_convnet.get_param_grad_histogram(model), global_step)

    print("model parameters! \n")
    supervised_convnet.print_model_parameters(model)

    # return last accuracy
    return validate_accuracy / validate_size, model.state_dict()
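
A minimal usage sketch follows, assuming the module-level globals the function
closes over (model, criterion, X_train, y_train, num_workers, validate_size)
are already defined; the hyperparameter values here are illustrative
assumptions, not taken from the original script:

# hypothetical call; every value below is an illustrative assumption
val_acc, best_state = trainer(batch_size=100,
                              train_size=8000,
                              n_epochs=20,
                              lr=1e-3,
                              weight_decay=1e-4,
                              adjust_learning_rate=False,
                              amsgrad=False,
                              betas0=0.9,
                              betas1=0.999,
                              use_cuda=torch.cuda.is_available())
print("final validation accuracy:", val_acc)
torch.save(best_state, "trained_model.pt")  # persist the returned state_dict

Returning the state_dict instead of the model object keeps the function
convenient for hyperparameter sweeps, where each trial can save or discard its
weights independently.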
Example 2
# (fragment of a module-level training script: model, optimizer, criterion,
#  train_loader, X_train, y_train, batch_size, num_workers, n_epochs, lr and
#  adjust_learning_rate are defined earlier in the original file)
validate_isingdataset = supervised_convnet.IsingDataset(
    X_train[-2000:], y_train[-2000:])
validate_loader = torch.utils.data.DataLoader(validate_isingdataset,
                                              batch_size=batch_size,
                                              num_workers=num_workers,
                                              shuffle=True)
supervised_convnet.print_model_parameters(model)

global_step = 0
for epoch in range(1, n_epochs + 1):
    print("epoch", epoch)
    # monitor training loss
    accuracy = 0.0
    train_loss = 0.0
    # adjust learning rate
    if adjust_learning_rate:
        supervised_convnet.adjust_learning_rate(optimizer, epoch, lr)

    ###################
    # train the model #
    ###################
    for batch_idx, (data, target) in enumerate(train_loader):
        data = data.unsqueeze(1).float()
        target = target.float()
        optimizer.zero_grad()
        output = model(data)[-1].view(-1)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        global_step += 1
        # update running training statistics
        accuracy += (torch.abs(target - output) <
                     0.5).sum().item() / batch_size
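
Both examples depend on supervised_convnet.IsingDataset, whose definition is
not shown. From how it is constructed (IsingDataset(X, y)) and consumed (the
DataLoader yields (data, target) pairs), a minimal compatible sketch could
look like this; it is an assumption for illustration, not the original class:

import torch

class IsingDataset(torch.utils.data.Dataset):
    """Minimal (data, target) dataset sketch; assumed, not the original."""

    def __init__(self, X, y):
        # X: spin configurations, y: labels; accepts numpy arrays or tensors
        self.X = torch.as_tensor(X)
        self.y = torch.as_tensor(y)

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        # the DataLoader batches these into the (data, target) pairs that
        # the training loops above unsqueeze and cast to float
        return self.X[idx], self.y[idx]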