Example #1
def trainer(model=model,
            batch_size=batch_size,
            train_size=train_size,
            n_epochs=n_epochs,
            lr=lr,
            weight_decay=weight_decay,
            adjust_learning_rate=adjust_learning_rate,
            amsgrad=amsgrad,
            betas0=betas0,
            betas1=betas1,
            use_cuda=use_cuda):
    print("Testing out: ")
    print("batch_size: ", batch_size)
    print("train_size: ", train_size)
    print("n_epochs: ", n_epochs)
    print("lr: ", lr)
    print("weight_decay: ", weight_decay)
    print("betas0: ", betas0)
    print("betas1: ", betas1)
    print("hidden_size: ", model.hidden_size)

    best_validate_accuracy = 0

    # the model is passed in as an argument, so only the optimizer is built here

    # specify optimizer; betas0/betas1 are Adam's exponential decay rates
    # for the first- and second-moment estimates (typically 0.9 and 0.999)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=lr,
                                 weight_decay=weight_decay,
                                 amsgrad=amsgrad,
                                 betas=(betas0, betas1))

    # prepare data loaders
    train_isingdataset = supervised_convnet.IsingDataset(
        X_train[:train_size], y_train[:train_size])
    train_loader = torch.utils.data.DataLoader(train_isingdataset,
                                               batch_size=batch_size,
                                               num_workers=num_workers,
                                               shuffle=True)

    validate_isingdataset = supervised_convnet.IsingDataset(
        X_train[-validate_size:], y_train[-validate_size:])
    validate_loader = torch.utils.data.DataLoader(validate_isingdataset,
                                                  batch_size=batch_size,
                                                  num_workers=num_workers,
                                                  shuffle=True)
    # supervised_convnet.print_model_parameters(model)

    global_step = 0
    first_epoch_validate_accuracy = 0
    for epoch in range(1, n_epochs + 1):
        print("epoch", epoch)
        # monitor training loss
        accuracy = 0.0
        train_loss = 0.0
        # adjust learning rate
        if adjust_learning_rate:
            supervised_convnet.adjust_learning_rate(optimizer, epoch, lr)

        ###################
        # train the model #
        ###################
        for batch_idx, (data, target) in enumerate(train_loader):
            data = data.unsqueeze(1).float()
            target = target.float()

            if use_cuda and torch.cuda.is_available():
                data = data.cuda()
                target = target.cuda()

            optimizer.zero_grad()
            output = model(data).squeeze(1)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            global_step += 1
            # update running training loss
            # count a prediction as correct when the output lands within
            # 0.5 of its +/-1 target
            accuracy += (torch.abs(target - output) <
                         0.5).sum().item() / batch_size
            train_loss += loss.item()  # * batch_size

        # print avg training statistics
        # train_loss = train_loss/len(train_loader)

        # Validation phase (run at the end of every epoch)
        validate_accuracy = 0
        if epoch % 1 == 0:
            for batch_idx, (data, target) in enumerate(validate_loader):
                data = data.unsqueeze(1).float()
                target = target.float()

                if use_cuda and torch.cuda.is_available():
                    data = data.cuda()
                    target = target.cuda()

                output = model(data).squeeze(1)
                validate_accuracy += (torch.abs(target - output) <
                                      0.5).sum().item()
            print('Epoch: {} \t Train Loss: {} \t Validate Accuracy: {}'.format(
                epoch,
                train_loss / len(train_loader),
                validate_accuracy / validate_size,
            ))

            # if validate_accuracy/validate_size > best_validate_accuracy:
            #     best_validate_accuracy = validate_accuracy/validate_size

            # supervised_convnet.print_model_gradient(model)

            # writer.add_scalar("validation_accuracy", validate_accuracy/len(train_loader))
            # print("trainLoss", train_loss/len(train_loader))
            # print("accuracy", accuracy/len(train_loader))
            # model_params = supervised_convnet.get_param_histogram(model)
            # model_grad = supervised_convnet.get_param_grad_histogram(model)
            # writer.add_scalar("training_accuracy", accuracy/len(train_loader), global_step)
            # # writer.add_scalar("validate_accuracy", validate_accuracy/len(validate_loader), global_step)
            # writer.add_scalar("parameter_mean", np.mean(model_params), global_step)
            # writer.add_scalar("parameter_grad_mean", np.mean(model_grad), global_step)
            # writer.add_scalar("parameter_std", np.std(model_params), global_step)
            # writer.add_scalar("parameter_grad_std", np.std(model_grad), global_step)
            # writer.add_histogram("parameter_histogram", model_params, global_step)
            # writer.add_histogram("parameter_grad_histogram", model_grad, global_step)

    print("model parameters:\n")
    supervised_convnet.print_model_parameters(model)

    # return last accuracy
    return validate_accuracy / validate_size, model.state_dict()
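
Note that `criterion`, `num_workers`, `validate_size`, `X_train`, and `y_train` are free variables resolved at module scope (compare Example #2). A minimal sketch of how `trainer` might be called; the hyperparameter values and the output path below are illustrative, not taken from the original source:

# Hypothetical invocation of trainer(); the values are illustrative only.
validate_accuracy, state_dict = trainer(model=model,
                                        batch_size=200,
                                        train_size=8000,
                                        n_epochs=100,
                                        lr=1e-3,
                                        weight_decay=1e-4,
                                        adjust_learning_rate=False,
                                        amsgrad=False,
                                        betas0=0.9,
                                        betas1=0.999,
                                        use_cuda=torch.cuda.is_available())
torch.save(state_dict, "trained_model.pt")  # hypothetical output path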
Example #2
        

    def forward(self, x):
        # apply the convolution followed by a tanh activation
        layer1 = torch.tanh(self.conv2d(x))
        return layer1
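
This `forward` belongs to the `SupervisedConvNet` class used throughout these examples (this variant returns only the convolution output, while Example #4's also returns intermediate activations); the class body itself is never shown. A minimal compatible sketch, with the layer shapes inferred from the constructor call in Example #3 and therefore an assumption:

# Minimal sketch of a compatible SupervisedConvNet (assumed from the
# constructor call in Example #3, not the original implementation).
import torch
import torch.nn as nn

class SupervisedConvNet(nn.Module):
    def __init__(self, filter_size, square_size, hidden_size):
        super().__init__()
        self.hidden_size = hidden_size
        # one convolution: 1 input channel -> hidden_size feature maps,
        # striding by filter_size so a 9x9 input yields a 3x3 output
        self.conv2d = nn.Conv2d(1, hidden_size, kernel_size=filter_size,
                                stride=filter_size)
        # square_size is the spatial size after the convolution
        self.linear1 = nn.Linear(hidden_size * square_size * square_size, 1)

    def forward(self, x):
        layer1 = torch.tanh(self.conv2d(x))
        reshape = layer1.view(layer1.size(0), -1)
        layer2 = torch.tanh(self.linear1(reshape))
        return layer2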

correlated_data = np.load("../ising81x81->27x27_using_w1_temp1_correlated.npy")[:10000,:,:]
# data = np.vstack((uncorrelated_data, correlated_data))
data = correlated_data
# label = np.hstack((-np.ones(10000), np.ones(10000)))
label = np.ones(10000)
print(data.shape)
# X_train, X_test, y_train, y_test = train_test_split(data, label, test_size=0, random_state=42)
# isingdataset = supervised_convnet.IsingDataset(X_train[:200], y_train[:200])
isingdataset = supervised_convnet.IsingDataset(data, label)
print(isingdataset.y)

# Create training and test dataloaders
num_workers = 0
# how many samples per batch to load
batch_size = 10000
# number of epochs to train the model
n_epochs = 1
# learning rate
lr = 0.0001
# adjust learning rate?
adjust_learning_rate = False

# specify loss function
criterion = nn.MSELoss()
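
`supervised_convnet.IsingDataset` is not shown in any of these snippets, but the code above reads `isingdataset.y` and passes the dataset to a DataLoader. A minimal sketch of a compatible Dataset wrapper; this is an assumption, not the original implementation:

# Minimal sketch of an IsingDataset-style wrapper (assumed, not the
# original implementation): stores spin configurations and their labels.
import numpy as np
import torch
from torch.utils.data import Dataset

class IsingDataset(Dataset):
    def __init__(self, X, y):
        self.X = torch.from_numpy(np.asarray(X))
        self.y = torch.from_numpy(np.asarray(y))

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        return self.X[idx], self.y[idx]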
Example #3
    # tail end of a logger construction (most likely a SummaryWriter call,
    # given the writer.add_scalar usage in Example #1); the start of the
    # call is truncated in this snippet
    comment="--batch size {}, training set {}, epoch {}, lr {}, "
            "weight decay {}, hidden_size {}".format(
                batch_size, train_size, n_epochs, lr, weight_decay, hidden_size))

# build model
model = supervised_convnet.SupervisedConvNet(filter_size=3,
                                             square_size=3,
                                             hidden_size=hidden_size)

# specify optimizer
optimizer = torch.optim.Adam(model.parameters(),
                             lr=lr,
                             weight_decay=weight_decay)

# prepare data loaders
train_isingdataset = supervised_convnet.IsingDataset(X_train[:train_size],
                                                     y_train[:train_size])
train_loader = torch.utils.data.DataLoader(train_isingdataset,
                                           batch_size=batch_size,
                                           num_workers=num_workers,
                                           shuffle=True)

validate_isingdataset = supervised_convnet.IsingDataset(
    X_train[-2000:], y_train[-2000:])
validate_loader = torch.utils.data.DataLoader(validate_isingdataset,
                                              batch_size=batch_size,
                                              num_workers=num_workers,
                                              shuffle=True)
supervised_convnet.print_model_parameters(model)

global_step = 0
for epoch in range(1, n_epochs + 1):
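    # snippet truncated here; an epoch body analogous to Example #1
    # (forward/backward pass with the MSE criterion, optimizer.step(),
    # and per-epoch validation) would follow
    pass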
Example #4
        layer2 = torch.tanh(self.linear1(reshape))
        return layer1, reshape, layer2

# load the uncorrelated data generated earlier (in generate_uncorrelated_data.py)
uncorrelated_data = np.load("../ising27x27->9x9_using_w2_temp1_uncorrelated.npy")
# correlated data, cropped to 9x9
correlated_data = np.load("../ising27x27->9x9_using_w2_temp1_correlated.npy")[:10000, :9, :9]
data = np.vstack((uncorrelated_data, correlated_data))
# data = (correlated_data)
label = np.hstack((-np.ones(10000), np.ones(10000)))
# label = np.hstack((np.ones(10000)))
# np.set_printoptions(precision=1)
# print(uncorrelated_data[:10])
# print(label)
X_train, X_test, y_train, y_test = train_test_split(data, label, test_size=0.33, random_state=42)
isingdataset = supervised_convnet.IsingDataset(X_train[:200], y_train[:200])
# isingdataset = supervised_convnet.IsingDataset(data, label)
# print(isingdataset.X[:10])
# print(isingdataset.y[:10])

# Create training and test dataloaders
num_workers = 0
# how many samples per batch to load
batch_size = 200
# number of epochs to train the model
n_epochs = 1000
# learning rate
lr = 0.01
# adjust learning rate?
adjust_learning_rate = False
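
With targets of -1 for uncorrelated and +1 for correlated samples and a tanh output, `nn.MSELoss()` drives the outputs toward the label signs, and the `|target - output| < 0.5` test from Example #1 counts a prediction as correct when the output lands within 0.5 of its target. A small self-contained check of that accuracy rule; the numbers are made up for illustration:

# Illustrative check of the |target - output| < 0.5 accuracy rule used in
# Example #1; the output values here are invented for demonstration.
import torch

target = torch.tensor([-1.0, -1.0, 1.0, 1.0])
output = torch.tensor([-0.8, 0.2, 0.9, 0.4])  # hypothetical tanh outputs

correct = (torch.abs(target - output) < 0.5).sum().item()
print(correct / len(target))  # 0.5: the 2nd and 4th predictions miss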