Example no. 1
0
def train_model(model,
                lr,
                batch_size,
                momentum,
                epochs,
                train_input,
                train_target,
                test_input,
                test_target,
                loss_log=False):
    """Train `model` with the project's SGD optimizer on an MSE criterion.

    Iterates `epochs` times over `train_input`/`train_target` in
    mini-batches of `batch_size` (the last batch may be smaller).

    Args:
        model: object exposing `forward`, `backward`, `zero_grad` and a
            `layers` attribute consumed by the project's SGD optimizer.
        lr: learning rate forwarded to SGD.
        batch_size: mini-batch size.
        momentum: momentum forwarded to SGD.
        epochs: number of full passes over the training set.
        train_input, train_target: training tensors; dim 0 indexes samples.
        test_input, test_target: held-out tensors used as the validation set.
        loss_log: if True, print the average mini-batch loss each epoch and
            record per-epoch train/validation loss and accuracy.

    Returns:
        Tuple of lists `(train_loss, validation_loss, train_acc,
        validation_acc)`; all four stay empty unless `loss_log` is True.
    """
    validation_loss = []
    train_loss = []
    validation_acc = []
    train_acc = []
    optimizer = SGD(model.layers, learning_rate=lr, momentum=momentum)
    criterion = MSE()

    n_samples = train_input.size(0)
    # Number of mini-batches per epoch, by ceiling division. The original
    # divided by the size of the *last* mini-batch, which over-counted
    # batches (and deflated the printed average loss) whenever
    # n_samples % batch_size != 0.
    batches = (n_samples + batch_size - 1) // batch_size

    for epoch in range(epochs):
        acc_loss = 0

        for b in range(0, n_samples, batch_size):
            # the final batch may be smaller than batch_size
            mini_batch_size = min(batch_size, n_samples - b)
            # model forward pass on the current mini-batch
            pred = model.forward(train_input.narrow(0, b, mini_batch_size))
            # compute the loss on this mini-batch
            loss = criterion.compute_loss(
                pred, train_target.narrow(0, b, mini_batch_size))
            acc_loss += loss.item()
            # gradient of the loss w.r.t. the model output
            grad_wrt_outputs = criterion.backward()
            # backpropagate through the model
            model.backward(grad_wrt_outputs)
            # update the model parameters
            optimizer.step()
            # reset gradient accumulators before the next mini-batch
            model.zero_grad()

        # keep track of loss / accuracy after each epoch when requested
        if loss_log:
            print(epoch, acc_loss / batches)
            pred_train = model.forward(train_input)
            pred_test = model.forward(test_input)
            train_loss.append(MSE().compute_loss(pred_train,
                                                 train_target).item())
            validation_loss.append(MSE().compute_loss(pred_test,
                                                      test_target).item())
            train_acc.append(computeAccuracy(model, train_input, train_target))
            validation_acc.append(
                computeAccuracy(model, test_input, test_target))
            model.zero_grad()
    return train_loss, validation_loss, train_acc, validation_acc
Example no. 2
0
if __name__ == '__main__':
    fcnn = FCNN()
    times = torch.linspace(-5, 5, 100)
    training_times = torch.linspace(-5, 5, 100).unsqueeze(-1)
    print(training_times.shape)
    training_values = g(training_times, True)
    values = g(times)
    optimiser = AdamHD(fcnn.parameters(), alpha_lr=1e-8)
    #optimiser = Adam(fcnn.parameters)
    epochs = 500

    for _ in range(epochs):
        predictions = fcnn(training_times)
        loss = MSE(training_values, predictions)
        loss.backward()
        optimiser.step()
        optimiser.zero_grad()
        print(f"MSE : {loss}")

    plotting = True

    if plotting:
        plt.figure()
        with torch.no_grad():
            for i in range(5):
                predictions = fcnn(torch.unsqueeze(times, -1),
                                   training=True).squeeze()
                if i == 0:
                    plt.plot(times,
                             predictions,