Example #1
0
def linear_regression(slope, intercept, num_points, optimizer_str):
    """Fit a single-layer linear model to synthetic linear data.

    Trains with the classic fixed-point iteration using the optimizer
    named by ``optimizer_str`` and returns the learned slope, learned
    intercept, and the validation loss history.
    """
    # Fixed hyperparameters for this regression fit.
    use_bias = True
    learning_rate = 0.01
    weight_decay = 0.0
    batch_size = 1
    epochs = 1000
    threshold = 1e-8

    input_dim, output_dim, dataset = linear_data(slope, intercept, num_points)

    # The same synthetic dataset backs both loaders.
    loader_train = torch.utils.data.DataLoader(dataset, batch_size)
    loader_valid = torch.utils.data.DataLoader(dataset, batch_size)

    model = LinearRegression(input_dim, output_dim, use_bias)

    trainer = FixedPointIteration(loader_train, loader_valid,
                                  learning_rate, weight_decay)
    trainer.import_model(model)
    trainer.set_loss_function('mse')
    trainer.set_optimizer(optimizer_str)
    train_history, valid_history, _ = trainer.train(epochs, threshold,
                                                    batch_size)

    # parameters() yields the weight first, then the bias term.
    params = list(model.get_model().parameters())
    return params[0].item(), params[1].item(), valid_history
Example #2
0
def test_multiscale_paraboloid(dim=100,
                               condition_number=1.e3,
                               optimizer='sgd',
                               lr=1.e-4,
                               w_decay=0.0,
                               epochs=10000,
                               threshold=1.e-8):
    """Minimize an ill-conditioned paraboloid with the classic optimizer.

    Returns the largest absolute weight after training (should approach
    zero at the minimum) together with the validation loss history.
    """
    # One batch covers the whole (dummy, all-zero) dataset.
    batch_size = dim
    dummy = torch.utils.data.TensorDataset(torch.zeros(dim),
                                           torch.zeros(dim))
    loader_train = torch.utils.data.DataLoader(dummy, batch_size)
    loader_valid = torch.utils.data.DataLoader(dummy, batch_size)

    model = Paraboloid(dim, condition_number=condition_number)

    trainer = FixedPointIteration(loader_train, loader_valid, lr, w_decay)
    trainer.import_model(model)
    trainer.set_loss_function('mse')
    trainer.set_optimizer(optimizer)
    _, valid_history, _ = trainer.train(epochs, threshold, batch_size)

    final_weights = model.get_model().get_weight()
    return final_weights.abs().max(), valid_history
Example #3
0
def test_rosenbrock(dim=2,
                    optimizer='sgd',
                    lr=1.e-4,
                    epochs=10000,
                    threshold=1.e-8,
                    initial_guess=None):
    """Minimize the Rosenbrock function via the classic optimizer.

    The Rosenbrock minimum sits at all-ones, so the first return value
    is the largest absolute deviation of the weights from 1.0; the
    second is the validation loss history.
    """
    w_decay = 0.0
    batch_size = 1

    # The dataloaders are placeholders: the model itself defines the
    # objective, so a single zero sample suffices.
    placeholder = torch.utils.data.TensorDataset(torch.zeros(1),
                                                 torch.zeros(1))
    loader_train = torch.utils.data.DataLoader(placeholder, batch_size)
    loader_valid = torch.utils.data.DataLoader(placeholder, batch_size)

    model = Rosenbrock(dim, initial_guess=initial_guess)

    trainer = FixedPointIteration(loader_train, loader_valid, lr, w_decay)
    trainer.import_model(model)
    trainer.set_loss_function('linear')
    trainer.set_optimizer(optimizer)
    _, valid_history, _ = trainer.train(epochs, threshold, batch_size)

    final_weights = model.get_model().get_weight()
    return (final_weights - 1.0).abs().max(), valid_history
Example #4
0
def neural_network_linear_regression(slope, intercept, num_points,
                                     optimizer_str):
    """Fit an MLP (single hidden neuron, no activation) to linear data.

    Trains with the classic fixed-point iteration and returns the model
    parameters along with the validation loss history.
    """
    # Network configuration: one linear neuron, no nonlinearity.
    num_neurons_list = [1]
    use_bias = True
    classification_problem = False
    activation = None

    # Training hyperparameters.
    weight_decay = 0.0
    learning_rate = 1e-3
    batch_size = 1
    epochs = 10000
    threshold = 1e-8

    input_dim, output_dim, dataset = linear_data(slope, intercept, num_points)

    # Training and validation share the synthetic dataset.
    loader_train = torch.utils.data.DataLoader(dataset, batch_size)
    loader_valid = torch.utils.data.DataLoader(dataset, batch_size)

    model = MLP(input_dim, output_dim, num_neurons_list, use_bias,
                activation, classification_problem)

    trainer = FixedPointIteration(loader_train, loader_valid,
                                  learning_rate, weight_decay)
    trainer.import_model(model)
    trainer.set_loss_function('mse')
    trainer.set_optimizer(optimizer_str)
    train_history, valid_history, _ = trainer.train(epochs, threshold,
                                                    batch_size)

    params = list(model.get_model().parameters())
    return params, valid_history
Example #5
0
    for iteration in range(0, number_runs):

        torch.manual_seed(iteration)

        model_classic = CNN2D(input_dim,output_dim,num_neurons_list,use_bias,activation,classification_problem,available_device)

        model_anderson = deepcopy(model_classic)

        # For classification problems, the loss function is the negative log-likelihood (nll)
        # For regression problems, the loss function is the mean squared error (mse)
        loss_function_name = 'nll' if classification_problem else 'mse'

        # Define the standard optimizer which is used as point of reference to assess the improvement provided by the
        # acceleration
        optimizer_classic = FixedPointIteration(training_dataloader,validation_dataloader,learning_rate,weight_decay,verbose)

        optimizer_classic.import_model(model_classic)
        optimizer_classic.set_loss_function(loss_function_name)
        optimizer_classic.set_optimizer(optimizer_name)

        (
            training_classic_loss_history,
            validation_classic_loss_history,
            validation_classic_accuracy_history,
        ) = optimizer_classic.train(epochs, threshold, batch_size)

        optimizer_anderson = DeterministicAcceleration(
            training_dataloader,
            validation_dataloader,
            acceleration,