def ex_1_1_c(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 c)

    Trains an MLP regressor for several hidden-layer widths, repeating each
    configuration with 10 random seeds, and plots train/test MSE vs. the
    number of hidden neurons. Remember to set alpha to 0 when initializing
    the model.

    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return: None (produces a plot as a side effect)
    """
    neuron_numbers = [1, 2, 3, 4, 6, 8, 12, 20, 40]
    n_seeds = 10
    mses_test = np.zeros((len(neuron_numbers), n_seeds))
    mses_train = np.zeros((len(neuron_numbers), n_seeds))

    # enumerate gives the row index directly, avoiding the original's
    # repeated O(n) neuron_numbers.index(n) lookup on every iteration
    for row, n in enumerate(neuron_numbers):
        for i in range(n_seeds):
            random_seed = randint(1, 1000)
            regressor = MLPRegressor(hidden_layer_sizes=(n, ),
                                     solver="lbfgs",
                                     activation="logistic",
                                     alpha=0.0,
                                     max_iter=200,
                                     random_state=random_seed)
            regressor.fit(x_train, y_train)
            mses_train[row][i] = calculate_mse(regressor, x_train, y_train)
            mses_test[row][i] = calculate_mse(regressor, x_test, y_test)

    plot_mse_vs_neurons(mses_train, mses_test, neuron_numbers)
Пример #2
0
def ex_1_1_c(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 c)

    For each hidden-layer width, trains an MLP regressor with 10 random
    seeds and records the train/test MSE, then plots MSE vs. number of
    neurons. Remember to set alpha to 0 when initializing the model.

    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return: None (produces a plot as a side effect)
    """
    MSE_train, MSE_test = np.zeros((8, 10)), np.zeros((8, 10))

    hiddenN = [1, 2, 4, 6, 8, 12, 20, 40]
    # one shared set of random seeds, reused across all layer widths
    # (leftover debug print of the seeds removed)
    randoms = np.random.randint(0, 1000, size=10)

    for i, n_h in enumerate(hiddenN):
        for j, rand in enumerate(randoms):
            nn = MLPRegressor(activation='logistic',
                              solver='lbfgs',
                              max_iter=5000,
                              hidden_layer_sizes=(n_h, ),
                              alpha=0,
                              random_state=rand)
            nn.fit(x_train, y_train)

            MSE_train[i][j] = calculate_mse(nn, x_train, y_train)
            MSE_test[i][j] = calculate_mse(nn, x_test, y_test)

    plot_mse_vs_neurons(MSE_train, MSE_test, hiddenN)
Пример #3
0
def ex_1_1_c(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 c)

    Sweeps the hidden-layer width over 10 random seeds, plots MSE vs.
    neurons, then plots the function learned by the last-trained model.
    Remember to set alpha to 0 when initializing the model.
    Use max_iter = 10000 and tol=1e-8

    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return: None (produces plots as a side effect)
    """
    hidden_neurons_totest = np.array([1, 2, 3, 4, 6, 8, 12, 20, 40])
    n_seeds = 10
    mse_test_matrix = np.zeros((hidden_neurons_totest.shape[0], n_seeds))
    mse_train_matrix = np.zeros((hidden_neurons_totest.shape[0], n_seeds))

    # enumerate replaces the original's hand-maintained row counter k
    for k, n_hidden_neurons in enumerate(hidden_neurons_totest):
        for j in range(n_seeds):
            nn = MLPRegressor(activation='logistic', solver='lbfgs', max_iter=10000, tol=1e-8,
                              hidden_layer_sizes=(n_hidden_neurons,), alpha=0, random_state=j)
            nn.fit(x_train, y_train)
            mse_test_matrix[k, j] = calculate_mse(nn, x_test, y_test)
            mse_train_matrix[k, j] = calculate_mse(nn, x_train, y_train)

    plot_mse_vs_neurons(mse_train_matrix, mse_test_matrix, hidden_neurons_totest)
    plt.show()

    # `nn` is the last model trained (40 hidden neurons, seed 9); predict
    # once here instead of on every loop iteration as the original did.
    # The original also passed the literal 0 as the train predictions;
    # pass the actual predictions so the train curve is drawn correctly.
    predictions_train = nn.predict(x_train)
    predictions_test = nn.predict(x_test)
    plot_learned_function(40, x_train, y_train, predictions_train, x_test, y_test, predictions_test)
    plt.show()
Пример #4
0
def ex_1_1_c(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 c)

    For every (seed, hidden-layer width) pair, trains an MLP regressor and
    stores train/test MSE, then plots MSE vs. number of neurons.
    Remember to set alpha to 0 when initializing the model.

    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return: None (prints the minimum train MSE and produces a plot)
    """
    params_n_h = np.array([1, 2, 3, 4, 6, 8, 12, 20, 40])
    seeds = np.array(range(1, 11))
    train_mses = np.zeros((params_n_h.shape[0], seeds.shape[0]))
    test_mses = np.zeros((params_n_h.shape[0], seeds.shape[0]))

    # plain enumerate instead of np.ndenumerate: both arrays are 1-D, so
    # the tuple indices ndenumerate yields only obscure the indexing
    for index_seed, seed in enumerate(seeds):
        for index_n_h, n_h in enumerate(params_n_h):
            nn = MLPRegressor(solver='lbfgs',
                              max_iter=200,
                              activation='logistic',
                              hidden_layer_sizes=(n_h, ),
                              alpha=0,
                              random_state=seed)
            nn.fit(x_train, y_train)
            train_mses[index_n_h,
                       index_seed] = calculate_mse(nn, x_train, y_train)
            test_mses[index_n_h,
                      index_seed] = calculate_mse(nn, x_test, y_test)

    print("Min MSE ", np.min(train_mses))
    plot_mse_vs_neurons(train_mses, test_mses, params_n_h)
def ex_1_1_c(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 c)

    Trains MLP regressors over a range of hidden-layer widths and 10 seeds,
    plotting the learned function for the 1-neuron case and the MSE curves
    for all widths. Remember to set alpha to 0 when initializing the model.
    Use max_iter = 10000 and tol=1e-8

    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return: None (produces plots as a side effect)
    """
    n_h = [1, 2, 3, 4, 6, 8, 12, 20, 40]
    train_array = np.zeros((len(n_h), 10))
    test_array = np.zeros((len(n_h), 10))

    # enumerate replaces the original's per-iteration n_h.index(n) lookup
    for index, n in enumerate(n_h):
        for i in range(0, 10):
            nn = MLPRegressor(tol=1e-8, activation='logistic', solver='lbfgs', alpha=0.0,
                              hidden_layer_sizes=(n,),
                              max_iter=10000, random_state=i)

            nn.fit(x_train, y_train)
            train_array[index][i] = calculate_mse(nn, x_train, y_train)
            test_array[index][i] = calculate_mse(nn, x_test, y_test)

            if n == 1:
                # predictions are only needed for this plot, so compute them
                # here rather than on every iteration as the original did.
                # NOTE(review): this plots once per seed (10 figures) for
                # n == 1 — confirm that is intended.
                y_pred_train = nn.predict(x_train)
                y_pred_test = nn.predict(x_test)
                plot_learned_function(n, x_train, y_train, y_pred_train, x_test, y_test, y_pred_test)

    plot_mse_vs_neurons(np.array(train_array), np.array(test_array), n_h)
Пример #6
0
def ex_1_1_d(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 b)

    Traces training progress (MSE per lbfgs iteration) for three hidden
    widths, then retrains the width that achieved the lowest test MSE and
    plots its learned function. Remember to set alpha to 0 when
    initializing the model.

    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return: None (produces plots as a side effect)
    """
    N = 500
    n_hidden = [2, 5, 50]
    mse_train = np.zeros([np.size(n_hidden), N])
    mse_test = np.zeros([np.size(n_hidden), N])

    for j in range(np.size(n_hidden)):
        # warm_start=True with max_iter=1: each fit() call advances the
        # optimizer by one more step, so the r axis records training progress
        reg = MLPRegressor(hidden_layer_sizes=(n_hidden[j], ),
                           activation='logistic',
                           solver='lbfgs',
                           alpha=0,
                           random_state=0,
                           warm_start=True,
                           max_iter=1)
        for r in range(N):
            reg.fit(x_train, y_train)

            mse_train[j, r] = calculate_mse(reg, x_train, y_train)
            mse_test[j, r] = calculate_mse(reg, x_test, y_test)

    # PLOT
    plot_mse_vs_neurons(mse_train, mse_test, n_hidden)

    # (row, iteration) of the overall smallest test MSE
    ind = np.unravel_index(np.argmin(mse_test), mse_test.shape)

    # BUG FIX: retrain with the best-performing width n_hidden[ind[0]].
    # The original passed the loop leftover n_hidden[j] (always the last
    # entry, 50) while labelling the plot with n_hidden[ind[0]].
    reg = MLPRegressor(hidden_layer_sizes=(n_hidden[ind[0]], ),
                       activation='logistic',
                       solver='lbfgs',
                       alpha=0,
                       random_state=random.randint(0, 1000),
                       max_iter=500)
    reg.fit(x_train, y_train)

    y_pred_test = reg.predict(x_test)
    y_pred_train = reg.predict(x_train)

    plot_learned_function(n_hidden[ind[0]], x_train, y_train, y_pred_train,
                          x_test, y_test, y_pred_test)
Пример #7
0
def ex_1_1_c(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 c)

    Sweeps hidden-layer widths over 10 seeds, plots MSE vs. neurons, and
    plots the function learned by the last-trained (40-neuron) model.
    Remember to set alpha to 0 when initializing the model.
    Use max_iter = 10000 and tol=1e-8

    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return: None (produces plots as a side effect)
    """
    # MLP-Regressor hyper-parameters
    hidden_layers = np.array([1, 2, 3, 4, 6, 8, 12, 20, 40])
    n_seeds = 10  # renamed: the original called this `random_state`, which
                  # misleadingly shadowed the MLPRegressor keyword argument
    activation_mode = 'logistic'
    solver_mode = 'lbfgs'
    alpha = 0
    max_iter = 10000
    tol = 1e-8

    train_mse = np.zeros((hidden_layers.size, n_seeds))
    test_mse = np.zeros((hidden_layers.size, n_seeds))

    for m in range(n_seeds):
        for n in range(hidden_layers.size):
            nn = MLPRegressor(hidden_layer_sizes=(hidden_layers[n], ),
                              activation=activation_mode,
                              solver=solver_mode,
                              alpha=alpha,
                              max_iter=max_iter,
                              random_state=m,
                              tol=tol)
            nn.fit(x_train, y_train)
            # record train/test MSE for every (seed, width) pair
            train_mse[n][m] = calculate_mse(nn, x_train, y_train)
            test_mse[n][m] = calculate_mse(nn, x_test, y_test)

    plot_mse_vs_neurons(train_mse, test_mse, hidden_layers)

    # `nn` is the model from the final loop iteration: 40 neurons, seed 9
    y_test_pred = nn.predict(x_test)
    y_train_pred = nn.predict(x_train)

    plot_learned_function(40, x_train, y_train, y_train_pred, x_test, y_test,
                          y_test_pred)
Пример #8
0
def ex_1_1_c(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 c)

    Repeats each hidden-layer width with N random seeds, plots MSE vs.
    neurons, then retrains the width that achieved the overall lowest test
    MSE and plots its learned function. Remember to set alpha to 0 when
    initializing the model.

    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return: None (produces plots as a side effect)
    """
    N = 10
    n_hidden = [1, 2, 3, 4, 6, 8, 12, 20, 40]
    mse_train = np.zeros([np.size(n_hidden), N])
    mse_test = np.zeros([np.size(n_hidden), N])

    for j in range(np.size(n_hidden)):
        for r in range(N):
            # BUG FIX: the original built ONE regressor (single seed)
            # outside this loop and re-fitted it N times, so all N
            # "random seed" repetitions produced identical results.
            # BUG FIX: the original used hidden_layer_sizes=(1, n_hidden[j]),
            # i.e. two hidden layers with the first squeezed to one neuron;
            # the exercise and the final plot below use a single layer.
            reg = MLPRegressor(hidden_layer_sizes=(n_hidden[j], ),
                               activation='logistic',
                               solver='lbfgs',
                               alpha=0,
                               random_state=random.randint(0, 1000))
            reg.fit(x_train, y_train)

            mse_train[j, r] = calculate_mse(reg, x_train, y_train)
            mse_test[j, r] = calculate_mse(reg, x_test, y_test)

    # PLOT
    plot_mse_vs_neurons(mse_train, mse_test, n_hidden)

    # (row, repetition) of the overall smallest test MSE
    ind = np.unravel_index(np.argmin(mse_test), mse_test.shape)

    reg = MLPRegressor(hidden_layer_sizes=(n_hidden[ind[0]], ),
                       activation='logistic',
                       solver='lbfgs',
                       alpha=0,
                       random_state=random.randint(0, 1000))

    reg.fit(x_train, y_train)
    y_pred_test = reg.predict(x_test)
    y_pred_train = reg.predict(x_train)
    plot_learned_function(n_hidden[ind[0]], x_train, y_train, y_pred_train,
                          x_test, y_test, y_pred_test)
Пример #9
0
def ex_1_1_c(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 c)

    Evaluates every hidden-layer width with 10 random seeds, plots the
    train/test MSE curves, then refits the width whose summed test MSE is
    smallest and plots the function it learned. Remember to set alpha to 0
    when initializing the model.

    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return: None (produces plots as a side effect)
    """
    n_seeds = 10
    n_neur = [1, 2, 3, 4, 6, 8, 12, 20, 40]
    shape = (np.size(n_neur), n_seeds)
    mse_train = np.zeros(shape)
    mse_test = np.zeros(shape)

    for row, width in enumerate(n_neur):
        for col in range(n_seeds):
            seed = np.random.randint(100)
            model = MLPRegressor(hidden_layer_sizes=(width, ),
                                 max_iter=5000,
                                 activation='logistic',
                                 solver='lbfgs',
                                 alpha=0,
                                 random_state=seed)
            model.fit(x_train, y_train)
            mse_train[row, col] = calculate_mse(model, x_train, y_train)
            mse_test[row, col] = calculate_mse(model, x_test, y_test)

    plot_mse_vs_neurons(mse_train, mse_test, n_neur)

    # best width = row whose summed test MSE is the smallest
    ind_min = mse_test.sum(axis=1).argmin()

    best = MLPRegressor(hidden_layer_sizes=(n_neur[ind_min], ),
                        max_iter=5000,
                        activation='logistic',
                        solver='lbfgs',
                        alpha=0,
                        random_state=np.random.randint(100))
    best.fit(x_train, y_train)

    y_pred_test = best.predict(x_test)
    y_pred_train = best.predict(x_train)
    plot_learned_function(n_neur[ind_min], x_train, y_train, y_pred_train,
                          x_test, y_test, y_pred_test)
Пример #10
0
def ex_1_1_c(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 c)

    Sweeps 8 hidden-layer widths with 10 random seeds each, plots the MSE
    curves, then fits a fresh model with nh[2] (= 4) neurons and plots the
    function it learned. Remember to set alpha to 0 when initializing the
    model.

    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return: None (produces plots as a side effect)
    """
    nh = [1, 2, 4, 6, 8, 12, 20, 40]
    mse_all_train = np.zeros(shape=(8, 10))
    mse_all_test = np.zeros(shape=(8, 10))

    for i in range(0, 10):       # seed repetitions
        for j in range(0, 8):    # hidden-layer widths
            seed = np.random.randint(1, 100)
            nn = MLPRegressor(activation='logistic',
                              solver='lbfgs',
                              max_iter=5000,
                              alpha=0,
                              hidden_layer_sizes=(nh[j], ),
                              random_state=seed)
            nn.fit(x_train, y_train)
            # store directly; the original routed these through throwaway
            # temporaries (mse_train / mse_test)
            mse_all_train[j][i] = calculate_mse(nn, x_train, y_train)
            mse_all_test[j][i] = calculate_mse(nn, x_test, y_test)
    plot_mse_vs_neurons(mse_all_train, mse_all_test, nh)

    # NOTE(review): no random_state here, so this final fit (and hence the
    # plotted learned function) is non-deterministic — confirm intended.
    nn = MLPRegressor(activation='logistic',
                      solver='lbfgs',
                      max_iter=5000,
                      alpha=0,
                      hidden_layer_sizes=(nh[2], ))
    nn.fit(x_train, y_train)
    y_pred_train = nn.predict(x_train)

    y_pred_test = nn.predict(x_test)

    plot_learned_function(nh[2], x_train, y_train, y_pred_train, x_test,
                          y_test, y_pred_test)
Пример #11
0
def ex_1_1_c(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 c)

    Collects train/test MSE for every (width, seed) pair into one 3-D
    array, plots the MSE curves, then refits a 40-neuron model and plots
    the function it learned. Remember to set alpha to 0 when initializing
    the model.
    Use max_iter = 10000 and tol=1e-8

    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return: None (produces plots as a side effect)
    """
    n_hidden_neurons_list = [1, 2, 3, 4, 6, 8, 12, 20, 40]
    seeds = 10
    # last axis holds the pair [train_mse, test_mse] per (width, seed)
    mse = np.zeros((len(n_hidden_neurons_list), seeds, 2))

    for i, width in enumerate(n_hidden_neurons_list):
        for j in range(seeds):
            regressor = MLPRegressor(
                hidden_layer_sizes=(width, ),
                activation='logistic',
                solver='lbfgs',
                alpha=0,
                max_iter=10000,
                random_state=j,
                tol=1e-8)
            regressor.fit(x_train, y_train)
            # presumably this calculate_mse variant returns the pair
            # [train_mse, test_mse] — verify against its definition
            mse[i][j] = calculate_mse(regressor, [x_train, x_test],
                                      [y_train, y_test])

    plot_mse_vs_neurons(mse[:, :, 0], mse[:, :, 1], n_hidden_neurons_list)

    n_hidden = 40
    regressor = MLPRegressor(hidden_layer_sizes=(n_hidden, ),
                             activation='logistic',
                             solver='lbfgs',
                             alpha=0,
                             max_iter=10000,
                             tol=1e-8)
    regressor.fit(x_train, y_train)

    y_pred_train = regressor.predict(x_train)
    y_pred_test = regressor.predict(x_test)
    plot_learned_function(n_hidden, x_train, y_train, y_pred_train, x_test,
                          y_test, y_pred_test)
Пример #12
0
def ex_1_1_c(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 c)

    Repeats each hidden-layer width over 10 deterministic seeds (0..9) and
    plots the resulting train/test MSE curves. Remember to set alpha to 0
    when initializing the model.
    Use max_iter = 10000 and tol=1e-8

    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return: None (produces a plot as a side effect)
    """
    n_hidden_neurons_list = [1, 2, 3, 4, 6, 8, 12, 20, 40]
    n_iterations = 10
    train_mses = numpy.zeros((len(n_hidden_neurons_list), n_iterations))
    test_mses = numpy.zeros((len(n_hidden_neurons_list), n_iterations))

    # enumerate replaces the original's hand-maintained row counter `r`
    for r, n_hidden_neuron in enumerate(n_hidden_neurons_list):
        for i in range(n_iterations):
            trained_regressor = MLPRegressor(
                hidden_layer_sizes=(n_hidden_neuron, ),
                activation='logistic',
                solver='lbfgs',
                alpha=0,
                tol=1e-8,
                max_iter=10000,
                random_state=i)
            # fit() returns self, so no reassignment is needed
            trained_regressor.fit(x_train, y_train)
            train_mses[r][i] = calculate_mse(trained_regressor, x_train,
                                             y_train)
            test_mses[r][i] = calculate_mse(trained_regressor, x_test, y_test)

    plot_mse_vs_neurons(train_mses, test_mses, n_hidden_neurons_list)
Пример #13
0
def ex_1_1_d(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 b)

    Traces training progress (MSE per optimizer step) for three hidden
    widths under three different solvers, then plots one MSE-vs-neurons
    figure per solver. Remember to set alpha to 0 when initializing the
    model.

    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return: None (produces plots as a side effect)
    """
    total_iter = 50
    n_neur = [2, 5, 50]
    solvers = ['lbfgs', 'sgd', 'adam']
    mse_train = np.zeros([np.size(n_neur), total_iter, np.size(solvers)])
    mse_test = np.zeros([np.size(n_neur), total_iter, np.size(solvers)])

    # enumerate replaces the original's hand-maintained counter_solv index
    for counter_solv, solv in enumerate(solvers):
        for j in range(np.size(n_neur)):
            # warm_start=True with max_iter=1: each fit() call advances the
            # optimizer by one step, so axis r records training progress
            reg = MLPRegressor(hidden_layer_sizes=(n_neur[j], ),
                               activation='logistic',
                               solver=solv,
                               alpha=0,
                               random_state=0,
                               warm_start=True,
                               max_iter=1)
            for r in range(total_iter):
                reg.fit(x_train, y_train)
                mse_train[j, r,
                          counter_solv] = calculate_mse(reg, x_train, y_train)
                mse_test[j, r,
                         counter_solv] = calculate_mse(reg, x_test, y_test)

    # PLOT: one figure per solver
    for s in range(np.size(solvers)):
        plot_mse_vs_neurons(mse_train[:, :, s], mse_test[:, :, s], n_neur)