def ann(opt_lam, X_train_in, X_test_in, y_train_in, y_test, y_test_in, X_train_in_torch, X_test_in_torch, y_train_in_torch, m, h_lays):
    model = lambda: torch.nn.Sequential(
        torch.nn.Linear(m, h_lays),  # M features to H hidden units
        # 1st transfer function, either Tanh or ReLU:
        torch.nn.ReLU(),
        # torch.nn.Tanh(),
        torch.nn.Linear(h_lays, 1),  # H hidden units to 1 output neuron
        # torch.nn.Sigmoid()  # final transfer function
    )

    loss_fn = torch.nn.MSELoss()

    # Train for at most max_iter steps, or until convergence (see the help for
    # the function train_neural_net() for more on the tolerance/convergence)
    max_iter = 50

    # Go to the file 'toolbox_02450.py' in the Tools sub-folder of the toolbox
    # and see how the network is trained (search for 'def train_neural_net',
    # which is the place the function below is defined)
    net, final_loss, learning_curve = train_neural_net(model,
                                                       loss_fn,
                                                       X=X_train_in_torch,
                                                       y=y_train_in_torch,
                                                       n_replicates=3,
                                                       max_iter=max_iter)

    y_res = net(X_test_in_torch)

    # Flatten predictions and targets so the subtraction is element-wise
    # (an (n, 1) minus (n,) difference would silently broadcast to (n, n))
    y_res = y_res.data.numpy().reshape(-1)
    y_true = np.asarray(y_test_in).reshape(-1)

    eval_error = np.square(y_true - y_res).sum() / y_true.shape[0]

    return eval_error
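
# A minimal usage sketch of the regression helper above (my addition): synthetic,
# hypothetical data; assumes this ann() and the 02450 toolbox's train_neural_net
# are importable in the current scope.
import numpy as np
import torch

rng = np.random.default_rng(0)
X_tr, X_te = rng.normal(size=(80, 5)), rng.normal(size=(20, 5))
y_tr, y_te = rng.normal(size=(80, 1)), rng.normal(size=(20, 1))

mse = ann(None, X_tr, X_te, y_tr, y_te, y_te,
          torch.tensor(X_tr, dtype=torch.float),
          torch.tensor(X_te, dtype=torch.float),
          torch.tensor(y_tr, dtype=torch.float),
          m=5, h_lays=10)
print('ANN test MSE:', mse)
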
def ann(opt_lam, X_train_in, X_test, X_test_in, y_train, y_train_in, y_test, y_test_in, X_train_in_torch, X_test_in_torch, y_train_in_torch, m, h_lays, c):
    model = lambda: torch.nn.Sequential(
        torch.nn.Linear(m, h_lays),  # M features to H hidden units
        torch.nn.ReLU(),  # 1st transfer function
        # Output layer: H hidden units to C classes.
        # The activations before the final transfer function are often
        # referred to as logits (the logit output).
        torch.nn.Linear(h_lays, c),  # C logits
        # To obtain normalised "probabilities" for each class we apply the
        # softmax function along the "class" dimension (i.e. not the
        # dimension describing observations)
        torch.nn.Softmax(dim=1)  # final transfer function, normalisation of the logit output
    )

    loss_fn = torch.nn.CrossEntropyLoss()

    max_iter = 10000
    print('Training model of type:\n{}\n'.format(str(model())))

    # Do cross-validation:
    # (The outer cross-validation loop from the original toolbox example is
    # omitted here; this helper trains on the pre-split inner-fold tensors
    # passed in as arguments.)

    net, final_loss, learning_curve = train_neural_net(model,
                                                       loss_fn,
                                                       X=X_train_in_torch,
                                                       y=y_train_in_torch,
                                                       n_replicates=3,
                                                       max_iter=max_iter)

    print('\n\tBest loss: {}\n'.format(final_loss))

    softmax_logits = net(torch.tensor(X_test, dtype=torch.float))
    # Get the estimated class as the class with highest probability (argmax on softmax_logits)
    y_test_est = (torch.max(softmax_logits, dim=1)[1]).data.numpy()
    # Determine errors
    e = (y_test_est != y_test)

    eval_error = sum(e) / len(e)

    return eval_error
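
# A minimal usage sketch of the classification helper above (my addition):
# synthetic, hypothetical 3-class data; assumes this ann() and train_neural_net
# are available. Labels are converted to torch.long for CrossEntropyLoss.
import numpy as np
import torch

rng = np.random.default_rng(0)
X_tr, X_te = rng.normal(size=(90, 4)), rng.normal(size=(30, 4))
y_tr, y_te = rng.integers(0, 3, size=90), rng.integers(0, 3, size=30)

err_rate = ann(None, X_tr, X_te, X_te, y_tr, y_tr, y_te, y_te,
               torch.tensor(X_tr, dtype=torch.float),
               torch.tensor(X_te, dtype=torch.float),
               torch.tensor(y_tr, dtype=torch.long),
               m=4, h_lays=10, c=3)
print('ANN misclassification rate:', err_rate)
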
# Example #3
def compare_ann_baseline():
    C = 3
    opt_lam = 100
    h_lays = 15
    N, M = X.shape
    K = 10
    cvf = 10
    CV = skmd.KFold(K, shuffle=False)

    Error_test_baseline = [0 for i in range(K)]

    Error_test_ann = [0 for i in range(K)]

    r_values = [0 for i in range(K)]

    outk = 0
    for train_index, test_index in CV.split(X, Y):
        X_train = X[train_index]
        y_train = Y[train_index]
        X_test = X[test_index]
        y_test = Y[test_index]

        X_train = X_train.astype(np.float64)
        y_train = y_train.astype(np.float64)
        X_test = X_test.astype(np.float64)
        y_test = y_test.astype(np.float64)

        CV = skmd.KFold(cvf, shuffle=True)

        Error_test_baseline_inner = [0 for i in range(cvf)]

        Error_test_ann_inner = [0 for i in range(cvf)]

        ink = 0
        for inner_train_index, inner_test_index in CV.split(X_train, y_train):

            # Inner-fold indices refer to rows of X_train/y_train, not the full X/Y
            X_train_in = X_train[inner_train_index].astype(np.float64)
            y_train_in = y_train[inner_train_index].astype(np.float64)
            X_test_in = X_train[inner_test_index].astype(np.float64)
            y_test_in = y_train[inner_test_index].astype(np.float64)

            X_train_in_torch = torch.tensor(X_train_in, dtype=torch.float)
            y_train_in_torch = torch.tensor(y_train_in, dtype=torch.long)
            X_test_in_torch = torch.tensor(X_test_in, dtype=torch.float)

            y_train_in = y_train_in.reshape((y_train_in.shape[0], ))
            y_test_in = y_test_in.reshape((y_test_in.shape[0], ))

            # Baseline: predict the most frequent class in the inner training
            # set and measure the misclassification rate on the inner test set
            values, counts = np.unique(y_train_in, return_counts=True)
            majority_class = values[np.argmax(counts)]
            eval_error = np.mean(y_test_in != majority_class)

            Error_test_baseline_inner[ink] = eval_error

            # ANN

            model = lambda: torch.nn.Sequential(
                torch.nn.Linear(M, h_lays),  # M features to H hidden units
                torch.nn.ReLU(),  # 1st transfer function
                # Output layer: H hidden units to C classes (the logits)
                torch.nn.Linear(h_lays, C),
                # Softmax along the class dimension normalises the logits
                # into "probabilities" for each class
                torch.nn.Softmax(dim=1)  # final transfer function
            )

            loss_fn = torch.nn.CrossEntropyLoss()

            max_iter = 100
            print('Training model of type:\n{}\n'.format(str(model())))

            # (The outer cross-validation loop from the original toolbox example
            # is omitted here; the inner-fold tensors built above are used directly.)

            net, final_loss, learning_curve = train_neural_net(
                model,
                loss_fn,
                X=X_train_in_torch,
                y=y_train_in_torch,
                n_replicates=3,
                max_iter=max_iter)

            print('\n\tBest loss: {}\n'.format(final_loss))

            # Evaluate on the inner test set
            softmax_logits = net(X_test_in_torch)
            # Estimated class = class with the highest probability (argmax over the softmax output)
            y_test_est = (torch.max(softmax_logits, dim=1)[1]).data.numpy()
            # Misclassification indicator for each inner-test observation
            e = (y_test_est != y_test_in)

            eer = np.mean(e)

            Error_test_ann_inner[ink] = eer

            # increment inner index
            ink += 1

        # save errors
        Error_test_baseline[outk] = Error_test_baseline_inner
        Error_test_ann[outk] = Error_test_ann_inner

        # Per-fold performance difference (ANN minus baseline), as in 11.4.1
        r_j = sum(i - j for i, j in zip(Error_test_ann_inner,
                                        Error_test_baseline_inner)) / len(Error_test_ann_inner)

        r_values[outk] = r_j

        # increment outer index
        outk += 1

    return Error_test_baseline, Error_test_ann, r_values
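
# A short sketch (my addition, not part of the original code) of how the returned
# per-fold differences could be summarised with a t-based confidence interval and
# p-value; assumes scipy is installed and that X, Y and the helpers used by
# compare_ann_baseline() are defined in the current scope.
import numpy as np
import scipy.stats as st

_, _, r_values = compare_ann_baseline()
r = np.asarray(r_values, dtype=float)
J = len(r)
r_hat = r.mean()                    # mean difference (ANN error - baseline error)
se = r.std(ddof=1) / np.sqrt(J)     # standard error over the K outer folds
ci = st.t.interval(0.95, df=J - 1, loc=r_hat, scale=se)
p_value = 2 * st.t.cdf(-abs(r_hat / se), df=J - 1)
print('mean difference:', r_hat, '95% CI:', ci, 'p-value:', p_value)
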
# Example #4
def two_level_cross_validation():
    # Model 1:
    # the regularized linear regression model obtained in part a

    # Imported from regression part a
    print("Optimal lambda from part 1: ", OPT_lambda_part_2)
    print("Data imported from part 1 has shapes:", X2.shape, YY.shape)

    N, M = X2.shape
    _, MM = XX.shape
    K = 10
    CV = skmd.KFold(K, shuffle=False)

    Error_train_lin = np.empty((K, 1))
    Error_test_lin = np.empty((K, 1))
    Opt_lambdas_lin = np.empty((K, 1))

    Error_test_baseline = np.empty((K, 1))

    Error_test_ann = np.empty((K, 1))
    Opt_h_ann = np.empty((K, 1))

    Error_train_rlr = np.empty((K, 1))
    Error_test_rlr = np.empty((K, 1))
    Error_train_nofeatures = np.empty((K, 1))
    Error_test_nofeatures = np.empty((K, 1))
    w_rlr = np.empty((M, K))
    mu = np.empty((K, M - 1))
    sigma = np.empty((K, M - 1))
    w_noreg = np.empty((M, K))

    k = 0

    for train_index, test_index in CV.split(X2, YY):
        # Linear regression
        X_train = X2[train_index]
        y_train = YY[train_index]
        X_test = X2[test_index]
        y_test = YY[test_index]
        internal_cross_validation = 10

        y_train = y_train.reshape((y_train.shape[0], ))
        y_test = y_test.reshape((y_test.shape[0], ))

        X_train = X_train.astype(np.float64)
        y_train = y_train.astype(np.float64)
        X_test = X_test.astype(np.float64)
        y_test = y_test.astype(np.float64)

        opt_val_err, opt_lambda, mean_w_vs_lambda, train_err_vs_lambda, test_err_vs_lambda = rlr_validate(
            X_train, y_train, lambdas, internal_cross_validation)

        Opt_lambdas_lin[k] = opt_lambda

        mu[k, :] = np.mean(X_train[:, 1:], 0)
        sigma[k, :] = np.std(X_train[:, 1:], 0)

        X_train[:, 1:] = (X_train[:, 1:] - mu[k, :]) / sigma[k, :]
        X_test[:, 1:] = (X_test[:, 1:] - mu[k, :]) / sigma[k, :]

        Xty = X_train.T @ y_train
        XtX = X_train.T @ X_train

        # Compute mean squared error without using the input data at all
        Error_train_nofeatures[k] = np.square(y_train - y_train.mean()).sum(
            axis=0) / y_train.shape[0]
        Error_test_nofeatures[k] = np.square(y_test - y_test.mean()).sum(
            axis=0) / y_test.shape[0]

        # Estimate weights for the optimal value of lambda, on entire training set
        lambdaI = opt_lambda * np.eye(M)
        lambdaI[0, 0] = 0  # Do not regularize the bias term
        w_rlr[:, k] = np.linalg.solve(XtX + lambdaI, Xty).squeeze()
        # Compute mean squared error with regularization with optimal lambda
        Error_train_rlr[k] = np.square(y_train - X_train @ w_rlr[:, k]).sum(
            axis=0) / y_train.shape[0]

        Error_test_rlr[k] = np.square(y_test - X_test @ w_rlr[:, k]).sum(
            axis=0) / y_test.shape[0]

        # Estimate weights for unregularized linear regression, on entire training set
        w_noreg[:, k] = np.linalg.solve(XtX, Xty).squeeze()
        # Compute mean squared error without regularization
        Error_train_lin[k] = np.square(y_train - X_train @ w_noreg[:, k]).sum(
            axis=0) / y_train.shape[0]

        # The important quantity: test error of the unregularized linear model
        Error_test_lin[k] = np.square(y_test - X_test @ w_noreg[:, k]).sum(
            axis=0) / y_test.shape[0]

        # Baseline

        y_pred = np.mean(y_train)

        Error_test_baseline[k] = np.square(y_test - y_pred).sum(
            axis=0) / y_test.shape[0]

        # ANN
        # Cast to torch tensors

        # For the ANN, switch to the full feature matrix XX

        X_train = XX[train_index]
        X_test = XX[test_index]

        X_train = X_train.astype(np.float64)
        X_test = X_test.astype(np.float64)

        y_train = y_train.reshape((y_train.shape[0], 1))
        y_test = y_test.reshape((y_test.shape[0], 1))

        X_train = torch.tensor(X_train, dtype=torch.float)
        y_train = torch.tensor(y_train, dtype=torch.float)
        X_test = torch.tensor(X_test, dtype=torch.float)
        # y_test = torch.tensor(y_test, dtype=torch.uint8)

        best_val = 10**30
        hopt = 0

        for n_hidden_units in h_vals:

            model = lambda: torch.nn.Sequential(
                torch.nn.Linear(MM, n_hidden_units),  # MM features to H hidden units
                # 1st transfer function, either Tanh or ReLU:
                torch.nn.ReLU(),
                # torch.nn.Tanh(),
                torch.nn.Linear(n_hidden_units, 1),  # H hidden units to 1 output neuron
                # torch.nn.Sigmoid()  # final transfer function
            )

            loss_fn = torch.nn.MSELoss()

            # Train for a maximum of 10000 steps, or until convergence (see the help
            # for the function train_neural_net() for more on the tolerance/convergence)
            max_iter = 10000

            # Go to the file 'toolbox_02450.py' in the Tools sub-folder of the toolbox
            # and see how the network is trained (search for 'def train_neural_net',
            # which is the place the function below is defined)
            net, final_loss, learning_curve = train_neural_net(
                model,
                loss_fn,
                X=X_train,
                y=y_train,
                n_replicates=3,
                max_iter=max_iter)

            y_res = net(X_test)

            y_res = y_res.data.numpy()
            # y_test = y_test.data.numpy()

            eval_error = np.square(y_test -
                                   y_res).sum(axis=0) / y_test.shape[0]

            if eval_error < best_val:
                hopt = n_hidden_units
                best_val = eval_error

                learning_curve_best = learning_curve
                best_net = net

        Error_test_ann[k] = best_val
        Opt_h_ann[k] = hopt

        k += 1

    return Opt_h_ann, Error_test_ann, Opt_lambdas_lin, Error_test_lin, Error_test_baseline
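
# A small reporting sketch (my addition): print the usual per-fold two-level CV
# table from the arrays returned above. Assumes the function and its globals
# (X2, XX, YY, lambdas, h_vals, OPT_lambda_part_2) are defined.
Opt_h, E_ann, Opt_lam, E_lin, E_base = two_level_cross_validation()

print('fold |   h* |    E_ann | lambda* |    E_lin |  E_base')
for k in range(len(E_ann)):
    print('{:4d} | {:4.0f} | {:8.3f} | {:7.2f} | {:8.3f} | {:7.3f}'.format(
        k + 1, Opt_h[k, 0], E_ann[k, 0], Opt_lam[k, 0], E_lin[k, 0], E_base[k, 0]))
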
# Example #5
# (Truncated excerpt: the tail of a model-selection loop that retrains the
# classification network for each candidate setting and keeps the one with the
# lowest test error. The surrounding loop and variable definitions are missing
# from the source.)

        net, final_loss, learning_curve = train_neural_net(
            model,
            loss_fn,
            X=torch.tensor(X_train, dtype=torch.float),
            y=torch.tensor(y_train, dtype=torch.long),
            n_replicates=3,
            max_iter=max_iter)

        print('\n\tBest loss: {}\n'.format(final_loss))

        softmax_logits = net(torch.tensor(X_test, dtype=torch.float))
        # Estimated class = class with the highest probability (argmax over the softmax output)
        y_test_est = (torch.max(softmax_logits, dim=1)[1]).data.numpy()
        # Misclassification indicator per test observation
        e = (y_test_est != y_test)

        eer = sum(e) / len(e)

        if eer < best_error_sofar:
            ...  # (excerpt cut off here)

def compare_ann_baseline_old():
    opt_lam = 100
    h_lays = 15
    N, M = X.shape
    K = 10
    cvf = 10
    CV = skmd.KFold(K, shuffle=False)  # random_state is invalid (newer sklearn raises) when shuffle=False

    Error_test_baseline = []
    Error_test_ann = []

    r_values = []

    outk = 0
    for train_index, test_index in CV.split(X, Y):
        X_train = X[train_index]
        y_train = Y[train_index]
        X_test = X[test_index]
        y_test = Y[test_index]

        X_train = X_train.astype(np.float64)
        y_train = y_train.astype(np.float64)
        X_test = X_test.astype(np.float64)
        y_test = y_test.astype(np.float64)

        CV = skmd.KFold(cvf, random_state=17, shuffle=True)

        Error_test_baseline_inner = []
        Error_test_ann_inner = []

        for inner_train_index, inner_test_index in CV.split(X_train, y_train):
            # Inner-fold indices refer to rows of X_train/y_train, not the full X/Y
            X_train_in = X_train[inner_train_index].astype(np.float64)
            y_train_in = y_train[inner_train_index].astype(np.float64)
            X_test_in = X_train[inner_test_index].astype(np.float64)
            y_test_in = y_train[inner_test_index].astype(np.float64)

            X_train_in_torch = torch.tensor(X_train_in, dtype=torch.float)
            y_train_in_torch = torch.tensor(y_train_in, dtype=torch.float)
            X_test_in_torch = torch.tensor(X_test_in, dtype=torch.float)

            y_train_in = y_train_in.reshape((y_train_in.shape[0],))
            y_test_in = y_test_in.reshape((y_test_in.shape[0],))

            # Baseline

            y_pred = np.mean(y_train_in)

            # MSE of the mean-prediction baseline on the inner test set
            eval_error = np.square(y_test_in - y_pred).sum(axis=0) / y_test_in.shape[0]

            Error_test_baseline_inner.append(eval_error)

            # ANN

            model = lambda: torch.nn.Sequential(
                torch.nn.Linear(M, h_lays),  # M features to H hidden units
                # 1st transfer function, either Tanh or ReLU:
                torch.nn.ReLU(),
                # torch.nn.Tanh(),
                torch.nn.Linear(h_lays, 1),  # H hidden units to 1 output neuron
                # torch.nn.Sigmoid()  # final transfer function
            )

            loss_fn = torch.nn.MSELoss()

            # Train for at most max_iter steps, or until convergence (see the help
            # for the function train_neural_net() for more on the tolerance/convergence)
            max_iter = 50

            # Go to the file 'toolbox_02450.py' in the Tools sub-folder of the toolbox
            # and see how the network is trained (search for 'def train_neural_net',
            # which is the place the function below is defined)
            net, final_loss, learning_curve = train_neural_net(model,
                                                               loss_fn,
                                                               X=X_train_in_torch,
                                                               y=y_train_in_torch,
                                                               n_replicates=3,
                                                               max_iter=max_iter)

            y_res = net(X_test_in_torch)
            # Flatten to match y_test_in's 1-D shape (avoids accidental (n, n) broadcasting)
            y_res = y_res.data.numpy().reshape(-1)

            eval_error = np.square(y_test_in - y_res).sum(axis=0) / y_test_in.shape[0]
            Error_test_ann_inner.append(eval_error)

        # save errors
        Error_test_baseline.append(Error_test_baseline_inner)
        Error_test_ann.append(Error_test_ann_inner)

        # Per-fold performance difference (ANN minus baseline), as in 11.4.1
        r_j = sum(i - j for i, j in zip(Error_test_ann_inner,
                                        Error_test_baseline_inner)) / len(Error_test_ann_inner)
        r_values.append(r_j)

        outk += 1

    return Error_test_baseline, Error_test_ann, r_values
def compare_ann_lin_reg_old():
    opt_lam = 100
    h_lays = 15
    N, M = X.shape
    K = 10
    cvf = 10
    CV = skmd.KFold(K, shuffle=False)  # random_state is invalid (newer sklearn raises) when shuffle=False

    Error_test_lin = [0 for i in range(K)]

    Error_test_ann = [0 for i in range(K)]

    r_values = [0 for i in range(K)]

    outk = 0
    for train_index, test_index in CV.split(X, Y):
        X_train = X[train_index]
        y_train = Y[train_index]
        X_test = X[test_index]
        y_test = Y[test_index]

        X_train = X_train.astype(np.float64)
        y_train = y_train.astype(np.float64)
        X_test = X_test.astype(np.float64)
        y_test = y_test.astype(np.float64)

        CV = skmd.KFold(cvf, random_state=17, shuffle=True)

        Error_test_lin_inner = [0 for i in range(cvf)]

        Error_test_ann_inner = [0 for i in range(cvf)]

        ink = 0
        for inner_train_index, inner_test_index in CV.split(X_train, y_train):

            # Inner-fold indices refer to rows of X_train/y_train, not the full X/Y
            X_train_in = X_train[inner_train_index].astype(np.float64)
            y_train_in = y_train[inner_train_index].astype(np.float64)
            X_test_in = X_train[inner_test_index].astype(np.float64)
            y_test_in = y_train[inner_test_index].astype(np.float64)

            X_train_in_torch = torch.tensor(X_train_in, dtype=torch.float)
            y_train_in_torch = torch.tensor(y_train_in, dtype=torch.float)
            X_test_in_torch = torch.tensor(X_test_in, dtype=torch.float)

            y_train_in = y_train_in.reshape((y_train_in.shape[0],))
            y_test_in = y_test_in.reshape((y_test_in.shape[0],))

            # Linear regression

            mu = np.mean(X_train_in[:, 1:], 0)
            sigma = np.std(X_train_in[:, 1:], 0)

            X_train_in[:, 1:] = (X_train_in[:, 1:] - mu) / sigma
            X_test_in[:, 1:] = (X_test_in[:, 1:] - mu) / sigma

            Xty = X_train_in.T @ y_train_in
            XtX = X_train_in.T @ X_train_in

            # Compute mean squared error without using the input data at all
            Error_train_nofeatures = np.square(y_train_in - y_train_in.mean()).sum(axis=0) / y_train_in.shape[0]
            Error_test_nofeatures = np.square(y_test_in - y_test_in.mean()).sum(axis=0) / y_test_in.shape[0]

            # Estimate weights for the optimal value of lambda, on entire training set
            lambdaI = opt_lam * np.eye(M)
            lambdaI[0, 0] = 0  # Do not regularize the bias term
            w_rlr = np.linalg.solve(XtX + lambdaI, Xty).squeeze()
            # Compute mean squared error with regularization with optimal lambda
            Error_train_rlr = np.square(y_train_in - X_train_in @ w_rlr).sum(axis=0) / y_train_in.shape[0]

            Error_test_rlr = np.square(y_test_in - X_test_in @ w_rlr).sum(axis=0) / y_test_in.shape[0]

            # Estimate weights for unregularized linear regression, on entire training set
            w_noreg = np.linalg.solve(XtX, Xty).squeeze()
            # Compute mean squared error without regularization
            Error_train_lin = np.square(y_train_in - X_train_in @ w_noreg).sum(axis=0) / y_train_in.shape[0]

            # The important quantity: test error of the unregularized linear model
            Error_test_lin_e = np.square(y_test_in - X_test_in @ w_noreg).sum(axis=0) / y_test_in.shape[0]

            Error_test_lin_inner[ink] = Error_test_lin_e

            # ANN

            model = lambda: torch.nn.Sequential(
                torch.nn.Linear(M, h_lays),  # M features to H hidden units
                # 1st transfer function, either Tanh or ReLU:
                torch.nn.ReLU(),
                # torch.nn.Tanh(),
                torch.nn.Linear(h_lays, 1),  # H hidden units to 1 output neuron
                # torch.nn.Sigmoid()  # final transfer function
            )

            loss_fn = torch.nn.MSELoss()

            # Train for a maximum of 10000 steps, or until convergence (see the help
            # for the function train_neural_net() for more on the tolerance/convergence)
            max_iter = 10000

            # Go to the file 'toolbox_02450.py' in the Tools sub-folder of the toolbox
            # and see how the network is trained (search for 'def train_neural_net',
            # which is the place the function below is defined)
            net, final_loss, learning_curve = train_neural_net(model,
                                                               loss_fn,
                                                               X=X_train_in_torch,
                                                               y=y_train_in_torch,
                                                               n_replicates=3,
                                                               max_iter=max_iter)

            y_res = net(X_test_in_torch)

            # Flatten to match y_test_in's 1-D shape (avoids accidental (n, n) broadcasting)
            y_res = y_res.data.numpy().reshape(-1)

            eval_error = np.square(y_test_in - y_res).sum(axis=0) / y_test_in.shape[0]

            Error_test_ann_inner[ink] = eval_error

            # increment inner index
            ink += 1

        # save errors
        Error_test_lin[outk] = Error_test_lin_inner
        Error_test_ann[outk] = Error_test_ann_inner

        # Per-fold performance difference (linear regression minus ANN), as in 11.4.1
        r_j = sum(i - j for i, j in zip(Error_test_lin_inner,
                                        Error_test_ann_inner)) / len(Error_test_lin_inner)

        r_values[outk] = r_j

        # increment outer index
        outk += 1



    return Error_test_lin, Error_test_ann, r_values
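
# A minimal usage sketch (my addition) that summarises the two "old" comparison
# functions above; assumes they and their globals (X, Y, train_neural_net) are
# defined in the current scope.
import numpy as np

_, _, r_ann_vs_baseline = compare_ann_baseline_old()   # per-fold ANN - baseline MSE differences
_, _, r_lin_vs_ann = compare_ann_lin_reg_old()         # per-fold linreg - ANN MSE differences
print('mean ANN - baseline difference:', np.mean(r_ann_vs_baseline))
print('mean linreg - ANN difference:  ', np.mean(r_lin_vs_ann))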