# Exemplo n.º 1
# 0
def generate_train_vs_degree(x, y, z, reg, max_degree, hyperparam, filename):
    '''
    Append training-set MSE and R2 scores for polynomial degrees
    0..max_degree to a text file.
        x, y = coordinate arrays used to build the design matrix
        z = data the model is fitted to
        reg = regression function reg(X, data, hyperparam)
        max_degree = maximum degree of polynomial
        hyperparam = hyperparameter for calibrating model
        filename = output file; rows "degree mse r2" are appended
    '''
    degrees = np.arange(0, max_degree+1)
    # context manager guarantees the file is closed even if reg() raises
    with open(filename, "a") as outfile:
        outfile.write("degree mse r2\n")
        for degree in degrees:
            # simple training with no cross validation
            X = pf.generate_design_2Dpolynomial(x, y, degree)
            beta = reg(X, z, hyperparam=hyperparam)
            z_model = X @ beta
            mse = pf.mse(z, z_model)
            # BUG FIX: r2 was computed with pf.mse, so the r2 column
            # duplicated the mse column; use pf.r2 instead
            r2 = pf.r2(z, z_model)
            outfile.write(f"{degree} {mse} {r2}\n")
# Exemplo n.º 2
# 0
def generate_train_vs_lambda(x, y, z, reg, degree, hyperparams, filename):
    '''
    Append training-set MSE and R2 scores for each hyperparameter value
    to a text file.
        x, y = coordinate arrays used to build the design matrix
        z = data the model is fitted to
        reg = regression function reg(X, data, hyperparam)
        degree = degree of polynomial
        hyperparams = iterable of hyperparameter values to sweep over
        filename = output file; rows "lambda mse r2" are appended
    '''
    # the design matrix depends only on degree, so build it once
    # instead of once per hyperparameter (loop-invariant hoist)
    X = pf.generate_design_2Dpolynomial(x, y, degree)
    # context manager guarantees the file is closed even if reg() raises
    with open(filename, "a") as outfile:
        outfile.write("lambda mse r2\n")
        for hyperparam in hyperparams:
            # simple training with no cross validation
            beta = reg(X, z, hyperparam=hyperparam)
            z_model = X @ beta
            mse = pf.mse(z, z_model)
            # BUG FIX: r2 was computed with pf.mse, so the r2 column
            # duplicated the mse column; use pf.r2 instead
            r2 = pf.r2(z, z_model)
            outfile.write(f"{hyperparam} {mse} {r2}\n")
# Exemplo n.º 3
# 0
def plot_bias_confidence(ax,
                         x,
                         y,
                         z,
                         reg,
                         degree,
                         hyperparam,
                         noise,
                         confidence=1.96,
                         **kwargs):
    """
    Plot the fitted regression coefficients as points with horizontal
    error bars giving their confidence intervals.
        ax = matplotlib.axis object
        reg = regression function reg(X, data, hyperparam)
        noise = noise level used to scale the interval half-width
        confidence = z-score for the interval (default 1.96 ~ 95%)
    """
    design = pf.generate_design_2Dpolynomial(x, y, degree)
    beta = reg(design, z, hyperparam=hyperparam)
    # half-width of each interval: z-score * noise * sqrt of the
    # diagonal of (X^T X)^-1
    variances = np.diag(np.linalg.inv(design.T @ design))
    weight = noise * np.sqrt(variances) * confidence
    # report full interval widths on stdout
    print(2 * weight)
    positions = np.arange(1, len(beta) + 1)
    ax.errorbar(beta, positions, xerr=weight, fmt='.', **kwargs)
# Exemplo n.º 4
# 0
def plot_train_vs_degree(ax,
                         x,
                         y,
                         z,
                         reg,
                         max_degree,
                         hyperparam,
                         plot_r2=False,
                         **kwargs):
    '''
    Plot training-set error (MSE, or R2 when plot_r2=True) against
    polynomial degree on the given axis.
        ax = matplotlib.axis object
        reg = regression function reg(X, data, hyperparam)
        max_degree = maximum degree of polynomial
        hyperparam = hyperparameter for calibrating model
        plot_r2 = if True plot the R2 score instead of the MSE
    '''
    degrees = np.arange(0, max_degree + 1)

    # FIX: the two branch comments were swapped (the R2 branch claimed
    # to compute MSE and vice versa); choosing metric and label once
    # here also avoids re-assigning them every iteration
    if plot_r2:
        score, label = pf.r2, 'R2 train'
    else:
        score, label = pf.mse, 'MSE train'

    error = []
    for degree in degrees:
        # simple training with no cross validation
        X = pf.generate_design_2Dpolynomial(x, y, degree)
        beta = reg(X, z, hyperparam=hyperparam)
        z_model = X @ beta
        error.append(score(z, z_model))

    ax.plot(degrees, error, **kwargs, label=label)
# Exemplo n.º 5
# 0
def plot_train_vs_lambda(ax,
                         x,
                         y,
                         z,
                         reg,
                         degree,
                         hyperparams,
                         r2=False,
                         **kwargs):
    '''
    Plot training-set error (MSE, or R2 when r2=True) against the
    hyperparameter value on the given axis.
        ax = matplotlib.axis object
        reg = regression function reg(X, data, hyperparam)
        degree = degree of polynomial
        hyperparams = iterable of hyperparameter values to sweep over
        r2 = if True plot the R2 score instead of the MSE
    '''
    # the design matrix depends only on degree, so build it once
    # instead of once per hyperparameter (loop-invariant hoist)
    X = pf.generate_design_2Dpolynomial(x, y, degree)

    # choose metric and label once; the original re-tested the flag
    # twice per iteration with two independent ifs
    if r2:
        score, label = pf.r2, 'R2 train'
    else:
        score, label = pf.mse, 'MSE train'

    error = []
    for hyperparam in hyperparams:
        # simple training with no cross validation
        beta = reg(X, z, hyperparam=hyperparam)
        z_model = X @ beta
        error.append(score(z, z_model))

    ax.plot(hyperparams, error, **kwargs, label=label)
# Exemplo n.º 6
# 0
# Downsample the terrain and coordinate grids, keeping every
# `reduction`-th sample in both directions
oslo_data = oslo_data[::reduction,::reduction]
x_grid = x_grid[::reduction,::reduction]
y_grid = y_grid[::reduction,::reduction]


#flatten the data
x = x_grid.ravel()
y = y_grid.ravel()
z = oslo_data.ravel()

#plotting

# NOTE(review): hyperparam=0 presumably makes ridge regression reduce
# to ordinary least squares — confirm this is intended
reg = pf.ridge_regression
hyperparam = 0

# fit a degree-10 2D polynomial to the flattened terrain
X = pf.generate_design_2Dpolynomial(x, y, degree=10)
beta = reg(X, z, hyperparam=hyperparam)
z_pred = X @ beta

# reshape the flat predictions back onto the 2D grid for imshow
z_pred_grid = z_pred.reshape(x_grid.shape)

plt.figure()
plt.imshow(z_pred_grid,cmap='gray')
plt.xlabel('X')
plt.ylabel('Y')
#plt.savefig(figdir+'terrainpicture_ridge.pdf')
plt.show()
'''
plt.figure()
#plt.title('Terrain over inner Oslo fjord')
plt.imshow(oslo_data, cmap='gray')