Example #1
def main():
    lasso_data_fname = "lasso_data.pickle"
    x_train, y_train, x_val, y_val, target_fn, coefs_true, featurize = setup_problem.load_problem(lasso_data_fname)

    # Generate features
    X_train = featurize(x_train)
    X_val = featurize(x_val)

    pred_fns = []
    x = np.sort(np.concatenate([np.arange(0,1,.001), x_train]))
    X = featurize(x)

    l2reg = 1
    estimator = RidgeRegression(l2_reg=l2reg, step_size=0.00005, max_num_epochs=2000)
    estimator.fit(X_train, y_train)
    name = "Ridge with L2Reg="+str(l2reg)
    pred_fns.append({"name":name, "preds": estimator.predict(X) })


    l2reg = 0  # no penalty: this reduces to unregularized least squares
    estimator = RidgeRegression(l2_reg=l2reg, step_size=0.0005, max_num_epochs=500)
    estimator.fit(X_train, y_train)
    name = "Ridge with L2Reg="+str(l2reg)
    pred_fns.append({"name":name, "preds": estimator.predict(X) })

    # Let's plot prediction functions and compare coefficients for several fits
    # and the target function.

    pred_fns.append({"name": "Target Parameter Values (i.e. Bayes Optimal)", "coefs": coefs_true, "preds": target_fn(x)})

    plot_utils.plot_prediction_functions(x, pred_fns, x_train, y_train, legend_loc="best")
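
The RidgeRegression class above comes from the course's own ridge_regression module, which is not shown on this page. A minimal sketch of a compatible estimator, assuming the standard ridge objective (mean squared training error plus l2_reg times the squared L2 norm of the weights) minimized by full-batch gradient descent with the step_size and max_num_epochs arguments used above:

import numpy as np

class RidgeRegression:
    """Minimal sketch (not the course implementation): ridge regression fit by
    full-batch gradient descent on J(w) = mean((Xw - y)^2) + l2_reg * ||w||^2."""
    def __init__(self, l2_reg=1.0, step_size=0.0005, max_num_epochs=1000):
        self.l2_reg = l2_reg
        self.step_size = step_size
        self.max_num_epochs = max_num_epochs
        self.w = None

    def fit(self, X, y):
        n, d = X.shape
        self.w = np.zeros(d)
        for _ in range(self.max_num_epochs):
            residual = X.dot(self.w) - y
            grad = 2 * X.T.dot(residual) / n + 2 * self.l2_reg * self.w
            self.w -= self.step_size * grad
        return self

    def predict(self, X):
        return X.dot(self.w)
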
Example #2
def main():
    lasso_data_fname = "lasso_data.pickle"
    x_train, y_train, x_val, y_val, target_fn, coefs_true, featurize = setup_problem.load_problem(lasso_data_fname)

    # Generate features
    X_train = featurize(x_train)
    X_val = featurize(x_val)

    # Let's plot prediction functions and compare coefficients for several fits
    # and the target function.
    pred_fns = []
    x = np.sort(np.concatenate([np.arange(0,1,.001), x_train]))

    pred_fns.append({"name": "Target Parameter Values (i.e. Bayes Optimal)", "coefs": coefs_true, "preds": target_fn(x)})

    estimator = MLPRegression(num_hidden_units=10, step_size=0.001, init_param_scale=.0005,  max_num_epochs=5000)
    x_train_as_column_vector = x_train.reshape(x_train.shape[0],1) # fit expects a 2-dim array
    x_as_column_vector = x.reshape(x.shape[0],1) # predict expects a 2-dim array
    estimator.fit(x_train_as_column_vector, y_train)
    name = "MLP regression - no features"
    pred_fns.append({"name":name, "preds": estimator.predict(x_as_column_vector) })

    X = featurize(x)
    estimator = MLPRegression(num_hidden_units=10, step_size=0.0005, init_param_scale=.01,  max_num_epochs=500)
    estimator.fit(X_train, y_train)
    name = "MLP regression - with features"
    pred_fns.append({"name":name, "preds": estimator.predict(X) })
    plot_utils.plot_prediction_functions(x, pred_fns, x_train, y_train, legend_loc="best")
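
MLPRegression is likewise course code that is not shown here. A minimal sketch, under the assumption of a single tanh hidden layer with num_hidden_units units, a linear output, squared-error loss, and full-batch gradient descent, matching the num_hidden_units, step_size, init_param_scale, and max_num_epochs arguments used above:

import numpy as np

class MLPRegression:
    """Minimal sketch (not the course implementation): one tanh hidden layer,
    linear output, squared-error loss, full-batch gradient descent."""
    def __init__(self, num_hidden_units=10, step_size=0.001,
                 init_param_scale=0.01, max_num_epochs=1000):
        self.h = num_hidden_units
        self.step_size = step_size
        self.scale = init_param_scale
        self.max_num_epochs = max_num_epochs

    def fit(self, X, y):
        n, d = X.shape
        rng = np.random.default_rng(0)
        # Parameters: hidden weights/bias, output weights/bias
        self.W1 = self.scale * rng.standard_normal((d, self.h))
        self.b1 = np.zeros(self.h)
        self.w2 = self.scale * rng.standard_normal(self.h)
        self.b2 = 0.0
        for _ in range(self.max_num_epochs):
            # Forward pass
            Z = np.tanh(X.dot(self.W1) + self.b1)   # (n, h) hidden activations
            preds = Z.dot(self.w2) + self.b2        # (n,) predictions
            err = preds - y
            # Backward pass for the mean squared error
            grad_preds = 2 * err / n
            grad_w2 = Z.T.dot(grad_preds)
            grad_b2 = grad_preds.sum()
            grad_A = np.outer(grad_preds, self.w2) * (1 - Z ** 2)  # pre-activation grad
            grad_W1 = X.T.dot(grad_A)
            grad_b1 = grad_A.sum(axis=0)
            # Gradient step
            self.W1 -= self.step_size * grad_W1
            self.b1 -= self.step_size * grad_b1
            self.w2 -= self.step_size * grad_w2
            self.b2 -= self.step_size * grad_b2
        return self

    def predict(self, X):
        Z = np.tanh(X.dot(self.W1) + self.b1)
        return Z.dot(self.w2) + self.b2
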
Example #3
def main():
    lasso_data_fname = "lasso_data.pkl"
    x_train, y_train, x_val, y_val, target_fn, coefs_true, featurize = setup_problem.load_problem(
        lasso_data_fname
    )

    # Generate features
    X_train = featurize(x_train)
    # X_val = featurize(x_val)

    # Let's plot prediction functions and compare coefficients for several fits
    # and the target function.
    pred_fns = []
    x = np.sort(np.concatenate([np.arange(0, 1, 0.001), x_train]))

    pred_fns.append(
        {
            "name": "Target Parameter Values (i.e. Bayes Optimal)",
            "coefs": coefs_true,
            "preds": target_fn(x),
        }
    )

    X = featurize(x)
    estimator = LinearRegression(step_size=0.001, max_num_epochs=1000)
    estimator.fit(X_train, y_train, print_every=100)
    name = "Linear regression"
    pred_fns.append({"name": name, "preds": estimator.predict(X)})
    plot_utils.plot_prediction_functions(
        x, pred_fns, x_train, y_train, legend_loc="best"
    )
    os.makedirs("img", exist_ok=True)
    plt.savefig(os.path.join("img", "linear_regression.png"))
    plt.show()
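
The LinearRegression class used in this and the following examples is again course code; the print_every argument passed to fit suggests periodic loss logging during training. A minimal sketch, assuming unregularized least squares fit by full-batch gradient descent:

import numpy as np

class LinearRegression:
    """Minimal sketch (not the course implementation): ordinary least squares
    fit by full-batch gradient descent on the mean squared error."""
    def __init__(self, step_size=0.001, max_num_epochs=1000):
        self.step_size = step_size
        self.max_num_epochs = max_num_epochs
        self.w = None

    def fit(self, X, y, print_every=None):
        n, d = X.shape
        self.w = np.zeros(d)
        for epoch in range(self.max_num_epochs):
            residual = X.dot(self.w) - y
            grad = 2 * X.T.dot(residual) / n
            self.w -= self.step_size * grad
            if print_every and (epoch + 1) % print_every == 0:
                print("epoch %d: mean squared error %.4f"
                      % (epoch + 1, np.mean(residual ** 2)))
        return self

    def predict(self, X):
        return X.dot(self.w)
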
Example #4
def main():
    lasso_data_fname = "lasso_data.pickle"
    x_train, y_train, x_val, y_val, target_fn, coefs_true, featurize = setup_problem.load_problem(lasso_data_fname)

    # Generate features
    X_train = featurize(x_train)  # featurize is the function returned by get_target_and_featurizer() in setup_problem.py
    X_val = featurize(x_val)

    # Let's plot prediction functions and compare coefficients for several fits
    # and the target function.
    pred_fns = []
    x = np.sort(np.concatenate([np.arange(0,1,.001), x_train]))

    pred_fns.append({"name": "Target Parameter Values (i.e. Bayes Optimal)", "coefs": coefs_true, "preds": target_fn(x)})

    X = featurize(x)
    estimator = LinearRegression(step_size=0.001, max_num_epochs=1000)
    estimator.fit(X_train, y_train)
    name = "Linear regression"
    pred_fns.append({"name":name, "preds": estimator.predict(X) })
    plot_utils.plot_prediction_functions(x, pred_fns, x_train, y_train, legend_loc="best")
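
Each example computes X_val = featurize(x_val) but never uses it. A natural follow-up, sketched here under the assumption that every estimator exposes predict as shown above, is to score the fitted models on the validation set:

import numpy as np

def mean_squared_error(estimator, X, y):
    """Average squared prediction error of a fitted estimator on (X, y)."""
    residual = estimator.predict(X) - y
    return np.mean(residual ** 2)

# Hypothetical usage after fitting, e.g. inside one of the main() functions above:
# val_mse = mean_squared_error(estimator, X_val, y_val)
# print("Validation MSE for %s: %.4f" % (name, val_mse))
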
Example #5
def main():
    lasso_data_fname = "c:/Users/jack/ml_homework_2020/homework7/code/lasso_data.pickle"
    #lasso_data_fname = "/Users/shunshun/github/ml_homework_2020/homework7/code/lasso_data.pickle"
    #lasso_data_fname = "lasso_data.pickle"
    x_train, y_train, x_val, y_val, target_fn, coefs_true, featurize = setup_problem.load_problem(
        lasso_data_fname)

    # Generate features
    X_train = featurize(x_train)
    X_val = featurize(x_val)

    print(y_train[1])

    print(X_train.shape, y_train.shape)

    # Let's plot prediction functions and compare coefficients for several fits
    # and the target function.
    pred_fns = []
    x = np.sort(np.concatenate([np.arange(0, 1, .001), x_train]))

    pred_fns.append({
        "name": "Target Parameter Values (i.e. Bayes Optimal)",
        "coefs": coefs_true,
        "preds": target_fn(x)
    })

    X = featurize(x)
    estimator = LinearRegression(step_size=0.001, max_num_epochs=100)
    estimator.fit(X_train, y_train)
    name = "Linear regression"
    pred_fns.append({"name": name, "preds": estimator.predict(X)})
    plot_utils.plot_prediction_functions(x,
                                         pred_fns,
                                         x_train,
                                         y_train,
                                         legend_loc="best")