# Example #1 (scraped-snippet separator; "0" was a vote count from the source page)
                                                    y,
                                                    random_state=10,
                                                    test_size=0.3)
# x-axis index (1..n) for the commented-out prediction plots below.
w = range(1, len(y_test) + 1)

# Random-forest regressor fitted on the training split.
# NOTE(review): parameters updated for scikit-learn >= 1.0 compatibility:
#   criterion='mae'     -> 'absolute_error' (renamed in 1.0; 'mae' removed)
#   max_features='auto' -> 1.0 (for regressors 'auto' meant all features;
#                          the string 'auto' was removed in 1.3)
#   min_impurity_split  -> dropped entirely (parameter removed in 1.0)
reg = RandomForestRegressor(n_estimators=30,
                            criterion='absolute_error',
                            max_depth=None,
                            min_samples_split=2,
                            min_samples_leaf=1,
                            min_weight_fraction_leaf=0.0,
                            max_features=1.0,
                            max_leaf_nodes=None,
                            min_impurity_decrease=0.0,
                            bootstrap=True,
                            oob_score=False,
                            n_jobs=1,
                            random_state=None,
                            verbose=0,
                            warm_start=False)
reg.fit(x_train, y_train)
y_predict = reg.predict(x_test)
fnc.errors(y_test, y_predict)  # project helper: reports error metrics
# plt.plot(w, y_predict, color='red')
# plt.plot(w, y_test, color='blue')
# plt.show()

# Persist the dataset -- presumably outlier-filtered upstream; only the
# filename suggests that. TODO(review): confirm `data` was cleaned.
data.to_csv("withoutOutliers.csv")

fnc.plot_act_pred(y_test, y_predict)  # project helper: actual-vs-predicted plot
# Example #2 (scraped-snippet separator; "0" was a vote count from the source page)
                precompute=False,
                copy_X=True,
                max_iter=1000,
                tol=0.0001,
                warm_start=False,
                positive=False,
                random_state=None,
                selection='cyclic')
    reg.fit(x_train, y_train)
    y_predict = reg.predict(x_test)
    err.append(mean_squared_error(y_test, y_predict))
    err1.append(mean_absolute_error(y_test, y_predict))
    err2.append(r2_score(y_test, y_predict))
    err3.append(median_absolute_error(y_test, y_predict))
    err4.append(explained_variance_score(y_test, y_predict))
    errors(y_test, y_predict)
    print("*******************************************************")


def normalization(data):
    """Return *data* unchanged.

    Z-score scaling ((x - mean) / std) was deliberately disabled here;
    the hook is kept so callers need not change if it is re-enabled.
    """
    return data


# Run every metric series through the (currently pass-through) normalization.
err, err1, err2, err3 = (normalization(series)
                         for series in (err, err1, err2, err3))
# Example #3 (scraped-snippet separator; "0" was a vote count from the source page)
    model.add(
        Dense(12, input_dim=12, kernel_initializer='normal',
              activation='relu'))
    model.add(
        Dense(12, input_dim=12, kernel_initializer='normal',
              activation='relu'))
    model.add(Dense(1, kernel_initializer='normal'))

    # compile model
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model


# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
x_train, x_test, y_train, y_test = train_test_split(X, Y)

# evaluate model with standardized dataset.
# NOTE(review): the Keras-1.x wrapper spelled this argument `nb_epoch`;
# later wrappers use `epochs` and silently ignore unknown kwargs, so the
# old spelling trained for the default epoch count instead of 1.
estimator = KerasRegressor(build_fn=baseline_model,
                           epochs=1,
                           batch_size=5,
                           verbose=0)

# shuffle=True is required for random_state to take effect; scikit-learn
# >= 0.24 raises ValueError for KFold(random_state=...) with shuffle=False.
kfold = KFold(n_splits=30, shuffle=True, random_state=seed)
results = cross_val_score(estimator, X, Y, cv=kfold)
print("Results: %.2f (%.2f) MSE" % (results.mean(), results.std()))

estimator.fit(x_train, y_train)
y_pred = estimator.predict(x_test)
fnc.errors(y_test, y_pred)  # project helper: reports error metrics