Example #1
3
    def test_handle_errors(self):
        """Invalid inputs to GRNN must raise the documented exceptions."""
        with self.assertRaises(ValueError):
            # Wrong: size of target data not the same as size of
            # input data.
            GRNN().train(
                np.array([[0], [0]]), np.array([0])
            )

        with self.assertRaises(ValueError):
            # Wrong: 2-D target vector (must be 1-D)
            GRNN().train(
                np.array([[0], [0]]), np.array([[0]])
            )

        with self.assertRaises(AttributeError):
            # Wrong: can't use iterative learning process for this
            # algorithm
            GRNN().train_epoch()

        # Set-up stays outside assertRaises so the context manager only
        # covers the one call expected to fail; a ValueError raised during
        # training would otherwise satisfy the assertion for the wrong reason.
        grnet = GRNN()
        grnet.train(np.array([[0], [0]]), np.array([0]))
        with self.assertRaises(ValueError):
            # Wrong: invalid feature size for prediction data
            grnet.predict(np.array([[0]]))
Example #2
0
def build_model2(std, train_size, t, x):
    """Train a GRNN on a shuffled split of (t, x) and report/plot RMSE.

    Parameters
    ----------
    std : float
        Smoothing parameter (standard deviation) of the GRNN kernel.
    train_size : float
        Fraction of samples to use for training (converted to a count).
    t : np.ndarray
        1-D input values.
    x : np.ndarray
        1-D target values, same length as ``t``.
    """
    # Convert the fractional train_size into an absolute sample count.
    train_size = int(t.shape[0] * train_size)

    X_train, X_test, y_train, y_test = train_test_split(t,
                                                        x,
                                                        train_size=train_size,
                                                        shuffle=True,
                                                        random_state=14)

    # Standardize inputs and targets; scalers are fit on training data only
    # so the test set gets the same transform without leakage.
    scaler_x = StandardScaler()
    scaler_y = StandardScaler()
    tmp_train_scaled_x = scaler_x.fit_transform(X_train[:, np.newaxis])
    tmp_test_scaled_x = scaler_x.transform(X_test[:, np.newaxis])
    tmp_train_scaled_y = scaler_y.fit_transform(y_train[:, np.newaxis])

    grnn = GRNN(std=std)
    grnn.fit(tmp_train_scaled_x, tmp_train_scaled_y)

    # Evaluate/plot on the training split, then on the held-out split.
    _grnn_report_and_plot(grnn, scaler_y, t, x,
                          tmp_train_scaled_x, X_train, y_train, 'train')
    _grnn_report_and_plot(grnn, scaler_y, t, x,
                          tmp_test_scaled_x, X_test, y_test, 'test')


def _grnn_report_and_plot(grnn, scaler_y, t, x, scaled_x, X, y, label):
    """Predict on ``scaled_x``, print RMSE against ``y``, and plot results."""
    pred_x = grnn.predict(scaled_x)
    # Undo the target scaling so the error is in the original units.
    pred_x = scaler_y.inverse_transform(pred_x)
    mse = mean_squared_error(y, pred_x.flatten())
    print(f'RMSE = {np.sqrt(mse)}')

    plt.plot(t, x, c='r')
    plt.scatter(X, y, label=label)
    plt.scatter(X, pred_x, label='predict')
    plt.legend()
    plt.show()
Example #3
0
    def test_simple_grnn(self):
        """A GRNN trained on diabetes data reaches the expected RMSLE."""
        data = datasets.load_diabetes()
        features_train, features_test, target_train, target_test = (
            train_test_split(data.data, data.target,
                             train_size=0.7, random_state=0)
        )

        network = GRNN(standard_deviation=0.1)
        network.train(features_train, target_train)
        predicted = network.predict(features_test)

        self.assertAlmostEqual(rmsle(predicted, target_test), 0.4245,
                               places=4)
# Reproduce the original data but only with training values. Use .copy():
# the original aliasing (df_cluster = X_train) made the next line add the
# 'Grade' target column to X_train itself, leaking the target into the
# features later given to cross_val_score.
df_cluster = X_train.copy()
df_cluster['Grade'] = Y_train

# Cluster the training data with RBF k-means; choose the number of
# clusters (prototypes) that you want.
rbfk_net = RBFKMeans(n_clusters=prototypes)
rbfk_net.train(df_cluster, epsilon=1e-5)
center = pd.DataFrame(rbfk_net.centers)

# Turn the centers into the prototype values needed. 'Grade' was appended
# last, so Y_prototypes is the last column of center.
X_prototypes = center.iloc[:, 0:-1]
Y_prototypes = center.iloc[:, -1]

# Train the GRNN on the prototype points.
GRNNet = GRNN(std=0.1)
GRNNet.train(X_prototypes, Y_prototypes)

# Cross validation (R^2) on the training data.
score = cross_val_score(GRNNet, X_train, Y_train, scoring='r2', cv=5)
print("")
print("Cross Validation: {0} (+/- {1})".format(score.mean().round(2),
                                               (score.std() * 2).round(2)))
print("")

# Prediction on the full feature set, un-scaled back to original units
# (minmax/minval presumably come from an earlier scaling step — verify).
Y_predict = GRNNet.predict(X)
print(Y.values * minmax + minval)
print((Y_predict * minmax + minval)[:, 0].round(2))
print("")
print("Accuracy: {0}".format(metrics.r2_score(Y, Y_predict).round(2)))
print("")