Example #1
                                     batch_size=1000,
                                     n_iter=10,
                                     n_stable=10,
                                     verbose=True)

            X_train, X_test = X[train_index], X[test_index]
            y_train, y_test = y[train_index], y[test_index]

            # print("fitting model...")
            mlp.fit(X_train, y_train)

            # print("scoring model...")
            # print("predicted:", mlp.predict(X_test))
            # print("actual:", y_test)
            r2s.append(mlp.score(X_test, y_test))
            y_pred = mlp.predict(X_test)
            mses.append(mse(y_pred, y_test))
            mae_score = mae(y_pred, y_test)
            maes.append(mae_score)
            # print("MAE score =", mae_score)
            # round the regression outputs to the nearest integer so they can be
            # compared against the true labels as discrete classes
            accs.append(accuracy_score([[round(y[0])] for y in y_pred],
                                       y_test))

        mean_mae = np.mean(maes)
        mean_mse = np.mean(mses)
        mean_r2 = np.mean(r2s)
        mean_acc = np.mean(accs)

        # bundle the fold-averaged scores with the hyper-parameters used for this run
        model = (mean_mse, mean_mae, mean_r2, mean_acc, dropout_rate,
                 regularize, learning_rule, kernel_shape[0], kernel_shape[1])
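
The call whose tail opens Example #1 is not included in the snippet. Judging by the parameter names used here and further down (batch_size, n_iter, n_stable, dropout_rate, regularize, learning_rule, kernel_shape), it most likely constructs scikit-neuralnetwork's sknn.mlp.Regressor. A minimal sketch of the assumed surrounding setup, with hypothetical hyper-parameter values and the metric aliases (mse, mae) the snippet relies on:

import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_absolute_error as mae
from sklearn.metrics import mean_squared_error as mse
from sknn.mlp import Convolution, Layer, Regressor

# hypothetical hyper-parameter values; the originals are not shown in the snippet
kernel_shape = (3, 3)
dropout_rate = 0.25
regularize = 'L2'
learning_rule = 'rmsprop'

mlp = Regressor(layers=[Convolution('Rectifier', channels=8,
                                    kernel_shape=kernel_shape),
                        Layer('Linear')],
                learning_rule=learning_rule,
                regularize=regularize,
                dropout_rate=dropout_rate,
                batch_size=1000,
                n_iter=10,
                n_stable=10,
                verbose=True)

# per-fold score lists that the cross-validation loop appends to
r2s, mses, maes, accs = [], [], [], []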
Example #2
        # progress print every 100,000 iterations
        if h % 100000 == 0:
            print(h)

    print("reshaping data...")
    Xs = dict()
    ys = dict()
    for h in feats:
        samples = len(feats[h]) // 384    # 6 * 8 * 8 = 384 values per sample
        Xs[h] = np.array(feats[h]).reshape((samples, 6, 8, 8))
        ys[h] = np.array(labels[h])

    mse_scores = dict()
    acc_scores = dict()

    # score the trained network separately for each tree height h
    for h in feats:
        y_pred = mlp.predict(np.array(Xs[h]))
        mse_scores[h] = mse(y_pred, ys[h])
        acc_scores[h] = accuracy_score([[round(y[0])] for y in y_pred], ys[h])

    acc_data = []
    mse_data = []
    h_data = []
    for h in sorted(feats.keys()):
        h_data.append(h)
        mse_data.append(mse_scores[h])
        acc_data.append(1 - acc_scores[h])

    # fig, ax1 = plt.subplots()
    # fig.suptitle('Tree Height v. Prediction Error')
    # plt1 = ax1.plot(h_data, mse_data, color='blue', label='mse')
    # ax1.set_ylabel('mean squared regression error (MSE)')
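
The plotting code at the end of Example #2 is commented out and cut off. A small sketch of how the two error curves could be drawn on twin y-axes, reusing h_data, mse_data and acc_data built above; any labels beyond those visible in the comments are assumptions:

import matplotlib.pyplot as plt

fig, ax1 = plt.subplots()
fig.suptitle('Tree Height v. Prediction Error')

# left axis: regression error per tree height
ax1.plot(h_data, mse_data, color='blue', label='mse')
ax1.set_xlabel('tree height')
ax1.set_ylabel('mean squared regression error (MSE)')

# right axis: classification error (1 - accuracy) on a shared x-axis
ax2 = ax1.twinx()
ax2.plot(h_data, acc_data, color='red', label='1 - accuracy')
ax2.set_ylabel('classification error (1 - accuracy)')

fig.legend(loc='upper right')
plt.show()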
Example #3
    X, y = list(zip(*examples))
    X = np.array(X)
    y = np.array(y)
    del examples

    # pre-0.18 scikit-learn KFold signature (sklearn.cross_validation);
    # only the first of the five folds is used as the train/test split
    kf = KFold(n=len(X), n_folds=5, shuffle=True, random_state=np.random)
    train_index, test_index = next(iter(kf))
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]
    del X
    del y

    print("fitting model...")
    mlp.fit(X_train, y_train)

    print("scoring model...")
    # print("predicted:", mlp.predict(X_test))
    # print("actual:", y_test)
    print("R^2 score =", mlp.score(X_test, y_test))
    y_pred = mlp.predict(X_test)
    print("MSE score =", mse(y_pred, y_test))
    print("MAE score =", mae(y_pred, y_test))
    print("accuracy_score =", accuracy_score([[round(y[0])] for y in y_pred], y_test))

    # persist the trained network to disk
    fn = os.path.join(settings['data-base'], 'nn_tanh3.pickle')
    with open(fn, 'wb') as f:
        pickle.dump(mlp, f)
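
Example #3 ends by pickling the trained network. A short sketch of loading it back for later use; it assumes the same settings dict and test data are available:

import os
import pickle

fn = os.path.join(settings['data-base'], 'nn_tanh3.pickle')
with open(fn, 'rb') as f:
    mlp = pickle.load(f)

# the restored network predicts exactly as before
y_pred = mlp.predict(X_test)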