import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.preprocessing import MinMaxScaler
from neural_net import NeuralNetwork

# 1D artificial data
X = np.arange(0, 20).reshape(20, 1) + np.random.randn(20, 1)
y = (np.arange(0, 20) + np.random.randn(20)).reshape(20, 1)

# Scale features and labels to [0, 1] with separate scalers
x_scaler = MinMaxScaler()
y_scaler = MinMaxScaler()
X = x_scaler.fit_transform(X)
y = y_scaler.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.3,
                                                    random_state=101)
plt.plot(X, y, '*', label='data')
plt.xlabel('features')
plt.ylabel('labels')
plt.title('DNN Regressor')

nn = NeuralNetwork(mode='regression')
nn.train(X_train, y_train, 1000)
y_pred = nn.predict(X_test)
print()
r2 = r2_score(y_test, y_pred)
print('The R^2 score is:', r2)

plt.plot(X_test, y_pred, 'r*', label='test predictions')
plt.legend()
plt.show()
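
The R^2 above is computed on the scaled targets. For an error figure that is easier to read in the original units, the predictions can be mapped back through the label scaler; a minimal sketch, assuming the separate y_scaler fitted above:

from sklearn.metrics import mean_absolute_error

# Undo the [0, 1] scaling of the labels before measuring the error
y_pred_orig = y_scaler.inverse_transform(y_pred)
y_test_orig = y_scaler.inverse_transform(y_test)
print('MAE in original units:', mean_absolute_error(y_test_orig, y_pred_orig))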

Example #2

import numpy as np
import pandas as pd

# plotting_tools, preprocessing and NeuralNetwork are project-local modules
# assumed to be importable alongside this script.


def run():
    df = pd.read_csv('spambase_data\\spambase.data', header=None)
    df = df.sample(frac=1).reset_index(drop=True)
    print(f'No. missing values: {df.isnull().sum().sum()}')

    X = df.drop(57, axis=1)
    y = df.loc[:, 57]

    # Feature selection

    abs_corr_w_target = X.apply(
        lambda col: col.corr(y)).abs().sort_values().to_frame()
    abs_corr = X.corr().abs()
    plotting_tools.plot_heatmap(abs_corr_w_target,
                                title='Correlation with target',
                                size=(8, 16),
                                one_dim=True)
    plotting_tools.plot_heatmap(abs_corr,
                                title='Correlation before feature selection',
                                size=(10, 16))

    to_drop = set()

    # Amount of variation
    variance = X.var(axis=0, ddof=1)
    to_drop.update(variance[variance < 0.01].index.values)

    # Correlation with target
    to_drop.update(abs_corr_w_target[abs_corr_w_target[0] < 0.01].index.values)

    # Pairwise correlation (find_correlated is a project helper, presumably
    # flagging one feature from each pair of highly correlated features)
    to_drop.update(preprocessing.find_correlated(abs_corr, abs_corr_w_target))

    to_drop = list(to_drop)
    nr_dropped = len(to_drop)
    X.drop(to_drop, axis=1, inplace=True)
    abs_corr = X.corr().abs()
    plotting_tools.plot_heatmap(abs_corr,
                                title='Correlation after feature selection',
                                size=(10, 16))
    print(f'Dropped features: {to_drop}')

    # Data standardization works better, use normalization only for tests
    # X = preprocessing.normalize_data(X)
    X = preprocessing.standardize_data(X)

    X = X.values
    y = y.values
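    # 60 / 20 / 20 split into training, cross-validation and test sets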
    train_inputs, cv_inputs, test_inputs = np.split(
        X, [int(0.6 * len(df)), int(0.8 * len(df))])
    train_outputs, cv_outputs, test_outputs = np.split(
        y, [int(0.6 * len(df)), int(0.8 * len(df))])

    print(f'Training set size: {train_outputs.shape[0]}\n'
          f'Cross validation set size: {cv_outputs.shape[0]}\n'
          f'Test set size: {test_outputs.shape[0]}')

    model = NeuralNetwork([57 - nr_dropped, 32, 1],
                          activation_function='sigmoid')

    # Only use this part for tuning hyperparameters, slows down the program significantly
    # lambdas = list(np.arange(0.5, 1.5, 0.1))
    # model.plot_learning_curves(train_inputs, train_outputs, cv_inputs, cv_outputs,
    #                           learning_rate=1.5, epochs=500, lambda_=0.6)
    # model.plot_validation_curves(train_inputs, train_outputs, cv_inputs, cv_outputs,
    #                             learning_rate=1.5, epochs=1000, lambdas=lambdas)

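    # Positional arguments presumably mirror the tuning calls above:
    # learning_rate=1.5, epochs=4000, lambda_=0.6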
    model.gradient_descent(train_inputs,
                           train_outputs,
                           1.5,
                           4000,
                           0.6,
                           gradient_check=False,
                           plot_cost=False)

    train_predictions = np.where(model.predict(train_inputs) > 0.5, 1, 0)
    test_predictions = np.where(model.predict(test_inputs) > 0.5, 1, 0)

    train_columns = {
        'Train predictions': train_predictions[:, 0],
        'Train outputs': train_outputs
    }
    test_columns = {
        'Test predictions': test_predictions[:, 0],
        'Test outputs': test_outputs
    }

    train_results = pd.DataFrame(train_columns)
    test_results = pd.DataFrame(test_columns)

    train_correct = (train_results['Train predictions'] ==
                     train_results['Train outputs']).value_counts()[True]
    test_correct = (test_results['Test predictions'] ==
                    test_results['Test outputs']).value_counts()[True]

    test_positive_predictions = test_results[test_results['Test predictions']
                                             == 1]
    test_negative_predictions = test_results[test_results['Test predictions']
                                             == 0]

    test_is_positive_correct = (
        test_positive_predictions['Test predictions'] ==
        test_positive_predictions['Test outputs']).value_counts()
    test_is_negative_correct = (
        test_negative_predictions['Test predictions'] ==
        test_negative_predictions['Test outputs']).value_counts()

    test_true_positives = test_is_positive_correct[True]
    test_false_positives = test_is_positive_correct[False]
    test_true_negatives = test_is_negative_correct[True]
    test_false_negatives = test_is_negative_correct[False]

    test_precision = test_true_positives / (test_true_positives +
                                            test_false_positives)
    test_recall = test_true_positives / (test_true_positives +
                                         test_false_negatives)
    test_confusion_matrix = pd.DataFrame(
        [[test_true_positives, test_false_positives],
         [test_false_negatives, test_true_negatives]],
        columns=[1, 0],
        index=[1, 0])

    train_acc = train_correct / len(train_outputs)
    test_acc = test_correct / len(test_outputs)
    print(f'train_acc = {train_acc}')
    print(f'test_acc = {test_acc}')
    print(f'test_precision = {test_precision}')
    print(f'test_recall = {test_recall}')

    plotting_tools.plot_cm(test_confusion_matrix, title='Confusion matrix')
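
The precision and recall bookkeeping above can be cross-checked against scikit-learn's metrics. A minimal sketch, assuming y_true and y_pred hold the same 0/1 arrays as test_outputs and test_predictions[:, 0] inside run():

from sklearn.metrics import accuracy_score, confusion_matrix, precision_score, recall_score


def report_metrics(y_true, y_pred):
    print('accuracy :', accuracy_score(y_true, y_pred))
    print('precision:', precision_score(y_true, y_pred))
    print('recall   :', recall_score(y_true, y_pred))
    # Rows are actual classes, columns predicted classes, ordered [1, 0]
    print(confusion_matrix(y_true, y_pred, labels=[1, 0]))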

Example #3

# functions
def n():
    return int(input('Enter a number (0 or 1): '))


# input data
inputs = np.array([[0, 1, 0], [0, 0, 0], [0, 1, 1], [0, 0, 0], [1, 0, 0],
                   [1, 1, 1], [1, 0, 1]])
# output data
outputs = np.array([[0], [0], [0], [0], [1], [1], [1]])

# create a neural network
NeuralN = NeuralNetwork(inputs, outputs)
NeuralN.train()

# create two new examples to test and predict
example1 = np.array([[1, 1, 0]])
example2 = np.array([[0, 1, 1]])
exampleUser = np.array([[n(), n(), n()]])

# print and predict the examples
print(NeuralN.predict(example1), '- Correct: ', example1[0][0])
print(NeuralN.predict(example2), '- Correct: ', example2[0][0])
print(NeuralN.predict(exampleUser), '- Correct: ', exampleUser[0][0])

# plot the error over the entire training duration
plt.figure(figsize=(15, 5))
plt.plot(NeuralN.iters_hist, NeuralN.costerror_hist)
plt.xlabel('Iters')
plt.ylabel('Cost Error')
plt.show()
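
The NeuralNetwork class this example depends on is not shown. The sketch below is one minimal, single-layer sigmoid implementation consistent with how it is used above (a constructor taking inputs and outputs, train(), predict(), and the iters_hist / costerror_hist attributes used for plotting); everything beyond those names is an assumption.

import numpy as np


class NeuralNetwork:
    """Minimal single-layer sigmoid network matching the usage above."""

    def __init__(self, inputs, outputs):
        self.inputs = inputs
        self.outputs = outputs
        self.weights = np.random.randn(inputs.shape[1], 1)
        self.iters_hist = []
        self.costerror_hist = []

    @staticmethod
    def _sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    def predict(self, x):
        # Forward pass: weighted sum squashed through the sigmoid
        return self._sigmoid(np.dot(x, self.weights))

    def train(self, epochs=10000, learning_rate=0.5):
        for i in range(epochs):
            pred = self.predict(self.inputs)
            error = pred - self.outputs
            # Gradient of the mean squared error w.r.t. the weights
            grad = np.dot(self.inputs.T, error * pred * (1 - pred))
            self.weights -= learning_rate * grad
            self.iters_hist.append(i)
            self.costerror_hist.append(float(np.mean(error ** 2)))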

Example #4

import cv2
import numpy as np

# get_screen_features, get_key_pressed, PressKey, ReleaseKey and NeuralNetwork
# are project-local helpers assumed to be importable alongside this script.


def main():

    print("Fit pong screen into the window")
    print("Press 'up' or 'down' to start infering actions.")
    print("Press 'q' for quit.")

    inferring = False

    # Load the neural network
    nn = NeuralNetwork()
    nn.load()

    # Call once to clear the buffer of pressed keys
    get_key_pressed()

    last_pos_h = 0
    last_pos_v = 0

    # Keep tracking the object locations and the keys pressed
    while True:

        screen, obj_locations = get_screen_features()

        key_pressed = get_key_pressed()

        cv2.imshow("PilotoRobo - PythonJogaPong", screen)

        # Advance to the next frame after a 1 ms wait
        # Exit when 'q' is pressed (or the key listener reports -1)
        if cv2.waitKey(1) == ord('q') or key_pressed == -1:
            cv2.destroyAllWindows()
            break

        # Calculate speeds (change in position since the last frame)
        h_speed = obj_locations[0] - last_pos_h
        v_speed = obj_locations[1] - last_pos_v

        last_pos_h = obj_locations[0]
        last_pos_v = obj_locations[1]

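        # Feature vector: the object locations with the two speed terms inserted at index 2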
        screen_features = np.insert(obj_locations, 2, [h_speed, v_speed])

        # Check whether we are already inferring actions
        if inferring:

            prediction_probs = nn.predict([screen_features])[0]
            prediction = np.argmax(prediction_probs)

            if prediction == 0:
                print(prediction_probs, "Nothing")
                ReleaseKey(0x48)
                ReleaseKey(0x50)
            elif prediction == 1:
                print(prediction_probs, "Up")
                ReleaseKey(0x50)
                PressKey(0x48)
            elif prediction == 2:
                print(prediction_probs, "Down")
                ReleaseKey(0x48)
                PressKey(0x50)

        elif key_pressed > 0:
            print("Inferring")
            inferring = True

Example #5
        # Check whether an alpha was found before testing the accrued gradients' norm
        if testConvergence and proxy.getAccruedGradientsNorm() < gtol:
            print("converged!!")
            break

        step += 1

    print('steps', step)
    print('process time:', time.time() - startTime)

    encoded_x, shape_x, encoded_y, shape_y = proxy.getAllData()
    X = np.frombuffer(base64.decodebytes(encoded_x),
                      dtype=np.float64).reshape(shape_x)
    y = np.frombuffer(base64.decodebytes(encoded_y),
                      dtype=np.int64).reshape(shape_y)

    nn = NeuralNetwork(neuralNetLayers)
    encoded_params = proxy.getParameters()
    params = np.frombuffer(base64.decodebytes(encoded_params),
                           dtype=np.float64)
    print('Function cost:', nn.cost(params, X, y))

    nn.set_weights(params)
    correct = 0
    for i, e in enumerate(X):
        # print(e, nn.predict(e))
        prediction = list(nn.predict(e))
        # print("Label:", y[i], "| Predictions:", prediction)
        # The predicted class is the index of the largest output
        if prediction.index(max(prediction)) == np.argmax(y[i]):
            correct += 1
    print("Correct:", correct, "/", len(X), "(", 100.0 * correct / len(X), "%)")