Example no. 1
    def test_predict(self):
        # A single 10x10 layer with a squaring activation.
        target = NeuralNetwork()
        target.addLayer(10, 10, lambda x: x**2)

        # With all-zero weights and biases the pre-activation is zero for
        # any input, so the prediction must be a zero vector.
        prediction = target.predict(
            tf.constant([1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                        dtype=tf.dtypes.float32), [tf.zeros([10, 10])],
            [tf.zeros([10])])

        tf.debugging.assert_equal(prediction, tf.zeros(10))
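The test drives an assumed predict(input, weights, biases) API. A minimal forward pass consistent with the assertion (a sketch only; the NeuralNetwork internals are not shown in this example) could look like:

import tensorflow as tf

def predict(x, weights, biases, activation=lambda x: x**2):
    # One affine transform followed by the activation per layer.
    for W, b in zip(weights, biases):
        x = activation(tf.linalg.matvec(W, x) + b)
    return x

With W = tf.zeros([10, 10]) and b = tf.zeros([10]) every pre-activation is zero, and 0**2 = 0, which is exactly what the assertion checks.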
Example no. 2
    def predict(self, data):
        # Rebuild the 56 -> 54 -> 25 architecture used during training.
        nn = NeuralNetwork()
        l1 = Layer(56, 54)
        l2 = Layer(54, 25)

        nn.add(l1)
        nn.add(ActivationLayer(relu, relu_derivative))
        nn.add(l2)
        nn.add(ActivationLayer(sigmoid, sigmoid_derivative))

        # Restore the trained parameters from disk.
        l1.weights = np.load('weights1.npy')
        l2.weights = np.load('weights2.npy')

        l1.bias = np.load('bias1.npy')
        l2.bias = np.load('bias2.npy')

        # Forward pass, then take the most probable class.
        out = nn.predict(data)
        pred = np.argmax(out)

        return pred
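This example assumes relu, relu_derivative, sigmoid, and sigmoid_derivative are defined elsewhere in the project. The standard NumPy definitions these names conventionally refer to are:

import numpy as np

def relu(x):
    return np.maximum(0, x)

def relu_derivative(x):
    return (x > 0).astype(float)

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def sigmoid_derivative(x):
    s = sigmoid(x)
    return s * (1.0 - s)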
Example no. 3
# Build the dataset: one sample per file, with the label encoded in the
# file name (everything before the first underscore).
for f in files:
    d = np.loadtxt(path + "/" + f)
    x.append(d)
    y_name = f.split("_")[0]
    y.append(wordList[y_name])

x_data = np.array(x)
y_data = np.array(y)

# One-hot encode the labels and save the class order for decoding later.
lb = LabelBinarizer()
y_data = lb.fit_transform(y_data)
result = lb.classes_
pickle.dump(result, open('result.pkl', 'wb'))

nn = NeuralNetwork([960, 1500, 3], "logistic")
print("start")
nn.fit(x_data, y_data, epochs=1000)
pickle.dump(nn, open('nn.pkl', 'wb'))

# Map each network output back to its class label via argmax.
predictions = []
for i in range(x_data.shape[0]):
    o = nn.predict(x_data[i])
    d = result[np.argmax(o)]
    predictions.append(d)

for p in predictions:
    print(p)
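The result = lb.classes_ line saves the class order so that an argmax over the network output can be mapped back to a word. A short round-trip illustration of that pattern (standalone, with made-up labels):

import numpy as np
from sklearn.preprocessing import LabelBinarizer

lb = LabelBinarizer()
one_hot = lb.fit_transform(['cat', 'dog', 'bird'])  # shape (3, 3)
classes = lb.classes_                               # array(['bird', 'cat', 'dog'])

# Decode a network output by taking the argmax and indexing classes_:
scores = np.array([0.1, 0.7, 0.2])
print(classes[np.argmax(scores)])                   # prints 'cat'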
Example no. 4
import numpy as np
from sklearn.datasets import load_digits
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from neuralNetwork import NeuralNetwork

digits = load_digits()
X = digits.data
y = digits.target

# preprocessing: scale pixel values into [0, 1]
X -= X.min()
X /= X.max()

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
y_train = LabelBinarizer().fit_transform(y_train)

nn = NeuralNetwork([64, 100, 10], 'logistic')
nn.fit(X_train, y_train, epochs=3000)

predictions = nn.predict(X_test)
predictions = np.array(predictions)

print(confusion_matrix(y_test, predictions))
print(classification_report(y_test, predictions))
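Note that confusion_matrix and classification_report expect class labels, so this snippet assumes nn.predict already returns them. If predict instead returned a row of per-class scores per sample (as in the later digits example), a decoding step would be needed first:

predictions = np.argmax(predictions, axis=1)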
Example no. 6
    # Unroll the weight matrices into one parameter vector (column-major).
    theta = np.block([t.reshape(t.size, order='F') for t in Theta])
    sample_nn = NeuralNetwork(X_s, y_s, Theta, sample_dims)

    # Compare the backpropagated gradient against a numerical approximation.
    grads = sample_nn.cost_grad(theta)
    n_grads = sample_nn.cost_grad_numerical(theta)
    print('----------------------')
    print('Analytical | Numerical')
    print('----------------------')
    for g, n_g in zip(grads, n_grads):
        print('   {0: .4f} |   {1: .4f}'.format(g, n_g))
    # Standard relative-difference metric for gradient checking; a very
    # small value (roughly 1e-9 or below) indicates the two agree.
    diff = np.linalg.norm(n_grads - grads) / np.linalg.norm(n_grads + grads)
    print('Relative difference: {0}'.format(diff))

    print('========== Part 2.4: Regularized Neural Networks ==========')
    Theta = [data['Theta1'], data['Theta2']]
    theta = np.block([t.reshape(t.size, order='F') for t in Theta])
    nn.update_lambda(3)
    J = nn.cost(theta)
    print('Regularized cost: {0:0.6f} (expected: 0.576051)'.format(J))

    print('========== Part 2.5: Learning Parameters ==========')
    Theta = [
        nn.initialize_weights(layer_dims[i], layer_dims[i + 1], 0.12)
        for i in range(len(layer_dims) - 1)
    ]
    theta = np.block([t.reshape(t.size, order='F') for t in Theta])
    nn.update_lambda(1)
    J, theta = nn.optimize(theta)
    p = nn.predict(theta)
    print('Trained accuracy: {}'.format(np.mean(p == y) * 100))
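cost_grad_numerical is not shown here; numerical gradient checks of this kind are conventionally implemented with central differences. A sketch, assuming a scalar-valued cost(theta) like the one above:

import numpy as np

def cost_grad_numerical(cost, theta, eps=1e-4):
    # Central-difference approximation of the gradient of cost at theta.
    grad = np.zeros_like(theta)
    for i in range(theta.size):
        e = np.zeros_like(theta)
        e[i] = eps
        grad[i] = (cost(theta + e) - cost(theta - e)) / (2 * eps)
    return grad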
Example no. 7
import numpy as np
from neuralNetwork import NeuralNetwork
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn import datasets

print('[INFO] Load digits dataset')
digits = datasets.load_digits()
data = digits.data.astype('float')
data = (data - data.min()) / (data.max() - data.min())
print('[INFO] Sample {}, dim {}'.format(data.shape[0], data.shape[1]))

# split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(data,
                                                    digits.target,
                                                    test_size=0.25)

# convert labels to one-hot encoding
y_train = LabelBinarizer().fit_transform(y_train)
y_test = LabelBinarizer().fit_transform(y_test)

print('[INFO] Train neural network')
nn = NeuralNetwork([X_train.shape[1], 32, 16, 10])
print('[INFO] Neural network info {}'.format(nn))
nn.fit(X_train, y_train, epochs=1000)

print('[INFO] Evaluate')
predict = nn.predict(X_test)
predict = np.argmax(predict, axis=1)
print(classification_report(np.argmax(y_test, axis=1), predict))
Example no. 8
import numpy as np
import matplotlib.pyplot as plt

# Ising and NeuralNetwork are project-local modules; their import lines are
# not part of this excerpt.


def train_net_predict_energy(L=10, N=5000):
    ising = Ising(L, N)
    X, y = ising.generateTrainingData1D()
    y /= L
    n_samples, n_features = X.shape

    nn = NeuralNetwork(inputs=L,
                       neurons=L * L,
                       outputs=1,
                       activations='sigmoid',
                       cost='mse',
                       silent=False)
    nn.addLayer(neurons=L * L)
    nn.addLayer(neurons=L * L)
    nn.addOutputLayer(activations='identity')

    validation_skip = 10
    epochs = 1000
    nn.fit(X.T,
           y,
           shuffle=True,
           batch_size=1000,
           validation_fraction=0.2,
           learning_rate=0.001,
           verbose=False,
           silent=False,
           epochs=epochs,
           validation_skip=validation_skip,
           optimizer='adam')

    # Use the net to predict the energies for the validation set.
    x_validation = nn.x_validation
    y_validation = nn.predict(x_validation)
    target_validation = nn.target_validation

    # Sort the targets for better visualization of the network output.
    ind = np.argsort(target_validation)
    y_validation = np.squeeze(y_validation.T[ind])
    target_validation = np.squeeze(target_validation.T[ind])

    # We don't want to plot the discontinuities in the target.
    target_validation[np.where(
        np.abs(np.diff(target_validation)) > 1e-5)] = np.nan

    plt.rc('text', usetex=True)
    plt.figure()
    plt.plot(target_validation, 'k--', label=r'Target')
    plt.plot(y_validation, 'r.', markersize=0.5, label=r'NN output')
    plt.legend(fontsize=10)
    plt.xlabel(r'Validation sample', fontsize=10)
    plt.ylabel(r'$E / L$', fontsize=10)
    #plt.savefig(os.path.join(os.path.dirname(__file__), 'figures', 'nn_1d_energy_predict' + str(L) + '.png'), transparent=True, bbox_inches='tight')

    # Plot the training / validation loss during training.
    training_loss = nn.training_loss
    validation_loss = nn.validation_loss

    # There are more training loss values than validation loss values; let's
    # align them so the plot makes sense.
    xaxis_validation_loss = np.zeros_like(validation_loss)
    xaxis_validation_loss[0] = 0
    xaxis_validation_loss[1:-1] = np.arange(validation_skip,
                                            len(training_loss),
                                            validation_skip)
    xaxis_validation_loss[-1] = len(training_loss)

    plt.figure()
    plt.semilogy(training_loss, 'r-', label=r'Training loss')
    plt.semilogy(xaxis_validation_loss,
                 validation_loss,
                 'k--',
                 label=r'Validation loss')
    plt.legend(fontsize=10)
    plt.xlabel(r'Epoch', fontsize=10)
    plt.ylabel(r'Cost $C(\theta)$', fontsize=10)
    #plt.savefig(os.path.join(os.path.dirname(__file__), 'figures', 'nn_1d_loss' + str(L) + '.png'), transparent=True, bbox_inches='tight')
    plt.show()
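A typical entry point for running this experiment directly (assuming the function lives in a standalone script) would be:

if __name__ == '__main__':
    train_net_predict_energy(L=10, N=5000)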