Example 1
def test_neuralNetwork_fit_adam():
    np.random.seed(2019)
    X = np.random.normal(size=(1, 500))
    target = 3.9285985 * X

    nn = NeuralNetwork(inputs=1,
                       neurons=3,
                       outputs=1,
                       activations='tanh',
                       silent=True)
    nn.addLayer()
    nn.addLayer()
    nn.addOutputLayer(activations='identity')
    nn.fit(X,
           target,
           shuffle=True,
           batch_size=100,
           validation_fraction=0.2,
           learning_rate=0.05,
           verbose=True,
           silent=False,
           epochs=100,
           optimizer='adam')
    loss = nn.loss
    nn.fit(X,
           target,
           shuffle=True,
           batch_size=100,
           validation_fraction=0.2,
           learning_rate=0.05,
           verbose=True,
           silent=False,
           epochs=100,
           optimizer='adam')

    assert loss > nn.loss
Example 2
def test_neuralNetwork_fit_sgd():
    np.random.seed(2019)
    X = np.random.normal(size=(1, 500))
    target = 3.9285985 * X

    nn = NeuralNetwork(inputs=1,
                       neurons=3,
                       outputs=1,
                       activations='sigmoid',
                       silent=True)
    nn.addLayer()
    nn.addLayer()
    nn.addOutputLayer(activations='identity')
    nn.fit(X,
           target,
           shuffle=True,
           batch_size=100,
           validation_fraction=0.2,
           learning_rate=0.05,
           verbose=False,
           silent=True,
           epochs=100)

    loss_after_100 = nn.loss
    nn.fit(X,
           target,
           shuffle=True,
           batch_size=100,
           validation_fraction=0.2,
           learning_rate=0.05,
           verbose=False,
           silent=True,
           epochs=100)
    loss_after_200 = nn.loss

    assert loss_after_200 < loss_after_100
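
The two tests above differ only in the activation function and the optimizer, so one parametrized test could cover both. A minimal sketch using pytest.mark.parametrize, assuming the NeuralNetwork API shown above and that 'sgd' is the name the library gives its default optimizer:

import numpy as np
import pytest

@pytest.mark.parametrize("activation, optimizer",
                         [("tanh", "adam"), ("sigmoid", "sgd")])
def test_neuralNetwork_fit_loss_decreases(activation, optimizer):
    np.random.seed(2019)
    X = np.random.normal(size=(1, 500))
    target = 3.9285985 * X

    nn = NeuralNetwork(inputs=1, neurons=3, outputs=1,
                       activations=activation, silent=True)
    nn.addLayer()
    nn.addLayer()
    nn.addOutputLayer(activations='identity')

    fit_kwargs = dict(shuffle=True, batch_size=100, validation_fraction=0.2,
                      learning_rate=0.05, verbose=False, silent=True,
                      epochs=100, optimizer=optimizer)
    nn.fit(X, target, **fit_kwargs)
    loss_first = nn.loss           # loss after the first 100 epochs
    nn.fit(X, target, **fit_kwargs)
    assert nn.loss < loss_first    # continued training should reduce the loss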
Example 3
# Assumes numpy (np), pickle, and sklearn's LabelBinarizer are imported, a
# NeuralNetwork class is available, and `files`, `path`, `wordList`, `x`,
# and `y` are defined earlier in the script.
for f in files:
    d = np.loadtxt(path + "/" + f)  # one feature vector per file
    x.append(d)
    y_name = f.split("_")[0]        # class name is the filename prefix
    y.append(wordList[y_name])
# print np.array(x), np.array(y)
x_data = np.array(x)
y_data = np.array(y)
binarizer = LabelBinarizer()
y_data = binarizer.fit_transform(y_data)  # one-hot encode the labels
result = binarizer.classes_               # keep the class order for decoding
pickle.dump(result, open('result.pkl', 'wb'))
# x_train, x_test, y_train, y_test = train_test_split(x_data, y_data)
# labels_train = LabelBinarizer().fit_transform(y_train)
# labels_test = LabelBinarizer().fit_transform(y_test)

# print labels_test

nn = NeuralNetwork([960, 1500, 3], "logistic")
print "start"
nn.fit(x_data, y_data, epochs=1000)
pickle.dump(nn, open('nn.pkl', 'wb'))
predictions = []
for i in range(x_data.shape[0]):
    o = nn.predict(x_data[i])   # per-class output scores for one sample
    d = result[np.argmax(o)]    # map the strongest output to its class name
    predictions.append(d)

for p in predictions:
    print(p)
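
Because the script pickles both the class labels (result.pkl) and the trained network (nn.pkl), a separate script can reload them for inference. A minimal sketch, assuming the NeuralNetwork class is importable wherever the pickle is loaded (pickle requires this) and that predict accepts a single sample as above:

import pickle
import numpy as np

# Reload the trained model and the class labels saved above.
with open('nn.pkl', 'rb') as fh:
    nn = pickle.load(fh)
with open('result.pkl', 'rb') as fh:
    result = pickle.load(fh)

def classify(sample):
    """Map the network's output vector back to a class name."""
    o = nn.predict(sample)
    return result[np.argmax(o)]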
Example 5
import numpy as np
from sklearn.datasets import load_digits
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from neuralNetwork import NeuralNetwork

digits = load_digits()
X = digits.data
y = digits.target

# Preprocessing: scale the pixel values to the range [0, 1].
X -= X.min()
X /= X.max()

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
y_train = LabelBinarizer().fit_transform(y_train)

nn = NeuralNetwork([64, 100, 10], 'logistic')
nn.fit(X_train, y_train, epochs=3000)

predictions = nn.predict(X_test)
predictions = np.array(predictions)

print(confusion_matrix(y_test, predictions))
print(classification_report(y_test, predictions))
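
confusion_matrix and classification_report expect vectors of class labels. In Example 3 the per-class network outputs are decoded with np.argmax before use; if predict here likewise returns one row of class scores per sample, the same decoding step is needed. A sketch, assuming predictions has shape (n_samples, 10):

# Hypothetical decoding step for one-hot / score outputs.
predicted_labels = np.argmax(predictions, axis=1)
print(confusion_matrix(y_test, predicted_labels))
print(classification_report(y_test, predicted_labels))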
Example 6
def train_net_predict_energy(L=10, N=5000):
    ising = Ising(L, N)
    X, y = ising.generateTrainingData1D()
    y /= L
    n_samples, n_features = X.shape

    nn = NeuralNetwork(inputs=L,
                       neurons=L * L,
                       outputs=1,
                       activations='sigmoid',
                       cost='mse',
                       silent=False)
    nn.addLayer(neurons=L * L)
    nn.addLayer(neurons=L * L)
    nn.addOutputLayer(activations='identity')

    validation_skip = 10
    epochs = 1000
    nn.fit(X.T,
           y,
           shuffle=True,
           batch_size=1000,
           validation_fraction=0.2,
           learning_rate=0.001,
           verbose=False,
           silent=False,
           epochs=epochs,
           validation_skip=validation_skip,
           optimizer='adam')

    # Use the net to predict the energies for the validation set.
    x_validation = nn.x_validation
    y_validation = nn.predict(x_validation)
    target_validation = nn.target_validation

    # Sort the targets for better visualization of the network output.
    ind = np.argsort(target_validation)
    y_validation = np.squeeze(y_validation.T[ind])
    target_validation = np.squeeze(target_validation.T[ind])

    # We don't want to plot the discontinuities in the target.
    target_validation[np.where(
        np.abs(np.diff(target_validation)) > 1e-5)] = np.nan

    plt.rc('text', usetex=True)
    plt.figure()
    plt.plot(target_validation, 'k--', label=r'Target')
    plt.plot(y_validation, 'r.', markersize=0.5, label=r'NN output')
    plt.legend(fontsize=10)
    plt.xlabel(r'Validation sample', fontsize=10)
    plt.ylabel(r'$E / L$', fontsize=10)
    #plt.savefig(os.path.join(os.path.dirname(__file__), 'figures', 'nn_1d_energy_predict' + str(L) + '.png'), transparent=True, bbox_inches='tight')

    # Plot the training / validation loss during training.
    training_loss = nn.training_loss
    validation_loss = nn.validation_loss

    # There are more training loss values than validation loss values; align
    # them so the plot makes sense.
    xaxis_validation_loss = np.zeros_like(validation_loss)
    xaxis_validation_loss[0] = 0
    xaxis_validation_loss[1:-1] = np.arange(validation_skip,
                                            len(training_loss),
                                            validation_skip)
    xaxis_validation_loss[-1] = len(training_loss)

    plt.figure()
    plt.semilogy(training_loss, 'r-', label=r'Training loss')
    plt.semilogy(xaxis_validation_loss,
                 validation_loss,
                 'k--',
                 label=r'Validation loss')
    plt.legend(fontsize=10)
    plt.xlabel(r'Epoch', fontsize=10)
    plt.ylabel(r'Cost $C(\theta)$', fontsize=10)
    #plt.savefig(os.path.join(os.path.dirname(__file__), 'figures', 'nn_1d_loss' + str(L) + '.png'), transparent=True, bbox_inches='tight')
    plt.show()
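
The NaN masking of target_validation above relies on matplotlib's documented behaviour of skipping NaN points, which breaks the drawn line at each discontinuity. A self-contained sketch of the same trick:

import numpy as np
import matplotlib.pyplot as plt

y = np.repeat(np.arange(5), 20).astype(float)    # a step-like signal
y[np.where(np.abs(np.diff(y)) > 1e-5)] = np.nan  # blank out each jump
plt.plot(y, 'k--')                               # the line breaks at every NaN
plt.show()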
Example 7
def R2_versus_lasso():
    L = 3
    N = 10000
    training_fraction = 0.4
    ising = Ising(L, N)
    D, ry = ising.generateDesignMatrix1D()
    X, y = ising.generateTrainingData1D()
    y /= L

    # Use the first `training_fraction` of the samples for training and the
    # rest for validation, mirroring validation_fraction=1 - training_fraction
    # in the network fit below.
    n_train = int(training_fraction * N)
    D_train = D[:n_train, :]
    ry_train = ry[:n_train]
    D_validation = D[n_train:, :]
    ry_validation = ry[n_train:]

    lasso = LeastSquares(method='lasso', backend='skl')
    lasso.setLambda(1e-2)
    lasso.fit(D_train, ry_train)
    lasso.y = ry_validation
    # Despite the name, this is the validation *mean squared error* of the
    # lasso fit (matching the y-label of the plot below), not an R^2 score.
    lasso_R2 = sklearn.metrics.mean_squared_error(
        ry_validation / L,
        lasso.predict(D_validation) / L)

    n_samples, n_features = X.shape

    nn = NeuralNetwork(inputs=L * L,
                       neurons=L,
                       outputs=1,
                       activations='identity',
                       cost='mse',
                       silent=False)
    nn.addLayer(neurons=1)
    nn.addOutputLayer(activations='identity')

    validation_skip = 100
    epochs = 50000
    nn.fit(D.T,
           ry,
           shuffle=True,
           batch_size=2000,
           validation_fraction=1 - training_fraction,
           learning_rate=0.0001,
           verbose=False,
           silent=False,
           epochs=epochs,
           validation_skip=validation_skip,
           optimizer='adam')

    plt.rc('text', usetex=True)
    validation_loss = nn.validation_loss_improving
    validation_ep = np.linspace(0, epochs, len(nn.validation_loss_improving))
    plt.semilogy(validation_ep, validation_loss, 'r-', label=r'NN')
    plt.semilogy([0, epochs],
                 np.array([lasso_R2, lasso_R2]),
                 'k--',
                 label=r'Lasso')
    plt.xlabel(r'Epoch', fontsize=10)
    plt.ylabel(r'Mean squared error', fontsize=10)
    plt.legend(fontsize=10)
    plt.xlim((0, epochs))
    ax = plt.gca()
    ymin, ymax = ax.get_ylim()
    if ymin > pow(10, -5):
        ymin = pow(10, -5)
    # plt.ylim((ymin, ymax))  # optional: extend the y-axis down to 1e-5
    plt.savefig(os.path.join(os.path.dirname(__file__), 'figures',
                             'NN_compare_lasso.png'),
                transparent=True,
                bbox_inches='tight')
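
As the comment above notes, lasso_R2 holds a mean squared error, consistent with the plot's y-label but not with the function name. If an actual coefficient of determination is wanted, sklearn.metrics.r2_score computes it directly; a sketch on the same validation split:

import sklearn.metrics

# Hypothetical: the R^2 score that the name R2_versus_lasso suggests.
lasso_r2 = sklearn.metrics.r2_score(ry_validation / L,
                                    lasso.predict(D_validation) / L)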