Ejemplo n.º 1
0
def finding_best_net():
    """Grid-search TwoLayerNet hyperparameters on CIFAR-10-shaped data.

    Sweeps hidden size, learning rate, regularization strength and
    learning-rate decay, training one network per combination on the
    module-level data arrays (x_train/y_train/x_val/y_val -- assumed to be
    preprocessed CIFAR-10 splits; TODO confirm against the loader).

    Returns:
        (best_net, result): the network with the highest validation
        accuracy, and a dict mapping (hidden_size, lr, reg, lr_decay)
        -> (train_accuracy, val_accuracy).
    """
    best_net = None   # network with the best validation accuracy so far
    result = {}       # hyperparams -> (train_acc, val_acc)
    best_val = -1     # sentinel below any real accuracy (accuracies >= 0)

    hidden_sizes = [40, 60, 80, 100, 120]
    lrs = [1e-4, 5e-4, 1e-3, 5e-3]
    regs = [1e-6, 5e-6, 1e-5, 5e-5]
    lr_decays = [0.95, 0.99, 0.999]

    input_size = 32 * 32 * 3   # flattened 32x32 RGB image
    num_classes = 10

    nets = {}
    i = 0
    grid_search = [(x, y, z) for x in lrs for y in regs for z in lr_decays]

    for hidden_size in hidden_sizes:
        for lr, reg, lr_decay in grid_search:
            print('hidden {} -- lr {} -- reg {} -- lr_decay {}'.format(
                hidden_size, lr, reg, lr_decay))
            print('Done {:2} out of {}'.format(
                i + 1,
                len(grid_search) * len(hidden_sizes)))
            net = TwoLayerNet(input_size, hidden_size, num_classes)
            # Training
            net.train(x_train,
                      y_train,
                      x_val,
                      y_val,
                      num_iters=2000,
                      lr=lr,
                      lr_decay=lr_decay,
                      reg=reg,
                      verbose=False)
            # Predict
            y_train_pred = net.predict(x_train)
            y_val_pred = net.predict(x_val)
            # Scoring
            train_accu = np.mean(y_train_pred == y_train)
            val_accu = np.mean(y_val_pred == y_val)
            # Store results
            result[(hidden_size, lr, reg, lr_decay)] = (train_accu, val_accu)
            nets[(hidden_size, lr, reg, lr_decay)] = net

            if val_accu > best_val:
                best_val = val_accu
                best_net = net
            i += 1

    # Bug fix: the original discarded everything it computed (implicitly
    # returned None). Returning the results is backward compatible for any
    # caller that ignored the old None return.
    return best_net, result
Ejemplo n.º 2
0
class NeuralNetwork():
    """Thin wrapper around a TwoLayerNet, trained on MNIST at construction.

    Fixed architecture: 784 inputs (28x28 flattened), 50 hidden units,
    10 output classes.
    """

    def __init__(self):
        # 28x28 grayscale MNIST images, flattened into row vectors.
        self.input_size = 28 * 28
        self.hidden_size = 50
        self.num_classes = 10
        self.net = TwoLayerNet(self.input_size, self.hidden_size,
                               self.num_classes)
        self.fit()

    def fit(self):
        """Load MNIST, split off train/validation sets and train the net."""
        mnist_ds = tf.keras.datasets.mnist
        (X_train, y_train), (X_test, y_test) = mnist_ds.load_data()
        num_train = 30000
        num_val = 1000

        # Validation split: the num_val samples right after the train range.
        val_idx = list(range(num_train, num_train + num_val))
        X_val = X_train[val_idx]
        y_val = y_train[val_idx]

        # Training split: the first num_train samples.
        train_idx = list(range(num_train))
        X_train = X_train[train_idx]
        y_train = y_train[train_idx]

        # Preprocessing: flatten each image into a single row.
        X_train = np.reshape(X_train, (X_train.shape[0], -1))
        X_val = np.reshape(X_val, (X_val.shape[0], -1))
        # Training history kept in case plotting is re-enabled later.
        stats = self.net.train(X_train,
                               y_train,
                               X_val,
                               y_val,
                               num_iters=1500,
                               batch_size=200,
                               learning_rate=1e-4,
                               learning_rate_decay=0.95,
                               reg=0.25,
                               verbose=True)

    def predict(self, x):
        """Return the trained network's class predictions for samples x."""
        return self.net.predict(x)
Ejemplo n.º 3
0
def main():
    """Train a TwoLayerNet on the UCI HAR dataset and report accuracy.

    Loads the train/test splits via the project-local `ucihar` module,
    trains, prints accuracy, and -- when the optional dependencies are
    installed -- prints sklearn metrics and shows matplotlib plots.
    """
    x_train = ucihar.load_data('train', 'X')
    y_train = ucihar.load_data('train', 'y')

    x_test = ucihar.load_data('test', 'X')
    y_test = ucihar.load_data('test', 'y')

    net = TwoLayerNet(input_size=ucihar.X_DIMMENSIONS,
                      hidden_size=50,
                      output_size=ucihar.Y_CLASSES)

    # NOTE(review): the test split doubles as the validation set during
    # training, so the "validation" accuracy below is really test accuracy.
    # Confirm whether a separate validation split was intended.
    stats = net.train(x_train,
                      y_train,
                      x_test,
                      y_test,
                      num_iters=100000,
                      batch_size=64,
                      learning_rate=1e-2,
                      learning_rate_decay=1.)

    predictions = net.predict(x_test)
    val_acc = (predictions == y_test).mean()
    print('Validation accuracy: ', val_acc)

    # Optional: detailed per-class metrics when scikit-learn is available.
    try:
        from sklearn.metrics import classification_report, confusion_matrix
        print(confusion_matrix(y_test, predictions))
        print(classification_report(y_test, predictions))
    except ImportError:
        pass

    # Optional: loss / accuracy plots when matplotlib is available.
    try:
        import matplotlib.pyplot as plt
        # Plot the loss function and train / validation accuracies
        plt.subplot(1, 2, 1)
        plt.plot(stats['loss_history'])
        plt.title('Loss history')
        plt.xlabel('Iteration')
        plt.ylabel('Loss')

        plt.subplot(1, 2, 2)
        train_plot, = plt.plot(stats['train_acc_history'], label='train')
        val_plot, = plt.plot(stats['val_acc_history'], label='val')
        plt.legend(handles=[train_plot, val_plot])
        plt.title('Classification accuracy history')
        # Bug fix: corrected the misspelled axis label "Clasification".
        plt.ylabel('Classification accuracy')
        plt.xlabel('Epoch')
        plt.show()
    except ImportError:
        pass
Ejemplo n.º 4
0
# batch = [50]: single batch size kept from a wider sweep.
# batch = [40, 50, 60, 70, 80]
batch = [50]
bestValAcc = 0
bestNN = None
bestTrain = None
bestParams = []

# Exhaustive grid search over hidden size, learning rate, regularization,
# iteration count and batch size (grids come from earlier in the file).
for hidden in hidden_size:
    for eta in learningRates:
        for r in regularization:
            for t in iteration:
                for b in batch:
                    net = TwoLayerNet(input_size, hidden, NUM_CLASS)
                    # Bug fix: the original passed the *training* set as
                    # the validation data even though valAcc below is
                    # measured on X_val/y_val; monitor the real split.
                    train = net.train(X_train, y_train, X_val, y_val,
                                      num_iters=t, batch_size=b,
                                      learning_rate=eta,
                                      learning_rate_decay=0.95,
                                      reg=r, verbose=False)
                    y_train_pred = net.predict(X_train)
                    y_val_pred = net.predict(X_val)
                    trainAcc = np.mean(y_train == y_train_pred)
                    valAcc = np.mean(y_val == y_val_pred)
                    # Fix: Python 3 print call (the original Python 2
                    # statement is a syntax error in Python 3, which the
                    # rest of this file targets).
                    print('learning rate: %e reg: %.2f iteration: %d '
                          'batch: %d train accuracy: %.4f '
                          'val accuracy: %.4f'
                          % (eta, r, t, b, trainAcc, valAcc))
                    # Keep the model with the best validation accuracy.
                    if valAcc > bestValAcc:
                        bestValAcc = valAcc
                        bestNN = net
                        bestTrain = train
                        bestParams = [hidden, eta, r, t, b]

print('best validation accuracy achieved: %.4f' % bestValAcc)
print(bestParams)
# Bug fix: context manager closes the file handle; the original left it
# open with no guaranteed flush.
with open('./res.txt', 'w+') as f1:
    f1.write('best validation accuracy achieved: %.4f' % bestValAcc)
Ejemplo n.º 5
0
# In[ ]:

input_size = 32 * 32 * 3   # flattened 32x32 RGB CIFAR-10 image
hidden_size = 50
num_classes = 10
net = TwoLayerNet(input_size, hidden_size, num_classes)

# Train the network
stats = net.train(X_train, y_train, X_val, y_val,
            num_epochs=30, batch_size=500,
            learning_rate=1e-4, learning_rate_decay=0.9,
            reg=0.2, verbose=True)

# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
# Bug fix: Python 3 print call (the original Python 2 print statement is a
# syntax error under Python 3, which the rest of this file uses).
print('Validation accuracy: ', val_acc)

# # Debug the training
# With the default parameters we provided above, you should get a validation accuracy of about 0.29 on the validation set. This isn't very good.
#
# One strategy for getting insight into what's wrong is to plot the loss function and the accuracies on the training and validation sets during optimization.
#
# Another strategy is to visualize the weights that were learned in the first layer of the network. In most neural networks trained on visual data, the first layer weights typically show some visible structure when visualized.

# In[ ]:

# Plot the loss function and train / validation accuracies
# NOTE(review): dead code -- guarded by `if False`, presumably a manual
# plotting toggle from a notebook export. Confirm before removing.
if False:
    plt.subplot(2, 1, 1)
    plt.plot(stats['loss_history'])
Ejemplo n.º 6
0
# Single-point "grid" search: one learning rate and one regularization
# strength; learning_history records (train_acc, val_acc) per combination.
learning_rates = [1e-3]
regularization_strengths = [7e-6]
learning_history = {}

hyper_pairs = [(lr, reg)
               for lr in learning_rates
               for reg in regularization_strengths]
for lr, reg in hyper_pairs:
    nn = TwoLayerNet(input_size, hidden_size, num_classes)
    report = nn.train(X_train, y_train, X_val, y_val,
                      num_iters=num_iters, batch_size=200,
                      learning_rate=lr, learning_rate_decay=0.98,
                      reg=reg)
    train_acc = np.mean(nn.predict(X_train) == y_train)
    val_acc = np.mean(nn.predict(X_val) == y_val)
    learning_history[(lr, reg)] = (train_acc, val_acc)
    # Keep the network with the best validation accuracy so far.
    if val_acc > best_val:
        best_val = val_acc
        best_nn = nn

# Summary line for every hyperparameter combination, in sorted order.
for lr, reg in sorted(learning_history):
    train_acc, val_acc = learning_history[(lr, reg)]
    print('lr %e reg %e train accuracy: %f val accuracy: %f' %
          (lr, reg, train_acc, val_acc))

# print loss curve
plt.subplot(2, 1, 1)
plt.plot(report['loss_history'])
plt.title('Loss history')
Ejemplo n.º 7
0
# Invoke the above function to get our data.
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
# Fix: Python 3 print calls throughout (the original Python 2 print
# statements are syntax errors under Python 3, used elsewhere in this file).
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)

input_size = 32 * 32 * 3   # flattened 32x32 RGB image
hidden_size = 60
num_classes = 10
net = TwoLayerNet(input_size, hidden_size, num_classes)

t_start = time.time()
# Train the network
stats = net.train(X_train, y_train, X_val, y_val,
            num_iters=10000, batch_size=200,
            learning_rate=1e-3, learning_rate_decay=0.95,
            reg=0.1, verbose=True)
t_end = time.time()

# Accuracy on all three splits.
train_acc = (net.predict(X_train) == y_train).mean()
print('Training accuracy: ', train_acc)
val_acc = (net.predict(X_val) == y_val).mean()
print('Validation accuracy: ', val_acc)
test_acc = (net.predict(X_test) == y_test).mean()
print('Test accuracy: ', test_acc)
time_taken = t_end - t_start
print('Training time (in mins): ', time_taken / 60)
Ejemplo n.º 8
0
net = TwoLayerNet(input_size, hidden_size, num_classes)

# Train the network
stats = net.train(X_train,
                  y_train,
                  X_val,
                  y_val,
                  num_iters=1000,
                  batch_size=200,
                  learning_rate=1e-4,
                  learning_rate_decay=0.95,
                  reg=0.25,
                  verbose=True)

# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
print('Validation accuracy: ', val_acc)

##
#  Plot the loss function and train / validation accuracies
plt.subplot(2, 1, 1)
plt.plot(stats['loss_history'])
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')

plt.subplot(2, 1, 2)
# Bug fix: plt.plot returns a *list* of Line2D objects; unpack the single
# line so the handles are valid Artists, and restore the legend call that
# had been commented out (it failed when given lists).
l1, = plt.plot(stats['train_acc_history'], label='train')
l2, = plt.plot(stats['val_acc_history'], label='val')
plt.legend(handles=[l1, l2])
plt.title('Classification accuracy history')
Ejemplo n.º 9
0
    # X_train = X_train[:-10000:, :]
    # y_train = y_train[:-10000:]

    # Grid search over learning rate and regularization strength.
    # NOTE(review): the variable is named `svm` but holds a TwoLayerNet --
    # likely carried over from an SVM exercise; confirm the intended model.
    for lr in learning_rates:
        for reg in regularization_strengths:
            svm = TwoLayerNet(X_train.shape[1], 500, 10)
            svm.train(X_train,
                      y_train,
                      X_val,
                      y_val,
                      learning_rate=lr,
                      reg=reg,
                      num_iters=1000,
                      batch_size=2000,
                      verbose=True)
            # Accuracy on the training and validation splits.
            y_train_pred = svm.predict(X_train)
            accuracy_train = np.mean(y_train == y_train_pred)
            y_val_pred = svm.predict(X_val)
            accuracy_val = np.mean(y_val == y_val_pred)

            results[(lr, reg)] = (accuracy_train, accuracy_val)

            # Keep the model with the best validation accuracy so far.
            if best_val < accuracy_val:
                best_val = accuracy_val
                best_svm = svm

    # Print a summary line for every hyperparameter combination.
    for lr, reg in sorted(results):
        accuracy_train, accuracy_val = results[(lr, reg)]
        print('lr %e reg %e train accuracy: %f val accuracy: %f' %
              (lr, reg, accuracy_train, accuracy_val))
Ejemplo n.º 10
0
    # Center the validation split by subtracting mean_img as well.
    # NOTE(review): tail of load_CIFAR10_data -- mean_img is computed above
    # this visible chunk; presumably the training-set mean image. Confirm.
    x_val -= mean_img

    return x_train, y_train, x_val, y_val, x_test, y_test


# Load the preprocessed CIFAR-10 splits.
x_train, y_train, x_val, y_val, x_test, y_test = load_CIFAR10_data()

# Network dimensions: flattened 32x32 RGB input, one hidden layer,
# ten output classes.
input_size = 32 * 32 * 3
hidden_size = 50
num_classes = 10
net = TwoLayerNet(input_size, hidden_size, num_classes)

# Train the network with stochastic gradient descent.
stats = net.train(x_train, y_train, x_val, y_val,
                  num_iters=1200, verbose=True)

# Evaluate on the validation and test splits.
val_acc = (net.predict(x_val) == y_val).mean()
print("Validation accuracy: ", val_acc)  # 44.5%

test_acc = np.mean(net.predict(x_test) == y_test)
print('Test accuracy: ', test_acc)


# Plot the loss function and train / validation accuracies
def visualize_loss_acc(stats):
    # Plot the training-loss curve and (below) the accuracy curves from a
    # net.train() stats dict.
    # NOTE(review): every other example in this file reads
    # stats['loss_history']; 'loss_his' here would raise KeyError unless
    # this train() really emits that key -- confirm the key name.
    plt.subplot(2, 1, 1)
    plt.plot(stats['loss_his'])
    plt.title('Loss history')
    plt.xlabel('Iteration')
    plt.ylabel('Loss')

    plt.subplot(2, 1, 2)