Example No. 1
def grid_custom(diz):
    """Exhaustive grid search over the hyperparameter dict `diz`.

    Each key of `diz` maps to a list of candidate values; every combination
    is trained and scored, and the results are written to a CSV sorted by
    validation accuracy. Relies on the module-level TwoLayerNet, input_size,
    num_classes and the CIFAR-10 splits X_train, y_train, X_val, y_val.
    """
    import itertools

    import pandas as pd  # missing from the original snippet

    labels = list(diz)
    values = [diz[key] for key in labels]

    lines = []
    for combo in itertools.product(*values):
        p = dict(zip(labels, combo))
        hidden_size = p.pop('hidden_size')

        net = TwoLayerNet(input_size, hidden_size, num_classes)

        # Train the network with this hyperparameter combination
        stats = net.train(X_train, y_train, X_val, y_val, **p)

        # Score on the training and validation sets
        val_loss = net.loss(X_val, y_val)[0]
        val_acc = (net.predict(X_val) == y_val).mean()
        train_loss = net.loss(X_train, y_train)[0]
        train_acc = (net.predict(X_train) == y_train).mean()

        # Record one result row; `verbose` is not a result, so drop it.
        # Note: the remaining keys of `diz` must be ordered num_iters,
        # batch_size, learning_rate, reg to match the DataFrame columns below.
        p.pop('verbose', None)
        line = [hidden_size] + list(p.values())
        line += [train_loss, val_loss, train_acc, val_acc]

        lines.append(line)
        print('params', line)
        print('train acc: ', train_acc)
        print('val acc: ', val_acc)

    df = pd.DataFrame(lines, columns=['hidden_size', 'num_iters', 'batch_size',
                                      'learning_rate', 'reg', 'train_loss',
                                      'val_loss', 'train_acc', 'val_acc'])
    df.sort_values(by=['val_acc'],
                   ascending=False).to_csv('results_twolayernet.csv',
                                           index=False)
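A hedged usage sketch of grid_custom, assuming the CIFAR-10 splits and TwoLayerNet are already in scope; the grid values below are illustrative, not tuned results. Note that the key order must match the CSV columns (hidden_size, num_iters, batch_size, learning_rate, reg, then verbose):

param_grid = {
    'hidden_size': [50, 80],       # network width candidates
    'num_iters': [1000],
    'batch_size': [200],
    'learning_rate': [1e-4, 1e-3],
    'reg': [0.1, 0.25],
    'reg': [0.1, 0.25],
    'verbose': [False],            # passed to net.train, dropped from the CSV
}
grid_custom(param_grid)  # writes results_twolayernet.csv, best val_acc first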
Example No. 2
def init_toy_model():
    # Seed NumPy so the toy model's random weights are reproducible.
    np.random.seed(0)
    return TwoLayerNet(input_size, hidden_size, num_classes, std=1e-1)
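A minimal usage sketch, assuming the CS231n-style TwoLayerNet that keeps its weights in a params dict; the toy sizes here are illustrative:

import numpy as np

input_size, hidden_size, num_classes = 4, 10, 3  # toy sizes, illustrative
net = init_toy_model()
for name, param in net.params.items():  # 'W1', 'b1', 'W2', 'b2'
    print(name, param.shape)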
Example No. 3
# Show the first 100 training images in a grid.
plt.imshow(
    visualize_grid(X_train[:100, :].reshape(100, 32, 32, 3),
                   padding=3).astype('uint8'))
plt.gca().axis('off')
plt.show()

# Train a network
# To train our network we will use SGD. In addition, we will
# adjust the learning rate with an exponential learning rate schedule as
# optimization proceeds; after each epoch, we will reduce the learning rate by
# multiplying it by a decay rate.
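# The schedule itself is one multiplication per epoch; a minimal sketch with
# illustrative names (not the trainer's internals):

learning_rate = 1e-4
learning_rate_decay = 0.95
num_epochs = 5
for epoch in range(num_epochs):
    # ... one epoch of SGD minibatch updates at the current learning_rate ...
    learning_rate *= learning_rate_decay  # exponential decay, once per epoch
    print('epoch %d: learning rate %.3e' % (epoch, learning_rate))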

input_size = 32 * 32 * 3
hidden_size = 50
num_classes = 10
net = TwoLayerNet(input_size, hidden_size, num_classes)
# Train the network
stats = net.train(X_train,
                  y_train,
                  X_val,
                  y_val,
                  num_iters=1000,
                  batch_size=200,
                  learning_rate=1e-4,
                  learning_rate_decay=0.95,
                  reg=0.25,
                  verbose=True)

# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
print('Validation accuracy: ', val_acc)
Example No. 4
# **Approximate results**. You should aim to achieve a classification
# accuracy of greater than 48% on the validation set. Our best network gets
# over 52% on the validation set.
#
# **Experiment**: Your goal in this exercise is to get as good a result on
# CIFAR-10 as you can (52% could serve as a reference) with a fully-connected
# neural network. Feel free to implement your own techniques (e.g. PCA to
# reduce dimensionality, adding dropout, or adding features to the solver);
# one possible search strategy is sketched below.

# **Explain your hyperparameter tuning process in the report.**
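One way to tune is random search over log-uniform ranges; a hedged sketch, assuming the data splits and the size constants defined just below are in scope (the ranges and trial count are illustrative):

import numpy as np

best_val_acc, best_candidate = -1.0, None
for _ in range(10):  # number of trials is illustrative
    lr = 10 ** np.random.uniform(-4.0, -2.5)   # log-uniform learning rate
    reg = 10 ** np.random.uniform(-2.0, 0.0)   # log-uniform regularization
    candidate = TwoLayerNet(input_size, hidden_size, num_classes)
    candidate.train(X_train, y_train, X_val, y_val,
                    num_iters=1000, batch_size=200,
                    learning_rate=lr, learning_rate_decay=0.95, reg=reg)
    val_acc = (candidate.predict(X_val) == y_val).mean()
    if val_acc > best_val_acc:
        best_val_acc, best_candidate = val_acc, candidate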

input_size = 32 * 32 * 3
hidden_size = 80
num_classes = 10
# Store the best model in this variable.
best_net = TwoLayerNet(input_size, hidden_size, num_classes)
stats = best_net.train(X_train,
                       y_train,
                       X_val,
                       y_val,
                       num_iters=1000,
                       batch_size=250,
                       learning_rate=0.001,
                       learning_rate_decay=0.99,
                       reg=0.12,
                       verbose=True)
# Predict on the validation set
val_acc = (best_net.predict(X_val) == y_val).mean()
print('Validation accuracy: ', val_acc)

# Plot the loss function and the train / validation accuracies (see the
# sketch below).
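A plotting sketch, assuming `stats` carries the 'loss_history', 'train_acc_history', and 'val_acc_history' keys used by the CS231n trainer:

import matplotlib.pyplot as plt

plt.subplot(2, 1, 1)
plt.plot(stats['loss_history'])
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')

plt.subplot(2, 1, 2)
plt.plot(stats['train_acc_history'], label='train')
plt.plot(stats['val_acc_history'], label='val')
plt.title('Classification accuracy history')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()

plt.tight_layout()
plt.show()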