Example #1
def init_toy_model():
    np.random.seed(0)
    input_size = 4
    hidden_size = 10
    num_classes = 3
    num_inputs = 5

    return TwoLayerNet(input_size, hidden_size, num_classes, std=1e-1)
Example #2
    def getQoS(self):
        X_test, y_test = self._get_test_data()

        input_size = 32 * 32 * 3
        hidden_size = 110
        num_classes = 10

        net = TwoLayerNet(input_size, hidden_size, num_classes)
        try:
            # Load the pickled weight/bias arrays saved by a training run.
            for param in ('W1', 'b1', 'W2', 'b2'):
                path = self.run_dir + "model_nn_%s.p" % param.lower()
                with open(path, "rb") as f:
                    net.params[param] = pickle.load(f, encoding='latin1')

            test_accuracy = (net.predict(X_test) == y_test).mean()
        except Exception:
            test_accuracy = 0.0
        print("qos", str(test_accuracy))
        return test_accuracy * 100.0
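
    # Hedged companion sketch (an assumption, not part of the original
    # class): how the pickled parameter files loaded above could have been
    # produced after training. _save_params is a hypothetical helper.
    def _save_params(self, net):
        # Write net.params['W1'], ['b1'], ['W2'], ['b2'] into run_dir.
        for param in ('W1', 'b1', 'W2', 'b2'):
            with open(self.run_dir + "model_nn_%s.p" % param.lower(), "wb") as f:
                pickle.dump(net.params[param], f)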
Example #3
def NeuralNet(train_data, train_label, validation_data, validation_label,
              test_data, test_label):
    input_size = 32 * 32 * 3
    hidden_size = 50
    num_classes = 10
    net = TwoLayerNet(input_size, hidden_size, num_classes)

    print(train_label.shape)
    # Train the network
    stats = net.train(train_data,
                      train_label,
                      validation_data,
                      validation_label,
                      num_iters=4000,
                      batch_size=1500,
                      learning_rate=5e-3,
                      learning_rate_decay=0.96,
                      reg=2,
                      verbose=True,
                      method='adam')

    # Predict on the validation set
    val_acc = (net.predict(validation_data) == validation_label).mean()
    print('Validation accuracy: ', val_acc)

    # Plot the loss function and train / validation accuracies
    plt.subplot(2, 1, 1)
    plt.plot(stats['loss_history'])
    plt.title('Loss history')
    plt.xlabel('Iteration')
    plt.ylabel('Loss')

    plt.subplot(2, 1, 2)
    plt.plot(stats['train_acc_history'], label='train')
    plt.plot(stats['val_acc_history'], label='val')
    plt.title('Classification accuracy history')
    plt.xlabel('Epoch')
    plt.ylabel('Classification accuracy')
    plt.legend()
    plt.show()

    show_net_weights(net)
Example #4
def main(job_id, params):
    print('Training new neural net and computing the validation value for job #%d' % job_id)
    np.random.seed(0)  # For repeatability.
    X_train, y_train, X_val, y_val, _, _ = get_CIFAR10_data()
    
    input_size = 32 * 32 * 3
    num_classes = 10
    hidden_size = params['hidden_size']
    lr = params['learning_rate']
    lrdc = params['learning_rate_decay']
    r = params['regularization']
    net = TwoLayerNet(input_size, hidden_size, num_classes) 
    stats = net.train(X_train, y_train, X_val, y_val,
                      num_iters=600, batch_size=200,
                      learning_rate=lr, learning_rate_decay=lrdc,
                      reg=r)

    val_acc = (net.predict(X_val) == y_val).mean()
    print('Validation accuracy: ', val_acc)
    
    # We wish to maximize the validation accuracy, so minimize the negative.
    return -val_acc
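
# Usage sketch (hedged): main() follows the job_id/params convention used by
# Spearmint-style hyperparameter optimizers, which minimize the value a job
# returns; hence the negated accuracy. The params dict below is illustrative.
if __name__ == '__main__':
    print(main(0, {'hidden_size': 50, 'learning_rate': 1e-3,
                   'learning_rate_decay': 0.95, 'regularization': 0.25}))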
Example #5
################################################################################
# TODO: Train a two-layer neural network on image features. You may want to    #
# cross-validate various parameters as in previous sections. Store your best   #
# model in the best_net variable.                                              #
################################################################################
#pass

learning_rates = [1]
regularization_strengths = [0, 0.25, 0.5, 1]
learning_rate_decay = [1, 0.95, 0.8]

for _l in learning_rates:
    for _r in regularization_strengths:
        for _d in learning_rate_decay:
            net = TwoLayerNet(input_dim, hidden_dim, num_classes)
            # Train the network
            stats = net.train(X_train_feats,
                              y_train,
                              X_val_feats,
                              y_val,
                              num_iters=1000,
                              batch_size=200,
                              learning_rate=_l,
                              learning_rate_decay=_d,
                              reg=_r,
                              verbose=True)

            # Predict on the validation set
            val_accuracy = (net.predict(X_val_feats) == y_val).mean()
            print('lr %e reg %e decay %e val accuracy: %f' %
                  (_l, _r, _d, val_accuracy))
Example #6
#        plt.subplot(examples_per_class, len(classes), i * len(classes) + cls + 1)
#        plt.imshow(X_test[idx].astype('uint8'))
#        plt.axis('off')
#        if i == 0:
#            plt.title(cls_name)
#plt.show()

print(X_train_feats.shape)

from cs231n.classifiers.neural_net import TwoLayerNet

input_dim = X_train_feats.shape[1]
hidden_dim = 500
num_classes = 10

net = TwoLayerNet(input_dim, hidden_dim, num_classes)
best_net = None

################################################################################
# TODO: Train a two-layer neural network on image features. You may want to    #
# cross-validate various parameters as in previous sections. Store your best   #
# model in the best_net variable.                                              #
################################################################################
best_val = -1
input_size = 32 * 32 * 3
hidden_size = 50
learning_rates = [1e-1, 1]
regularization_strengths = [1e-3, 5e-3]
results = {}
for rate in learning_rates:
    for strength in regularization_strengths:
Example #7
# Preprocessing: Remove the bias dimension
# Make sure to run this cell only ONCE
print(X_train_feats.shape)
X_train_feats = X_train_feats[:, :-1]
X_val_feats = X_val_feats[:, :-1]
X_test_feats = X_test_feats[:, :-1]

print(X_train_feats.shape)
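
# Sanity check (sketch): after the slice, all three feature matrices should
# agree on the column count, one less than before the bias removal.
assert X_train_feats.shape[1] == X_val_feats.shape[1] == X_test_feats.shape[1]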

from cs231n.classifiers.neural_net import TwoLayerNet

input_dim = X_train_feats.shape[1]
hidden_dim = 600
num_classes = 10

net = TwoLayerNet(input_dim, hidden_dim, num_classes)
best_net = None

################################################################################
# TODO: Train a two-layer neural network on image features. You may want to    #
# cross-validate various parameters as in previous sections. Store your best   #
# model in the best_net variable.                                              #
################################################################################
# Train the network
stats = net.train(X_train_feats,
                  y_train,
                  X_val_feats,
                  y_val,
                  num_iters=1000,
                  batch_size=200,
                  learning_rate=5e-3,
Example #8
#
# For completeness, we should also try training a neural network on image features. This approach should outperform all previous approaches: you should easily be able to achieve over 55% classification accuracy on the test set; our best model achieves about 60% classification accuracy.

# In[ ]:

print(X_train_feats.shape)

# In[ ]:

from cs231n.classifiers.neural_net import TwoLayerNet

input_dim = X_train_feats.shape[1]
hidden_dim = 500
num_classes = 10

net = TwoLayerNet(input_dim, hidden_dim, num_classes)
best_net = None

################################################################################
# TODO: Train a two-layer neural network on image features. You may want to    #
# cross-validate various parameters as in previous sections. Store your best   #
# model in the best_net variable.                                              #
################################################################################
pass
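
# A hedged sketch of one way to fill in the TODO above: a small grid search
# over learning rate and regularization, keeping the best model in best_net.
# The grid values are illustrative, not the assignment's reference solution.
best_val = -1
for lr in [5e-1, 1]:
    for reg in [1e-3, 5e-3]:
        candidate = TwoLayerNet(input_dim, hidden_dim, num_classes)
        candidate.train(X_train_feats, y_train, X_val_feats, y_val,
                        num_iters=1500, batch_size=200,
                        learning_rate=lr, learning_rate_decay=0.95,
                        reg=reg, verbose=False)
        val_acc = (candidate.predict(X_val_feats) == y_val).mean()
        if val_acc > best_val:
            best_val, best_net = val_acc, candidate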
################################################################################
#                              END OF YOUR CODE                                #
################################################################################

# In[ ]:

# Run your neural net classifier on the test set. You should be able to
Example #9
        plt.subplot(examples_per_class, len(classes),
                    i * len(classes) + cls + 1)
        plt.imshow(X_test[idx].astype('uint8'))
        plt.axis('off')
        if i == 0:
            plt.title(cls_name)
plt.show()

print(X_train_feats.shape)

from cs231n.classifiers.neural_net import TwoLayerNet

input_dim = X_train_feats.shape[1]
hidden_dim = 500
num_classes = 10

net = TwoLayerNet(input_dim, hidden_dim, num_classes)
best_net = None

################################################################################
# Train a two-layer neural network on image features. You may want to          #
# cross-validate various parameters as in previous sections. Store your best   #
# model in the best_net variable.                                              #
################################################################################
results = {}
best_val = -1
best_static = None
learning_rates = [0.5, 1, 5]
regularization_strengths = [0.01, 0.001]
num_iterations = 2000
for lr in learning_rates:
    for reg in regularization_strengths:
Example #10
num_classes = 10
best_net = None
best_val_acc = 0
results = {}

learning_rates = [0.3, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.5]
hidden_sizes = [500]
regs = [1e-5, 1e-4, 1e-3, 1e-2]
num_iters = [1000, 2000, 5000]

for learnRate in learning_rates:
    for hiddenSize in hidden_sizes:
        for theReg in regs:
            for numIter in num_iters:

                net = TwoLayerNet(input_dim, hiddenSize, num_classes)

                # Train the network
                stats = net.train(X_train_feats,
                                  y_train,
                                  X_val_feats,
                                  y_val,
                                  num_iters=numIter,
                                  batch_size=200,
                                  learning_rate=learnRate,
                                  learning_rate_decay=0.95,
                                  reg=theReg,
                                  verbose=True)

                # Predict on the validation set
                val_acc = (net.predict(X_val_feats) == y_val).mean()
Example #11
hidden_sizes = [80, 90, 100]
learning_rates = [1.8e-3, 2e-3, 2.2e-3]
regularization_strengths = [0.5, 0.6, 0.7, 0.8]
batch_sizes = [256, 512, 1024, 2048]

# Narrow the grids to the best values found above.
hidden_sizes = [100]
learning_rates = [2e-3]
regularization_strengths = [0.5]
batch_sizes = [1024]

for learning_rate in learning_rates:
    for reg in regularization_strengths:
        for hidden_size in hidden_sizes:
            for batch_size in batch_sizes:
                net = TwoLayerNet(input_size, hidden_size, num_classes)
                stats = net.train(X_train,
                                  y_train,
                                  X_val,
                                  y_val,
                                  num_iters=5000,
                                  batch_size=batch_size,
                                  learning_rate=learning_rate,
                                  learning_rate_decay=0.95,
                                  reg=reg,
                                  verbose=True)
                # Accuracies on the training and validation sets
                training_accuracy = stats['train_acc_history'][-1]
                print('training accuracy: %f' % (training_accuracy, ))
                validation_accuracy = stats['val_acc_history'][-1]
                print('validation accuracy: %f' % (validation_accuracy, ))
Example #12
        plt.subplot(examples_per_class, len(classes),
                    i * len(classes) + cls + 1)
        plt.imshow(X_test[idx].astype('uint8'))
        plt.axis('off')
        if i == 0:
            plt.title(cls_name)
plt.show()

print(X_train_feats.shape)

from cs231n.classifiers.neural_net import TwoLayerNet

input_dim = X_train_feats.shape[1]
hidden_dim = 500
num_classes = 10

net = TwoLayerNet(input_dim, hidden_dim, num_classes)
best_net = None

################################################################################
# TODO: Train a two-layer neural network on image features. You may want to    #
# cross-validate various parameters as in previous sections. Store your best   #
# model in the best_net variable.                                              #
################################################################################
workers = 10
best_accuracy = 0

learning_rates = [0.9]
numer_of_training_epochs = [4000]
reg = [0.002286, 0.002287, 0.002288, 0.002289, 0.00229]
#learning_rates = [10 ** np.random.uniform(-4, -2) for _ in range(workers)]
#numer_of_training_epochs = [random.randint(4000,5000) for _ in range(workers)]
Example #13
print "Train labels shape: ", y_train.shape
print "Validation data shape: ", X_val.shape
print "Validation labels shape: ", y_val.shape
print "Test data shape: ", X_test.shape
print "Test labels shape: ", y_test.shape


# # Train a network
# To train our network we will use SGD with momentum. In addition, we will adjust the learning rate with an exponential learning rate schedule as optimization proceeds; after each epoch, we will reduce the learning rate by multiplying it by a decay rate.

# In[ ]:

input_size = 32 * 32 * 3
hidden_size = 50
num_classes = 10
net = TwoLayerNet(input_size, hidden_size, num_classes)

# Train the network
stats = net.train(
    X_train,
    y_train,
    X_val,
    y_val,
    num_iters=1000,
    batch_size=200,
    learning_rate=1e-4,
    learning_rate_decay=0.95,
    reg=0.5,
    verbose=True,
)
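
# Sketch of the schedule described above (an assumption about what
# TwoLayerNet.train does internally): after each epoch, the learning rate is
# multiplied by the decay factor, giving exponential decay.
lr = 1e-4
for epoch in range(10):   # illustrative epoch count
    # ... one epoch of SGD updates at rate lr ...
    lr *= 0.95            # learning_rate_decay
print('learning rate after 10 epochs: %e' % lr)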
Example #14
hidden_size = [500]
results = {}
number_of_iters = [5000]
learning_rates = [0.5,
                  0.6]  # np.logspace(-10, 10, 8) #-10, -9, -8, -7, -6, -5, -4
regularization_strengths = [3e-3]
batch_sizes = [200]

for hid_size in hidden_size:
    for reg in regularization_strengths:
        for lr in learning_rates:
            for num_iter in number_of_iters:
                for bs in batch_sizes:
                    tic = time.time()
                    net = TwoLayerNet(input_size, hid_size, num_classes)
                    print(
                        "hid_size %d / lr %.2E / reg %.2e / num_iter %d / batch_s %d"
                        % (hid_size, lr, reg, num_iter, bs))
                    training_results = net.train(X_train_feats,
                                                 y_train,
                                                 X_val_feats,
                                                 y_val,
                                                 num_iters=num_iter,
                                                 batch_size=bs,
                                                 learning_rate=lr,
                                                 learning_rate_decay=0.95,
                                                 reg=reg,
                                                 verbose=True)
                    val_acc = (net.predict(X_val_feats) == y_val).mean()
                    toc = time.time()
Example #15
# Invoke the above function to get our data.
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)

#%%  Train a network. We will use SGD and adjust the learning rate with an
# exponential learning rate schedule as optimization proceeds.

input_size = 32 * 32 * 3
hidden_size = 50
num_classes = 10
net = TwoLayerNet(input_size, hidden_size, num_classes)

# Train the network
stats = net.train(X_train,
                  y_train,
                  X_val,
                  y_val,
                  num_iters=1000,
                  batch_size=200,
                  learning_rate=1e-4,
                  learning_rate_decay=0.95,
                  reg=0.25,
                  verbose=True)

# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
Example #16
x_test_feats /= std_feat

# Preprocessing: Add a bias dimension
X_train_feats = np.hstack(
    [x_train_feats, np.ones((x_train_feats.shape[0], 1))])
X_val_feats = np.hstack([x_val_feats, np.ones((x_val_feats.shape[0], 1))])
X_test_feats = np.hstack([x_test_feats, np.ones((x_test_feats.shape[0], 1))])
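
# Sanity check (sketch): the appended bias column adds exactly one feature.
assert X_train_feats.shape[1] == x_train_feats.shape[1] + 1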

print(x_train_feats.shape)
from cs231n.classifiers.neural_net import TwoLayerNet

input_dim = x_train_feats.shape[1]
hidden_dim = 500
num_classes = 10

net = TwoLayerNet(input_dim, hidden_dim, num_classes)
best_net = None
results = {}
best_val = -1

learning_rates = [1e-2, 1e-1, 5e-5, 1, 5]
regularization_strengths = [1e-3, 5e-3, 1e-2, 1e-1, 0.5, 1]

for lr in learning_rates:
    for reg in regularization_strengths:
        net = TwoLayerNet(input_dim, hidden_dim, num_classes)
        #Train the network
        stats = net.train(x_train_feats,
                          y_train,
                          x_val_feats,
Example #17
# Ideas: PCA, Dropout, adding features

input_size = 32 * 32 * 3
num_classes = 10

lrates = [0.001]
regs = [0.02]
hidden_sizes = [100]

best_accuracy = 0
for lrate in lrates:
    for reg in regs:
        for hidden_size in hidden_sizes:
            # Train the network with the combination
            net = TwoLayerNet(input_size, hidden_size, num_classes)
            stats, test_net = net.train(X_train,
                                        y_train,
                                        X_val,
                                        y_val,
                                        num_iters=10000,
                                        batch_size=200,
                                        learning_rate=lrate,
                                        learning_rate_decay=.95,
                                        reg=reg,
                                        verbose=True)

            if stats['val_acc_history'][-1] > best_accuracy:
                best_net = test_net
                best_accuracy = stats['val_acc_history'][-1]
                best_loss = np.mean(stats["loss_history"][-10:-1])
Example #18
def init_toy_model():
    np.random.seed(0)
    # Seeding with 0 initializes the network parameters identically every
    # run, so we get the same W and b each time.
    return TwoLayerNet(input_size, hidden_size, num_classes, std=1e-1)
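
# Quick check (sketch) that the fixed seed makes initialization repeatable:
# two freshly built toy models should get identical parameters.
net_a, net_b = init_toy_model(), init_toy_model()
assert all(np.array_equal(net_a.params[k], net_b.params[k]) for k in net_a.params)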
Example #19
input_size = 32 * 32 * 3
hidden_size = 50
num_classes = 10



best_val = -1
best_stats = None
learning_rates = [1e-4, 2e-4, 5e-4, 8e-4]
regularization_strengths = [0.2, 0.3, 0.4]
results = {} 
iters = 2000 #100
for lr in learning_rates:
    for rs in regularization_strengths:
        net = TwoLayerNet(input_size, hidden_size, num_classes)

        # Train the network
        stats = net.train(X_train, y_train, X_val, y_val,
                          num_iters=iters, batch_size=200,
                          learning_rate=lr, learning_rate_decay=0.95,
                          reg=rs)
        
        y_train_pred = net.predict(X_train)
        acc_train = np.mean(y_train == y_train_pred)
        y_val_pred = net.predict(X_val)
        acc_val = np.mean(y_val == y_val_pred)
        
        results[(lr, rs)] = (acc_train, acc_val)
        
        if best_val < acc_val:
            best_stats = stats
            best_val = acc_val
            best_net = net
Example #20
# differences from the ones we saw above for the poorly tuned network.          #
#                                                                               #
# Tweaking hyperparameters by hand can be fun, but you might find it useful to  #
# write code to sweep through possible combinations of hyperparameters          #
# automatically like we did on the previous exercises.                          #
#################################################################################
results = {}
best_val = -1
learning_rates = [1e-3]
regularization_strengths = [0.05,0.1,0.5]
hidden_size = [50,100]

for lr in learning_rates:
    for reg in regularization_strengths:
        for hnum in hidden_size:
            net = TwoLayerNet(input_size, hnum, num_classes)
            stats = net.train(X_train, y_train, X_val, y_val,
                num_iters=1000, batch_size=200,
                learning_rate=lr, learning_rate_decay=0.95,
                reg=reg, verbose=True)
            y_train_pred = net.predict(X_train)
            y_val_pred = net.predict(X_val)
            tmp_train_accuracy = np.mean(y_train == y_train_pred)
            tmp_val_accuracy = np.mean(y_val == y_val_pred)
            results[(lr, reg, hnum)] = [tmp_train_accuracy, tmp_val_accuracy]
            if tmp_val_accuracy > best_val:
                best_val = tmp_val_accuracy
                best_net = copy.deepcopy(net)
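
# Equivalent sweep (sketch) using itertools.product, which avoids one level
# of nesting per hyperparameter as the grid grows.
from itertools import product
for lr, reg, hnum in product(learning_rates, regularization_strengths, hidden_size):
    print('candidate: lr=%g reg=%g hidden=%d' % (lr, reg, hnum))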
#################################################################################
#                               END OF YOUR CODE                                #
#################################################################################
Example #21
# cross-validate various parameters as in previous sections. Store your best   #
# model in the best_net variable.                                              #
################################################################################

learning_rates = [3e-1, 1]
regularization_strengths = [1e-4, 3e-4, 1e-3, 3e-3]

# learning_rates = np.logspace(-2, 1, 5)
# regularization_strengths = np.logspace(-5, -2, 5)

results = {}
best_val = -1   # The highest validation accuracy that we have seen so far.

for lr in learning_rates:
  for reg in regularization_strengths:
    net = TwoLayerNet(input_dim, hidden_dim, num_classes)

    # Train the network
    stats = net.train(X_train_feats, y_train, X_val_feats, y_val,
                num_iters=2001, batch_size=200,
                learning_rate=lr, learning_rate_decay=0.95,
                reg=reg, verbose=False)

    y_train_pred = net.predict(X_train_feats)
    training_accuracy = np.mean(y_train == y_train_pred)
    y_val_pred = net.predict(X_val_feats)
    validation_accuracy = np.mean(y_val == y_val_pred)
    results[(lr, reg)] = (training_accuracy, validation_accuracy)

    if best_val < validation_accuracy:
      best_val = validation_accuracy
Example #22
        plt.subplot(examples_per_class, len(classes),
                    i * len(classes) + cls + 1)
        plt.imshow(X_test[idx].astype('uint8'))
        plt.axis('off')
        if i == 0:
            plt.title(cls_name)
plt.show()

#%%

from cs231n.classifiers.neural_net import TwoLayerNet

input_dim = X_train_feats.shape[1]
hidden_dim = 550
num_classes = 10

net = TwoLayerNet(input_dim, hidden_dim, num_classes)
best_net = None

################################################################################
# Train a two-layer neural network on image features. You may want to    #
# cross-validate various parameters as in previous sections. Store your best   #
# model in the best_net variable.                                              #

learning_rates = [1, 8e-1, 6e-1]
regularization_strengths = [6e-5, 8e-4, 1e-3]

results = {}
best_val = -1
best_net = None

for lr in learning_rates:
Example #23
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)

# # Train a network
# To train our network we will use SGD with momentum. In addition,
#we will adjust the learning rate with an exponential learning rate schedule
#as optimization proceeds; after each epoch, we will reduce the learning rate
#by multiplying it by a decay rate.

# In[ ]:

input_size = 32 * 32 * 3
hidden_size = 50
num_classes = 10
net = TwoLayerNet(input_size, hidden_size, num_classes)

# Train the network
stats = net.train(X_train,
                  y_train,
                  X_val,
                  y_val,
                  num_iters=1000,
                  batch_size=200,
                  learning_rate=1e-4,
                  learning_rate_decay=0.95,
                  reg=0.25,
                  verbose=True)

# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
Example #24

# Invoke the above function to get our data.
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)

#%%
input_size = 32 * 32 * 3
hidden_size = 110
num_classes = 10
net = TwoLayerNet(input_size, hidden_size, num_classes)

# Train the network, experiment with learning rates, regularization
stats = net.train(X_train, y_train, X_val, y_val,
            num_iters=2500, batch_size=200,
            learning_rate=5e-4, learning_rate_decay=0.95,
            reg=1, verbose=True)
            
# Predict on the validation set
val_acc = np.mean(net.predict(X_val) == y_val)
print('Validation accuracy: ', val_acc)

#%%

# Plot the loss function and train / validation accuracies
plt.subplot(2, 1, 1)
Example #25
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)


# # Train a network
# To train our network we will use SGD with momentum. In addition, we will adjust the learning rate with an exponential learning rate schedule as optimization proceeds; after each epoch, we will reduce the learning rate by multiplying it by a decay rate.

# In[11]:

input_size = 32 * 32 * 3
hidden_size = 50
num_classes = 10
net = TwoLayerNet(input_size, hidden_size, num_classes)

# Train the network
stats = net.train(X_train, y_train, X_val, y_val,
            num_iters=1000, batch_size=200,
            learning_rate=1e-4, learning_rate_decay=0.95,
            reg=0.5, verbose=True)

# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
print('Validation accuracy: ', val_acc)



# # Debug the training
# With the default parameters we provided above, you should get a validation accuracy of about 0.29 on the validation set. This isn't very good.
Example #26

# Invoke the above function to get our data.
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)


input_size = 32 * 32 * 3
hidden_size = 50
num_classes = 10
net = TwoLayerNet(input_size, hidden_size, num_classes)

# Train the network
stats = net.train(X_train, y_train, X_val, y_val,
                  num_iters=1000, batch_size=200,
                  learning_rate=1e-4, learning_rate_decay=0.95,
                  reg=0.25, verbose=True)
# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
print('Validation accuracy: ', val_acc)

# Plot the loss function and train / validation accuracies
plt.subplot(2, 1, 1)
plt.plot(stats['loss_history'])
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')

plt.subplot(2, 1, 2)
Example #27
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)

# # Train a network
# To train our network we will use SGD with momentum. In addition, we will
# adjust the learning rate with an exponential learning rate schedule as
# optimization proceeds; after each epoch, we will reduce the learning
# rate by multiplying it by a decay rate.

input_size = 32 * 32 * 3
hidden_size = 50
num_classes = 10
net = TwoLayerNet(input_size, hidden_size, num_classes)

# Train the network
stats = net.train(X_train,
                  y_train,
                  X_val,
                  y_val,
                  num_iters=1000,
                  batch_size=200,
                  learning_rate=1e-3,
                  learning_rate_decay=0.95,
                  reg=0.25,
                  verbose=True)

# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
Example #28
    X_val = X_val.reshape(num_validation, -1)
    X_test = X_test.reshape(num_test, -1)

    print('Train data shape:', X_train.shape)
    print('Train labels shape:', y_train.shape)
    print('Validation data shape:', X_val.shape)
    print('Validation labels shape:', y_val.shape)
    print('Test data shape:', X_test.shape)
    print('Test labels shape:', y_test.shape)

    #%%
    # train a network
    input_size = 32*32*3
    hidden_size = 81
    num_classes = 10
    net = TwoLayerNet(input_size, hidden_size, num_classes)

    # Train the network
    stats = net.train(X_train, y_train, X_val, y_val,
                num_iters=1500, batch_size=200,
                learning_rate=7.5e-4, learning_rate_decay=0.95,
                reg=1.0, verbose=True)

    # Predict on the validation set
    val_acc = (net.predict(X_val) == y_val).mean()
    print('Validation accuracy: ', val_acc)

    #%%
    # Debug the training
    # plot the loss function and train/validation accuracies
Example #29
# Invoke the above function to get our data.
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)



input_size = 32 * 32 * 3
hidden_size = 50
num_classes = 10
net = TwoLayerNet(input_size, hidden_size, num_classes)

# Train the network
stats = net.train(X_train, y_train, X_val, y_val,
            num_iters=1000, batch_size=200,
            learning_rate=1e-4, learning_rate_decay=0.95,
            reg=0.25, verbose=True)

# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
print('Validation accuracy: ', val_acc)



# Plot the loss function and train / validation accuracies
plt.subplot(2, 1, 1)
Example #30
#net = TwoLayerNet(input_size, hi_size, num_classes)
#%%
count = 0  # tracks how many parameter sets we have tried; not required for the model
#for hi_size in hidden_range:
for lr in learning_range:
    for reg in regularization_range:

        #n = 0
        # lr = learning_range[n]
        #reg = regularization_range[n]

        count += 1  # track how many times we have calculated the results
        print('Calculating %d out of %d' % (count, total_para_sets))

        net = TwoLayerNet(input_size, hi_size, num_classes)
        # Train the network
        stats = net.train(
            X_train_feats0,
            y_train,
            X_val_feats0,
            y_val,
            num_iters=1000,
            # In train(), iterations_per_epoch = max(num_train / batch_size, 1),
            # so a larger batch_size lowers iterations_per_epoch; together with
            # a larger num_iters this increases the number of epochs
            # (num_iters // iterations_per_epoch).
            batch_size=400,
            learning_rate=lr,
            learning_rate_decay=0.95,
            reg=reg,
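
# Worked example (sketch, illustrative numbers) of the epoch arithmetic in
# the comment above: with 49,000 training images, batch_size=400 and
# num_iters=1000:
num_train, batch_size, num_iters = 49000, 400, 1000
iterations_per_epoch = max(num_train // batch_size, 1)   # 122
print('epochs:', num_iters // iterations_per_epoch)      # -> 8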
Example #31
def init_toy_model():
    np.random.seed(0)
    return TwoLayerNet(input_size, hidden_size, num_classes, std=1e-1)
Example #32
    for reg_str in regularization_strengths:
        for train_circle in training_epoch:
            for lr_rate in learning_rate:
                for hid_size in hidden_size:
                    whole_time -= 1
                    print "now rest times is:", whole_time
                    """
                    print
                    print "now rest times is:", whole_time
                    print "now hidden size is: ", hid_size
                    print "now learning rate is: ", lr_rate
                    print "now training epoch is:", train_circle
                    print "now regualrization streagths is: ", reg_str
                    print "now batch size is: ", bat_size
                    """
                    net = TwoLayerNet(input_size, hid_size, num_classes)

                    now_stats = net.train(X_train,
                                          y_train,
                                          X_val,
                                          y_val,
                                          learning_rate=lr_rate,
                                          learning_rate_decay=0.95,
                                          reg=reg_str,
                                          num_iters=train_circle,
                                          batch_size=bat_size,
                                          verbose=False)

                    y_pred_val = net.predict(X_val)
                    acc_val = np.mean(y_pred_val == y_val)
Example #33
print(X_train_feats.shape)

from cs231n.classifiers.neural_net import TwoLayerNet

input_dim = X_train_feats.shape[1]
hidden_dim = 500
num_classes = 10

best_net = None
best_val = -1
learning_rates = [9e-1, 7e-1, 5e-1, 3e-1, 1e-1, 9e-2, 7e-2, 5e-2, 3e-2]
regularization_strengths = [1e-3, 3e-3, 5e-3, 7e-3, 9e-3]

for lr in learning_rates:
    for reg in regularization_strengths:
        net = TwoLayerNet(input_dim, hidden_dim, num_classes)
        net.train(X_train_feats, y_train, X_val_feats, y_val,
                learning_rate=lr, learning_rate_decay=0.95,
                reg=reg, num_iters=6000, batch_size=200, verbose=False)
       
        train_accuracy = (net.predict(X_train_feats) == y_train).mean()
        val_accuracy = (net.predict(X_val_feats) == y_val).mean()
        print('lr %e reg %e train accuracy: %f val accuracy: %f' % (lr,reg,train_accuracy,val_accuracy))
        if val_accuracy > best_val:
            best_val = val_accuracy
            best_net = net

print('best validation accuracy achieved during cross-validation: %f' % best_val)

test_accuracy = (best_net.predict(X_test_feats) == y_test).mean()
print(test_accuracy)
Example #34
def show_net_weights(net):
  W1 = net.params['W1']
  W1 = W1.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2)
  plt.imshow(visualize_grid(W1, padding=3).astype('uint8'))
  plt.gca().axis('off')
  plt.show()

#show_net_weights(net)

best_net = None
best_val_acc = 0

rates = [0.1,0.01,0.001]

for rate in rates:
    net = TwoLayerNet(input_size, hidden_size, num_classes)

    # Train the network
    stats = net.train(X_train, y_train, X_val, y_val,
                num_iters=1000, batch_size=200,
                learning_rate=rate, learning_rate_decay=0.98,
                reg=0.3, verbose=False)

    # Average the last few loss values to smooth out minibatch noise.
    loss_avg = np.mean(stats['loss_history'][-10:])
    print("Rate: ", rate, " loss: ", loss_avg)

    val_acc = (net.predict(X_val) == y_val).mean()
    print('Validation accuracy: ', val_acc)

    # Keep the network with the best validation accuracy so far.
    if val_acc > best_val_acc:
        best_val_acc = val_acc
        best_net = net