Example #1
    def getQoS(self):
        X_test, y_test = self._get_test_data()

        input_size = 32 * 32 * 3
        hidden_size = 110
        num_classes = 10

        net = TwoLayerNet(input_size, hidden_size, num_classes)
        try:
            # 'latin1' lets Python 3 read pickles that were written by Python 2.
            with open(self.run_dir + "model_nn_w1.p", "rb") as f:
                net.params['W1'] = pickle.load(f, encoding='latin1')
            with open(self.run_dir + "model_nn_b1.p", "rb") as f:
                net.params['b1'] = pickle.load(f, encoding='latin1')
            with open(self.run_dir + "model_nn_w2.p", "rb") as f:
                net.params['W2'] = pickle.load(f, encoding='latin1')
            with open(self.run_dir + "model_nn_b2.p", "rb") as f:
                net.params['b2'] = pickle.load(f, encoding='latin1')

            test_accuracy = (net.predict(X_test) == y_test).mean()
        except Exception:  # missing or unreadable model files count as zero QoS
            test_accuracy = 0.0
        print("qos", str(test_accuracy))
        return test_accuracy * 100.0
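
# For reference, a minimal counterpart sketch of the save step that would
# produce the four pickle files the loader above expects. save_params is a
# hypothetical helper; the file names and run_dir are taken from the loader.
import pickle

def save_params(net, run_dir):
    # Write each parameter array to its own pickle, mirroring getQoS above.
    for name, fname in [('W1', 'model_nn_w1.p'), ('b1', 'model_nn_b1.p'),
                        ('W2', 'model_nn_w2.p'), ('b2', 'model_nn_b2.p')]:
        with open(run_dir + fname, 'wb') as f:
            pickle.dump(net.params[name], f)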
def NeuralNet(train_data, train_label, validation_data, validation_label,
              test_data, test_label):
    input_size = 32 * 32 * 3
    hidden_size = 50
    num_classes = 10
    net = TwoLayerNet(input_size, hidden_size, num_classes)

    print(train_label.shape)
    # Train the network
    stats = net.train(train_data,
                      train_label,
                      validation_data,
                      validation_label,
                      num_iters=4000,
                      batch_size=1500,
                      learning_rate=5e-3,
                      learning_rate_decay=0.96,
                      reg=2,
                      verbose=True,
                      method='adam')

    # Predict on the validation set
    val_acc = (net.predict(validation_data) == validation_label).mean()
    print('Validation accuracy: ', val_acc)

    # Plot the loss function and train / validation accuracies
    plt.subplot(2, 1, 1)
    plt.plot(stats['loss_history'])
    plt.title('Loss history')
    plt.xlabel('Iteration')
    plt.ylabel('Loss')

    plt.subplot(2, 1, 2)
    plt.plot(stats['train_acc_history'], label='train')
    plt.plot(stats['val_acc_history'], label='val')
    plt.title('Classification accuracy history')
    plt.xlabel('Epoch')
    plt.ylabel('Classification accuracy')
    plt.legend()
    plt.show()

    show_net_weights(net)
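
# show_net_weights is not defined in this snippet; a sketch matching the
# CS231n assignment helper, assuming the visualize_grid utility from
# cs231n.vis_utils is available:
from cs231n.vis_utils import visualize_grid

def show_net_weights(net):
    # Reshape each hidden unit's weight vector back into a 32x32x3 image tile.
    W1 = net.params['W1'].reshape(32, 32, 3, -1).transpose(3, 0, 1, 2)
    plt.imshow(visualize_grid(W1, padding=3).astype('uint8'))
    plt.gca().axis('off')
    plt.show()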
Example #3
def main(job_id, params):
    print('Training new neural net and calculating validation value job #%d' % job_id)
    np.random.seed(0)  # For repeatability.
    X_train, y_train, X_val, y_val, _, _ = get_CIFAR10_data()
    
    input_size = 32 * 32 * 3
    num_classes = 10
    hidden_size = params['hidden_size']
    lr = params['learning_rate']
    lrdc = params['learning_rate_decay']
    r = params['regularization']
    net = TwoLayerNet(input_size, hidden_size, num_classes) 
    stats = net.train(X_train, y_train, X_val, y_val,
                      num_iters=600, batch_size=200,
                      learning_rate=lr, learning_rate_decay=lrdc,
                      reg=r)

    val_acc = (net.predict(X_val) == y_val).mean()
    print('Validation accuracy: ', val_acc)
    
    # We wish to maximize the validation accuracy, so minimize the negative.
    return -val_acc
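
# Hypothetical driver for the function above. In the original setup the caller
# is presumably a hyperparameter-optimization framework (Spearmint-style) that
# supplies job_id and params; the values below are illustrative only.
if __name__ == '__main__':
    params = {'hidden_size': 50, 'learning_rate': 1e-3,
              'learning_rate_decay': 0.95, 'regularization': 0.5}
    print(main(1, params))  # prints the negated validation accuracy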
Example #4

best_val = -1
best_stats = None
learning_rates = [1e-4, 2e-4, 5e-4, 8e-4]
regularization_strengths = [0.2, 0.3, 0.4]
results = {} 
iters = 2000 #100
for lr in learning_rates:
    for rs in regularization_strengths:
        net = TwoLayerNet(input_size, hidden_size, num_classes)

        # Train the network
        stats = net.train(X_train, y_train, X_val, y_val,
                          num_iters=iters, batch_size=200,
                          learning_rate=lr, learning_rate_decay=0.95,
                          reg=rs)
        
        y_train_pred = net.predict(X_train)
        acc_train = np.mean(y_train == y_train_pred)
        y_val_pred = net.predict(X_val)
        acc_val = np.mean(y_val == y_val_pred)
        
        results[(lr, rs)] = (acc_train, acc_val)
        
        if best_val < acc_val:
            best_stats = stats
            best_val = acc_val
            best_net = net
            
# Print out results.
for lr, reg in sorted(results):
    train_accuracy, val_accuracy = results[(lr, reg)]
    print('lr %e reg %e train accuracy: %f val accuracy: %f' % (lr, reg, train_accuracy, val_accuracy))
################################################################################
# TODO:                                                                        #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save    #
# the best trained classifier in best_net. You might also want to play         #
# with different numbers of bins in the color histogram (see the sketch after  #
# this code block). If you are careful you should be able to get accuracy of   #
# near 0.44 on the validation set.                                             #
################################################################################
for lr in learning_rates:
    for reg in regularization_strengths:
        print(lr, reg)
        # Re-initialize the network for each setting so runs don't share
        # weights (dimensions assumed to match the later feature snippets).
        net = TwoLayerNet(input_dim, hidden_dim, num_classes)
        stats = net.train(X_train_feats, y_train, X_val_feats, y_val,
                          num_iters=1000, batch_size=200,
                          learning_rate=lr, learning_rate_decay=0.95,
                          reg=reg, verbose=True)
        y_train_pred = net.predict(X_train_feats)
        y_val_pred = net.predict(X_val_feats)
        tmp_train_accuracy = np.mean(y_train == y_train_pred)
        tmp_val_accuracy = np.mean(y_val == y_val_pred)
        results[(lr, reg)] = [tmp_train_accuracy, tmp_val_accuracy]
        if tmp_val_accuracy > best_val:
            best_val = tmp_val_accuracy
            best_net = copy.deepcopy(net)
################################################################################
#                              END OF YOUR CODE                                #
################################################################################
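
# As the TODO suggests, the number of color-histogram bins can also be tuned.
# A hedged sketch, assuming the assignment's cs231n.features helpers; the bin
# grid is illustrative, and preprocessing (mean subtraction etc.) is omitted:
from cs231n.features import extract_features, hog_feature, color_histogram_hsv

for num_color_bins in [8, 10, 12]:
    feature_fns = [hog_feature,
                   lambda img: color_histogram_hsv(img, nbin=num_color_bins)]
    X_train_feats = extract_features(X_train, feature_fns, verbose=True)
    X_val_feats = extract_features(X_val, feature_fns)
    # ...then rerun the lr/reg grid search above on each feature set.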

# Print out results.
for lr, reg in sorted(results):
    train_accuracy, val_accuracy = results[(lr, reg)]
    print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
        lr, reg, train_accuracy, val_accuracy))
for lr in learning_rates:
    for reg in regularization_strengths:
        net = TwoLayerNet(input_dim, hidden_dim, num_classes)
        #Train the network
        stats = net.train(x_train_feats,
                          y_train,
                          x_val_feats,
                          y_val,
                          num_iters=3000,
                          batch_size=200,
                          learning_rate=lr,
                          learning_rate_decay=0.95,
                          reg=reg,
                          verbose=False)

        val_acc = (net.predict(x_val_feats) == y_val).mean()
        if val_acc > best_val:
            best_val = val_acc
            best_net = net
        results[(lr, reg)] = val_acc

        plt.cla()
        plt.plot(stats['loss_history'])
        plt.xlabel('Iteration number')
        plt.ylabel('loss')
        info = 'loss_history_lr={}_re={}'.format(lr, reg)
        plt.title(info)
        plt.savefig("./plots/" + info + '.png')
        plt.close()

        plt.cla()
Example #7
File: myNN.py  Project: wycjl/CS231n


input_size = 32 * 32 * 3
hidden_size = 50
num_classes = 10
net = TwoLayerNet(input_size, hidden_size, num_classes)

# Train the network
stats = net.train(X_train, y_train, X_val, y_val,
            num_iters=1000, batch_size=200,
            learning_rate=1e-4, learning_rate_decay=0.95,
            reg=0.25, verbose=True)

# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
print('Validation accuracy: ', val_acc)



# Plot the loss function and train / validation accuracies
plt.subplot(2, 1, 1)
plt.plot(stats['loss_history'])
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')

plt.subplot(2, 1, 2)
plt.plot(stats['train_acc_history'], label='train')
plt.plot(stats['val_acc_history'], label='val')
plt.title('Classification accuracy history')
Example #8
net = TwoLayerNet(input_dim, hidden_dim, num_classes)
best_net = None

################################################################################
# TODO: Train a two-layer neural network on image features. You may want to    #
# cross-validate various parameters as in previous sections. Store your best   #
# model in the best_net variable.                                              #
################################################################################
pass
################################################################################
#                              END OF YOUR CODE                                #
################################################################################

# In[ ]:

# Run your neural net classifier on the test set. You should be able to
# get more than 55% accuracy.

test_acc = (best_net.predict(X_test_feats) == y_test).mean()
print(test_acc)

# # Bonus: Design your own features!
#
# You have seen that simple image features can improve classification performance. So far we have tried HOG and color histograms, but other types of features may be able to achieve even better classification performance.
#
# For bonus points, design and implement a new type of feature and use it for image classification on CIFAR-10. Explain how your feature works and why you expect it to be useful for image classification. Implement it in this notebook, cross-validate any hyperparameters, and compare its performance to the HOG + Color histogram baseline.
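#
# As a starting point, a hedged sketch of one such feature: a global histogram
# of gradient orientations weighted by gradient magnitude (a much cruder
# cousin of HOG). The function and its bin count are illustrative only.

import numpy as np

def edge_orientation_histogram(img, nbin=9):
    """Toy feature for an H x W x 3 image with values in [0, 255]."""
    gray = img.mean(axis=2)                  # crude grayscale conversion
    gy, gx = np.gradient(gray)               # finite-difference gradients
    mag = np.sqrt(gx ** 2 + gy ** 2)
    ang = np.mod(np.arctan2(gy, gx), np.pi)  # fold orientations into [0, pi)
    hist, _ = np.histogram(ang, bins=nbin, range=(0, np.pi), weights=mag)
    total = hist.sum()
    return hist / total if total > 0 else hist

# It could then be appended to feature_fns alongside hog_feature and the color
# histogram, and its bin count cross-validated like any other hyperparameter.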

# # Bonus: Do something extra!
# Use the material and code we have presented in this assignment to do something interesting. Was there another question we should have asked? Did any cool ideas pop into your head as you were working on the assignment? This is your chance to show off!
Example #9
# (Fragment starts mid-loop; outer loops over the hyperparameter grids are
# assumed as follows.)
for i in range(len(hidden_sizes)):
    for j in range(len(learning_rates)):
        for k in range(len(batch_sizes)):
            for l in range(len(regularization_strengths)):
                print('Hyperparameter combination:', i, j, k, l)
                net = TwoLayerNet(input_size, hidden_sizes[i], num_classes)
                # Train the network
                stats = net.train(X_train_feats,
                                  y_train,
                                  X_val_feats,
                                  y_val,
                                  num_iters=2000,
                                  batch_size=batch_sizes[k],
                                  learning_rate=learning_rates[j],
                                  learning_rate_decay=0.95,
                                  reg=regularization_strengths[l],
                                  verbose=True)
                # Predict on the validation set
                val_acc = (net.predict(X_val_feats) == y_val).mean()
                if best_val < val_acc:
                    best_val = val_acc
                    best_net = net
                    best_combination = [i, j, k, l]
                    combination_acc_history[i, j, k, l] = val_acc
                print('Best combination so far:', best_combination,
                      'with acc =', best_val)
################################################################################
#                              END OF YOUR CODE                                #
################################################################################
plt.subplot(2, 1, 1)
plt.plot(stats['loss_history'])
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')
#################################################################################
results = {}
best_val = -1
learning_rates = [1e-3]
regularization_strengths = [0.05, 0.1, 0.5]
hidden_size = [50, 100]

for lr in learning_rates:
    for reg in regularization_strengths:
        for hnum in hidden_size:
            net = TwoLayerNet(input_size, hnum, num_classes)
            stats = net.train(X_train, y_train, X_val, y_val,
                num_iters=1000, batch_size=200,
                learning_rate=lr, learning_rate_decay=0.95,
                reg=reg, verbose=True)
            y_train_pred = net.predict(X_train)
            y_val_pred = net.predict(X_val)
            tmp_train_accuracy = np.mean(y_train == y_train_pred)
            tmp_val_accuracy = np.mean(y_val == y_val_pred)
            results[(lr, reg, hnum)] = [tmp_train_accuracy, tmp_val_accuracy]
            if tmp_val_accuracy > best_val:
                best_val = tmp_val_accuracy
                best_net = copy.deepcopy(net)
#################################################################################
#                               END OF YOUR CODE                                #
#################################################################################

# Print out results.
for lr, reg, hnum in sorted(results):
    train_accuracy, val_accuracy = results[(lr, reg, hnum)]
    print('lr %e reg %e hiddennum %d train accuracy: %f val accuracy: %f' % (
        lr, reg, hnum, train_accuracy, val_accuracy))
Example #11
net = TwoLayerNet(input_size, hidden_size, num_classes)

# Train the network
stats = net.train(X_train,
                  y_train,
                  X_val,
                  y_val,
                  num_iters=1000,
                  batch_size=200,
                  learning_rate=1e-4,
                  learning_rate_decay=0.95,
                  reg=0.5,
                  verbose=True)

# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
print('Validation accuracy: ', val_acc)

show_graph = 0
if show_graph != 0:
    # Plot the loss function and train / validation accuracies
    plt.subplot(2, 1, 1)
    plt.plot(stats['loss_history'])
    plt.title('Loss history')
    plt.xlabel('Iteration')
    plt.ylabel('Loss')

    plt.subplot(2, 1, 2)
    plt.plot(stats['train_acc_history'], label='train')
    plt.plot(stats['val_acc_history'], label='val')
    plt.title('Classification accuracy history')
Example #12
for lr in np.arange(0.5, 2, 0.1):
    for reg in np.arange(0.001, 0.01, 0.002):
        for hid in hidden_dim:
            print('At hidden size: %d, LR: %f, and REG: %f' % (hid, lr, reg))
            net_iter = TwoLayerNet(input_dim, hid, num_classes)
            stats_iter = net_iter.train(X_train_feats,
                                        y_train,
                                        X_val_feats,
                                        y_val,
                                        num_iters=3000,
                                        batch_size=200,
                                        learning_rate=lr,
                                        learning_rate_decay=0.95,
                                        reg=reg,
                                        verbose=True)
            y_val_pred_iter = net_iter.predict(X_val_feats)
            y_train_pred_iter = net_iter.predict(X_train_feats)
            val_acc = np.mean(y_val == y_val_pred_iter)
            train_acc = np.mean(y_train == y_train_pred_iter)
            print('Validation accuracy: %f' % val_acc)
            if val_acc > best_val:
                best_val = val_acc
                best_net = net_iter
                print('Best so far: %f' % val_acc)
################################################################################
#                              END OF YOUR CODE                                #
################################################################################

# Run your neural net classifier on the test set. You should be able to
# get more than 55% accuracy.
Example #13
# Train a neural network on the image features
from cs231n.classifiers.neural_net import TwoLayerNet

input_dim = X_train_feats.shape[1]
hidden_dim = 500
num_classes = 10
net = TwoLayerNet(input_dim, hidden_dim, num_classes)
best_net = None
best_val = -1
results = {}

learning_rates = [2.5e-3, 1e-3, 5e-2, 1e-2, 1.5e-1, 1e-1, 1, 0.5, 5]
regularization_strengths = [5e-3, 1e-3, 2.5e-2, 1e-2, 1.5e-1, 1e-1, 1, 0.5]
for lr in learning_rates:
    for rs in regularization_strengths:
        net = TwoLayerNet(input_dim, hidden_dim, num_classes)
        stats = net.train(X_train_feats, y_train, X_val_feats, y_val,
                          num_iters=1000, batch_size=200, learning_rate=lr,
                          learning_rate_decay=0.95, reg=rs, verbose=False)
        val_accuracy = np.mean(net.predict(X_val_feats) == y_val)
        if val_accuracy > best_val:
            best_val = val_accuracy
            best_net = net
        results[(lr, rs)] = val_accuracy

for lr, reg in sorted(results):
    val_accuracy = results[(lr, reg)]
    print('lr %e reg %e val accuracy: %f' % (lr, reg, val_accuracy))
print('best validation accuracy achieved during cross-validation: %f' % best_val)
# Evaluate the best network on the test set
test_acc = (best_net.predict(X_test_feats) == y_test).mean()
print('Test accuracy: ', test_acc)

# (Fragment starts mid-loop; assumed outer loops supplying alpha and lam.)
for alpha in learning_rates:
    for lam in regularization_strengths:
        net = TwoLayerNet(input_dim, hidden_dim, num_classes)

        # Train the network
        stats = net.train(X_train_feats,
                          y_train,
                          X_val_feats,
                          y_val,
                          num_iters=1000,
                          batch_size=200,
                          learning_rate=alpha,
                          learning_rate_decay=0.95,
                          reg=lam,
                          verbose=True)

        # Predict on the validation set
        val_acc = float(np.mean(net.predict(X_val_feats) == y_val))
        print('Validation accuracy: ', val_acc)

        if val_acc > best_val:
            best_val = val_acc
            best_alpha = alpha
            best_lam = lam
            best_net = net
################################################################################
#                              END OF YOUR CODE                                #
################################################################################

# Run your neural net classifier on the test set. You should be able to
# get more than 55% accuracy.

test_acc = np.mean(best_net.predict(X_test_feats) == y_test)
# regularization_strengths = np.logspace(-5, -2, 5)

results = {}
best_val = -1   # The highest validation accuracy that we have seen so far.

for lr in learning_rates:
  for reg in regularization_strengths:
    net = TwoLayerNet(input_dim, hidden_dim, num_classes)

    # Train the network
    stats = net.train(X_train_feats, y_train, X_val_feats, y_val,
                num_iters=2001, batch_size=200,
                learning_rate=lr, learning_rate_decay=0.95,
                reg=reg, verbose=False)

    y_train_pred = net.predict(X_train_feats)
    training_accuracy = np.mean(y_train == y_train_pred)
    y_val_pred = net.predict(X_val_feats)
    validation_accuracy = np.mean(y_val == y_val_pred)
    results[(lr, reg)] = (training_accuracy, validation_accuracy)

    if best_val < validation_accuracy:
      best_val = validation_accuracy
      best_net = net

for lr, reg in sorted(results):
    train_accuracy, val_accuracy = results[(lr, reg)]
    print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
        lr, reg, train_accuracy, val_accuracy))

print('best validation accuracy achieved during cross-validation: %f' % best_val)
print('Test labels shape: ', y_test.shape)

#%%
input_size = 32 * 32 * 3
hidden_size = 110
num_classes = 10
net = TwoLayerNet(input_size, hidden_size, num_classes)

# Train the network, experiment with learning rates, regularization
stats = net.train(X_train, y_train, X_val, y_val,
            num_iters=2500, batch_size=200,
            learning_rate=5e-4, learning_rate_decay=0.95,
            reg=1, verbose=True)
            
# Predict on the validation set
val_acc = np.mean(net.predict(X_val) == y_val)
print('Validation accuracy: ', val_acc)

#%%

# Plot the loss function and train / validation accuracies
plt.subplot(2, 1, 1)
plt.plot(stats['loss_history'])
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')

plt.subplot(2, 1, 2)
plt.plot(stats['train_acc_history'], label='train')
plt.plot(stats['val_acc_history'], label='val')
plt.title('Classification accuracy history')
Example #17
best_val = -1
best_stats = None
results = {}
learning_rates = [0.5, 1, 5]
regularization_strengths = [0.01, 0.001]
num_iterations = 2000
for lr in learning_rates:
    for reg in regularization_strengths:
        net = TwoLayerNet(input_dim, hidden_dim, num_classes)
        stats = net.train(X_train_feats,
                          y_train,
                          X_val_feats,
                          y_val,
                          num_iters=num_iterations,
                          learning_rate=lr,
                          reg=reg)
        y_train_pred = net.predict(X_train_feats)
        y_val_pred = net.predict(X_val_feats)
        train_accuracy = np.mean(y_train == y_train_pred)
        valid_accuracy = np.mean(y_val == y_val_pred)

        results[(lr, reg)] = (train_accuracy, valid_accuracy)

        if best_val < valid_accuracy:
            best_stats = stats
            best_val = valid_accuracy
            best_net = net

for lr, reg in sorted(results):
    train_accuracy, val_accuracy = results[(lr, reg)]
    print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
        lr, reg, train_accuracy, val_accuracy))
Example #18
net = TwoLayerNet(input_size, hidden_size, num_classes)

# Train the network
stats = net.train(X_train,
                  y_train,
                  X_val,
                  y_val,
                  num_iters=1000,
                  batch_size=200,
                  learning_rate=1e-3,
                  learning_rate_decay=0.95,
                  reg=0.25,
                  verbose=True)

# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
print('Validation accuracy: ', val_acc)

# # Debug the training
# With the default parameters we provided above, you should get a validation accuracy of about 0.29 on the validation set. This isn't very good.
#
# One strategy for getting insight into what's wrong is to plot the loss function and the accuracies on the training and validation sets during optimization.
#
# Another strategy is to visualize the weights that were learned in the
# first layer of the network. In most neural networks trained on visual
# data, the first layer weights typically show some visible structure when
# visualized.

# Plot the loss function and train / validation accuracies
plt.subplot(2, 1, 1)
plt.plot(stats['loss_history'])
# In[11]:

input_size = 32 * 32 * 3
hidden_size = 50
num_classes = 10
net = TwoLayerNet(input_size, hidden_size, num_classes)

# Train the network
stats = net.train(X_train, y_train, X_val, y_val,
            num_iters=1000, batch_size=200,
            learning_rate=1e-4, learning_rate_decay=0.95,
            reg=0.5, verbose=True)

# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
print('Validation accuracy: ', val_acc)



# # Debug the training
# With the default parameters we provided above, you should get a validation accuracy of about 0.29 on the validation set. This isn't very good.
# 
# One strategy for getting insight into what's wrong is to plot the loss function and the accuracies on the training and validation sets during optimization.
# 
# Another strategy is to visualize the weights that were learned in the first layer of the network. In most neural networks trained on visual data, the first layer weights typically show some visible structure when visualized.

# In[12]:

# Plot the loss function and train / validation accuracies
plt.subplot(2, 1, 1)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)

input_size = 32 * 32 * 3
hidden_size = 50
num_classes = 10
net = TwoLayerNet(input_size, hidden_size, num_classes)

# Train the network
stats = net.train(X_train, y_train, X_val, y_val,
            num_iters=1000, batch_size=200,
            learning_rate=1e-4, learning_rate_decay=0.95,
            reg=0.5, verbose=True)

# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
print('Validation accuracy: ', val_acc)


# Plot the loss function and train / validation accuracies
plt.subplot(2, 1, 1)
plt.plot(stats['loss_history'])
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')

plt.subplot(2, 1, 2)
plt.plot(stats['train_acc_history'], label='train')
plt.plot(stats['val_acc_history'], label='val')
plt.title('Classification accuracy history')
plt.xlabel('Epoch')
Example #21
regularization_strengths = [1e-3, 5e-3]
results = {}
for rate in learning_rates:
    for strength in regularization_strengths:
        net = TwoLayerNet(input_dim, hidden_dim, num_classes)
        stats = net.train(X_train_feats,
                          y_train,
                          X_val_feats,
                          y_val,
                          num_iters=1000,
                          batch_size=400,
                          learning_rate=rate,
                          learning_rate_decay=0.95,
                          reg=strength,
                          verbose=True)
        learning_accuracy = np.mean(net.predict(X_train_feats) == y_train)
        validation_accuracy = np.mean(net.predict(X_val_feats) == y_val)
        print(rate, strength, learning_accuracy, validation_accuracy)
        if validation_accuracy > best_val:
            best_val = validation_accuracy
            best_net = net
        results[(rate, strength)] = (learning_accuracy, validation_accuracy)

# Print out results.
for lr, reg in sorted(results):
    train_accuracy, val_accuracy = results[(lr, reg)]
    print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
        lr, reg, train_accuracy, val_accuracy))

print('best validation accuracy achieved during cross-validation: %f' % best_val)
################################################################################
net = TwoLayerNet(input_size, hidden_size, num_classes)

# Train the network
stats = net.train(X_train,
                  y_train,
                  X_val,
                  y_val,
                  num_iters=1000,
                  batch_size=200,
                  learning_rate=1e-4,
                  learning_rate_decay=0.95,
                  reg=0.25,
                  verbose=True)

# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
print('Validation accuracy:', val_acc)

# Debug the training
# 1: Plot the loss and the train/validation accuracies during optimization
# Plot the loss function and train / validation accuracies
# plt.subplot(2, 1, 1)
# plt.plot(stats['loss_history'])
# plt.title('Loss history')
# plt.xlabel('Iteration')
# plt.ylabel('Loss')

# plt.subplot(2, 1, 2)
# plt.plot(stats['train_acc_history'], label='train')
# plt.plot(stats['val_acc_history'], label='val')
# plt.title('Classification accuracy history')

# (Fragment starts mid-loop; assumed outer loops supplying _l and _r.)
for _l in learning_rates:
    for _r in regularization_strengths:
        for _d in learning_rate_decay:
            net = TwoLayerNet(input_dim, hidden_dim, num_classes)
            # Train the network
            stats = net.train(X_train_feats,
                              y_train,
                              X_val_feats,
                              y_val,
                              num_iters=1000,
                              batch_size=200,
                              learning_rate=_l,
                              learning_rate_decay=_d,
                              reg=_r,
                              verbose=True)

            # Predict on the validation set
            val_accuracy = (net.predict(X_val_feats) == y_val).mean()
            print(
                'Validation accuracy: {0}. learning rate : {1}. regularization strength : {2}. decay : {3} '
                .format(val_accuracy, _l, _r, _d))

            if val_accuracy > best_val:
                best_val = val_accuracy
                best_net = net
                best_stats = stats

# Plot the loss function and train / validation accuracies
plt.subplot(2, 1, 1)
plt.plot(best_stats['loss_history'])
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')
Example #24
                    print "now batch size is: ", bat_size
                    """
                    net = TwoLayerNet(input_size, hid_size, num_classes)

                    now_stats = net.train(X_train,
                                          y_train,
                                          X_val,
                                          y_val,
                                          learning_rate=lr_rate,
                                          learning_rate_decay=0.95,
                                          reg=reg_str,
                                          num_iters=train_circle,
                                          batch_size=bat_size,
                                          verbose=False)

                    y_pred_val = net.predict(X_val)
                    acc_val = np.mean(y_pred_val == y_val)

                    filename = './files/loop_%d_acc_%.2f.txt' % (whole_time,
                                                                 acc_val)

                    content = "now accuracy is:" + str(
                        acc_val) + "now rest times is:" + str(
                            whole_time
                        ) + '\n' + "now hidden size is: " + str(
                            hid_size
                        ) + '\n' + "now learning rate is: " + str(
                            lr_rate
                        ) + '\n' + "now training epoch is:" + str(
                            train_circle
                        ) + '\n' + "now regualrization streagths is: " + str(
Example #25
from cs231n.classifiers.neural_net import TwoLayerNet

input_dim = X_train_feats.shape[1]
hidden_dim = 500
num_classes = 10

best_net = None
best_val = -1
learning_rates = [9e-1, 7e-1, 5e-1, 3e-1, 1e-1, 9e-2, 7e-2, 5e-2, 3e-2]
regularization_strengths = [1e-3, 3e-3, 5e-3, 7e-3, 9e-3]

for lr in learning_rates:
    for reg in regularization_strengths:
        net = TwoLayerNet(input_dim, hidden_dim, num_classes)
        net.train(X_train_feats, y_train, X_val_feats, y_val,
                learning_rate=lr, learning_rate_decay=0.95,
                reg=reg, num_iters=6000, batch_size=200, verbose=False)
       
        train_accuracy = (net.predict(X_train_feats) == y_train).mean()
        val_accuracy = (net.predict(X_val_feats) == y_val).mean()
        print('lr %e reg %e train accuracy: %f val accuracy: %f' % (lr,reg,train_accuracy,val_accuracy))
        if val_accuracy > best_val:
            best_val = val_accuracy
            best_net = net

print('best validation accuracy achieved during cross-validation: %f' % best_val)

test_accuracy = (best_net.predict(X_test_feats) == y_test).mean()
print(test_accuracy)
Example #26
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)

input_size = 32 * 32 * 3
hidden_size = 50
num_classes = 10
net = TwoLayerNet(input_size, hidden_size, num_classes)

# Train the network
stats = net.train(X_train, y_train, X_val, y_val,
            num_iters=1000, batch_size=200,
            learning_rate=1e-4, learning_rate_decay=0.95,
            reg=0.5, verbose=True)

# Predict on the validation set
val_acc = np.mean(net.predict(X_val) == y_val)
print('Validation accuracy: ', val_acc)

# Plot the loss function and train / validation accuracies
plt.subplot(2, 1, 1)
plt.plot(stats['loss_history'])
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')

plt.subplot(2, 1, 2)
plt.plot(stats['train_acc_history'], label='train')
plt.plot(stats['val_acc_history'], label='val')
plt.title('Classification accuracy history')
plt.xlabel('Epoch')
plt.ylabel('Classification accuracy')