Code example #1
File: two_layer_net.py  Project: hayksar/cs231n
def show_net_weights(net):
    W1 = net.params['W1']
    # (3072, H) -> (H, 32, 32, 3): one 32x32 RGB tile per hidden unit
    W1 = W1.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2)
    plt.imshow(visualize_grid(W1, padding=3).astype('uint8'))
    plt.gca().axis('off')
    plt.savefig('nn_vis_grid_test.png')
    plt.show()
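
These show_net_weights variants all assume the CS231n assignment environment: matplotlib for plotting and the visualize_grid helper from the assignment's vis_utils module (the import path appears in example #6 below). A minimal preamble that makes the snippets self-contained:

import numpy as np
import matplotlib.pyplot as plt

# Assignment helper that tiles a batch of images of shape
# (N, H, W, C) into a single grid image for display.
from cs231n.vis_utils import visualize_grid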
Code example #2
def show_net_weights(model):
    plt.figure()
    plt.imshow(
        visualize_grid(model['W1'].T.reshape(-1, 32, 32, 3),
                       padding=3).astype('uint8'))
    plt.gca().axis('off')
    plt.show()
Code example #3
File: two_layer_net.py  Project: coscetti/cs231n
def show_net_weights(net):
    W1 = net.params['W1']
    W1 = W1.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2)
    fig = plt.figure()
    plt.imshow(visualize_grid(W1, padding=3).astype('uint8'))
    plt.gca().axis('off')
    # plt.show()
    fig.savefig('weight_nn.png', transparent=True)
Code example #4
def show_net_weights(net):
    W1 = net.params['W1']
    W1 = W1.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2)
    plt.figure()
    plt.imshow(visualize_grid(W1, padding=3).astype('uint8'))
    plt.figure()
    W2 = net.params['W2']
    W2 = (W2 - np.min(W2)) / (np.max(W2) - np.min(W2)) * 255
    plt.imshow(W2.astype("uint8"))
    plt.gca().axis('off')
    plt.show()
Code example #5
def show_net_weights(net):
    """
    W1 = net.params['W1']
    W1 = W1.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2)
    """
    W1 = net.params['W1']
    print('#orig W1.shape:', W1.shape)
    W1 = W1.T.reshape(-1, 32, 32, 3)
    print('#output W1.shape:', W1.shape)
    plt.imshow(visualize_grid(W1, padding=1).astype('uint8'))
    plt.gca().axis('off')
    plt.show()
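
Note that the commented-out lines and the active lines in example #5 are equivalent: for a (3072, H) weight matrix, transposing first and reshaping gives the same (H, 32, 32, 3) array as reshaping first and moving the unit axis to the front. A quick sanity check (hidden size 10 chosen arbitrarily):

import numpy as np

W1 = np.random.randn(32 * 32 * 3, 10)                # (3072, hidden_size)
a = W1.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2)  # style of examples #1, #3, #4
b = W1.T.reshape(-1, 32, 32, 3)                      # style of examples #2 and #5
assert np.array_equal(a, b)                          # identical (10, 32, 32, 3) arrays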
Code example #6
                batch_size=50,
                update_rule='adam',
                optim_config={
                    'learning_rate': 1e-3,
                },
                verbose=True,
                print_every=20)
solver.train()

# Visualize Filters
# You can visualize the first-layer convolutional filters from the trained network
# by running the following:

from cs231n.vis_utils import visualize_grid

grid = visualize_grid(model.params['W1'].transpose(0, 2, 3, 1))
fig = plt.figure()
plt.imshow(grid.astype('uint8'))
plt.axis('off')
plt.gcf().set_size_inches(5, 5)
# plt.show()
fig.savefig('conv_filters.png', transparent=True)

# Spatial Batch Normalization
# We already saw that batch normalization is a very useful technique for training
# deep fully-connected networks. As proposed in the original paper [3], batch
# normalization can also be used for convolutional networks, but we need to tweak
# it a bit; the modification will be called "spatial batch normalization."

# Normally batch-normalization accepts inputs of shape (N, D) and produces
# outputs of shape (N, D), where we normalize across the minibatch dimension N.
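
The snippet cuts off here, but the standard way to implement spatial batch normalization is to reuse the (N, D) formulation by folding the spatial dimensions into the batch dimension: each of the C channels is normalized over all N*H*W values it takes in the minibatch. A minimal sketch of the forward pass (the function name and signature below are illustrative, not the assignment's API):

import numpy as np

def spatial_batchnorm_forward(x, gamma, beta, eps=1e-5):
    # x: (N, C, H, W); gamma, beta: per-channel scale/shift of shape (C,).
    N, C, H, W = x.shape
    # Fold N, H, W into one "sample" axis so each column is a channel:
    # (N, C, H, W) -> (N, H, W, C) -> (N*H*W, C).
    x_flat = x.transpose(0, 2, 3, 1).reshape(-1, C)
    mu = x_flat.mean(axis=0)                    # per-channel mean
    var = x_flat.var(axis=0)                    # per-channel variance
    x_hat = (x_flat - mu) / np.sqrt(var + eps)
    out_flat = gamma * x_hat + beta             # broadcasts over (N*H*W, C)
    # Undo the flattening to recover the (N, C, H, W) layout.
    return out_flat.reshape(N, H, W, C).transpose(0, 3, 1, 2)

The backward pass follows the same reshape bookkeeping around the vanilla batch-norm backward.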
Code example #7
def show_net_weights(net):
    W1 = net.params['W1']
    W1 = W1.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2)
    plt.imshow(visualize_grid(W1, padding=3).astype('uint8'))
    plt.gca().axis('off')
    plt.show()
Code example #8
File: train.py  Project: wuwuwuxxx/cs231n
data = get_CIFAR10_data()

model = FiveLayerConvNet(reg=0.0005)

solver = Solver(model, data,
                num_epochs=75, batch_size=64,
                update_rule='rmsprop',
                optim_config={
                  'learning_rate': 1e-2,
                },
                lr_decay=0.1,
                verbose=True, print_every=100)
solver.train()

train_acc = predict_train(model, data['X_train'], data['y_train'])
val_acc = predict(model, data['X_val'], data['y_val'])
test_acc = predict(model, data['X_test'], data['y_test'])
print(' train_acc:', train_acc, ' val_acc:', val_acc, ' test_acc:', test_acc)

plt.subplot(2, 1, 1)
p_train, = plt.plot(solver.train_acc_history)
p_val, = plt.plot(solver.val_acc_history)
plt.legend([p_train, p_val], ['training accuracy', 'validation accuracy'])

plt.subplot(2, 1, 2)
grid = visualize_grid(model.params['W1'].transpose(0, 2, 3, 1))
plt.imshow(grid.astype('uint8'))
plt.axis('off')
plt.gcf().set_size_inches(5, 5)
plt.show()
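
predict_train and predict are project-specific helpers not shown in this snippet. A plausible minimal version, assuming the usual CS231n model convention that model.loss(X) called without labels returns class scores (the name and batching here are guesses, not the wuwuwuxxx/cs231n code):

import numpy as np

def predict(model, X, y, batch_size=100):
    # Hypothetical helper: forward the model in batches and report the
    # fraction of examples whose argmax score matches the label.
    preds = []
    for i in range(0, X.shape[0], batch_size):
        scores = model.loss(X[i:i + batch_size])
        preds.append(np.argmax(scores, axis=1))
    return np.mean(np.concatenate(preds) == y)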
Code example #9
File: Two_layerNN.py  Project: yipeng5/CS231n_note
def show_network_weights(network):
    W1 = network.params['W1']
    W1 = W1.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2)
    plt.imshow(visualize_grid(W1, padding=1).astype('uint8'))
    plt.gca().axis('off')
    plt.show()
Code example #10
def visualize_filters(model):
    grid = visualize_grid(model.params['W1'].transpose(0, 2, 3, 1))
    plt.imshow(grid.astype('uint8'))
    plt.axis('off')
    plt.gcf().set_size_inches(10, 10)
    plt.show()
Code example #11
def show_net_weights(net):
    W1 = net.params["W1"]
    W1 = W1.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2)
    plt.imshow(visualize_grid(W1, padding=3).astype("uint8"))
    plt.gca().axis("off")
    plt.show()
Code example #12
def show_net_weights(net):
    W1 = net.params['W1']
    W1 = W1.reshape(3, 32, 32, -1).transpose(3, 1, 2, 0)
    plt.imshow(visualize_grid(W1, padding=3).astype('uint8'))
    plt.gca().axis('off')
Code example #13
File: convnet.py  Project: dusenberrymw/CS231n
print()
print('Best overall validation accuracy achieved during cross-validation: %f'
      % best['val_acc'])
print('Best hyperparameters: fs {init[filter_size]}, nf {init[num_filters]}, '
      'lr {train[learning_rate]:.4e}, reg {train[reg]:.4e}, '
      'epochs {train[num_epochs]:.2f}'.format(val_acc, train_acc, **hyperparameters))
print('Training took %fm (%fs)' % ((toc - tic) / 60, toc - tic))

# Plot the loss function and train / validation accuracies for best model
plt.subplot(2, 1, 1)
plt.plot(best['loss_history'])
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')

plt.subplot(2, 1, 2)
plt.plot(best['train_acc_history'])
plt.plot(best['val_acc_history'])
plt.legend(['Training accuracy', 'Validation accuracy'], loc='lower right')
plt.xlabel('Check')
plt.ylabel('Classification accuracy')
plt.show()

# Visualize weights
# We can visualize the convolutional weights from the first layer. If
# everything worked properly, these will usually be edges and blobs of
# various colors and orientations.
grid = visualize_grid(best['model']['W1'].transpose(0, 2, 3, 1))
plt.imshow(grid.astype('uint8'))
plt.show()