Code Example #1
File: test_visualize.py Project: 52nlp/nolearn
def test_plot_conv_activity(self, net_fitted, X_train):
    from nolearn.lasagne.visualize import plot_conv_activity
    plot_conv_activity(net_fitted.layers_['conv1'], X_train[:1])
    plot_conv_activity(net_fitted.layers_['conv2'], X_train[10:11],
                       figsize=(3, 4))
    plt.clf()
    plt.cla()
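The same call pattern recurs throughout these examples: plot_conv_activity takes a convolutional layer from the fitted net's layers_ dictionary plus a single input sample in bc01 format, and the figure is cleared afterwards. A minimal sketch of that pattern, assuming a fitted nolearn NeuralNet named net and a training array X_train (hypothetical names):

import matplotlib.pyplot as plt
from nolearn.lasagne.visualize import plot_conv_activity

# One sample in bc01 format: X_train[0:1] keeps a batch dimension of
# size 1, whereas X_train[0] would drop it.
x = X_train[0:1]

# Plot the activations produced by the 'conv1' layer for this sample;
# figsize is optional, as in the examples on this page.
plot_conv_activity(net.layers_['conv1'], x, figsize=(3, 4))

# Clear the figure so repeated calls do not accumulate plots.
plt.clf()
plt.cla()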
Code Example #2
def predict(path, debug=False):
    global net, image_size
    #    img = load_image(path)
    X = np.asarray([path])
    img = np.asarray([load_image(path)], dtype="float32")
    cats = load_classes()
    probs = net.predict_proba(X)
    if debug:
        for layer in net.layers:
            name = layer[0].__name__
            filename = os.path.splitext(path)[0]
            try:
                os.mkdir("debug/" + filename)
            except OSError:
                # the debug directory for this image may already exist
                pass
            if "2D" in name:
                real_name = layer[1]["name"]
                try:
                    plt = plot_conv_activity(net.layers_[real_name], img)
                    plt.savefig("debug/" + filename + "/" + real_name +
                                "_activity.png")
                    plt.close()
                    #plt = plot_conv_weights(net.layers_[real_name]) DISABLED: Eats too much memory, pretty useless anyhow
                    #plt.savefig("debug/"+filename+"/"+real_name+"_weights.png")
                except Exception as e:
                    print(e)

    for i, x in enumerate(probs[0]):
        print(cats[i].name, x)
    result = [(cats[np.argmax(x)].id, cats[np.argmax(x)].name, x[np.argmax(x)])
              for x in probs]
    return result[0]
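Unlike the test snippets, this example writes the activity plots to disk: the object returned by plot_conv_activity is used to call savefig and close. A stripped-down sketch of just that save step, assuming a fitted net named net with a conv layer registered as 'conv1', a preprocessed sample img of shape (1, channels, rows, cols), and an existing debug/ directory (all hypothetical):

from nolearn.lasagne.visualize import plot_conv_activity

# plot_conv_activity draws the figure and returns the plotting object
# used above, so the result can be saved and then closed to free memory.
plt = plot_conv_activity(net.layers_['conv1'], img)
plt.savefig('debug/conv1_activity.png')
plt.close()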
Code Example #3
File: train.py Project: Wajsbrot/bees
name = exp_name + '_' + str(date.today())
with open('models/conv_net_'+name+'.pkl', 'wb') as f:
    cPickle.dump(conv_net, f, -1)
conv_net.save_params_to('models/params_'+name)

# ----- Train set ----
train_predictions = conv_net.predict_proba(X)
make_submission_file(train_predictions[:sample_size], images_id[:sample_size],
                     output_filepath='models/training_'+name+'.csv')

# ----- Test set ----
X_test, _, images_id_test = load_numpy_arrays(args['test_file'])
print "Test:"
print "X_test.shape:", X_test.shape
predictions = conv_net.predict_proba(X_test)
make_submission_file(predictions, images_id_test,
                     output_filepath='submissions/submission_'+name+'.csv')

# ----- Make plots ----
plot_loss(conv_net, "models/loss_"+name+".png", show=False)

plot_conv_weights(conv_net.layers_[1], figsize=(4, 4))
plt.savefig('models/weights_'+name+'.png')

plot_conv_activity(conv_net.layers_[1], X[0:1])
plt.savefig('models/activity_'+name+'.png')

plot_occlusion(conv_net, X[:5], y[:5])
plt.savefig('models/occlusion_'+name+'.png')
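This example persists the model in two ways: pickling the whole conv_net object with cPickle and dumping only its parameters via save_params_to. A hedged sketch of the corresponding reload step, assuming nolearn's load_params_from counterpart to save_params_to and the same file names as above:

import cPickle

# Restore the complete pickled network ...
with open('models/conv_net_' + name + '.pkl', 'rb') as f:
    conv_net = cPickle.load(f)

# ... or rebuild the same architecture and load only the saved weights.
conv_net.load_params_from('models/params_' + name)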
Code Example #4
File: mnist_nolearn.py Project: wwt1990/MNIST
from nolearn.lasagne.visualize import draw_to_notebook, plot_loss
from nolearn.lasagne.visualize import plot_conv_weights, plot_conv_activity
from nolearn.lasagne.visualize import plot_occlusion, plot_saliency

draw_to_notebook(net0)
plot_loss(net0)
# The loss plot helps determine if we are overfitting:
# if the train loss is much lower than the validation loss,
# we should probably do something to regularize the net.

# visualize layer weights
plot_conv_weights(net0.layers_[1], figsize=(4, 4))
# If the weights just look like noise, we might have to do something
# (e.g. use more filters so that each can specialize better).

# visualize layers' activities
x = X_train[0:1] # an image in the bc01 format (so use X[0:1] instead of just X[0]).
plot_conv_activity(net0.layers_[1], x)

plot_occlusion(net0, X_train[:5], y_train[:5])
plot_saliency(net0, X_train[:5])


from nolearn.lasagne import PrintLayerInfo

layers1 = [
    (InputLayer, {'shape': (None, 1, 28, 28)}),

    (Conv2DLayer, {'num_filters': 32, 'filter_size': (3, 3)}),
    (MaxPool2DLayer, {'pool_size': (2, 2)}),

    (Conv2DLayer, {'num_filters': 64, 'filter_size': (3, 3)}),
    (Conv2DLayer, {'num_filters': 64, 'filter_size': (3, 3)}),
Code Example #5
File: test_lasagne.py Project: alobrix/Deep-Learning
def test_visualize_functions_with_cnn(mnist):
    # this test simply tests that no exception is raised when using
    # the plotting functions

    from nolearn.lasagne import NeuralNet
    from nolearn.lasagne.visualize import plot_conv_activity
    from nolearn.lasagne.visualize import plot_conv_weights
    from nolearn.lasagne.visualize import plot_loss
    from nolearn.lasagne.visualize import plot_occlusion

    X, y = mnist
    X_train, y_train = X[:100].reshape(-1, 1, 28, 28), y[:100]
    num_epochs = 3

    nn = NeuralNet(
        layers=[
            ('input', InputLayer),
            ('conv1', Conv2DLayer),
            ('conv2', Conv2DLayer),
            ('pool2', MaxPool2DLayer),
            ('conv3', Conv2DLayer),
            ('conv4', Conv2DLayer),
            ('pool4', MaxPool2DLayer),
            ('hidden1', DenseLayer),
            ('output', DenseLayer),
        ],
        input_shape=(None, 1, 28, 28),
        output_num_units=10,
        output_nonlinearity=softmax,
        more_params=dict(
            conv1_filter_size=(5, 5),
            conv1_num_filters=16,
            conv2_filter_size=(3, 3),
            conv2_num_filters=16,
            pool2_ds=(3, 3),
            conv3_filter_size=(3, 3),
            conv3_num_filters=16,
            conv4_filter_size=(3, 3),
            conv4_num_filters=16,
            pool4_ds=(2, 2),
            hidden1_num_units=16,
        ),
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,
        max_epochs=num_epochs,
    )

    nn.fit(X_train, y_train)

    plot_loss(nn)
    plot_conv_weights(nn.layers_['conv1'])
    plot_conv_weights(nn.layers_['conv2'], figsize=(1, 2))
    plot_conv_activity(nn.layers_['conv3'], X_train[:1])
    plot_conv_activity(nn.layers_['conv4'], X_train[10:11], figsize=(3, 4))
    plot_occlusion(nn, X_train[:1], y_train[:1])
    plot_occlusion(nn,
                   X_train[2:4],
                   y_train[2:4],
                   square_length=3,
                   figsize=(5, 5))

    # clear figures from memory
    plt.clf()
    plt.cla()
Code Example #6
File: test_visualize.py Project: felixlaumon/nolearn
    def plot_conv_activity(self, net, X, **kwargs):
        from nolearn.lasagne.visualize import plot_conv_activity

        plot_conv_activity(net.layers_["conv1"], X, **kwargs)
        plt.clf()
        plt.cla()