Example no. 1
# assumed imports for this snippet (exact OpenDeep module paths vary by version):
# from opendeep.models import Prototype, Dense, SoftmaxLayer
# from opendeep.optimization import AdaDelta
# from opendeep.data import MNIST
def run_mlp():
    # # define the model layers
    # layer1 = Dense(input_size=784, output_size=1000, activation='rectifier')
    # layer2 = Dense(inputs_hook=(1000, layer1.get_outputs()), output_size=1000, activation='rectifier')
    # classlayer3 = SoftmaxLayer(inputs_hook=(1000, layer2.get_outputs()), output_size=10, out_as_probs=False)
    # # add the layers to the prototype
    # mlp = Prototype(layers=[layer1, layer2, classlayer3])

    # test the new way to automatically fill in inputs_hook for models
    mlp = Prototype()
    mlp.add(Dense(input_size=784, output_size=1000, activation='rectifier', noise='dropout'))
    mlp.add(Dense(output_size=1500, activation='tanh', noise='dropout'))
    mlp.add(SoftmaxLayer(output_size=10))

    mnist = MNIST()

    optimizer = AdaDelta(model=mlp, dataset=mnist, epochs=10)
    optimizer.train()

    test_data, test_labels = mnist.test_inputs, mnist.test_targets
    test_data = test_data[:25]
    test_labels = test_labels[:25]
    # use the run function!
    yhat = mlp.run(test_data)
    print('-------')
    print('Prediction: %s' % str(yhat))
    print('Actual:     %s' % str(test_labels.astype('int32')))
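The snippet only prints the 25 predictions alongside the labels. A small helper like the hypothetical accuracy below turns that into a score; it assumes yhat holds class indices (as a SoftmaxLayer with out_as_probs=False would return), not probability rows.

import numpy as np

def accuracy(yhat, labels):
    # fraction of predicted class indices that match the true labels
    return float(np.mean(np.asarray(yhat) == np.asarray(labels).astype('int32')))

# e.g. print('Accuracy: %.2f' % accuracy(yhat, test_labels))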
Example no. 2
# assumed imports (older OpenDeep API; exact module paths vary by version):
# from opendeep.models import Prototype, BasicLayer, SoftmaxLayer
# from opendeep.optimization import AdaDelta
# from opendeep.data import MNIST, TEST
def run_mlp():
    # # define the model layers
    # layer1 = BasicLayer(input_size=784, output_size=1000, activation='rectifier')
    # layer2 = BasicLayer(inputs_hook=(1000, layer1.get_outputs()), output_size=1000, activation='rectifier')
    # classlayer3 = SoftmaxLayer(inputs_hook=(1000, layer2.get_outputs()), output_size=10, out_as_probs=False)
    # # add the layers to the prototype
    # mlp = Prototype(layers=[layer1, layer2, classlayer3])

    # test the new way to automatically fill in inputs_hook for models
    mlp = Prototype()
    mlp.add(BasicLayer(input_size=784, output_size=1000, activation="rectifier", noise="dropout"))
    mlp.add(BasicLayer(output_size=1500, activation="tanh", noise="dropout"))
    mlp.add(SoftmaxLayer(output_size=10))

    mnist = MNIST()

    optimizer = AdaDelta(model=mlp, dataset=mnist, n_epoch=10)
    optimizer.train()

    test_data, test_labels = mnist.getSubset(subset=TEST)
    test_data = test_data[:25].eval()
    test_labels = test_labels[:25].eval()
    # use the run function!
    yhat = mlp.run(test_data)
    print("-------")
    print("Prediction: %s" % str(yhat))
    print("Actual:     %s" % str(test_labels.astype("int32")))
Example no. 3
# assumed imports (older OpenDeep API; exact module paths vary by version):
# from opendeep.models import Prototype, BasicLayer, SoftmaxLayer
# from opendeep.optimization import AdaDelta
# from opendeep.data import MNIST, TEST
def create_mlp():
    # define the model layers
    relu_layer1 = BasicLayer(input_size=784, output_size=1000, activation='rectifier')
    relu_layer2 = BasicLayer(inputs_hook=(1000, relu_layer1.get_outputs()), output_size=1000, activation='rectifier')
    class_layer3 = SoftmaxLayer(inputs_hook=(1000, relu_layer2.get_outputs()), output_size=10, out_as_probs=False)
    # add the layers as a Prototype
    mlp = Prototype(layers=[relu_layer1, relu_layer2, class_layer3])

    mnist = MNIST()

    optimizer = AdaDelta(model=mlp, dataset=mnist, n_epoch=20)
    optimizer.train()

    test_data, test_labels = mnist.getSubset(TEST)
    test_data = test_data[:25].eval()
    test_labels = test_labels[:25].eval()

    # use the run function!
    preds = mlp.run(test_data)
    log.info('-------')
    log.info("predicted: %s", str(preds))
    log.info("actual:    %s", str(test_labels.astype('int32')))
Example no. 4
                     outputs=10,
                     out_as_probs=True)
    # create the mlp from the two layers
    mlp = Prototype(layers=[layer1, layer1_act, layer2])
    # define the loss function
    loss = Neg_LL(inputs=mlp.get_outputs(),
                  targets=vector("y", dtype="int64"),
                  one_hot=False)

    # make an optimizer to train it (AdaDelta is a good default)
    # optimizer = AdaDelta(model=mlp, dataset=mnist, n_epoch=20)
    optimizer = AdaDelta(dataset=mnist, loss=loss, epochs=20)
    # perform training!
    # optimizer.train()
    mlp.train(optimizer)

    # test it on some images!
    test_data, test_labels = mnist.test_inputs, mnist.test_targets
    test_data = test_data[:25]
    test_labels = test_labels[:25]
    # use the run function!
    preds = mlp.run(test_data)[0]
    print('-------')
    print(argmax(preds, axis=1))
    print(test_labels.astype('int32'))
    print()
    print()
    del mnist
    del mlp
    del optimizer
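Example no. 4 differs from the earlier ones by building the loss explicitly: Neg_LL with one_hot=False takes integer class labels instead of one-hot vectors, and conceptually averages -log p(correct class) over the batch. The NumPy sketch below shows that quantity (an illustration, not OpenDeep's implementation); the snippet's truncated imports would also need vector from theano.tensor and argmax from NumPy.

import numpy as np

def neg_log_likelihood(probs, targets):
    # probs: (n_samples, n_classes) with rows summing to 1; targets: integer labels
    probs = np.asarray(probs)
    targets = np.asarray(targets, dtype='int64')
    return float(-np.mean(np.log(probs[np.arange(len(targets)), targets])))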
Example no. 5
    # define the loss function
    loss = Neg_LL(inputs=mlp.get_outputs(), targets=vector("y", dtype="int64"), one_hot=False)

    # plot the loss
    if BOKEH_AVAILABLE:
        plot = Plot("mlp_mnist", monitor_channels=Monitor("loss", loss.get_loss()), open_browser=True)
    else:
        plot = None

    # make an optimizer to train it (AdaDelta is a good default)
    # optimizer = AdaDelta(model=mlp, dataset=mnist, n_epoch=20)
    optimizer = AdaDelta(dataset=mnist, loss=loss, epochs=20)
    # perform training!
    # optimizer.train()
    mlp.train(optimizer, plot=plot)

    # test it on some images!
    test_data, test_labels = mnist.test_inputs, mnist.test_targets
    test_data = test_data[:25]
    test_labels = test_labels[:25]
    # use the run function!
    preds = mlp.run(test_data)
    print('-------')
    print(argmax(preds, axis=1))
    print(test_labels.astype('int32'))
    print()
    print()
    del mnist
    del mlp
    del optimizer
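The BOKEH_AVAILABLE flag guards the live Plot so the example still runs without Bokeh installed. A generic way such a flag gets defined (a sketch of the pattern, not OpenDeep's exact code):

try:
    import bokeh  # only checking that the optional dependency is importable
    BOKEH_AVAILABLE = True
except ImportError:
    BOKEH_AVAILABLE = False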