Example #1
def run_mlp():
    # test the new way to automatically fill in inputs for models
    mlp = Prototype()
    x = ((None, 784), matrix("x"))
    mlp.add(Dense(inputs=x, outputs=1000, activation='rectifier'))
    mlp.add(Dense, outputs=1500, activation='tanh')
    mlp.add(Softmax, outputs=10, out_as_probs=False)

    # define our loss to optimize for the model (and the target variable)
    # targets from MNIST are int64 numbers 0-9
    y = lvector('y')
    loss = Neg_LL(inputs=mlp.models[-1].p_y_given_x, targets=y, one_hot=False)

    mnist = MNIST()

    optimizer = AdaDelta(model=mlp, loss=loss, dataset=mnist, epochs=10)
    optimizer.train()

    test_data, test_labels = mnist.test_inputs, mnist.test_targets
    test_data = test_data[:25]
    test_labels = test_labels[:25]
    # use the run function!
    yhat = mlp.run(test_data)
    print('-------')
    print('Prediction: %s' % str(yhat))
    print('Actual:     %s' % str(test_labels.astype('int32')))
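
The snippet above assumes Prototype, Dense, Softmax, Neg_LL, AdaDelta, MNIST, matrix and lvector are already imported. A minimal sketch of that preamble, where "framework" is only a placeholder for whatever package actually provides these classes (the module paths below are assumptions, not the library's documented layout):

from theano.tensor import matrix, lvector        # symbolic input and target variables
# Placeholder package name -- substitute the real module paths for your install.
from framework.models import Prototype, Dense, Softmax
from framework.optimization import AdaDelta, Neg_LL
from framework.data import MNIST
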
def create_mlp():
    # define the model layers
    relu_layer1 = Dense(input_size=784,
                        output_size=1000,
                        activation='rectifier')
    relu_layer2 = Dense(inputs_hook=(1000, relu_layer1.get_outputs()),
                        output_size=1000,
                        activation='rectifier')
    class_layer3 = SoftmaxLayer(inputs_hook=(1000, relu_layer2.get_outputs()),
                                output_size=10,
                                out_as_probs=False)
    # add the layers as a Prototype
    mlp = Prototype(layers=[relu_layer1, relu_layer2, class_layer3])

    mnist = MNIST()

    optimizer = AdaDelta(model=mlp, dataset=mnist, epochs=20)
    optimizer.train()

    test_data, test_labels = mnist.test_inputs[:25], mnist.test_targets[:25]

    # use the run function!
    preds = mlp.run(test_data)
    log.info('-------')
    log.info("predicted: %s", str(preds))
    log.info("actual:    %s", str(test_labels.astype('int32')))
Example #5
    # first need a target variable
    labels = T.lvector('ys')
    # negative log-likelihood for classification cost
    loss = Neg_LL(inputs=lenet.models[-1].p_y_given_x,
                  targets=labels,
                  one_hot=False)
    # make a monitor to view average accuracy per batch
    accuracy = Monitor(name='Accuracy',
                       expression=1 - T.mean(T.neq(lenet.models[-1].y_pred, labels)),
                       valid=True,
                       test=True)

    # Now grab our MNIST dataset. The version given here has each image as a single 784-dimensional vector.
    # because convolutions work over 2d, let's reshape our data into the (28,28) images they originally were
    # (only one channel because they are black/white images not rgb)
    mnist = MNIST()
    process_image = lambda img: np.reshape(img, (1, 28, 28))
    mnist.train_inputs = ModifyStream(mnist.train_inputs, process_image)
    mnist.valid_inputs = ModifyStream(mnist.valid_inputs, process_image)
    mnist.test_inputs = ModifyStream(mnist.test_inputs, process_image)

    # finally define our optimizer and train the model!
    optimizer = AdaDelta(model=lenet,
                         dataset=mnist,
                         loss=loss,
                         epochs=10,
                         batch_size=64)
    # train!
    optimizer.train(monitor_channels=accuracy)
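
The Accuracy monitor's expression is one minus the mean misclassification rate, i.e. the fraction of examples in a batch whose predicted class matches the label. The same quantity in plain NumPy, shown only for intuition (it is not part of the Theano graph):

import numpy as np

y_pred = np.array([3, 1, 4, 1, 5])
labels = np.array([3, 1, 4, 2, 5])
batch_accuracy = 1 - np.mean(y_pred != labels)   # 0.8: four of the five predictions match
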
Example #6
    ################
    # Now that our model is complete, let's define the loss function to optimize
    # first need a target variable
    labels = T.lvector('ys')
    # negative log-likelihood for classification cost
    loss = Neg_LL(inputs=lenet.models[-1].p_y_given_x, targets=labels, one_hot=False)
    # make a monitor to view average accuracy per batch
    accuracy = Monitor(name='Accuracy',
                       expression=1 - T.mean(T.neq(lenet.models[-1].y_pred, labels)),
                       valid=True, test=True)

    # Now grab our MNIST dataset. The version given here has each image as a single 784-dimensional vector.
    # because convolutions work over 2d, let's reshape our data into the (28,28) images they originally were
    # (only one channel because they are black/white images not rgb)
    mnist = MNIST()
    process_image = lambda img: np.reshape(img, (1, 28, 28))
    mnist.train_inputs = ModifyStream(mnist.train_inputs, process_image)
    mnist.valid_inputs = ModifyStream(mnist.valid_inputs, process_image)
    mnist.test_inputs = ModifyStream(mnist.test_inputs, process_image)

    # finally define our optimizer and train the model!
    optimizer = AdaDelta(
        model=lenet,
        dataset=mnist,
        loss=loss,
        epochs=10,
        batch_size=64
    )
    # train!
    optimizer.train(monitor_channels=accuracy)
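
The ModifyStream calls above are the whole preprocessing step: each flat 784-element MNIST vector is reshaped into a single-channel 28x28 image before it reaches the convolutional layers. The same operation in plain NumPy, as a standalone sketch independent of the streaming wrapper:

import numpy as np

flat = np.zeros(784, dtype='float32')    # one flattened 28x28 greyscale image
img = np.reshape(flat, (1, 28, 28))      # (channels, height, width) expected by the conv layers
assert img.shape == (1, 28, 28)
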
Example #7
print "Creating model..."
in_shape = (None, 28*28)
in_var = matrix('xs')
mlp = Prototype()
mlp.add(Dense(inputs=(in_shape, in_var), outputs=512, activation='relu'))
mlp.add(Noise, noise='dropout', noise_level=0.5)
mlp.add(Dense, outputs=512, activation='relu')
mlp.add(Noise, noise='dropout', noise_level=0.5)
mlp.add(Softmax, outputs=10, out_as_probs=False)

print "Training..."
target_var = lvector('ys')
loss = Neg_LL(inputs=mlp.models[-1].p_y_given_x, targets=target_var, one_hot=False)

optimizer = AdaDelta(model=mlp, loss=loss, dataset=data, epochs=10)
optimizer.train()

print "Predicting..."
predictions = mlp.run(data.test_inputs)

print "Accuracy: ", float(sum(predictions==data.test_targets)) / len(data.test_targets)


# now run the dataset from kaggle
test_features = np.array(pd.read_csv("test.csv"))
predictions = mlp.run(test_features)

# write one "row_number,predicted_digit" line per test image
with open('mlp_predictions', 'w') as f:
    for i, digit in enumerate(predictions):
        f.write(str(i + 1) + "," + str(digit) + "\n")
labels = T.lvector('ys')

loss = Neg_LL(inputs=lenet.models[-1].get_outputs(), targets=labels, one_hot=False)

#accuracy = Monitor(name="Accuracy", expression=1-(T.mean(T.neq(lenet.models[-1].y_pred, labels))),
#                   valid=True, test=True)


def greyscale_image(img):
    img = img.transpose(2, 1, 0)
    arr = np.average(img, 0).astype(int)
    return arr[None, :, :]
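# Shape check (sketch), assuming the image loader yields (height, width, channel) arrays:
# greyscale_image then returns a single-channel (1, width, height) array -- the (2, 1, 0)
# transpose swaps height and width before the channels are averaged away, e.g.
#   greyscale_image(np.random.randint(0, 256, size=(480, 640, 3))).shape == (1, 640, 480)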


def target_preprocess(img):
    x, y, _ = filter_test.find_goals(img)[0]
    return x/img.shape[0], y/img.shape[1]

data = ImageDataset("training_data/filtered_pics/", test_filter="**1.jpg", valid_filter="**2.jpg",
                    targets_preprocess=target_preprocess,
                    inputs_preprocess=greyscale_image)

print("Building optimizer")
optimizer = AdaDelta(model=lenet, loss=loss, dataset=data, epochs=10)
optimizer.train()

print("Predicting...")
predictions = lenet.run(data.test_inputs)

print("Accuracy: ", float(sum(predictions == data.test_targets)) / len(data.test_targets))