Example #1
0
def run_mlp():
    """Build a three-layer MLP with Prototype's auto-wiring, train it on MNIST,
    and print predictions for a handful of test digits."""
    # only the first layer needs an explicit input; Prototype chains the rest
    net = Prototype()
    input_spec = ((None, 784), matrix("x"))
    net.add(Dense(inputs=input_spec, outputs=1000, activation='rectifier'))
    net.add(Dense, outputs=1500, activation='tanh')
    net.add(Softmax, outputs=10, out_as_probs=False)

    # MNIST targets are int64 class labels 0-9, so the loss takes raw labels
    targets = lvector('y')
    nll = Neg_LL(inputs=net.models[-1].p_y_given_x, targets=targets, one_hot=False)

    dataset = MNIST()

    trainer = AdaDelta(model=net, loss=nll, dataset=dataset, epochs=10)
    trainer.train()

    sample_inputs = dataset.test_inputs[:25]
    sample_labels = dataset.test_targets[:25]
    # use the run function!
    predictions = net.run(sample_inputs)
    print('-------')
    print('Prediction: %s' % str(predictions))
    print('Actual:     %s' % str(sample_labels.astype('int32')))
def create_mlp():
    """Assemble a fixed three-layer MLP via inputs_hook wiring, train it on
    MNIST, and log predictions for the first 25 test digits."""
    # layer 1: 784 -> 1000 with rectifier activation
    hidden1 = Dense(input_size=784,
                    output_size=1000,
                    activation='rectifier')
    # layer 2 consumes layer 1's symbolic outputs through an inputs_hook
    hidden2 = Dense(inputs_hook=(1000, hidden1.get_outputs()),
                    output_size=1000,
                    activation='rectifier')
    # classification layer emits hard class indices rather than probabilities
    classifier = SoftmaxLayer(inputs_hook=(1000, hidden2.get_outputs()),
                              output_size=10,
                              out_as_probs=False)
    # stitch the layers together as a Prototype container
    mlp = Prototype(layers=[hidden1, hidden2, classifier])

    dataset = MNIST()

    trainer = AdaDelta(model=mlp, dataset=dataset, epochs=20)
    trainer.train()

    batch_inputs = dataset.test_inputs[:25]
    batch_targets = dataset.test_targets[:25]

    # use the run function!
    predictions = mlp.run(batch_inputs)
    log.info('-------')
    log.info("predicted: %s", str(predictions))
    log.info("actual:    %s", str(batch_targets.astype('int32')))
def run_mlp():
    """Train an auto-wired Dense/Softmax stack on MNIST and print sample
    predictions alongside the true labels."""
    # the first layer gets explicit inputs; later adds are auto-wired
    model = Prototype()
    model.add(Dense(inputs=((None, 784), matrix("x")), outputs=1000, activation='rectifier'))
    model.add(Dense, outputs=1500, activation='tanh')
    model.add(Softmax, outputs=10, out_as_probs=False)

    # targets from MNIST are int64 numbers 0-9 (not one-hot encoded)
    label_var = lvector('y')
    objective = Neg_LL(inputs=model.models[-1].p_y_given_x, targets=label_var, one_hot=False)

    data = MNIST()

    trainer = AdaDelta(model=model, loss=objective, dataset=data, epochs=10)
    trainer.train()

    sample_x = data.test_inputs[:25]
    sample_y = data.test_targets[:25]
    # use the run function!
    guesses = model.run(sample_x)
    print('-------')
    print('Prediction: %s' % str(guesses))
    print('Actual:     %s' % str(sample_y.astype('int32')))
Example #4
0
    def testAutoEncoder(self):
        """Verify that two weight-tied autoencoders stay in lockstep: equal
        outputs at init, and equal outputs after training either one."""
        try:
            s = (None, 3)
            x = matrix('xs')
            # encoder doubles the feature size; decoder maps back using W.T (tied weights)
            e = Dense(inputs=(s, x), outputs=int(s[1]*2), activation='sigmoid')
            W = e.get_param("W")
            d = Dense(inputs=e, outputs=s[1], params={'W': W.T}, activation='sigmoid')
            ae = Prototype([e, d])

            # second autoencoder built entirely from the first one's parameters
            x2 = matrix('xs1')
            W2 = d.get_param("W")
            e2 = Dense(inputs=(s, x2), outputs=int(s[1]*2), params={"W": W2.T, "b": e.get_param('b')}, activation='sigmoid')
            W3 = e2.get_param("W")
            d2 = Dense(inputs=e2, outputs=s[1], params={"W": W3.T, 'b': d.get_param('b')}, activation='sigmoid')
            ae2 = Prototype([e2, d2])

            probe = np.array([[.1,.5,.9]], dtype='float32')
            aerun1 = ae.run(probe)
            # BUG FIX: baseline for ae2 must come from ae2.run, not ae.run,
            # otherwise the parameter-sharing check below compares ae to itself.
            ae2run1 = ae2.run(probe)
            self.assertTrue(np.array_equal(aerun1, ae2run1))

            # 100 rows: ten constant-valued batches with values .1 .. .9 and 0
            data = np.vstack([np.ones((10,3), dtype='float32')*v
                              for v in (.1, .2, .3, .4, .5, .6, .7, .8, .9, 0)])
            dataset = NumpyDataset(data)
            sgd = SGD(dataset=dataset, model=ae, loss=BinaryCrossentropy(inputs=ae.get_outputs(), targets=x), epochs=5)
            sgd.train()

            aerun2 = ae.run(probe)
            ae2run2 = ae2.run(probe)

            # training ae must move its outputs, and ae2 must track it exactly
            self.assertFalse(np.array_equal(aerun2, aerun1))
            self.assertFalse(np.array_equal(ae2run2, ae2run1))
            self.assertTrue(np.array_equal(aerun2, ae2run2))

            sgd2 = SGD(dataset=dataset, model=ae2, loss=BinaryCrossentropy(inputs=ae2.get_outputs(), targets=x2), epochs=5)
            sgd2.train()

            aerun3 = ae.run(probe)
            ae2run3 = ae2.run(probe)

            # the same invariants hold after training the second autoencoder
            self.assertFalse(np.array_equal(aerun3, aerun2))
            self.assertFalse(np.array_equal(ae2run3, ae2run2))
            self.assertTrue(np.array_equal(aerun3, ae2run3))

        finally:
            # release graph references so later tests start from a clean slate
            del x, e, d, ae, x2, e2, d2, ae2
def create_mlp():
    """Hook three layers into a Prototype, fit it on MNIST with AdaDelta, and
    log predictions versus actual labels for 25 test samples."""
    # wire each layer to the previous one's symbolic outputs via inputs_hook
    layer_in = Dense(input_size=784, output_size=1000, activation='rectifier')
    layer_mid = Dense(inputs_hook=(1000, layer_in.get_outputs()), output_size=1000, activation='rectifier')
    layer_out = SoftmaxLayer(inputs_hook=(1000, layer_mid.get_outputs()), output_size=10, out_as_probs=False)
    # add the layers as a Prototype
    net = Prototype(layers=[layer_in, layer_mid, layer_out])

    mnist = MNIST()

    trainer = AdaDelta(model=net, dataset=mnist, epochs=20)
    trainer.train()

    batch_x, batch_y = mnist.test_inputs[:25], mnist.test_targets[:25]

    # use the run function!
    guesses = net.run(batch_x)
    log.info('-------')
    log.info("predicted: %s", str(guesses))
    log.info("actual:    %s", str(batch_y.astype('int32')))
Example #6
0
File: mlp_impl.py  Project: eggie5/kaggle
# Dropout MLP for the Kaggle digit-recognizer task: train on `data`, report
# test accuracy, then write predictions for Kaggle's test.csv.
# NOTE: converted Python 2 `print` statements to print() calls for consistency
# with the rest of the file, and switched file writing to a context manager.
mlp = Prototype()
mlp.add(Dense(inputs=(in_shape, in_var), outputs=512, activation='relu'))
mlp.add(Noise, noise='dropout', noise_level=0.5)
mlp.add(Dense, outputs=512, activation='relu')
mlp.add(Noise, noise='dropout', noise_level=0.5)
mlp.add(Softmax, outputs=10, out_as_probs=False)

print("Training...")
# targets are integer class labels, so one_hot=False
target_var = lvector('ys')
loss = Neg_LL(inputs=mlp.models[-1].p_y_given_x, targets=target_var, one_hot=False)

optimizer = AdaDelta(model=mlp, loss=loss, dataset=data, epochs=10)
optimizer.train()

print("Predicting...")
predictions = mlp.run(data.test_inputs)

# fraction of exact matches against the held-out targets
print("Accuracy: ", float(sum(predictions == data.test_targets)) / len(data.test_targets))


# now run the dataset from kaggle
test_features = np.array(pd.read_csv("test.csv"))
predictions = mlp.run(test_features)

# NOTE(review): the output filename contains embedded spaces ('mlp_  predictions');
# preserved as-is — confirm it is intentional.
with open('mlp_  predictions', 'w') as f:
    for i, digit in enumerate(predictions):
        f.write(str(i + 1) + "," + str(digit) + "\n")


print("done")
Example #7
0
    def testAutoEncoder(self):
        """Verify two weight-tied autoencoders produce identical outputs at
        initialization and remain identical after training either one."""
        try:
            s = (None, 3)
            x = matrix('xs')
            # encoder doubles the feature size; decoder maps back with W.T (tied weights)
            e = Dense(inputs=(s, x),
                      outputs=int(s[1] * 2),
                      activation='sigmoid')
            W = e.get_param("W")
            d = Dense(inputs=e,
                      outputs=s[1],
                      params={'W': W.T},
                      activation='sigmoid')
            ae = Prototype([e, d])

            # second autoencoder built entirely from the first one's parameters
            x2 = matrix('xs1')
            W2 = d.get_param("W")
            e2 = Dense(inputs=(s, x2),
                       outputs=int(s[1] * 2),
                       params={
                           "W": W2.T,
                           "b": e.get_param('b')
                       },
                       activation='sigmoid')
            W3 = e2.get_param("W")
            d2 = Dense(inputs=e2,
                       outputs=s[1],
                       params={
                           "W": W3.T,
                           'b': d.get_param('b')
                       },
                       activation='sigmoid')
            ae2 = Prototype([e2, d2])

            probe = np.array([[.1, .5, .9]], dtype='float32')
            aerun1 = ae.run(probe)
            # BUG FIX: baseline for ae2 must come from ae2.run, not ae.run,
            # otherwise the parameter-sharing check below compares ae to itself.
            ae2run1 = ae2.run(probe)
            self.assertTrue(np.array_equal(aerun1, ae2run1))

            # 100 rows: ten constant-valued batches with values .1 .. .9 and 0
            data = np.vstack([
                np.ones((10, 3), dtype='float32') * v
                for v in (.1, .2, .3, .4, .5, .6, .7, .8, .9, 0)
            ])
            dataset = NumpyDataset(data)
            sgd = SGD(dataset=dataset,
                      model=ae,
                      loss=BinaryCrossentropy(inputs=ae.get_outputs(),
                                              targets=x),
                      epochs=5)
            sgd.train()

            aerun2 = ae.run(probe)
            ae2run2 = ae2.run(probe)

            # training ae must move its outputs, and ae2 must track it exactly
            self.assertFalse(np.array_equal(aerun2, aerun1))
            self.assertFalse(np.array_equal(ae2run2, ae2run1))
            self.assertTrue(np.array_equal(aerun2, ae2run2))

            sgd2 = SGD(dataset=dataset,
                       model=ae2,
                       loss=BinaryCrossentropy(inputs=ae2.get_outputs(),
                                               targets=x2),
                       epochs=5)
            sgd2.train()

            aerun3 = ae.run(probe)
            ae2run3 = ae2.run(probe)

            # the same invariants hold after training the second autoencoder
            self.assertFalse(np.array_equal(aerun3, aerun2))
            self.assertFalse(np.array_equal(ae2run3, ae2run2))
            self.assertTrue(np.array_equal(aerun3, ae2run3))

        finally:
            # release graph references so later tests start from a clean slate
            del x, e, d, ae, x2, e2, d2, ae2
# Symbolic int64 vector of class labels used as the supervised targets.
labels = T.lvector('ys')

# Negative log-likelihood between lenet's final-layer outputs and the labels
# (one_hot=False: targets are plain integer classes, not one-hot vectors).
loss = Neg_LL(inputs=lenet.models[-1].get_outputs(), targets=labels, one_hot=False)

# Optional accuracy monitor, currently disabled.
#accuracy = Monitor(name="Accuracy", expression=1-(T.mean(T.neq(lenet.models[-1].y_pred, labels))),
#                   valid=True, test=True)

def greyscale_image(img):
    """Average an image's channels into one greyscale plane.

    Takes an array whose last axis is the channel axis (presumably H x W x C —
    TODO confirm against the dataset loader) and returns an int array with a
    leading singleton channel axis; note the transpose also swaps the two
    spatial axes, so the result is shaped (1, W, H).
    """
    channels_first = img.transpose(2, 1, 0)
    averaged = np.average(channels_first, 0).astype(int)
    return averaged[None, :, :]


def target_preprocess(img):
    """Extract the first detected goal position from an image and return its
    coordinates as fractions of the image dimensions."""
    goal_x, goal_y, _ = filter_test.find_goals(img)[0]
    # normalize by the first two axes of the image shape
    return goal_x / img.shape[0], goal_y / img.shape[1]

# Build the dataset: files matching **1.jpg form the test split and **2.jpg the
# validation split; inputs are greyscaled and targets become normalized goal
# coordinates via the preprocess callbacks.
data = ImageDataset("training_data/filtered_pics/", test_filter="**1.jpg", valid_filter="**2.jpg",
                    targets_preprocess=target_preprocess,
                    inputs_preprocess=greyscale_image)

print("Building optimizer")
# Train lenet for 10 epochs against the loss defined above.
optimizer = AdaDelta(model=lenet, loss=loss, dataset=data, epochs=10)
optimizer.train()

print("Predicting...")
predictions = lenet.run(data.test_inputs)

# Fraction of test predictions that exactly equal their targets.
print("Accuracy: ", float(sum(predictions == data.test_targets)) / len(data.test_targets))