Code example #1
    def test_concatenation(self):
        ls = MS.GradientDescent(lr=0.1)
        cost = MC.NegativeLogLikelihood()

        inp = ML.Input(2, 'inp')
        h1 = ML.Hidden(5, activation=MA.Tanh(), name="h1")
        h2 = ML.Hidden(5, activation=MA.Tanh(), name="h2")
        o = ML.SoftmaxClassifier(nbClasses=2,
                                 cost=cost,
                                 learningScenari=[ls],
                                 name="out")

        inp > h1
        inp > h2
        # C concatenates the outputs of h1 and h2 into a single layer
        c = ML.C([h1, h2], name="concat")
        mlp = c > o
        mlp.init()

        # the concatenation's width equals the sum of h1's and h2's widths
        self.assertEqual(c.getIntrinsicShape()[0],
                         h1.getIntrinsicShape()[0] + h2.getIntrinsicShape()[0])
        for i in xrange(10000):
            ii = i % len(self.xor_ins)
            miniBatch = [self.xor_ins[ii]]
            targets = [self.xor_outs[ii]]
            mlp["out"].train({
                "inp.inputs": miniBatch,
                "out.targets": targets
            })["out.drive.train"]

        for i in xrange(len(self.xor_ins)):
            self.assertEqual(
                mlp["out"].predict["test"]({
                    "inp.inputs": [self.xor_ins[i]]
                })["out.predict.test"], self.xor_outs[i])
Code example #2
    def test_composite(self):
        ls = MS.GradientDescent(lr=0.1)
        cost = MC.NegativeLogLikelihood()

        inp = ML.Input(2, 'inp')
        h1 = ML.Hidden(5, activation=MA.Tanh(), name="h1")
        h2 = ML.Hidden(5, activation=MA.Tanh(), name="h2")
        o = ML.SoftmaxClassifier(2,
                                 learningScenario=ls,
                                 costObject=cost,
                                 name="out")
        c = ML.Composite(name="Comp")

        inp > h1 > c
        inp > h2 > c
        mlp = c > o

        for i in xrange(10000):
            ii = i % len(self.xor_ins)
            mlp.train(o, inp=[self.xor_ins[ii]], targets=[self.xor_outs[ii]])

        self.assertEqual(mlp.predict(o, inp=[self.xor_ins[0]])["class"], 0)
        self.assertEqual(mlp.predict(o, inp=[self.xor_ins[1]])["class"], 1)
        self.assertEqual(mlp.predict(o, inp=[self.xor_ins[2]])["class"], 1)
        self.assertEqual(mlp.predict(o, inp=[self.xor_ins[3]])["class"], 0)
Code example #3
    def __init__(self, ls, cost):
        maxPool = MCONV.MaxPooling2D(2, 2)

        # a 3-channel 256x256 image input
        i = MCONV.Input(nbChannels=3, height=256, width=256, name='inp')
        # alternatively, an InputChanneler can take regular layers and
        # arrange them into several channels:
        # ichan = MCONV.InputChanneler(256, 256, name='inpChan')

        c1 = MCONV.Convolution2D(nbFilters=3,
                                 filterHeight=5,
                                 filterWidth=5,
                                 activation=MA.Tanh(),
                                 pooler=maxPool,
                                 name="conv1")

        c2 = MCONV.Convolution2D(nbFilters=3,
                                 filterHeight=5,
                                 filterWidth=5,
                                 activation=MA.Tanh(),
                                 pooler=maxPool,
                                 name="conv2")

        f = MCONV.Flatten(name="flat")
        h = ML.Hidden(5,
                      activation=MA.Tanh(),
                      decorators=[MD.GlorotTanhInit()],
                      regularizations=[MR.L1(0), MR.L2(0.0001)],
                      name="hid")
        o = ML.SoftmaxClassifier(2,
                                 decorators=[],
                                 learningScenario=ls,
                                 costObject=cost,
                                 name="out",
                                 regularizations=[])

        self.model = i > c1 > c2 > f > h > o
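The convolution examples also use an MCONV alias; judging from the class names, presumably:

import Mariana.convolution as MCONV

The class this constructor belongs to is not shown; a hypothetical instantiation, mirroring the other examples, would be ConvModel(MS.GradientDescent(lr=0.01), MC.NegativeLogLikelihood()), where ConvModel is an assumed name.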
Code example #4
    def __init__(self, ls, cost):
        maxPool = MCONV.MaxPooling2D(2, 2)

        i = MCONV.Input(nbChannels=1, height=28, width=28, name='inp')

        c1 = MCONV.Convolution2D(nbFilters=20,
                                 filterHeight=5,
                                 filterWidth=5,
                                 activation=MA.Tanh(),
                                 pooler=maxPool,
                                 name="conv1")

        c2 = MCONV.Convolution2D(nbFilters=50,
                                 filterHeight=5,
                                 filterWidth=5,
                                 activation=MA.Tanh(),
                                 pooler=maxPool,
                                 name="conv2")

        # needed for the transition to a fully connected layer
        f = MCONV.Flatten(name="flat")
        h = ML.Hidden(500,
                      activation=MA.Tanh(),
                      decorators=[],
                      regularizations=[],
                      name="hid")
        o = ML.SoftmaxClassifier(10,
                                 decorators=[],
                                 learningScenario=ls,
                                 costObject=cost,
                                 name="out",
                                 regularizations=[])

        self.model = i > c1 > c2 > f > h > o
        print self.model
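For reference, the shape arithmetic behind the Flatten/Hidden transition, assuming unpadded (valid) convolutions and non-overlapping 2x2 max pooling:

# 28x28 input -> 5x5 conv -> 24x24 -> 2x2 pool -> 12x12  (20 filters)
# 12x12       -> 5x5 conv ->  8x8  -> 2x2 pool ->  4x4   (50 filters)
# Flatten therefore hands 50 * 4 * 4 = 800 features to Hidden(500).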
Code example #5
    def test_optimizer_override(self):

        ls = MS.GradientDescent(lr=0.5)
        cost = MC.NegativeLogLikelihood()

        inp = ML.Input(1, 'inp')
        h = ML.Hidden(5,
                      activation=MA.Tanh(),
                      learningScenari=[MS.Fixed("b")],
                      name="h")
        o = ML.SoftmaxClassifier(
            2,
            learningScenari=[MS.GradientDescent(lr=0.5),
                             MS.Fixed("W")],
            cost=cost,
            name="out")
        net = inp > h > o
        net.init()

        # snapshot every parameter before training
        ow = o.getP('W').getValue()
        ob = o.getP('b').getValue()
        hw = h.getP('W').getValue()
        hb = h.getP('b').getValue()
        for x in xrange(1, 10):
            net["out"].train({
                "inp.inputs": [[1]],
                "out.targets": [1]
            })["out.drive.train"]

        self.assertTrue(sum(ow[0]) == sum(o.getP('W').getValue()[0]))
        self.assertTrue(sum(ob) != sum(o.getP('b').getValue()))
        self.assertTrue(sum(hb) == sum(h.getP('b').getValue()))
        self.assertTrue(sum(hw[0]) != sum(h.getP('W').getValue()[0]))
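In short, assuming MS.Fixed freezes the named parameter on its layer (and noting that the ls defined at the top of the test is never attached to any layer), the four assertions verify:

# out.W : unchanged (MS.Fixed("W") on the output layer)
# out.b : updated   (covered by the output's GradientDescent)
# h.b   : unchanged (MS.Fixed("b") on the hidden layer)
# h.W   : updated   (inherits the gradient descent scenario)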
Code example #6
def MLP(ls, cost):

    i = ML.Input(28 * 28, name='inp')
    h = ML.Hidden(500,
                  activation=MA.Tanh(),
                  decorators=[MD.GlorotTanhInit()],
                  regularizations=[MR.L1(0), MR.L2(0.0001)],
                  name="hid")
    o = ML.SoftmaxClassifier(10,
                             decorators=[MD.ZerosInit()],
                             learningScenario=ls,
                             costObject=cost,
                             name="out",
                             regularizations=[MR.L1(0),
                                              MR.L2(0.0001)])

    mlp = i > h > o

    return mlp
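A hypothetical call site, mirroring the hyperparameters used in the MNIST examples below:

mlp = MLP(MS.GradientDescent(lr=0.01), MC.NegativeLogLikelihood())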
Code example #7
File: tests.py Project: craftsliu/Mariana
    def trainMLP_xor(self):
        ls = MS.GradientDescent(lr=0.1)
        cost = MC.NegativeLogLikelihood()

        i = ML.Input(2, 'inp')
        h = ML.Hidden(4,
                      activation=MA.Tanh(),
                      decorators=[dec.GlorotTanhInit()],
                      regularizations=[MR.L1(0), MR.L2(0)])
        o = ML.SoftmaxClassifier(2,
                                 learningScenario=ls,
                                 costObject=cost,
                                 name="out")

        mlp = i > h > o

        self.xor_ins = numpy.array(self.xor_ins)
        self.xor_outs = numpy.array(self.xor_outs)
        # use 'n' for the loop counter; 'i' already names the input layer
        for n in xrange(1000):
            ii = n % len(self.xor_ins)
            mlp.train(o, inp=[self.xor_ins[ii]], targets=[self.xor_outs[ii]])

        return mlp
Code example #8
* automatically saves the model if the training halts because of an error or if the process is killed
* saves a log if the process dies unexpectedly
* training results and hyperparameter values are recorded to a file
* allows you to define custom stop criteria
* training info is printed at each epoch, including the best scores and the epochs at which they were achieved

"""

if __name__ == "__main__":

	# Let's define the network
	ls = MS.GradientDescent(lr=0.01)
	cost = MC.NegativeLogLikelihood()

	i = ML.Input(28 * 28, name='inp')
	h = ML.Hidden(500, activation=MA.Tanh(), decorators=[MD.GlorotTanhInit()], regularizations=[MR.L1(0), MR.L2(0.0001)], name="hid")
	o = ML.SoftmaxClassifier(10, learningScenario=ls, costObject=cost, name="out", regularizations=[MR.L1(0), MR.L2(0.0001)])

	mlp = i > h > o

	mlp.saveDOT("mnist_mlp")
	mlp.saveHTML("mnist_mlp")
	# And then map sets to the inputs and outputs of our network
	train_set, validation_set, test_set = load_mnist()

	trainData = MDM.Series(images=train_set[0], numbers=train_set[1])
	trainMaps = MDM.DatasetMapper()
	trainMaps.mapInput(i, trainData.images)
	trainMaps.mapOutput(o, trainData.numbers)

	testData = MDM.Series(images=test_set[0], numbers=test_set[1])
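The excerpt cuts off here; by symmetry with trainMaps, the test mapping plausibly continues along these lines (an assumption, not part of the excerpt):

	testMaps = MDM.DatasetMapper()
	testMaps.mapInput(i, testData.images)
	testMaps.mapOutput(o, testData.numbers)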
Code example #9
* automatically saves the model if the training halts because of an error or if the process is killed
* saves a log if the process dies unexpectedly
* training results and hyperparameter values are recorded to a file
* allows you to define custom stop criteria
* training info is printed at each epoch, including the best scores and the epochs at which they were achieved

"""

if __name__ == "__main__":

	# Let's define the network
	ls = MS.GradientDescent(lr=0.01)
	cost = MC.NegativeLogLikelihood()

	i = ML.Input(28 * 28, name='inp')
	h = ML.Hidden(500, activation=MA.Tanh(), initializations=[MI.GlorotTanhInit(), MI.ZeroBias()], regularizations=[MR.L1(0), MR.L2(0.0001)], name="hid")
	o = ML.SoftmaxClassifier(10, learningScenario=ls, costObject=cost, name="out", regularizations=[MR.L1(0), MR.L2(0.0001)])

	mlp = i > h > o

	mlp.saveDOT("mnist_mlp")
	mlp.saveHTML("mnist_mlp")
	# And then map sets to the inputs and outputs of our network
	train_set, validation_set, test_set = load_mnist()

	trainData = MDM.Series(images=train_set[0], numbers=train_set[1])
	trainMaps = MDM.DatasetMapper("train", miniBatchSize=500)
	trainMaps.mapInput(i, trainData.images)
	trainMaps.mapOutput(o, trainData.numbers)

	testData = MDM.Series(images=test_set[0], numbers=test_set[1])
Code example #10
File: mnist_mlp.py Project: rsumner31/Mariana-212
* saves a log if the process dies unexpectedly
* training results and hyperparameter values are recorded to a file
* allows you to define custom stop criteria
* training info is printed at each epoch, including the best scores and the epochs at which they were achieved

"""

if __name__ == "__main__":

    # Let's define the network
    ls = MS.GradientDescent(lr=0.01)
    cost = MC.NegativeLogLikelihood()

    i = ML.Input(28 * 28, name='inp')
    h = ML.Hidden(500,
                  activation=MA.Tanh(),
                  initializations=[MI.GlorotTanhInit()],
                  regularizations=[MR.L1(0), MR.L2(0.0001)],
                  name="hid")
    o = ML.SoftmaxClassifier(10,
                             learningScenario=ls,
                             costObject=cost,
                             name="out",
                             regularizations=[MR.L1(0),
                                              MR.L2(0.0001)])

    mlp = i > h > o

    mlp.saveDOT("mnist_mlp")
    mlp.saveHTML("mnist_mlp")
    # And then map sets to the inputs and outputs of our network
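The excerpt stops at the mapping comment; examples #8 and #9 show how the same file continues (modulo version differences in the DatasetMapper signature):

    train_set, validation_set, test_set = load_mnist()

    trainData = MDM.Series(images=train_set[0], numbers=train_set[1])
    trainMaps = MDM.DatasetMapper()
    trainMaps.mapInput(i, trainData.images)
    trainMaps.mapOutput(o, trainData.numbers)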
Code example #11
    def test_multiout_fctmixin(self):

        i = ML.Input(1, name='inp')
        o1 = ML.Autoencode(targetLayer=i,
                           activation=MA.Tanh(),
                           learningScenari=[MS.GradientDescent(lr=0.1)],
                           cost=MC.MeanSquaredError(),
                           name="out1")
        o2 = ML.Regression(1,
                           activation=MA.Tanh(),
                           learningScenari=[MS.GradientDescent(lr=0.2)],
                           cost=MC.MeanSquaredError(),
                           name="out2")

        i > o1
        ae = i > o2
        ae.init()

        preOut1 = ae["out1"].test({"inp.inputs": [[1]]})["out1.drive.test"]
        preOut2 = ae["out2"].test({
            "inp.inputs": [[1]],
            "out2.targets": [[1]]
        })["out2.drive.test"]
        ae["out1"].train({"inp.inputs": [[1]]})["out1.drive.train"]
        self.assertTrue(
            preOut1 > ae["out1"].test({"inp.inputs": [[1]]})["out1.drive.test"]
        )
        self.assertTrue(preOut2 == ae["out2"].test({
            "inp.inputs": [[1]],
            "out2.targets": [[1]]
        })["out2.drive.test"])

        preOut1 = ae["out1"].test({"inp.inputs": [[1]]})["out1.drive.test"]
        preOut2 = ae["out2"].test({
            "inp.inputs": [[1]],
            "out2.targets": [[1]]
        })["out2.drive.test"]
        ae["out2"].train({
            "inp.inputs": [[1]],
            "out2.targets": [[1]]
        })["out2.drive.train"]
        self.assertTrue(preOut1 == ae["out1"].test({"inp.inputs": [[1]]})
                        ["out1.drive.test"])
        self.assertTrue(preOut2 > ae["out2"].test({
            "inp.inputs": [[1]],
            "out2.targets": [[1]]
        })["out2.drive.test"])

        preOut1 = ae["out1"].test({"inp.inputs": [[1]]})["out1.drive.test"]
        preOut2 = ae["out2"].test({
            "inp.inputs": [[1]],
            "out2.targets": [[1]]
        })["out2.drive.test"]
        fct = ae["out1"].train + ae["out2"].train + ae["inp"].propagate["train"]
        ret = fct({"inp.inputs": [[1]], "out2.targets": [[1]]})
        self.assertEqual(len(ret), 3)
        self.assertTrue(
            preOut1 > ae["out1"].test({"inp.inputs": [[1]]})["out1.drive.test"]
        )
        self.assertTrue(preOut2 > ae["out2"].test({
            "inp.inputs": [[1]],
            "out2.targets": [[1]]
        })["out2.drive.test"])