Example #1
def ae2(data):
    """Build and train an autoencoder using an Autoencode output layer.

    The Autoencode layer is part of the graph and reconstructs the values of
    the layer it targets (here the input layer), so no separate target has to
    be supplied during training.

    :param data: sequence of input vectors of length 8.
    :return: tuple ``(ae, o)`` — the trained network and its output layer.
    """

    miniBatchSize = 1

    ls = MS.GradientDescent(lr=0.1)
    cost = MC.MeanSquaredError()

    # NOTE: keep a dedicated name for the input layer; the original code
    # called it `i` and then shadowed it with loop indices below.
    inputLayer = ML.Input(8, name='inp')
    h = ML.Hidden(3,
                  activation=MA.ReLU(),
                  initializations=[MI.SmallUniformWeights(),
                                   MI.ZeroBias()],
                  name="hid")
    o = ML.Autoencode(
        inputLayer.name,
        activation=MA.ReLU(),
        initializations=[MI.SmallUniformWeights(),
                         MI.ZeroBias()],
        learningScenario=ls,
        costObject=cost,
        name="out")

    ae = inputLayer > h > o

    # 2000 passes over the dataset, one mini-batch at a time.
    for _ in xrange(2000):
        for start in xrange(0, len(data), miniBatchSize):
            ae.train(o, inp=data[start:start + miniBatchSize])

    return ae, o
Example #2 — file: tests.py, project: Solertis/Mariana
    def test_ae(self) :
        """Train an 8->3->8 autoencoder on one-hot vectors and check that each
        reconstruction peaks at the same index as its input."""

        # One-hot vectors of length 8 (rows of the identity matrix).
        data = []
        for hot in xrange(8) :
            zeros = numpy.zeros(8)
            zeros[hot] = 1
            data.append(zeros)

        ls = MS.GradientDescent(lr = 0.1)
        cost = MC.MeanSquaredError()

        # Named `inp`/`hid`/`out` so the training loop indices below cannot
        # shadow the layers (the original reused `i` for both).
        inp = ML.Input(8, name = 'inp')
        hid = ML.Hidden(3, activation = MA.ReLU(), initializations=[MI.SmallUniformWeights(), MI.ZeroBias()], name = "hid")
        out = ML.Autoencode(targetLayerName = "inp", activation = MA.ReLU(), initializations=[MI.SmallUniformWeights(), MI.ZeroBias()], learningScenario = ls, costObject = cost, name = "out" )

        ae = inp > hid > out

        # 2000 passes over the dataset, one mini-batch at a time.
        miniBatchSize = 1
        for _ in xrange(2000) :
            for start in xrange(0, len(data), miniBatchSize) :
                ae.train(out, inp = data[start:start+miniBatchSize])

        res = ae.propagate(out, inp = data)["outputs"]
        for target, reconstruction in zip(data, res) :
            self.assertEqual( numpy.argmax(target), numpy.argmax(reconstruction))
Example #3
    def test_ae(self):
        """Train an 8->3->8 autoencoder on one-hot vectors (newer Mariana API)
        and check that each reconstruction peaks at the same index as its
        input."""
        powerOf2 = 3
        nbUnits = 2**powerOf2

        # One-hot vectors of length nbUnits.
        data = []
        for hot in xrange(nbUnits):
            zeros = numpy.zeros(nbUnits)
            zeros[hot] = 1
            data.append(zeros)

        ls = MS.GradientDescent(lr=0.1)
        cost = MC.MeanSquaredError()

        # Named `inp`/`hid`/`out` so the training loop indices below cannot
        # shadow the layers (the original reused `i` for both).
        inp = ML.Input(nbUnits, name='inp')
        hid = ML.Hidden(powerOf2,
                        activation=MA.ReLU(),
                        initializations=[
                            MI.Uniform('W', small=True),
                            MI.SingleValue('b', 0)
                        ],
                        name="hid")
        out = ML.Autoencode(targetLayer=inp,
                            activation=MA.ReLU(),
                            initializations=[
                                MI.Uniform('W', small=True),
                                MI.SingleValue('b', 0)
                            ],
                            learningScenari=[ls],
                            cost=cost,
                            name="out")

        ae = inp > hid > out
        ae.init()

        # 2000 passes over the dataset, one mini-batch at a time.  The loss
        # returned by train() is not needed here, so it is not kept.
        miniBatchSize = 1
        for _ in xrange(2000):
            for start in xrange(0, len(data), miniBatchSize):
                miniBatch = data[start:start + miniBatchSize]
                ae["out"].train({"inp.inputs": miniBatch})["out.drive.train"]

        res = ae["out"].propagate["test"]({
            "inp.inputs": data
        })["out.propagate.test"]
        for target, reconstruction in zip(data, res):
            self.assertEqual(numpy.argmax(target), numpy.argmax(reconstruction))
Example #4
    def test_multiout_fctmixin(self):
        """Two output layers sharing one input: training one head must not
        affect the other, and a '+'-mixed function drives them all at once."""

        inp = ML.Input(1, name='inp')
        out1 = ML.Autoencode(targetLayer=inp,
                             activation=MA.Tanh(),
                             learningScenari=[MS.GradientDescent(lr=0.1)],
                             cost=MC.MeanSquaredError(),
                             name="out1")
        out2 = ML.Regression(1,
                             activation=MA.Tanh(),
                             learningScenari=[MS.GradientDescent(lr=0.2)],
                             cost=MC.MeanSquaredError(),
                             name="out2")

        inp > out1
        ae = inp > out2
        ae.init()

        def score1():
            # Current test score of the autoencoder head.
            return ae["out1"].test({"inp.inputs": [[1]]})["out1.drive.test"]

        def score2():
            # Current test score of the regression head.
            return ae["out2"].test({
                "inp.inputs": [[1]],
                "out2.targets": [[1]]
            })["out2.drive.test"]

        # Training out1 improves out1 and leaves out2 untouched.
        preOut1, preOut2 = score1(), score2()
        ae["out1"].train({"inp.inputs": [[1]]})["out1.drive.train"]
        self.assertTrue(preOut1 > score1())
        self.assertTrue(preOut2 == score2())

        # Training out2 improves out2 and leaves out1 untouched.
        preOut1, preOut2 = score1(), score2()
        ae["out2"].train({
            "inp.inputs": [[1]],
            "out2.targets": [[1]]
        })["out2.drive.train"]
        self.assertTrue(preOut1 == score1())
        self.assertTrue(preOut2 > score2())

        # A mixed function trains both heads in a single call and returns one
        # result per combined function.
        preOut1, preOut2 = score1(), score2()
        fct = ae["out1"].train + ae["out2"].train + ae["inp"].propagate["train"]
        ret = fct({"inp.inputs": [[1]], "out2.targets": [[1]]})
        self.assertEqual(len(ret), 3)
        self.assertTrue(preOut1 > score1())
        self.assertTrue(preOut2 > score2())