Example #1
    def test_save_load_64h(self):
        import os
        import Mariana.network as MN

        ls = MS.GradientDescent(lr=0.1)
        cost = MC.NegativeLogLikelihood()

        i = ML.Input(2, 'inp')
        o = ML.SoftmaxClassifier(nbClasses=2,
                                 cost=cost,
                                 learningScenari=[ls],
                                 name="out")

        prev = i
        # Chain 64 hidden layers one after the other. The loop variable reuses the
        # name 'i' (the Input layer); that is safe only because 'prev' already holds
        # a reference to the input.
        for i in xrange(64):
            h = ML.Hidden(size=10, activation=MA.ReLU(), name="Hidden_%s" % i)
            prev > h
            prev = h

        mlp = prev > o
        mlp.init()
        mlp.save("test_save")

        mlp2 = MN.loadModel("test_save.mar")
        mlp2.init()

        v1 = mlp["out"].propagate["test"]({
            "inp.inputs": self.xor_ins
        })["out.propagate.test"]
        v2 = mlp2["out"].propagate["test"]({
            "inp.inputs": self.xor_ins
        })["out.propagate.test"]
        self.assertTrue((v1 == v2).all())
        os.remove('test_save.mar')
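These tests keep referring to self.xor_ins and self.xor_outs without showing where they come from. A minimal sketch of the kind of setUp fixture they presumably rely on (the exact values are an assumption, inferred from the XOR class labels asserted in Example #2):

    def setUp(self):
        # Hypothetical fixture, not shown in the snippets: the four XOR input pairs
        # and their class labels, kept as plain lists because several tests convert
        # them with numpy.array() themselves.
        self.xor_ins = [[0, 0], [0, 1], [1, 0], [1, 1]]
        self.xor_outs = [0, 1, 1, 0]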
Example #2
    def test_composite(self):
        ls = MS.GradientDescent(lr=0.1)
        cost = MC.NegativeLogLikelihood()

        inp = ML.Input(2, 'inp')
        h1 = ML.Hidden(5, activation=MA.Tanh(), name="h1")
        h2 = ML.Hidden(5, activation=MA.Tanh(), name="h2")
        o = ML.SoftmaxClassifier(2,
                                 learningScenario=ls,
                                 costObject=cost,
                                 name="out")
        c = ML.Composite(name="Comp")

        inp > h1 > c
        inp > h2 > c
        mlp = c > o

        for i in xrange(10000):
            ii = i % len(self.xor_ins)
            mlp.train(o, inp=[self.xor_ins[ii]], targets=[self.xor_outs[ii]])

        self.assertEqual(mlp.predict(o, inp=[self.xor_ins[0]])["class"], 0)
        self.assertEqual(mlp.predict(o, inp=[self.xor_ins[1]])["class"], 1)
        self.assertEqual(mlp.predict(o, inp=[self.xor_ins[2]])["class"], 1)
        self.assertEqual(mlp.predict(o, inp=[self.xor_ins[3]])["class"], 0)
Example #3
    def test_save_load_pickle(self) :
        import os
        import Mariana.network as MN

        ls = MS.GradientDescent(lr = 0.1)
        cost = MC.NegativeLogLikelihood()

        i = ML.Input(2, 'inp')
        h = Hidden_layerRef(i, 10, activation = MA.ReLU(), name = "Hidden_0.500705866892")
        o = ML.SoftmaxClassifier(2, learningScenario = ls, costObject = cost, name = "out")

        mlp = i > h > o
        
        self.xor_ins = numpy.array(self.xor_ins)
        self.xor_outs = numpy.array(self.xor_outs)
        for i in xrange(1000) :
            mlp.train(o, inp = self.xor_ins, targets = self.xor_outs )

        mlp.save("test_save")
        mlp2 = MN.loadModel("test_save.mar.mdl.pkl")
        
        o = mlp.outputs.values()[0]
        
        v1 = mlp.propagate( o.name, inp = self.xor_ins )["outputs"]
        v2 = mlp2.propagate( o.name, inp = self.xor_ins )["outputs"]
        self.assertEqual(numpy.sum(v1), numpy.sum(v2))
        self.assertEqual(mlp["Hidden_0.500705866892"].otherLayer.name, mlp2["Hidden_0.500705866892"].otherLayer.name)
        
        os.remove('test_save.mar.mdl.pkl')
Example #4
    def test_embedding(self):
        """the first 3 and the last 3 should be diametrically opposed"""
        data = [[0], [1], [2], [3], [4], [5]]
        targets = [0, 0, 0, 1, 1, 1]

        ls = MS.GradientDescent(lr=0.5)
        cost = MC.NegativeLogLikelihood()

        emb = ML.Embedding(1, 2, len(data), learningScenario=ls, name="emb")
        o = ML.SoftmaxClassifier(2,
                                 learningScenario=MS.Fixed(),
                                 costObject=cost,
                                 name="out")
        net = emb > o

        miniBatchSize = 2
        for i in xrange(2000):
            for i in xrange(0, len(data), miniBatchSize):
                net.train(o,
                          emb=data[i:i + miniBatchSize],
                          targets=targets[i:i + miniBatchSize])

        embeddings = emb.getEmbeddings()
        for i in xrange(0, len(data) / 2):
            v = numpy.dot(embeddings[i], embeddings[i + len(data) / 2])
            self.assertTrue(v < -1)
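A quick numpy aside (not part of the test) on why the dot product serves as the "diametrically opposed" check: two embeddings that point in roughly opposite directions, with norms above 1, give a dot product below -1, which is exactly what the assertion requires.

import numpy

a = numpy.array([1.2, -0.9])   # an embedding from the first group (made-up values)
b = numpy.array([-1.3, 1.0])   # an embedding from the second group (made-up values)
print numpy.dot(a, b)          # -2.46: well below -1, so the assertion would pass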
Example #5
    def __init__(self, ls, cost):
        maxPool = MCONV.MaxPooling2D(2, 2)

        #The input channeler will take regular layers and arrange them into several channels
        i = MCONV.Input(nbChannels=3, height=256, width=256, name='inp')
        #ichan = MCONV.InputChanneler(256, 256, name = 'inpChan')

        c1 = MCONV.Convolution2D(nbFilters=3,
                                 filterHeight=5,
                                 filterWidth=5,
                                 activation=MA.Tanh(),
                                 pooler=maxPool,
                                 name="conv1")

        c2 = MCONV.Convolution2D(nbFilters=3,
                                 filterHeight=5,
                                 filterWidth=5,
                                 activation=MA.Tanh(),
                                 pooler=maxPool,
                                 name="conv2")

        f = MCONV.Flatten(name="flat")
        h = ML.Hidden(5,
                      activation=MA.Tanh(),
                      decorators=[MD.GlorotTanhInit()],
                      regularizations=[MR.L1(0), MR.L2(0.0001)],
                      name="hid")
        o = ML.SoftmaxClassifier(2,
                                 decorators=[],
                                 learningScenario=ls,
                                 costObject=cost,
                                 name="out",
                                 regularizations=[])

        self.model = i > c1 > c2 > f > h > o
Example #6
        def getModel(inpSize, filterWidth):
            ls = MS.GradientDescent(lr=0.5)
            cost = MC.NegativeLogLikelihood()

            i = ML.Input((1, 1, inpSize), name='inp')

            c1 = MCONV.Convolution2D(numFilters=5,
                                     filterHeight=1,
                                     filterWidth=filterWidth,
                                     activation=MA.ReLU(),
                                     name="conv1")

            pool1 = MSAMP.MaxPooling2D(poolHeight=1, poolWidth=2, name="pool1")

            c2 = MCONV.Convolution2D(numFilters=10,
                                     filterHeight=1,
                                     filterWidth=filterWidth,
                                     activation=MA.ReLU(),
                                     name="conv2")

            pool2 = MSAMP.MaxPooling2D(poolHeight=1, poolWidth=2, name="pool2")

            h = ML.Hidden(5, activation=MA.ReLU(), name="hid")
            o = ML.SoftmaxClassifier(nbClasses=2,
                                     cost=cost,
                                     learningScenari=[ls],
                                     name="out")

            model = i > c1 > pool1 > c2 > pool2 > h > o
            model.init()
            return model
Example #7
    def testRecurrences(self):
        import Mariana.recurrence as MREC
        import Mariana.reshaping as MRES

        ls = MS.GradientDescent(lr=0.1)
        cost = MC.NegativeLogLikelihood()

        # for clas in [MREC.RecurrentDense, MREC.LSTM, MREC.GRU] :
        for clas in [MREC.RecurrentDense]:
            inp = ML.Input((None, 3), 'inp')
            r = clas(2, onlyReturnFinal=True, name="rec")
            reshape = MRES.Reshape((-1, 2), name="reshape")
            o = ML.SoftmaxClassifier(
                2,
                cost=cost,
                learningScenari=[MS.GradientDescent(lr=0.5)],
                name="out")
            net = inp > r > reshape > o
            net.init()
            inputs = [[[1, 1], [1, 0], [1, 1]], [[1, 0], [0, 1], [1, 0]]]

            # Snapshot the recurrent weights before training so the assertions below
            # can check that training actually updates them.
            oldWih = r.getP("W_in_to_hid").getValue()
            oldWhh = r.getP("W_hid_to_hid").getValue()
            for x in xrange(1, 100):
                net["out"].train({
                    "inp.inputs": inputs,
                    "out.targets": [1, 1, 1]
                })["out.drive.train"]
            self.assertTrue(
                oldWih.mean() != r.getP("W_in_to_hid").getValue().mean())
            self.assertTrue(
                oldWhh.mean() != r.getP("W_hid_to_hid").getValue().mean())
Example #8
    def test_embedding(self):
        """the first 3 and the last 3 should be diametrically opposed"""
        data = [[0], [1], [2], [3], [4], [5]]
        targets = [0, 0, 0, 1, 1, 1]

        ls = MS.GradientDescent(lr=0.5)
        cost = MC.NegativeLogLikelihood()

        inp = ML.Input(1, 'inp')
        emb = ML.Embedding(nbDimensions=2,
                           dictSize=len(data),
                           learningScenari=[ls],
                           name="emb")
        o = ML.SoftmaxClassifier(2,
                                 learningScenari=[MS.Fixed()],
                                 cost=cost,
                                 name="out")
        net = inp > emb > o
        net.init()

        miniBatchSize = 2
        for i in xrange(2000):
            for i in xrange(0, len(data), miniBatchSize):
                net["out"].train({
                    "inp.inputs": data[i:i + miniBatchSize],
                    "out.targets": targets[i:i + miniBatchSize]
                })["out.drive.train"]

        embeddings = emb.getP("embeddings").getValue()
        for i in xrange(0, len(data) / 2):
            v = numpy.dot(embeddings[i], embeddings[i + len(data) / 2])
            self.assertTrue(v < -1)
Example #9
    def test_optimizer_override(self):

        ls = MS.GradientDescent(lr=0.5)
        cost = MC.NegativeLogLikelihood()

        inp = ML.Input(1, 'inp')
        h = ML.Hidden(5,
                      activation=MA.Tanh(),
                      learningScenari=[MS.Fixed("b")],
                      name="h")
        o = ML.SoftmaxClassifier(
            2,
            learningScenari=[MS.GradientDescent(lr=0.5),
                             MS.Fixed("W")],
            cost=cost,
            name="out")
        net = inp > h > o
        net.init()

        ow = o.getP('W').getValue()
        ob = o.getP('b').getValue()
        hw = h.getP('W').getValue()
        hb = h.getP('b').getValue()
        for x in xrange(1, 10):
            net["out"].train({
                "inp.inputs": [[1]],
                "out.targets": [1]
            })["out.drive.train"]

        # out's W and h's b were declared Fixed above, so they must be unchanged after
        # training, while out's b and h's W must have been updated.
        self.assertTrue(sum(ow[0]) == sum(o.getP('W').getValue()[0]))
        self.assertTrue(sum(ob) != sum(o.getP('b').getValue()))
        self.assertTrue(sum(hb) == sum(h.getP('b').getValue()))
        self.assertTrue(sum(hw[0]) != sum(h.getP('W').getValue()[0]))
Example #10
    def test_concatenation(self):
        ls = MS.GradientDescent(lr=0.1)
        cost = MC.NegativeLogLikelihood()

        inp = ML.Input(2, 'inp')
        h1 = ML.Hidden(5, activation=MA.Tanh(), name="h1")
        h2 = ML.Hidden(5, activation=MA.Tanh(), name="h2")
        o = ML.SoftmaxClassifier(nbClasses=2,
                                 cost=cost,
                                 learningScenari=[ls],
                                 name="out")

        inp > h1
        inp > h2
        c = ML.C([h1, h2], name="concat")
        mlp = c > o
        mlp.init()

        self.assertEqual(c.getIntrinsicShape()[0],
                         h1.getIntrinsicShape()[0] + h2.getIntrinsicShape()[0])
        for i in xrange(10000):
            ii = i % len(self.xor_ins)
            miniBatch = [self.xor_ins[ii]]
            targets = [self.xor_outs[ii]]
            mlp["out"].train({
                "inp.inputs": miniBatch,
                "out.targets": targets
            })["out.drive.train"]

        for i in xrange(len(self.xor_ins)):
            self.assertEqual(
                mlp["out"].predict["test"]({
                    "inp.inputs": [self.xor_ins[i]]
                })["out.predict.test"], self.xor_outs[i])
Example #11
        def getModel(inpSize, filterWidth) :
            ls = MS.GradientDescent(lr = 0.5)
            cost = MC.NegativeLogLikelihood()
            
            pooler = MCONV.MaxPooling2D(1, 2)

            i = ML.Input(inpSize, name = 'inp')
            ichan = MCONV.InputChanneler(1, inpSize, name = 'inpChan')
            
            c1 = MCONV.Convolution2D( 
                nbFilters = 5,
                filterHeight = 1,
                filterWidth = filterWidth,
                activation = MA.ReLU(),
                pooler = pooler,
                name = "conv1"
            )

            c2 = MCONV.Convolution2D( 
                nbFilters = 10,
                filterHeight = 1,
                filterWidth = filterWidth,
                activation = MA.ReLU(),
                pooler = pooler,
                name = "conv2"
            )

            f = MCONV.Flatten(name = "flat")
            h = ML.Hidden(5, activation = MA.ReLU(), decorators = [], regularizations = [ ], name = "hid" )
            o = ML.SoftmaxClassifier(2, decorators = [], learningScenario = ls, costObject = cost, name = "out", regularizations = [ ] )
            
            model = i > ichan > c1 > c2 > f > h > o
            return model
Example #12
    def __init__(self, ls, cost):
        maxPool = MCONV.MaxPooling2D(2, 2)

        i = MCONV.Input(nbChannels=1, height=28, width=28, name='inp')

        c1 = MCONV.Convolution2D(nbFilters=20,
                                 filterHeight=5,
                                 filterWidth=5,
                                 activation=MA.Tanh(),
                                 pooler=maxPool,
                                 name="conv1")

        c2 = MCONV.Convolution2D(nbFilters=50,
                                 filterHeight=5,
                                 filterWidth=5,
                                 activation=MA.Tanh(),
                                 pooler=maxPool,
                                 name="conv2")

        #needed for the transition to a fully connected layer
        f = MCONV.Flatten(name="flat")
        h = ML.Hidden(500,
                      activation=MA.Tanh(),
                      decorators=[],
                      regularizations=[],
                      name="hid")
        o = ML.SoftmaxClassifier(10,
                                 decorators=[],
                                 learningScenario=ls,
                                 costObject=cost,
                                 name="out",
                                 regularizations=[])

        self.model = i > c1 > c2 > f > h > o
        print self.model
Example #13
def Perceptron(ls, cost):
    i = ML.Input(28 * 28, name='inp')
    o = ML.SoftmaxClassifier(10,
                             learningScenario=ls,
                             costObject=cost,
                             name="out",
                             regularizations=[MR.L1(0), MR.L2(0)])

    return i > o
Example #14
def getMLP(self, nbInputs=2, nbClasses=2):
    ls = MS.GradientDescent(lr=0.1)
    cost = MC.NegativeLogLikelihood()

    i = ML.Input(nbInputs, 'inp')
    h = ML.Hidden(size=6, activation=MA.ReLU(), name="Hidden_0.500705866892")
    o = ML.SoftmaxClassifier(nbClasses=nbClasses,
                             cost=cost,
                             learningScenari=[ls],
                             name="out")

    mlp = i > h > o
    mlp.init()
    return mlp
Example #15
    def trainMLP_xor(self) :
        ls = MS.GradientDescent(lr = 0.1)
        cost = MC.NegativeLogLikelihood()

        i = ML.Input(2, 'inp')
        h = ML.Hidden(10, activation = MA.ReLU(), name = "Hidden_0.500705866892")
        o = ML.SoftmaxClassifier(2, learningScenario = ls, costObject = cost, name = "out")

        mlp = i > h > o
        
        self.xor_ins = numpy.array(self.xor_ins)
        self.xor_outs = numpy.array(self.xor_outs)
        for i in xrange(1000) :
            mlp.train(o, inp = self.xor_ins, targets = self.xor_outs )

        return mlp
Example #16
def MLP(ls, cost):

    i = ML.Input(28 * 28, name='inp')
    h = ML.Hidden(500,
                  activation=MA.Tanh(),
                  decorators=[MD.GlorotTanhInit()],
                  regularizations=[MR.L1(0), MR.L2(0.0001)],
                  name="hid")
    o = ML.SoftmaxClassifier(10,
                             decorators=[MD.ZerosInit()],
                             learningScenario=ls,
                             costObject=cost,
                             name="out",
                             regularizations=[MR.L1(0),
                                              MR.L2(0.0001)])

    mlp = i > h > o

    return mlp
Example #17
    def __init__(self, ls, cost):
        maxPool = MCONV.MaxPooling2D(3, 3)
        i = MCONV.Input(nbChannels=1, height=100, width=100, name='inp')

        c1 = MCONV.Convolution2D(nbFilters=10,
                                 filterHeight=3,
                                 filterWidth=3,
                                 activation=MA.Max_norm(),
                                 pooler=maxPool,
                                 name="conv1")
        c3 = MCONV.Convolution2D(nbFilters=20,
                                 filterHeight=3,
                                 filterWidth=3,
                                 activation=MA.Max_norm(),
                                 pooler=maxPool,
                                 name="conv3")

        c2 = MCONV.Convolution2D(nbFilters=10,
                                 filterHeight=3,
                                 filterWidth=3,
                                 activation=MA.Max_norm(),
                                 pooler=maxPool,
                                 name="conv2")

        f = MCONV.Flatten(name="flat")
        h = ML.Hidden(2048,
                      activation=MA.Max_norm(),
                      decorators=[MD.BinomialDropout(0.7)],
                      regularizations=[],
                      name="hid")

        o = ML.SoftmaxClassifier(2,
                                 decorators=[],
                                 learningScenario=ls,
                                 costObject=cost,
                                 name="out",
                                 regularizations=[])

        self.model = i > c1 > c3 > c2 > f > h > o
Example #18
    def trainMLP_xor(self):
        ls = MS.GradientDescent(lr=0.1)
        cost = MC.NegativeLogLikelihood()

        i = ML.Input(2, 'inp')
        h = ML.Hidden(4,
                      activation=MA.Tanh(),
                      decorators=[dec.GlorotTanhInit()],
                      regularizations=[MR.L1(0), MR.L2(0)])
        o = ML.SoftmaxClassifier(2,
                                 learningScenario=ls,
                                 costObject=cost,
                                 name="out")

        mlp = i > h > o

        self.xor_ins = numpy.array(self.xor_ins)
        self.xor_outs = numpy.array(self.xor_outs)
        for i in xrange(1000):
            ii = i % len(self.xor_ins)
            mlp.train(o, inp=[self.xor_ins[ii]], targets=[self.xor_outs[ii]])

        return mlp
Example #19
    def __init__(self, inputSize, dictSize, patternSize, embSize, ls, cost):
        # pooler = MCONV.NoPooling()
        pooler = MCONV.MaxPooling2D(1, 2)

        emb = MCONV.Embedding(size=inputSize,
                              nbDimentions=embSize,
                              dictSize=dictSize,
                              name='Emb')

        c1 = MCONV.Convolution2D(nbFilters=1,
                                 filterHeight=1,
                                 filterWidth=patternSize / 2,
                                 activation=MA.ReLU(),
                                 pooler=pooler,
                                 name="conv1")

        c2 = MCONV.Convolution2D(nbFilters=4,
                                 filterHeight=1,
                                 filterWidth=patternSize / 2,
                                 activation=MA.ReLU(),
                                 pooler=MCONV.NoPooling(),
                                 name="conv2")

        f = MCONV.Flatten(name="flat")
        h = ML.Hidden(5,
                      activation=MA.ReLU(),
                      decorators=[],
                      regularizations=[],
                      name="hid")
        o = ML.SoftmaxClassifier(2,
                                 decorators=[],
                                 learningScenario=ls,
                                 costObject=cost,
                                 name="out",
                                 regularizations=[])

        self.model = emb > c1 > c2 > f > h > o
Example #20
    def __init__(self, inputSize, patternSize, ls, cost):
        # pooler = MCONV.NoPooling()
        pooler = MCONV.MaxPooling2D(1, 2)

        #The input channeler will take regular layers and arrange them into several channels
        i = ML.Input(inputSize, name='inp')
        ichan = MCONV.InputChanneler(1, inputSize, name='inpChan')

        c1 = MCONV.Convolution2D(nbFilters=5,
                                 filterHeight=1,
                                 filterWidth=patternSize / 2,
                                 activation=MA.ReLU(),
                                 pooler=pooler,
                                 name="conv1")

        c2 = MCONV.Convolution2D(nbFilters=10,
                                 filterHeight=1,
                                 filterWidth=patternSize / 2,
                                 activation=MA.ReLU(),
                                 pooler=MCONV.NoPooling(),
                                 name="conv2")

        f = MCONV.Flatten(name="flat")
        h = ML.Hidden(5,
                      activation=MA.ReLU(),
                      decorators=[],
                      regularizations=[],
                      name="hid")
        o = ML.SoftmaxClassifier(2,
                                 decorators=[],
                                 learningScenario=ls,
                                 costObject=cost,
                                 name="out",
                                 regularizations=[])

        self.model = i > ichan > c1 > c2 > f > h > o
Example #21
c2 = MCONV.Convolution2D(
	nbFilters = 15,
	filterHeight = 3,
	filterWidth = 3,
	activation = MA.Max_norm(),
	pooler = maxPool,
	name = "conv2"
)
fa = MCONV.Flatten(name="flata")
fb = MCONV.Flatten(name="flatb")
f = MCONV.Flatten(name = "flat")

h = ML.Hidden(2048, activation = MA.Max_norm(), decorators = [MD.BinomialDropout(0.75)], regularizations = [], name = "hid" )
passa = ML.Hidden(1500, activation = MA.Pass(), decorators = [MD.BinomialDropout(0.5)], regularizations = [], name = "pass1" )
passb = ML.Hidden(1500, activation = MA.Pass(), decorators = [MD.BinomialDropout(0.5)], regularizations = [], name = "pass2" )
h2 = ML.Hidden(2048, activation = MA.Max_norm(), decorators = [MD.BinomialDropout(0.75)], regularizations = [], name = "hid2" )
o = ML.SoftmaxClassifier(2, decorators = [], learningScenario = ls, costObject = cost, name = "out", regularizations = [] )

model = i > c1 > c3 > c2 > f > h > h2 > o
c1 > fa > passa > h > h2 > o
c2 > fb > passb > h > h2 > o

tscore = []
vscore = []
tdata = load_data(trainfile)
vdata = load_data(validfile)
vdata = (center(vdata[0]), vdata[1])
test = load_data(testfile)
test = center(test)

epoch = 0
permutate = [i for i in xrange(len(tdata[0]))]
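The script is cut off here. A sketch of how such a permutation list is typically used (an assumption, not the original script's continuation): shuffle the minibatch order every epoch and reuse the model.train(o, inp=..., targets=...) call pattern seen in the other examples. The inp keyword and the hyper-parameter values below are placeholders, since the input layer's definition is not shown in this snippet.

import random

miniBatchSize = 32   # hypothetical value
nbEpochs = 100       # hypothetical value
while epoch < nbEpochs:
    random.shuffle(permutate)                     # new minibatch order each epoch
    for start in xrange(0, len(permutate), miniBatchSize):
        idx = permutate[start:start + miniBatchSize]
        miniIns = [tdata[0][j] for j in idx]
        miniOuts = [tdata[1][j] for j in idx]
        model.train(o, inp=miniIns, targets=miniOuts)
    epoch += 1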
Example #22
MSET.VERBOSE = False

#The first 3 and the last 3 should end up diametrically opposed
data = [[0, 0], [1, 0], [2, 0], [3, 0], [4, 0], [5, 0]]
targets = [0, 0, 0, 1, 1, 1]

ls = MS.GradientDescent(lr=0.5)
cost = MC.NegativeLogLikelihood()

emb = ML.Embedding(2,
                   nbDimentions=2,
                   dictSize=len(data),
                   learningScenario=ls,
                   name="emb")
o = ML.SoftmaxClassifier(2,
                         learningScenario=MS.Fixed(),
                         costObject=cost,
                         name="out")
net = emb > o

miniBatchSize = 2
print "before:"
net.init()

print emb.getEmbeddings()

for i in xrange(2000):
    for i in xrange(0, len(data), miniBatchSize):
        net.train(o,
                  emb=data[i:i + miniBatchSize],
                  targets=targets[i:i + miniBatchSize])
Example #23
* saves a log if the process dies unexpectedly
* training results and hyperparameter values are recorded to a file
* allows you to define custom stop criteria
* training info is printed at each epoch, including the best scores and the epochs at which they were achieved

"""

if __name__ == "__main__":

	# Let's define the network
	ls = MS.GradientDescent(lr=0.01)
	cost = MC.NegativeLogLikelihood()

	i = ML.Input(28 * 28, name='inp')
	h = ML.Hidden(500, activation=MA.Tanh(), decorators=[MD.GlorotTanhInit()], regularizations=[MR.L1(0), MR.L2(0.0001)], name="hid")
	o = ML.SoftmaxClassifier(10, learningScenario=ls, costObject=cost, name="out", regularizations=[MR.L1(0), MR.L2(0.0001)])

	mlp = i > h > o

	mlp.saveDOT("mnist_mlp")
	mlp.saveHTML("mnist_mlp")
	# And then map sets to the inputs and outputs of our network
	train_set, validation_set, test_set = load_mnist()

	trainData = MDM.Series(images=train_set[0], numbers=train_set[1])
	trainMaps = MDM.DatasetMapper()
	trainMaps.mapInput(i, trainData.images)
	trainMaps.mapOutput(o, trainData.numbers)

	testData = MDM.Series(images=test_set[0], numbers=test_set[1])
	testMaps = MDM.DatasetMapper()
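The feature list at the top of this example mentions custom stop criteria. As a plain-Python illustration of the idea only (a sketch of the concept, not Mariana's actual trainer API):

class EarlyStop(object):
    """Toy stop criterion: stop once the validation score has not improved
    for `patience` consecutive epochs."""

    def __init__(self, patience):
        self.patience = patience
        self.best = float("inf")
        self.bad = 0

    def shouldStop(self, validationScore):
        if validationScore < self.best:
            self.best = validationScore
            self.bad = 0
        else:
            self.bad += 1
        return self.bad >= self.patience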
Example #24
            data[i][start:start+patternSize] = patternC2
            targets.append(2)
        else :
            targets.append(0)

    targets = numpy.asarray(targets, dtype=theano.config.floatX)
    return data, targets

if __name__ == "__main__" :
    examples, targets = makeDataset(300, 100, 10)

    ls = MS.GradientDescent(lr = 0.01)
    cost = MC.NegativeLogLikelihood()

    i = ML.Input(100, 'inp')
    h1 = ML.Hidden(50, activation = MA.ReLU(), decorators = [MD.BatchNormalization()])
    h2 = ML.Hidden(2, activation = MA.Softmax())
    o = ML.SoftmaxClassifier(3, learningScenario = ls, costObject = cost, name = "out")

    mlp = i > h1 > h2 > o

    for k in xrange(100) :
        for example, target in zip(examples, targets) :
            mlp.train(o, inp=[example], targets=[target])
    
    nbErr = 0
    for example, target in zip(examples, targets) :
        if target != mlp.classify(o, inp=[example])["class"] :
            nbErr += 1

    print "Nb Errors: %s/%s (%s%%) " % (nbErr, len(targets), float(nbErr)/len(targets) * 100)