Example #1
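The snippets below are Python 2 code (hence xrange and the print statement) written against the Mariana framework, and they rely on module aliases imported elsewhere in the original files. The exact import block is an assumption inferred from the identifiers used in the snippets; a plausible version:

# Assumed aliases -- inferred from the snippets, not present in the original files.
import numpy
import Mariana.activations as MA
import Mariana.costs as MC
import Mariana.decorators as MD
import Mariana.initializations as MI
import Mariana.layers as ML
import Mariana.regularizations as MR
import Mariana.scenari as MS
import Mariana.convolution as MCONV

(Mariana.network as MN is imported explicitly in Examples #9 and #10; the MSAMP alias in Example #7 presumably points at a pooling/sampling module and is left out as unverified.)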
    def test_ae(self) :

        data = []
        for i in xrange(8) :
            zeros = numpy.zeros(8)
            zeros[i] = 1
            data.append(zeros)

        ls = MS.GradientDescent(lr = 0.1)
        cost = MC.MeanSquaredError()

        i = ML.Input(8, name = 'inp')
        h = ML.Hidden(3, activation = MA.ReLU(), initializations=[MI.SmallUniformWeights(), MI.ZeroBias()], name = "hid")
        o = ML.Autoencode(targetLayerName = "inp", activation = MA.ReLU(), initializations=[MI.SmallUniformWeights(), MI.ZeroBias()], learningScenario = ls, costObject = cost, name = "out" )

        ae = i > h > o

        miniBatchSize = 1
        for e in xrange(2000) :
            for i in xrange(0, len(data), miniBatchSize) :
                ae.train(o, inp = data[i:i+miniBatchSize])

        res = ae.propagate(o, inp = data)["outputs"]
        for i in xrange(len(res)) :
            self.assertEqual( numpy.argmax(data[i]), numpy.argmax(res[i]))
Example #2
def ae2(data):
    """This one uses an Autoencode layer. This layer is a part of the graph and does not need a specific traget"""

    miniBatchSize = 1

    ls = MS.GradientDescent(lr=0.1)
    cost = MC.MeanSquaredError()

    i = ML.Input(8, name='inp')
    h = ML.Hidden(3,
                  activation=MA.ReLU(),
                  initializations=[MI.SmallUniformWeights(),
                                   MI.ZeroBias()],
                  name="hid")
    o = ML.Autoencode(
        i.name,
        activation=MA.ReLU(),
        initializations=[MI.SmallUniformWeights(),
                         MI.ZeroBias()],
        learningScenario=ls,
        costObject=cost,
        name="out")

    ae = i > h > o
    # ae.init()
    # o.train.printGraph()
    for e in xrange(2000):
        for i in xrange(0, len(data), miniBatchSize):
            ae.train(o, inp=data[i:i + miniBatchSize])

    return ae, o
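
A minimal driver for ae2, assuming the one-hot dataset and the propagate call from Example #1; nothing below appears in the original file:

# Hypothetical driver, modeled on the test in Example #1.
data = [numpy.eye(8)[k] for k in xrange(8)]       # the same one-hot vectors as Example #1
ae, out = ae2(data)
res = ae.propagate(out, inp=data)["outputs"]      # propagate call as in Example #1
for k in xrange(len(res)):
    assert numpy.argmax(data[k]) == numpy.argmax(res[k])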
Example #3
def ae1(data):
    '''This one uses a Regression layer. This layer needs an explicit target'''

    miniBatchSize = 2

    ls = MS.GradientDescent(lr=0.1)
    cost = MC.MeanSquaredError()

    i = ML.Input(8, name='inp')
    h = ML.Hidden(3,
                  activation=MA.ReLU(),
                  initializations=[MI.SmallUniformWeights(),
                                   MI.ZeroBias()],
                  name="hid")
    o = ML.Regression(
        8,
        activation=MA.ReLU(),
        initializations=[MI.SmallUniformWeights(),
                         MI.ZeroBias()],
        learningScenario=ls,
        costObject=cost,
        name="out")

    ae = i > h > o

    for e in xrange(1000):
        for i in xrange(0, len(data), miniBatchSize):
            ae.train(o,
                     inp=data[i:i + miniBatchSize],
                     targets=data[i:i + miniBatchSize])

    return ae, o
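
Side by side, the only training-time difference between the two versions is the explicit target: the Autoencode output of ae2 reads its target off the layer it was given, while the Regression output of ae1 must receive the reconstruction target at every call.

# Both calls appear in the two functions above; batch is one mini-batch slice of data.
ae.train(o, inp=batch)                  # ae2: Autoencode, target implied by the graph
ae.train(o, inp=batch, targets=batch)   # ae1: Regression, target passed explicitly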
Example #4
    def test_multiinputs(self):
        ls = MS.GradientDescent(lr=0.1)

        inpA = ML.Embedding(2, 2, 2, name="IA")
        inpB = ML.Input(2, name="IB")
        inpNexus = ML.Composite(name="InputNexus")

        h1 = ML.Hidden(32,
                       activation=MA.ReLU(),
                       decorators=[],
                       regularizations=[],
                       name="Fully-connected1")

        o = ML.Regression(2,
                          decorators=[],
                          activation=MA.ReLU(),
                          learningScenario=ls,
                          costObject=MC.CrossEntropy(),
                          name="Out",
                          regularizations=[])

        inpA > inpNexus
        inpB > inpNexus
        m = inpNexus > h1 > o
        m.init()
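
The test stops at m.init(), so the feeding convention for the two inputs is not shown. Assuming Composite merges (concatenates) the outputs of inpA and inpB, and assuming inputs are passed as keyword arguments named after the input layers as in the single-input examples, a training call might look like the following sketch; every array and keyword below is an assumption:

# Hypothetical call -- keyword names "IA"/"IB" mirror the layer names above.
indexBatch = numpy.asarray([[0, 1], [1, 0]])            # integer indices for the Embedding input
featureBatch = numpy.asarray([[0.5, 0.1], [0.2, 0.9]])  # real-valued features for the Input layer
targetBatch = numpy.asarray([0, 1])
m.train(o, IA=indexBatch, IB=featureBatch, targets=targetBatch)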
Example #5
    def test_ae(self):

        data = []
        for i in xrange(8):
            zeros = numpy.zeros(8)
            zeros[i] = 1
            data.append(zeros)

        ls = MS.GradientDescent(lr=0.1)
        cost = MC.MeanSquaredError()

        i = ML.Input(8, name='inp')
        h = ML.Hidden(3, activation=MA.ReLU(), name="hid")
        o = ML.Regression(8,
                          activation=MA.ReLU(),
                          learningScenario=ls,
                          costObject=cost,
                          name="out")

        ae = i > h > o

        miniBatchSize = 2
        for e in xrange(2000):
            for i in xrange(0, len(data), miniBatchSize):
                ae.train(o,
                         inp=data[i:i + miniBatchSize],
                         targets=data[i:i + miniBatchSize])

        res = ae.propagate(o, inp=data)["outputs"]
        for i in xrange(len(res)):
            self.assertEqual(numpy.argmax(data[i]), numpy.argmax(res[i]))
Example #6
        def getModel(inpSize, filterWidth) :
            ls = MS.GradientDescent(lr = 0.5)
            cost = MC.NegativeLogLikelihood()
            
            pooler = MCONV.MaxPooling2D(1, 2)

            i = ML.Input(inpSize, name = 'inp')
            ichan = MCONV.InputChanneler(1, inpSize, name = 'inpChan')
            
            c1 = MCONV.Convolution2D( 
                nbFilters = 5,
                filterHeight = 1,
                filterWidth = filterWidth,
                activation = MA.ReLU(),
                pooler = pooler,
                name = "conv1"
            )

            c2 = MCONV.Convolution2D( 
                nbFilters = 10,
                filterHeight = 1,
                filterWidth = filterWidth,
                activation = MA.ReLU(),
                pooler = pooler,
                name = "conv2"
            )

            f = MCONV.Flatten(name = "flat")
            h = ML.Hidden(5, activation = MA.ReLU(), decorators = [], regularizations = [ ], name = "hid" )
            o = ML.SoftmaxClassifier(2, decorators = [], learningScenario = ls, costObject = cost, name = "out", regularizations = [ ] )
            
            model = i > ichan > c1 > c2 > f > h > o
            return model
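
getModel returns the assembled graph without training it. With the old-style interface used here, training presumably goes through model.train; whether train accepts a layer name the way propagate does in Example #9 is an assumption, as are the arrays below:

model = getModel(inpSize=100, filterWidth=5)
X = numpy.random.randn(16, 100)                  # hypothetical inputs of width inpSize
y = numpy.random.randint(0, 2, 16)               # hypothetical binary labels
model.train("out", inp=X, targets=y)             # name-based reference, as in Example #9's propagate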
Example #7
        def getModel(inpSize, filterWidth):
            ls = MS.GradientDescent(lr=0.5)
            cost = MC.NegativeLogLikelihood()

            i = ML.Input((1, 1, inpSize), name='inp')

            c1 = MCONV.Convolution2D(numFilters=5,
                                     filterHeight=1,
                                     filterWidth=filterWidth,
                                     activation=MA.ReLU(),
                                     name="conv1")

            pool1 = MSAMP.MaxPooling2D(poolHeight=1, poolWidth=2, name="pool1")

            c2 = MCONV.Convolution2D(numFilters=10,
                                     filterHeight=1,
                                     filterWidth=filterWidth,
                                     activation=MA.ReLU(),
                                     name="conv2")

            pool2 = MSAMP.MaxPooling2D(poolHeight=1, poolWidth=2, name="pool2")

            h = ML.Hidden(5, activation=MA.ReLU(), name="hid")
            o = ML.SoftmaxClassifier(nbClasses=2,
                                     cost=cost,
                                     learningScenari=[ls],
                                     name="out")

            model = i > c1 > pool1 > c2 > pool2 > h > o
            model.init()
            return model
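
This version uses the newer interface (nbClasses, learningScenari, model.init()), so driving it presumably follows the dictionary style of Examples #8 and #10. A sketch with hypothetical X and y:

model = getModel(inpSize=100, filterWidth=5)
X = numpy.random.randn(16, 1, 1, 100)            # hypothetical batch matching Input((1, 1, inpSize))
y = numpy.random.randint(0, 2, 16)               # hypothetical binary labels
model["out"].train({"inp.inputs": X, "out.targets": y})["out.drive.train"]
preds = model["out"].propagate["test"]({"inp.inputs": X})["out.propagate.test"]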
Example #8
    def test_ae_reg(self):
        powerOf2 = 3
        nbUnits = 2**powerOf2

        data = []
        for i in xrange(nbUnits):
            zeros = numpy.zeros(nbUnits)
            zeros[i] = 1
            data.append(zeros)

        ls = MS.GradientDescent(lr=0.1)
        cost = MC.MeanSquaredError()

        i = ML.Input(nbUnits, name='inp')
        h = ML.Hidden(powerOf2,
                      activation=MA.ReLU(),
                      initializations=[
                          MI.Uniform('W', small=True),
                          MI.SingleValue('b', 0)
                      ],
                      name="hid")
        o = ML.Regression(nbUnits,
                          activation=MA.ReLU(),
                          initializations=[
                              MI.Uniform('W', small=True),
                              MI.SingleValue('b', 0)
                          ],
                          learningScenari=[ls],
                          cost=cost,
                          name="out")

        ae = i > h > o
        ae.init()

        miniBatchSize = 1
        for e in xrange(2000):
            for i in xrange(0, len(data), miniBatchSize):
                miniBatch = data[i:i + miniBatchSize]
                ae["out"].train({
                    "inp.inputs": miniBatch,
                    "out.targets": miniBatch
                })["out.drive.train"]

        res = ae["out"].propagate["test"]({
            "inp.inputs": data
        })["out.propagate.test"]
        for i in xrange(len(res)):
            self.assertEqual(numpy.argmax(data[i]), numpy.argmax(res[i]))
Example #9
    def test_save_load_pickle(self) :
        import os
        import Mariana.network as MN

        ls = MS.GradientDescent(lr = 0.1)
        cost = MC.NegativeLogLikelihood()

        i = ML.Input(2, 'inp')
        h = Hidden_layerRef(i, 10, activation = MA.ReLU(), name = "Hidden_0.500705866892")
        o = ML.SoftmaxClassifier(2, learningScenario = ls, costObject = cost, name = "out")

        mlp = i > h > o
        
        self.xor_ins = numpy.array(self.xor_ins)
        self.xor_outs = numpy.array(self.xor_outs)
        for i in xrange(1000) :
            mlp.train(o, inp = self.xor_ins, targets = self.xor_outs )

        mlp.save("test_save")
        mlp2 = MN.loadModel("test_save.mar.mdl.pkl")
        
        o = mlp.outputs.values()[0]
        
        v1 = mlp.propagate( o.name, inp = self.xor_ins )["outputs"]
        v2 = mlp2.propagate( o.name, inp = self.xor_ins )["outputs"]
        self.assertEqual(numpy.sum(v1), numpy.sum(v2))
        self.assertEqual(mlp["Hidden_0.500705866892"].otherLayer.name, mlp2["Hidden_0.500705866892"].otherLayer.name)
        
        os.remove('test_save.mar.mdl.pkl')
Example #10
    def test_save_load_64h(self):
        import os
        import Mariana.network as MN

        ls = MS.GradientDescent(lr=0.1)
        cost = MC.NegativeLogLikelihood()

        i = ML.Input(2, 'inp')
        o = ML.SoftmaxClassifier(nbClasses=2,
                                 cost=cost,
                                 learningScenari=[ls],
                                 name="out")

        prev = i
        for i in xrange(64):
            h = ML.Hidden(size=10, activation=MA.ReLU(), name="Hidden_%s" % i)
            prev > h
            prev = h

        mlp = prev > o
        mlp.init()
        mlp.save("test_save")

        mlp2 = MN.loadModel("test_save.mar")
        mlp2.init()

        v1 = mlp["out"].propagate["test"]({
            "inp.inputs": self.xor_ins
        })["out.propagate.test"]
        v2 = mlp2["out"].propagate["test"]({
            "inp.inputs": self.xor_ins
        })["out.propagate.test"]
        self.assertTrue((v1 == v2).all())
        os.remove('test_save.mar')
Example #11
	def __init__(self, ls, cost) :
		maxPool = MCONV.MaxPooling2D(3,3)
		i = MCONV.Input(nbChannels = 1, height = 100, width = 100, name = 'inp')

		ichan = MCONV.InputChanneler(100, 100, name = 'inpChan')
		
		c1 = MCONV.Convolution2D( 
			nbFilters = 1,
			filterHeight = 20,
			filterWidth = 20,
			activation = MA.ReLU(),
			pooler = maxPool,
			name = "conv1"
		)
		c3 = MCONV.Convolution2D( 
			nbFilters = 1,
			filterHeight = 5,
			filterWidth = 5,
			activation = MA.ReLU(),
			pooler = MCONV.NoPooling(),
			name = "conv3"
		)


		c2 = MCONV.Convolution2D( 
			nbFilters = 1,
			filterHeight = 5,
			filterWidth = 5,
			activation = MA.ReLU(),
			pooler = maxPool,
			name = "conv2"
		)

		f = MCONV.Flatten(name = "flat")
		h = ML.Hidden(1000, activation = MA.ReLU(), decorators = [MD.BinomialDropout(0.2)], regularizations = [ ], name = "hid" )
		o = ML.SoftmaxClassifier(2, decorators = [], learningScenario = ls, costObject = cost, name = "out", regularizations = [MR.L1(1e-7) ] )
		
		self.model = i > c1 > c2 > c3 > f > h > o
Example #12
def getMLP(self, nbInputs=2, nbClasses=2):
    ls = MS.GradientDescent(lr=0.1)
    cost = MC.NegativeLogLikelihood()

    i = ML.Input(nbInputs, 'inp')
    h = ML.Hidden(size=6, activation=MA.ReLU(), name="Hidden_0.500705866892")
    o = ML.SoftmaxClassifier(nbClasses=nbClasses,
                             cost=cost,
                             learningScenari=[ls],
                             name="out")

    mlp = i > h > o
    mlp.init()
    return mlp
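
A usage sketch for getMLP against the xor_ins/xor_outs arrays referenced in Examples #9, #10 and #15, reusing the dictionary-style calls from Example #8:

mlp = self.getMLP()
for e in xrange(1000):
    mlp["out"].train({"inp.inputs": self.xor_ins, "out.targets": self.xor_outs})["out.drive.train"]
preds = mlp["out"].propagate["test"]({"inp.inputs": self.xor_ins})["out.propagate.test"]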
Example #13
    def __init__(self, inputSize, dictSize, patternSize, embSize, ls, cost):
        # pooler = MCONV.NoPooling()
        pooler = MCONV.MaxPooling2D(1, 2)

        emb = MCONV.Embedding(size=inputSize,
                              nbDimentions=embSize,
                              dictSize=dictSize,
                              name='Emb')

        c1 = MCONV.Convolution2D(nbFilters=1,
                                 filterHeight=1,
                                 filterWidth=patternSize / 2,
                                 activation=MA.ReLU(),
                                 pooler=pooler,
                                 name="conv1")

        c2 = MCONV.Convolution2D(nbFilters=4,
                                 filterHeight=1,
                                 filterWidth=patternSize / 2,
                                 activation=MA.ReLU(),
                                 pooler=MCONV.NoPooling(),
                                 name="conv2")

        f = MCONV.Flatten(name="flat")
        h = ML.Hidden(5,
                      activation=MA.ReLU(),
                      decorators=[],
                      regularizations=[],
                      name="hid")
        o = ML.SoftmaxClassifier(2,
                                 decorators=[],
                                 learningScenario=ls,
                                 costObject=cost,
                                 name="out",
                                 regularizations=[])

        self.model = emb > c1 > c2 > f > h > o
Example #14
    def __init__(self, inputSize, patternSize, ls, cost):
        # pooler = MCONV.NoPooling()
        pooler = MCONV.MaxPooling2D(1, 2)

        # The input channeler takes regular layers and arranges them into several channels.
        i = ML.Input(inputSize, name='inp')
        ichan = MCONV.InputChanneler(1, inputSize, name='inpChan')

        c1 = MCONV.Convolution2D(nbFilters=5,
                                 filterHeight=1,
                                 filterWidth=patternSize / 2,
                                 activation=MA.ReLU(),
                                 pooler=pooler,
                                 name="conv1")

        c2 = MCONV.Convolution2D(nbFilters=10,
                                 filterHeight=1,
                                 filterWidth=patternSize / 2,
                                 activation=MA.ReLU(),
                                 pooler=MCONV.NoPooling(),
                                 name="conv2")

        f = MCONV.Flatten(name="flat")
        h = ML.Hidden(5,
                      activation=MA.ReLU(),
                      decorators=[],
                      regularizations=[],
                      name="hid")
        o = ML.SoftmaxClassifier(2,
                                 decorators=[],
                                 learningScenario=ls,
                                 costObject=cost,
                                 name="out",
                                 regularizations=[])

        self.model = i > ichan > c1 > c2 > f > h > o
Example #15
    def trainMLP_xor(self) :
        ls = MS.GradientDescent(lr = 0.1)
        cost = MC.NegativeLogLikelihood()

        i = ML.Input(2, 'inp')
        h = ML.Hidden(10, activation = MA.ReLU(), name = "Hidden_0.500705866892")
        o = ML.SoftmaxClassifier(2, learningScenario = ls, costObject = cost, name = "out")

        mlp = i > h > o
        
        self.xor_ins = numpy.array(self.xor_ins)
        self.xor_outs = numpy.array(self.xor_outs)
        for i in xrange(1000) :
            mlp.train(o, inp = self.xor_ins, targets = self.xor_outs )

        return mlp
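
A quick check on the returned network, combining the output-layer lookup from Example #9 with the classify call from Example #17; none of this is in the original test:

mlp = self.trainMLP_xor()
out = mlp.outputs.values()[0]                     # output-layer lookup, as in Example #9
for ins, target in zip(self.xor_ins, self.xor_outs):
    pred = mlp.classify(out, inp=[ins])["class"]  # classify call, as in Example #17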
Example #16
    def __init__(self,
                 size,
                 activation=MA.ReLU(),
                 learningScenario=None,
                 name=None,
                 regularizations=None,
                 **kwargs):
        Layer_ABC.__init__(self,
                           size,
                           activation=activation,
                           learningScenario=learningScenario,
                           name=name,
                           **kwargs)
        self.W = None
        self.b = None

        # Avoid sharing a mutable default list across instances.
        self.regularizationObjects = regularizations if regularizations is not None else []
        self.regularizations = []

        self.type = TYPE_HIDDEN_LAYER
Example #17
            data[i][start:start+patternSize] = patternC2
            targets.append(2)
        else :
            targets.append(0)

    targets = numpy.asarray(targets, dtype=theano.config.floatX)
    return data, targets

if __name__ == "__main__" :
    examples, targets = makeDataset(300, 100, 10)

    ls = MS.GradientDescent(lr = 0.01)
    cost = MC.NegativeLogLikelihood()

    i = ML.Input(100, 'inp')
    h1 = ML.Hidden(50, activation = MA.ReLU(), decorators = [MD.BatchNormalization()])
    h2 = ML.Hidden(2, activation = MA.Softmax())
    o = ML.SoftmaxClassifier(3, learningScenario = ls, costObject = cost, name = "out")

    mlp = i > h1 > h2 > o

    for k in xrange(100) :
        for example, target in zip(examples, targets) :
            mlp.train(o, inp=[example], targets=[target])
    
    nbErr = 0
    for example, target in zip(examples, targets) :
        if target != mlp.classify(o, inp=[example])["class"] :
            nbErr += 1

    print "Nb Errors: %s/%s (%s%%) " % (nbErr, len(targets), float(nbErr)/len(targets) * 100)