def test_composite(self):
    ls = MS.GradientDescent(lr=0.1)
    cost = MC.NegativeLogLikelihood()

    inp = ML.Input(2, 'inp')
    h1 = ML.Hidden(5, activation=MA.Tanh(), name="h1")
    h2 = ML.Hidden(5, activation=MA.Tanh(), name="h2")
    o = ML.SoftmaxClassifier(2, learningScenario=ls, costObject=cost, name="out")
    c = ML.Composite(name="Comp")

    inp > h1 > c
    inp > h2 > c
    mlp = c > o

    for i in xrange(10000):
        ii = i % len(self.xor_ins)
        mlp.train(o, inp=[self.xor_ins[ii]], targets=[self.xor_outs[ii]])

    self.assertEqual(mlp.predict(o, inp=[self.xor_ins[0]])["class"], 0)
    self.assertEqual(mlp.predict(o, inp=[self.xor_ins[1]])["class"], 1)
    self.assertEqual(mlp.predict(o, inp=[self.xor_ins[2]])["class"], 1)
    self.assertEqual(mlp.predict(o, inp=[self.xor_ins[3]])["class"], 0)

def test_concatenation(self):
    ls = MS.GradientDescent(lr=0.1)
    cost = MC.NegativeLogLikelihood()

    inp = ML.Input(2, 'inp')
    h1 = ML.Hidden(5, activation=MA.Tanh(), name="h1")
    h2 = ML.Hidden(5, activation=MA.Tanh(), name="h2")
    o = ML.SoftmaxClassifier(nbClasses=2, cost=cost, learningScenari=[ls], name="out")

    inp > h1
    inp > h2
    c = ML.C([h1, h2], name="concat")
    mlp = c > o
    mlp.init()

    self.assertEqual(c.getIntrinsicShape()[0], h1.getIntrinsicShape()[0] + h2.getIntrinsicShape()[0])

    for i in xrange(10000):
        ii = i % len(self.xor_ins)
        miniBatch = [self.xor_ins[ii]]
        targets = [self.xor_outs[ii]]
        mlp["out"].train({"inp.inputs": miniBatch, "out.targets": targets})["out.drive.train"]

    for i in xrange(len(self.xor_ins)):
        self.assertEqual(
            mlp["out"].predict["test"]({"inp.inputs": [self.xor_ins[i]]})["out.predict.test"],
            self.xor_outs[i]
        )

def __init__(self, ls, cost):
    maxPool = MCONV.MaxPooling2D(2, 2)

    # The input channeler would take regular layers and arrange them into several channels
    i = MCONV.Input(nbChannels=3, height=256, width=256, name='inp')
    # ichan = MCONV.InputChanneler(256, 256, name='inpChan')

    c1 = MCONV.Convolution2D(
        nbFilters=3,
        filterHeight=5,
        filterWidth=5,
        activation=MA.Tanh(),
        pooler=maxPool,
        name="conv1"
    )

    c2 = MCONV.Convolution2D(
        nbFilters=3,
        filterHeight=5,
        filterWidth=5,
        activation=MA.Tanh(),
        pooler=maxPool,
        name="conv2"
    )

    f = MCONV.Flatten(name="flat")
    h = ML.Hidden(5, activation=MA.Tanh(), decorators=[MD.GlorotTanhInit()], regularizations=[MR.L1(0), MR.L2(0.0001)], name="hid")
    o = ML.SoftmaxClassifier(2, decorators=[], learningScenario=ls, costObject=cost, name="out", regularizations=[])

    self.model = i > c1 > c2 > f > h > o

def ae2(data):
    """This one uses an Autoencode layer. This layer is part of the graph and does not need an explicit target."""
    miniBatchSize = 1

    ls = MS.GradientDescent(lr=0.1)
    cost = MC.MeanSquaredError()

    i = ML.Input(8, name='inp')
    h = ML.Hidden(3, activation=MA.ReLU(), initializations=[MI.SmallUniformWeights(), MI.ZeroBias()], name="hid")
    o = ML.Autoencode(i.name, activation=MA.ReLU(), initializations=[MI.SmallUniformWeights(), MI.ZeroBias()], learningScenario=ls, costObject=cost, name="out")

    ae = i > h > o
    # ae.init()
    # o.train.printGraph()

    for e in xrange(2000):
        for i in xrange(0, len(data), miniBatchSize):
            ae.train(o, inp=data[i:i + miniBatchSize])

    return ae, o

def getModel(inpSize, filterWidth):
    ls = MS.GradientDescent(lr=0.5)
    cost = MC.NegativeLogLikelihood()

    i = ML.Input((1, 1, inpSize), name='inp')
    c1 = MCONV.Convolution2D(numFilters=5, filterHeight=1, filterWidth=filterWidth, activation=MA.ReLU(), name="conv1")
    pool1 = MSAMP.MaxPooling2D(poolHeight=1, poolWidth=2, name="pool1")
    c2 = MCONV.Convolution2D(numFilters=10, filterHeight=1, filterWidth=filterWidth, activation=MA.ReLU(), name="conv2")
    pool2 = MSAMP.MaxPooling2D(poolHeight=1, poolWidth=2, name="pool2")
    h = ML.Hidden(5, activation=MA.ReLU(), name="hid")
    o = ML.SoftmaxClassifier(nbClasses=2, cost=cost, learningScenari=[ls], name="out")

    model = i > c1 > pool1 > c2 > pool2 > h > o
    model.init()
    return model

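# Usage sketch (an addition, not part of the original file): exercises the model
# returned by getModel() with random stand-in data, using the dict-based
# train/predict driver calls that appear in the other snippets. The batch shape
# (batch, 1, 1, inpSize) fed to ML.Input((1, 1, inpSize)) is an assumption here.
def _demo_getModel():
    inpSize = 16
    model = getModel(inpSize, filterWidth=3)

    data = numpy.random.rand(20, 1, 1, inpSize)
    labels = numpy.random.randint(0, 2, size=20)

    for epoch in xrange(10):
        for start in xrange(0, len(data), 5):
            model["out"].train({
                "inp.inputs": data[start:start + 5],
                "out.targets": labels[start:start + 5]
            })["out.drive.train"]

    # predicted classes for the whole stand-in set
    return model["out"].predict["test"]({"inp.inputs": data})["out.predict.test"]
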
def test_multiinputs(self):
    ls = MS.GradientDescent(lr=0.1)

    inpA = ML.Embedding(2, 2, 2, name="IA")
    inpB = ML.Input(2, name="IB")
    inpNexus = ML.Composite(name="InputNexus")

    h1 = ML.Hidden(32, activation=MA.ReLU(), decorators=[], regularizations=[], name="Fully-connected1")
    o = ML.Regression(2, decorators=[], activation=MA.ReLU(), learningScenario=ls, costObject=MC.CrossEntropy(), name="Out", regularizations=[])

    inpA > inpNexus
    inpB > inpNexus
    m = inpNexus > h1 > o
    m.init()

def __init__(self, ls, cost):
    maxPool = MCONV.MaxPooling2D(2, 2)

    i = MCONV.Input(nbChannels=1, height=28, width=28, name='inp')
    c1 = MCONV.Convolution2D(nbFilters=20, filterHeight=5, filterWidth=5, activation=MA.Tanh(), pooler=maxPool, name="conv1")
    c2 = MCONV.Convolution2D(nbFilters=50, filterHeight=5, filterWidth=5, activation=MA.Tanh(), pooler=maxPool, name="conv2")

    # needed for the transition to a fully connected layer
    f = MCONV.Flatten(name="flat")
    h = ML.Hidden(500, activation=MA.Tanh(), decorators=[], regularizations=[], name="hid")
    o = ML.SoftmaxClassifier(10, decorators=[], learningScenario=ls, costObject=cost, name="out", regularizations=[])

    self.model = i > c1 > c2 > f > h > o
    print self.model

def test_ae(self):
    data = []
    for i in xrange(8):
        zeros = numpy.zeros(8)
        zeros[i] = 1
        data.append(zeros)

    ls = MS.GradientDescent(lr=0.1)
    cost = MC.MeanSquaredError()

    i = ML.Input(8, name='inp')
    h = ML.Hidden(3, activation=MA.ReLU(), initializations=[MI.SmallUniformWeights(), MI.ZeroBias()], name="hid")
    o = ML.Autoencode(targetLayerName="inp", activation=MA.ReLU(), initializations=[MI.SmallUniformWeights(), MI.ZeroBias()], learningScenario=ls, costObject=cost, name="out")

    ae = i > h > o

    miniBatchSize = 1
    for e in xrange(2000):
        for i in xrange(0, len(data), miniBatchSize):
            ae.train(o, inp=data[i:i + miniBatchSize])

    res = ae.propagate(o, inp=data)["outputs"]
    for i in xrange(len(res)):
        self.assertEqual(numpy.argmax(data[i]), numpy.argmax(res[i]))

def test_ae(self):
    data = []
    for i in xrange(8):
        zeros = numpy.zeros(8)
        zeros[i] = 1
        data.append(zeros)

    ls = MS.GradientDescent(lr=0.1)
    cost = MC.MeanSquaredError()

    i = ML.Input(8, name='inp')
    h = ML.Hidden(3, activation=MA.ReLU(), name="hid")
    o = ML.Regression(8, activation=MA.ReLU(), learningScenario=ls, costObject=cost, name="out")

    ae = i > h > o

    miniBatchSize = 2
    for e in xrange(2000):
        for i in xrange(0, len(data), miniBatchSize):
            ae.train(o, inp=data[i:i + miniBatchSize], targets=data[i:i + miniBatchSize])

    res = ae.propagate(o, inp=data)["outputs"]
    for i in xrange(len(res)):
        self.assertEqual(numpy.argmax(data[i]), numpy.argmax(res[i]))

def getModel(inpSize, filterWidth):
    ls = MS.GradientDescent(lr=0.5)
    cost = MC.NegativeLogLikelihood()

    pooler = MCONV.MaxPooling2D(1, 2)

    i = ML.Input(inpSize, name='inp')
    ichan = MCONV.InputChanneler(1, inpSize, name='inpChan')

    c1 = MCONV.Convolution2D(
        nbFilters=5,
        filterHeight=1,
        filterWidth=filterWidth,
        activation=MA.ReLU(),
        pooler=pooler,
        name="conv1"
    )

    c2 = MCONV.Convolution2D(
        nbFilters=10,
        filterHeight=1,
        filterWidth=filterWidth,
        activation=MA.ReLU(),
        pooler=pooler,
        name="conv2"
    )

    f = MCONV.Flatten(name="flat")
    h = ML.Hidden(5, activation=MA.ReLU(), decorators=[], regularizations=[], name="hid")
    o = ML.SoftmaxClassifier(2, decorators=[], learningScenario=ls, costObject=cost, name="out", regularizations=[])

    model = i > ichan > c1 > c2 > f > h > o
    return model

def ae1(data):
    '''Using a Regression layer as the output. This layer needs an explicit target.'''
    miniBatchSize = 2

    ls = MS.GradientDescent(lr=0.1)
    cost = MC.MeanSquaredError()

    i = ML.Input(8, name='inp')
    h = ML.Hidden(3, activation=MA.ReLU(), initializations=[MI.SmallUniformWeights(), MI.ZeroBias()], name="hid")
    o = ML.Regression(8, activation=MA.ReLU(), initializations=[MI.SmallUniformWeights(), MI.ZeroBias()], learningScenario=ls, costObject=cost, name="out")

    ae = i > h > o

    for e in xrange(1000):
        for i in xrange(0, len(data), miniBatchSize):
            ae.train(o, inp=data[i:i + miniBatchSize], targets=data[i:i + miniBatchSize])

    return ae, o

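# Usage sketch (an addition, not part of the original file): builds the same one-hot
# dataset used by the autoencoder tests, trains with ae1() and checks the
# reconstructions through the propagate() call that those tests use.
def _demo_ae1():
    data = []
    for k in xrange(8):
        row = numpy.zeros(8)
        row[k] = 1
        data.append(row)

    ae, o = ae1(data)
    res = ae.propagate(o, inp=data)["outputs"]
    for k in xrange(len(res)):
        assert numpy.argmax(res[k]) == numpy.argmax(data[k])
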
def test_save_load_64h(self):
    import os
    import Mariana.network as MN

    ls = MS.GradientDescent(lr=0.1)
    cost = MC.NegativeLogLikelihood()

    i = ML.Input(2, 'inp')
    o = ML.SoftmaxClassifier(nbClasses=2, cost=cost, learningScenari=[ls], name="out")

    prev = i
    for i in xrange(64):
        h = ML.Hidden(size=10, activation=MA.ReLU(), name="Hidden_%s" % i)
        prev > h
        prev = h

    mlp = prev > o
    mlp.init()
    mlp.save("test_save")

    mlp2 = MN.loadModel("test_save.mar")
    mlp2.init()

    v1 = mlp["out"].propagate["test"]({"inp.inputs": self.xor_ins})["out.propagate.test"]
    v2 = mlp2["out"].propagate["test"]({"inp.inputs": self.xor_ins})["out.propagate.test"]
    self.assertTrue((v1 == v2).all())

    os.remove('test_save.mar')

def test_optimizer_override(self):
    ls = MS.GradientDescent(lr=0.5)
    cost = MC.NegativeLogLikelihood()

    inp = ML.Input(1, 'inp')
    # "b" of layer h is frozen; its "W" is still trained
    h = ML.Hidden(5, activation=MA.Tanh(), learningScenari=[MS.Fixed("b")], name="h")
    # the output layer trains with its own gradient descent but freezes its "W"
    o = ML.SoftmaxClassifier(2, learningScenari=[MS.GradientDescent(lr=0.5), MS.Fixed("W")], cost=cost, name="out")

    net = inp > h > o
    net.init()

    ow = o.getP('W').getValue()
    ob = o.getP('b').getValue()
    hw = h.getP('W').getValue()
    hb = h.getP('b').getValue()

    for x in xrange(1, 10):
        net["out"].train({"inp.inputs": [[1]], "out.targets": [1]})["out.drive.train"]

    # frozen parameters must not change, the others must
    self.assertTrue(sum(ow[0]) == sum(o.getP('W').getValue()[0]))
    self.assertTrue(sum(ob) != sum(o.getP('b').getValue()))
    self.assertTrue(sum(hb) == sum(h.getP('b').getValue()))
    self.assertTrue(sum(hw[0]) != sum(h.getP('W').getValue()[0]))

def getMLP(self, nbInputs=2, nbClasses=2):
    ls = MS.GradientDescent(lr=0.1)
    cost = MC.NegativeLogLikelihood()

    i = ML.Input(nbInputs, 'inp')
    h = ML.Hidden(size=6, activation=MA.ReLU(), name="Hidden_0.500705866892")
    o = ML.SoftmaxClassifier(nbClasses=nbClasses, cost=cost, learningScenari=[ls], name="out")

    mlp = i > h > o
    mlp.init()
    return mlp

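# Usage sketch (an addition, not part of the original file): assumes the XOR fixtures
# self.xor_ins and self.xor_outs defined elsewhere in this test case, and the
# dict-based train/predict interface used by the other tests.
def _demo_getMLP(self):
    mlp = self.getMLP(nbInputs=2, nbClasses=2)
    for i in xrange(10000):
        ii = i % len(self.xor_ins)
        mlp["out"].train({
            "inp.inputs": [self.xor_ins[ii]],
            "out.targets": [self.xor_outs[ii]]
        })["out.drive.train"]
    return mlp["out"].predict["test"]({"inp.inputs": [self.xor_ins[0]]})["out.predict.test"]
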
def test_ae_reg(self):
    powerOf2 = 3
    nbUnits = 2 ** powerOf2

    data = []
    for i in xrange(nbUnits):
        zeros = numpy.zeros(nbUnits)
        zeros[i] = 1
        data.append(zeros)

    ls = MS.GradientDescent(lr=0.1)
    cost = MC.MeanSquaredError()

    i = ML.Input(nbUnits, name='inp')
    h = ML.Hidden(powerOf2, activation=MA.ReLU(), initializations=[MI.Uniform('W', small=True), MI.SingleValue('b', 0)], name="hid")
    o = ML.Regression(nbUnits, activation=MA.ReLU(), initializations=[MI.Uniform('W', small=True), MI.SingleValue('b', 0)], learningScenari=[ls], cost=cost, name="out")

    ae = i > h > o
    ae.init()

    miniBatchSize = 1
    for e in xrange(2000):
        for i in xrange(0, len(data), miniBatchSize):
            miniBatch = data[i:i + miniBatchSize]
            ae["out"].train({"inp.inputs": miniBatch, "out.targets": miniBatch})["out.drive.train"]

    res = ae["out"].propagate["test"]({"inp.inputs": data})["out.propagate.test"]
    for i in xrange(len(res)):
        self.assertEqual(numpy.argmax(data[i]), numpy.argmax(res[i]))

def trainMLP_xor(self):
    ls = MS.GradientDescent(lr=0.1)
    cost = MC.NegativeLogLikelihood()

    i = ML.Input(2, 'inp')
    h = ML.Hidden(10, activation=MA.ReLU(), name="Hidden_0.500705866892")
    o = ML.SoftmaxClassifier(2, learningScenario=ls, costObject=cost, name="out")

    mlp = i > h > o

    self.xor_ins = numpy.array(self.xor_ins)
    self.xor_outs = numpy.array(self.xor_outs)
    for i in xrange(1000):
        mlp.train(o, inp=self.xor_ins, targets=self.xor_outs)

    return mlp

def MLP(ls, cost):
    i = ML.Input(28 * 28, name='inp')
    h = ML.Hidden(500, activation=MA.Tanh(), decorators=[MD.GlorotTanhInit()], regularizations=[MR.L1(0), MR.L2(0.0001)], name="hid")
    o = ML.SoftmaxClassifier(10, decorators=[MD.ZerosInit()], learningScenario=ls, costObject=cost, name="out", regularizations=[MR.L1(0), MR.L2(0.0001)])

    mlp = i > h > o
    return mlp

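# Usage sketch (an addition, not part of the original file): wires the MLP() factory
# above into the train/classify pattern used by the other scripts. Random arrays stand
# in for MNIST, and looking up the output layer by name with mlp["out"] is an
# assumption of this sketch.
def _demo_MLP():
    ls = MS.GradientDescent(lr=0.01)
    cost = MC.NegativeLogLikelihood()
    mlp = MLP(ls, cost)
    out = mlp["out"]

    images = numpy.random.rand(20, 28 * 28)
    labels = numpy.random.randint(0, 10, size=20)

    for epoch in xrange(5):
        for k in xrange(len(images)):
            mlp.train(out, inp=[images[k]], targets=[labels[k]])

    return mlp.classify(out, inp=[images[0]])["class"]
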
def __init__(self, ls, cost):
    maxPool = MCONV.MaxPooling2D(3, 3)

    i = MCONV.Input(nbChannels=1, height=100, width=100, name='inp')
    c1 = MCONV.Convolution2D(nbFilters=10, filterHeight=3, filterWidth=3, activation=MA.Max_norm(), pooler=maxPool, name="conv1")
    c3 = MCONV.Convolution2D(nbFilters=20, filterHeight=3, filterWidth=3, activation=MA.Max_norm(), pooler=maxPool, name="conv3")
    c2 = MCONV.Convolution2D(nbFilters=10, filterHeight=3, filterWidth=3, activation=MA.Max_norm(), pooler=maxPool, name="conv2")

    f = MCONV.Flatten(name="flat")
    h = ML.Hidden(2048, activation=MA.Max_norm(), decorators=[MD.BinomialDropout(0.7)], regularizations=[], name="hid")
    o = ML.SoftmaxClassifier(2, decorators=[], learningScenario=ls, costObject=cost, name="out", regularizations=[])

    self.model = i > c1 > c3 > c2 > f > h > o

def trainMLP_xor(self):
    ls = MS.GradientDescent(lr=0.1)
    cost = MC.NegativeLogLikelihood()

    i = ML.Input(2, 'inp')
    h = ML.Hidden(4, activation=MA.Tanh(), decorators=[dec.GlorotTanhInit()], regularizations=[MR.L1(0), MR.L2(0)])
    o = ML.SoftmaxClassifier(2, learningScenario=ls, costObject=cost, name="out")

    mlp = i > h > o

    self.xor_ins = numpy.array(self.xor_ins)
    self.xor_outs = numpy.array(self.xor_outs)

    for i in xrange(1000):
        ii = i % len(self.xor_ins)
        mlp.train(o, inp=[self.xor_ins[ii]], targets=[self.xor_outs[ii]])

    return mlp

def __init__(self, inputSize, dictSize, patternSize, embSize, ls, cost):
    # pooler = MCONV.NoPooling()
    pooler = MCONV.MaxPooling2D(1, 2)

    emb = MCONV.Embedding(size=inputSize, nbDimentions=embSize, dictSize=dictSize, name='Emb')
    c1 = MCONV.Convolution2D(nbFilters=1, filterHeight=1, filterWidth=patternSize / 2, activation=MA.ReLU(), pooler=pooler, name="conv1")
    c2 = MCONV.Convolution2D(nbFilters=4, filterHeight=1, filterWidth=patternSize / 2, activation=MA.ReLU(), pooler=MCONV.NoPooling(), name="conv2")

    f = MCONV.Flatten(name="flat")
    h = ML.Hidden(5, activation=MA.ReLU(), decorators=[], regularizations=[], name="hid")
    o = ML.SoftmaxClassifier(2, decorators=[], learningScenario=ls, costObject=cost, name="out", regularizations=[])

    self.model = emb > c1 > c2 > f > h > o

def __init__(self, inputSize, patternSize, ls, cost):
    # pooler = MCONV.NoPooling()
    pooler = MCONV.MaxPooling2D(1, 2)

    # The input channeler will take regular layers and arrange them into several channels
    i = ML.Input(inputSize, name='inp')
    ichan = MCONV.InputChanneler(1, inputSize, name='inpChan')

    c1 = MCONV.Convolution2D(nbFilters=5, filterHeight=1, filterWidth=patternSize / 2, activation=MA.ReLU(), pooler=pooler, name="conv1")
    c2 = MCONV.Convolution2D(nbFilters=10, filterHeight=1, filterWidth=patternSize / 2, activation=MA.ReLU(), pooler=MCONV.NoPooling(), name="conv2")

    f = MCONV.Flatten(name="flat")
    h = ML.Hidden(5, activation=MA.ReLU(), decorators=[], regularizations=[], name="hid")
    o = ML.SoftmaxClassifier(2, decorators=[], learningScenario=ls, costObject=cost, name="out", regularizations=[])

    self.model = i > ichan > c1 > c2 > f > h > o

* automatically saves the model if the training halts because of an error or if the process is killed
* saves a log if the process dies unexpectedly
* training results and hyperparameter values are recorded to a file
* allows you to define custom stop criteria
* training info is printed at each epoch, including the best scores and the epochs at which they were achieved
"""

if __name__ == "__main__":

    # Let's define the network
    ls = MS.GradientDescent(lr=0.01)
    cost = MC.NegativeLogLikelihood()

    i = ML.Input(28 * 28, name='inp')
    h = ML.Hidden(500, activation=MA.Tanh(), decorators=[MD.GlorotTanhInit()], regularizations=[MR.L1(0), MR.L2(0.0001)], name="hid")
    o = ML.SoftmaxClassifier(10, learningScenario=ls, costObject=cost, name="out", regularizations=[MR.L1(0), MR.L2(0.0001)])

    mlp = i > h > o
    mlp.saveDOT("mnist_mlp")
    mlp.saveHTML("mnist_mlp")

    # And then map sets to the inputs and outputs of our network
    train_set, validation_set, test_set = load_mnist()

    trainData = MDM.Series(images=train_set[0], numbers=train_set[1])
    trainMaps = MDM.DatasetMapper()
    trainMaps.mapInput(i, trainData.images)
    trainMaps.mapOutput(o, trainData.numbers)

    testData = MDM.Series(images=test_set[0], numbers=test_set[1])

            data[i][start:start + patternSize] = patternC2
            targets.append(2)
        else:
            targets.append(0)

    targets = numpy.asarray(targets, dtype=theano.config.floatX)
    return data, targets


if __name__ == "__main__":
    examples, targets = makeDataset(300, 100, 10)

    ls = MS.GradientDescent(lr=0.01)
    cost = MC.NegativeLogLikelihood()

    i = ML.Input(100, 'inp')
    h1 = ML.Hidden(50, activation=MA.ReLU(), decorators=[MD.BatchNormalization()])
    h2 = ML.Hidden(2, activation=MA.Softmax())
    o = ML.SoftmaxClassifier(3, learningScenario=ls, costObject=cost, name="out")

    mlp = i > h1 > h2 > o

    for k in xrange(100):
        for example, target in zip(examples, targets):
            mlp.train(o, inp=[example], targets=[target])

    nbErr = 0
    for example, target in zip(examples, targets):
        if target != mlp.classify(o, inp=[example])["class"]:
            nbErr += 1

    print "Nb Errors: %s/%s (%s%%)" % (nbErr, len(targets), float(nbErr) / len(targets) * 100)

* automatically saves the model if the training halts because of an error or if the process is killed
* saves a log if the process dies unexpectedly
* training results and hyperparameter values are recorded to a file
* allows you to define custom stop criteria
* training info is printed at each epoch, including the best scores and the epochs at which they were achieved
"""

if __name__ == "__main__":

    # Let's define the network
    ls = MS.GradientDescent(lr=0.01)
    cost = MC.NegativeLogLikelihood()

    i = ML.Input(28 * 28, name='inp')
    h = ML.Hidden(500, activation=MA.Tanh(), initializations=[MI.GlorotTanhInit(), MI.ZeroBias()], regularizations=[MR.L1(0), MR.L2(0.0001)], name="hid")
    o = ML.SoftmaxClassifier(10, learningScenario=ls, costObject=cost, name="out", regularizations=[MR.L1(0), MR.L2(0.0001)])

    mlp = i > h > o
    mlp.saveDOT("mnist_mlp")
    mlp.saveHTML("mnist_mlp")

    # And then map sets to the inputs and outputs of our network
    train_set, validation_set, test_set = load_mnist()

    trainData = MDM.Series(images=train_set[0], numbers=train_set[1])
    trainMaps = MDM.DatasetMapper("train", miniBatchSize=500)
    trainMaps.mapInput(i, trainData.images)
    trainMaps.mapOutput(o, trainData.numbers)

    testData = MDM.Series(images=test_set[0], numbers=test_set[1])

name = "conv3" ) c2 = MCONV.Convolution2D( nbFilters = 15, filterHeight = 3, filterWidth = 3, activation = MA.Max_norm(), pooler = maxPool, name = "conv2" ) fa = MCONV.Flatten(name="flata") fb = MCONV.Flatten(name="flatb") f = MCONV.Flatten(name = "flat") h = ML.Hidden(2048, activation = MA.Max_norm(), decorators = [MD.BinomialDropout(0.75)], regularizations = [], name = "hid" ) passa = ML.Hidden(1500, activation = MA.Pass(), decorators = [MD.BinomialDropout(0.5)], regularizations = [], name = "pass1" ) passb = ML.Hidden(1500, activation = MA.Pass(), decorators = [MD.BinomialDropout(0.5)], regularizations = [], name = "pass2" ) h2 = ML.Hidden(2048, activation = MA.Max_norm(), decorators = [MD.BinomialDropout(0.75)], regularizations = [], name = "hid2" ) o = ML.SoftmaxClassifier(2, decorators = [], learningScenario = ls, costObject = cost, name = "out", regularizations = [] ) model = i > c1 > c3 > c2 > f > h > h2 > o c1 > fa > passa > h > h2 > o c2 > fb > passb >h > h2 > o tscore = [] vscore = [] tdata = load_data(trainfile) vdata = load_data(validfile) vdata = (center(vdata[0]),vdata[1]) test = load_data(testfile)