def __init__(self, ls, cost):
    maxPool = MCONV.MaxPooling2D(2, 2)

    i = MCONV.Input(nbChannels=3, height=256, width=256, name='inp')
    # The input channeler would take regular layers and arrange them into several channels:
    # ichan = MCONV.InputChanneler(256, 256, name='inpChan')

    c1 = MCONV.Convolution2D(nbFilters=3, filterHeight=5, filterWidth=5, activation=MA.Tanh(), pooler=maxPool, name="conv1")
    c2 = MCONV.Convolution2D(nbFilters=3, filterHeight=5, filterWidth=5, activation=MA.Tanh(), pooler=maxPool, name="conv2")
    f = MCONV.Flatten(name="flat")

    h = ML.Hidden(5, activation=MA.Tanh(), decorators=[MD.GlorotTanhInit()], regularizations=[MR.L1(0), MR.L2(0.0001)], name="hid")
    o = ML.SoftmaxClassifier(2, decorators=[], learningScenario=ls, costObject=cost, name="out", regularizations=[])

    self.model = i > c1 > c2 > f > h > o
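# Hedged usage sketch. The class owning the __init__ above is not shown in this
# excerpt, so "ConvModel" below is a hypothetical stand-in for it. The train()
# call and layer indexing mirror the tests later in this file; the dummy batch
# (float32, shaped (N, 3, 256, 256) to match the MCONV.Input layer) is an assumption.
import numpy

ls = MS.GradientDescent(lr=0.01)
cost = MC.NegativeLogLikelihood()
net = ConvModel(ls, cost)  # hypothetical: the class defining the __init__ above

images = numpy.random.randn(2, 3, 256, 256).astype('float32')
labels = numpy.random.randint(0, 2, size=2)
net.model.train(net.model["out"], inp=images, targets=labels)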
def Perceptron(ls, cost):
    i = ML.Input(28 * 28, name='inp')
    o = ML.SoftmaxClassifier(10, learningScenario=ls, costObject=cost, name="out", regularizations=[MR.L1(0), MR.L2(0)])
    return i > o
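# Hedged usage sketch for Perceptron(): the scenario/cost construction and the
# train() call follow the other examples in this file; the random 784-dimensional
# batch and float32 casting are assumptions.
import numpy

model = Perceptron(MS.GradientDescent(lr=0.01), MC.NegativeLogLikelihood())
x = numpy.random.randn(16, 28 * 28).astype('float32')  # dummy flattened images
y = numpy.random.randint(0, 10, size=16)               # dummy digit labels
model.train(model["out"], inp=x, targets=y)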
def MLP(ls, cost):
    i = ML.Input(28 * 28, name='inp')
    h = ML.Hidden(500, activation=MA.Tanh(), decorators=[MD.GlorotTanhInit()], regularizations=[MR.L1(0), MR.L2(0.0001)], name="hid")
    o = ML.SoftmaxClassifier(10, decorators=[MD.ZerosInit()], learningScenario=ls, costObject=cost, name="out", regularizations=[MR.L1(0), MR.L2(0.0001)])

    mlp = i > h > o
    return mlp
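# Hedged sketch: reading predictions out of the MLP. propagate() keyed by the
# output layer's name is used in the pickle test below; argmax decoding of the
# softmax output and the dummy batch are assumptions.
import numpy

mlp = MLP(MS.GradientDescent(lr=0.01), MC.NegativeLogLikelihood())
x = numpy.random.randn(4, 28 * 28).astype('float32')
probs = mlp.propagate("out", inp=x)["outputs"]
print probs.argmax(axis=1)  # predicted digit class for each example in the batch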
def trainMLP_xor(self):
    ls = MS.GradientDescent(lr=0.1)
    cost = MC.NegativeLogLikelihood()

    i = ML.Input(2, 'inp')
    h = ML.Hidden(10, activation=MA.ReLU(), regularizations=[MR.L1(0), MR.L2(0)], name="Hidden_0.500705866892")
    o = ML.SoftmaxClassifier(2, learningScenario=ls, costObject=cost, name="out")

    mlp = i > h > o

    self.xor_ins = numpy.array(self.xor_ins)
    self.xor_outs = numpy.array(self.xor_outs)
    # full-batch training: the whole XOR set at every step
    # ("epoch" avoids shadowing the input layer bound to "i" above)
    for epoch in xrange(1000):
        mlp.train(o, inp=self.xor_ins, targets=self.xor_outs)

    return mlp
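# The xor_ins/xor_outs fixtures are not shown in this excerpt. A minimal sketch of
# what the test's setUp() presumably defines: the XOR truth table with integer
# class targets (the exact fixture code is an assumption).
def setUp(self):
    self.xor_ins = [[0, 0], [0, 1], [1, 0], [1, 1]]
    self.xor_outs = [0, 1, 1, 0]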
def trainMLP_xor(self):
    ls = MS.GradientDescent(lr=0.1)
    cost = MC.NegativeLogLikelihood()

    i = ML.Input(2, 'inp')
    h = ML.Hidden(4, activation=MA.Tanh(), decorators=[MD.GlorotTanhInit()], regularizations=[MR.L1(0), MR.L2(0)])
    o = ML.SoftmaxClassifier(2, learningScenario=ls, costObject=cost, name="out")

    mlp = i > h > o

    self.xor_ins = numpy.array(self.xor_ins)
    self.xor_outs = numpy.array(self.xor_outs)
    # online training: one example per step, cycling through the set
    for epoch in xrange(1000):
        ii = epoch % len(self.xor_ins)
        mlp.train(o, inp=[self.xor_ins[ii]], targets=[self.xor_outs[ii]])

    return mlp
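# Hedged check: once trained, the model should recover XOR. The propagate() usage
# matches the pickle test below; argmax decoding is an assumption.
mlp = self.trainMLP_xor()
outs = mlp.propagate("out", inp=self.xor_ins)["outputs"]
print outs.argmax(axis=1)  # expected to approach [0 1 1 0]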
def test_save_load_pickle(self):
    import os
    import Mariana.network as MN

    ls = MS.GradientDescent(lr=0.1)
    cost = MC.NegativeLogLikelihood()

    i = ML.Input(2, 'inp')
    h = Hidden_layerRef(i, 10, activation=MA.ReLU(), regularizations=[MR.L1(0), MR.L2(0)], name="Hidden_0.500705866892")
    o = ML.SoftmaxClassifier(2, learningScenario=ls, costObject=cost, name="out")

    mlp = i > h > o

    self.xor_ins = numpy.array(self.xor_ins)
    self.xor_outs = numpy.array(self.xor_outs)
    for epoch in xrange(1000):
        mlp.train(o, inp=self.xor_ins, targets=self.xor_outs)

    # save, reload, and check that both models produce identical outputs
    mlp.save("test_save")
    mlp2 = MN.loadModel("test_save.mar.mdl.pkl")

    o = mlp.outputs.values()[0]
    v1 = mlp.propagate(o.name, inp=self.xor_ins)["outputs"]
    v2 = mlp2.propagate(o.name, inp=self.xor_ins)["outputs"]
    self.assertEqual(numpy.sum(v1), numpy.sum(v2))
    # the layer reference held by Hidden_layerRef must survive pickling
    self.assertEqual(mlp["Hidden_0.500705866892"].otherLayer.name, mlp2["Hidden_0.500705866892"].otherLayer.name)

    os.remove('test_save.mar.mdl.pkl')
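# Hedged sketch: reloading a saved model in a fresh session. loadModel() and the
# ".mar.mdl.pkl" suffix come from the test above; the input array is an assumption.
import numpy
import Mariana.network as MN

model = MN.loadModel("test_save.mar.mdl.pkl")
out = model.outputs.values()[0]
xor_ins = numpy.array([[0, 0], [0, 1], [1, 0], [1, 1]])
print model.propagate(out.name, inp=xor_ins)["outputs"]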
* automatically saves the model if the training halts because of an error or if the process is killed
* saves a log if the process dies unexpectedly
* training results and hyperparameter values are recorded to a file
* allows you to define custom stop criteria
* training info is printed at each epoch, including the best scores and the epochs at which they were achieved
"""

if __name__ == "__main__":
    # Let's define the network
    ls = MS.GradientDescent(lr=0.01)
    cost = MC.NegativeLogLikelihood()

    i = ML.Input(28 * 28, name='inp')
    h = ML.Hidden(500, activation=MA.Tanh(), decorators=[MD.GlorotTanhInit()], regularizations=[MR.L1(0), MR.L2(0.0001)], name="hid")
    o = ML.SoftmaxClassifier(10, learningScenario=ls, costObject=cost, name="out", regularizations=[MR.L1(0), MR.L2(0.0001)])

    mlp = i > h > o
    mlp.saveDOT("mnist_mlp")
    mlp.saveHTML("mnist_mlp")

    # And then map sets to the inputs and outputs of our network
    train_set, validation_set, test_set = load_mnist()

    trainData = MDM.Series(images=train_set[0], numbers=train_set[1])
    trainMaps = MDM.DatasetMapper()
    trainMaps.mapInput(i, trainData.images)
    trainMaps.mapOutput(o, trainData.numbers)

    testData = MDM.Series(images=test_set[0], numbers=test_set[1])
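    # Hedged continuation sketch: the test set would presumably be mapped the same
    # way as the train set above; these calls mirror the mapInput/mapOutput pattern
    # already shown and are illustrative, not the original file's exact continuation.
    testMaps = MDM.DatasetMapper()
    testMaps.mapInput(i, testData.images)
    testMaps.mapOutput(o, testData.numbers)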
    f = gzip.open(dataset, 'rb')
    data = cPickle.load(f)
    f.close()
    return data

if __name__ == "__main__":
    # Let's define the network
    ls = MS.DefaultScenario(lr=0.01, momentum=0)
    cost = MC.NegativeLogLikelihood()

    i = ML.Input(28 * 28, name='inp')
    h = ML.Hidden(500, activation=MA.Tanh(), decorators=[MD.GlorotTanhInit()], regularizations=[MR.L1(0), MR.L2(0.0001)], name="hid")
    o = ML.SoftmaxClassifier(10, learningScenario=ls, costObject=cost, name="out", regularizations=[MR.L1(0), MR.L2(0.0001)])

    mlp = i > h > o
    mlp.saveDOT("mnist_mlp")

    # And then map sets to the inputs and outputs of our network
    train_set, validation_set, test_set = load_mnist()

    trainMaps = MDM.DatasetMapper()
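    # Hedged continuation sketch: filling the freshly created DatasetMapper likely
    # follows the Series pattern of the previous example (assuming the same MDM
    # import is available here); this mirrors calls shown there and is illustrative.
    trainData = MDM.Series(images=train_set[0], numbers=train_set[1])
    trainMaps.mapInput(i, trainData.images)
    trainMaps.mapOutput(o, trainData.numbers)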