# Assumed imports for the aliases used throughout these tests (Mariana framework).
import numpy

import Mariana.activations as MA
import Mariana.costs as MC
import Mariana.initializations as MI
import Mariana.layers as ML
import Mariana.scenari as MS


def ae1(data):
    """Autoencoder built with a Regression output layer; this layer needs an explicit target."""
    miniBatchSize = 2

    ls = MS.GradientDescent(lr=0.1)
    cost = MC.MeanSquaredError()

    i = ML.Input(8, name='inp')
    h = ML.Hidden(
        3,
        activation=MA.ReLU(),
        initializations=[MI.SmallUniformWeights(), MI.ZeroBias()],
        name="hid"
    )
    o = ML.Regression(
        8,
        activation=MA.ReLU(),
        initializations=[MI.SmallUniformWeights(), MI.ZeroBias()],
        learningScenario=ls,
        costObject=cost,
        name="out"
    )

    ae = i > h > o

    for e in xrange(1000):
        for start in xrange(0, len(data), miniBatchSize):
            miniBatch = data[start:start + miniBatchSize]
            # the autoencoder reconstructs its input, so the mini-batch doubles as the target
            ae.train(o, inp=miniBatch, targets=miniBatch)

    return ae, o
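# Usage sketch for ae1 (the helper name example_ae1_usage is hypothetical, not part of the
# test suite): build the same one-hot toy data as test_ae below and read the reconstructions
# back with the same propagate call pattern used there.
def example_ae1_usage():
    data = [numpy.eye(8)[k] for k in xrange(8)]  # the 8 one-hot vectors of length 8
    ae, out = ae1(data)
    # propagate returns the network outputs for the given inputs
    return ae.propagate(out, inp=data)["outputs"]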
def test_ae(self):
    # toy dataset: the 8 one-hot vectors of length 8
    data = []
    for i in xrange(8):
        zeros = numpy.zeros(8)
        zeros[i] = 1
        data.append(zeros)

    ls = MS.GradientDescent(lr=0.1)
    cost = MC.MeanSquaredError()

    i = ML.Input(8, name='inp')
    h = ML.Hidden(3, activation=MA.ReLU(), name="hid")
    o = ML.Regression(
        8,
        activation=MA.ReLU(),
        learningScenario=ls,
        costObject=cost,
        name="out"
    )

    ae = i > h > o

    miniBatchSize = 2
    for e in xrange(2000):
        for start in xrange(0, len(data), miniBatchSize):
            miniBatch = data[start:start + miniBatchSize]
            ae.train(o, inp=miniBatch, targets=miniBatch)

    res = ae.propagate(o, inp=data)["outputs"]
    for i in xrange(len(res)):
        self.assertEqual(numpy.argmax(data[i]), numpy.argmax(res[i]))
def test_multiinputs(self):
    ls = MS.GradientDescent(lr=0.1)

    inpA = ML.Embedding(2, 2, 2, name="IA")
    inpB = ML.Input(2, name="IB")
    inpNexus = ML.Composite(name="InputNexus")
    h1 = ML.Hidden(
        32,
        activation=MA.ReLU(),
        decorators=[],
        regularizations=[],
        name="Fully-connected1"
    )
    o = ML.Regression(
        2,
        activation=MA.ReLU(),
        learningScenario=ls,
        costObject=MC.CrossEntropy(),
        decorators=[],
        regularizations=[],
        name="Out"
    )

    # both input layers feed the Composite, which merges them before the hidden layer
    inpA > inpNexus
    inpB > inpNexus
    m = inpNexus > h1 > o
    m.init()
def test_ae_reg(self):
    powerOf2 = 3
    nbUnits = 2 ** powerOf2

    # toy dataset: the nbUnits one-hot vectors of length nbUnits
    data = []
    for i in xrange(nbUnits):
        zeros = numpy.zeros(nbUnits)
        zeros[i] = 1
        data.append(zeros)

    ls = MS.GradientDescent(lr=0.1)
    cost = MC.MeanSquaredError()

    i = ML.Input(nbUnits, name='inp')
    h = ML.Hidden(
        powerOf2,
        activation=MA.ReLU(),
        initializations=[MI.Uniform('W', small=True), MI.SingleValue('b', 0)],
        name="hid"
    )
    o = ML.Regression(
        nbUnits,
        activation=MA.ReLU(),
        initializations=[MI.Uniform('W', small=True), MI.SingleValue('b', 0)],
        learningScenari=[ls],
        cost=cost,
        name="out"
    )

    ae = i > h > o
    ae.init()

    miniBatchSize = 1
    for e in xrange(2000):
        for start in xrange(0, len(data), miniBatchSize):
            miniBatch = data[start:start + miniBatchSize]
            # drive the output layer directly with a dict keyed by "layer.stream"
            ae["out"].train({
                "inp.inputs": miniBatch,
                "out.targets": miniBatch
            })["out.drive.train"]

    res = ae["out"].propagate["test"]({"inp.inputs": data})["out.propagate.test"]
    for i in xrange(len(res)):
        self.assertEqual(numpy.argmax(data[i]), numpy.argmax(res[i]))
def test_multiout_fctmixin(self):
    i = ML.Input(1, name='inp')
    o1 = ML.Autoencode(
        targetLayer=i,
        activation=MA.Tanh(),
        learningScenari=[MS.GradientDescent(lr=0.1)],
        cost=MC.MeanSquaredError(),
        name="out1"
    )
    o2 = ML.Regression(
        1,
        activation=MA.Tanh(),
        learningScenari=[MS.GradientDescent(lr=0.2)],
        cost=MC.MeanSquaredError(),
        name="out2"
    )

    i > o1
    ae = i > o2
    ae.init()

    # training out1 should lower out1's score and leave out2's untouched
    preOut1 = ae["out1"].test({"inp.inputs": [[1]]})["out1.drive.test"]
    preOut2 = ae["out2"].test({"inp.inputs": [[1]], "out2.targets": [[1]]})["out2.drive.test"]
    ae["out1"].train({"inp.inputs": [[1]]})["out1.drive.train"]
    self.assertTrue(preOut1 > ae["out1"].test({"inp.inputs": [[1]]})["out1.drive.test"])
    self.assertTrue(preOut2 == ae["out2"].test({"inp.inputs": [[1]], "out2.targets": [[1]]})["out2.drive.test"])

    # training out2 should lower out2's score and leave out1's untouched
    preOut1 = ae["out1"].test({"inp.inputs": [[1]]})["out1.drive.test"]
    preOut2 = ae["out2"].test({"inp.inputs": [[1]], "out2.targets": [[1]]})["out2.drive.test"]
    ae["out2"].train({"inp.inputs": [[1]], "out2.targets": [[1]]})["out2.drive.train"]
    self.assertTrue(preOut1 == ae["out1"].test({"inp.inputs": [[1]]})["out1.drive.test"])
    self.assertTrue(preOut2 > ae["out2"].test({"inp.inputs": [[1]], "out2.targets": [[1]]})["out2.drive.test"])

    # a mixed function built with '+' runs all three functions in one call and trains both outputs
    preOut1 = ae["out1"].test({"inp.inputs": [[1]]})["out1.drive.test"]
    preOut2 = ae["out2"].test({"inp.inputs": [[1]], "out2.targets": [[1]]})["out2.drive.test"]
    fct = ae["out1"].train + ae["out2"].train + ae["inp"].propagate["train"]
    ret = fct({"inp.inputs": [[1]], "out2.targets": [[1]]})
    self.assertEqual(len(ret), 3)
    self.assertTrue(preOut1 > ae["out1"].test({"inp.inputs": [[1]]})["out1.drive.test"])
    self.assertTrue(preOut2 > ae["out2"].test({"inp.inputs": [[1]], "out2.targets": [[1]]})["out2.drive.test"])