def _setCustomTheanoFunctions(self):
    super(Autoencode, self)._setCustomTheanoFunctions()
    self.train = MWRAP.TheanoFunction("train", self, [("score", self.cost)], {}, updates=self.updates, allow_input_downcast=True)
    self.test = MWRAP.TheanoFunction("test", self, [("score", self.testCost)], {}, allow_input_downcast=True)
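# Hedged usage sketch (an assumption, not from the source): the Autoencode
# output registers "train"/"test" with an empty extra-inputs dict, so no
# explicit targets appear to be needed at call time; the cost presumably comes
# back under the "score" key declared above. Hypothetical call, assuming a
# wired-up network `net` with an Autoencode output layer named "out":
#
#   res = net.outputs["out"].train(inputs=batch)  # hypothetical signature
#   print(res["score"])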
def setCustomTheanoFunctions(self):
    Output_ABC.setCustomTheanoFunctions(self)
    self.train = MWRAP.TheanoFunction("train", self, [("score", self.cost)], {}, updates=self.updates, allow_input_downcast=True)
    self.test = MWRAP.TheanoFunction("test", self, [("score", self.testCost)], {}, allow_input_downcast=True)
def setCustomTheanoFunctions(self):
    self.train = MWRAP.TheanoFunction("train", self, [self.cost], {}, updates=self.updates, allow_input_downcast=True)
    self.test = MWRAP.TheanoFunction("test", self, [self.test_cost], {}, updates=self.updates_lastOutputs, allow_input_downcast=True)
def _setTheanoFunctions(self): """Creates propagate/propagateTest theano function that returns the layer's outputs. propagateTest returns the testOutputs, some decorators might not be applied. This is called after decorating""" self.propagate = MWRAP.TheanoFunction("propagate", self, [("outputs", self.outputs)], allow_input_downcast=True) self.propagateTest = MWRAP.TheanoFunction( "propagateTest", self, [("testOutputs", self.testOutputs)], allow_input_downcast=True)
def _setTheanoFunctions(self): """Creates theano_train/theano_test/theano_propagate functions and calls setCustomTheanoFunctions to create user custom theano functions.""" self._backTrckDependencies() self._userInit() self.cost = self.costObject.costFct(self.targets, self.outputs) self.test_cost = self.costObject.costFct(self.targets, self.test_outputs) for l in self.dependencies.itervalues(): if l.__class__ is not Composite: try: for reg in l.regularizations: self.cost += reg except AttributeError: pass self.updates = self.learningScenario.getUpdates(self, self.cost) for l in self.dependencies.itervalues(): try: self.updates.extend(l.learningScenario.getUpdates( l, self.cost)) except AttributeError: self.updates.extend( self.learningScenario.getUpdates(l, self.cost)) self.updates_lastOutputs = [] for l in self.network.layers.itervalues(): if (l.last_outputs is not None) and (l.outputs is not None): self.updates.append((l.last_outputs, l.outputs)) self.updates_lastOutputs.append( (l.last_outputs, l.test_outputs)) self.train = MWRAP.TheanoFunction("train", self, [self.cost], {"targets": self.targets}, updates=self.updates, allow_input_downcast=True) self.test = MWRAP.TheanoFunction("test", self, [self.test_cost], {"targets": self.targets}, updates=self.updates_lastOutputs, allow_input_downcast=True) self.propagate = MWRAP.TheanoFunction("propagate", self, [self.test_outputs], updates=self.updates_lastOutputs, allow_input_downcast=True) self.setCustomTheanoFunctions()
def setCustomTheanoFunctions(self):
    """Adds train, test, model functions::

        * train: updates the parameters and returns the cost
        * test: does not update the parameters and returns the cost without the regularization terms
    """
    self._backTrckDependencies()
    self.cost = self.costObject.apply(self, self.targets, self.outputs, "training")
    self.testCost = self.costObject.apply(self, self.targets, self.outputs, "testing")

    for l in self.dependencies.itervalues():
        if l.__class__ is not Composite:
            try:
                for reg in l.regularizations:
                    self.cost += reg
            except AttributeError:
                pass

    self.updates = self.learningScenario.apply(self, self.cost)
    for l in self.dependencies.itervalues():
        if l.learningScenario is not None:
            updates = l.learningScenario.apply(l, self.cost)
        else:
            updates = self.learningScenario.apply(l, self.cost)
        self.updates.extend(updates)

    self.train = MWRAP.TheanoFunction("train", self, [("score", self.cost)], {"targets": self.targets}, updates=self.updates, allow_input_downcast=True)
    self.test = MWRAP.TheanoFunction("test", self, [("score", self.testCost)], {"targets": self.targets}, allow_input_downcast=True)

    layers = [self]
    layers.extend(self.dependencies.values())
    for l in layers:
        try:
            gradOuts = []
            upsOuts = []
            for k, v in l.getParameterDict().iteritems():
                if l.learningScenario is not None:
                    gradOuts.append((k, l.learningScenario.gradients[v]))
                    upsOuts.append((k, l.learningScenario.updates[v]))
                else:
                    gradOuts.append((k, self.learningScenario.gradients[v]))
                    upsOuts.append((k, self.learningScenario.updates[v]))
            setattr(self, "getGradients_%s" % l.name, MWRAP.TheanoFunction("getGradients", self, gradOuts, {"targets": self.targets}, allow_input_downcast=True, on_unused_input='ignore'))
            setattr(self, "getUpdates_%s" % l.name, MWRAP.TheanoFunction("getUpdates", self, upsOuts, {"targets": self.targets}, allow_input_downcast=True, on_unused_input='ignore'))
        except Exception:
            if MSET.VERBOSE:
                print("Warning! Unable to set up theano functions for retrieving updates and gradients for layer '%s'. Perhaps the current learning scenario does not keep them stored." % l.name)
def _setCustomTheanoFunctions(self): """Adds train, test, model functions:: * train: update parameters and return cost * test: do not update parameters and return cost without adding regularizations """ if self.cost is None or self.testCost is None: self._setCosts() # theano.printing.debugprint(self.cost) self.train = MWRAP.TheanoFunction("train", self, [("score", self.cost)], {"targets": self.targets}, updates=self.updates, allow_input_downcast=True) self.test = MWRAP.TheanoFunction("test", self, [("score", self.testCost)], {"targets": self.targets}, allow_input_downcast=True)
def setCustomTheanoFunctions(self): """Adds train, test, model functions:: * train: update parameters and return cost * test: do not update parameters and return cost without adding regularizations """ self._backTrckDependencies() self.cost = self.costObject.apply(self, self.targets, self.outputs, "training") self.testCost = self.costObject.apply(self, self.targets, self.outputs, "testing") for l in self.dependencies.itervalues(): if l.__class__ is not Composite: try: for reg in l.regularizations: self.cost += reg except AttributeError: pass self.updates = self.learningScenario.apply(self, self.cost) for l in self.dependencies.itervalues(): if l.learningScenario is not None: updates = l.learningScenario.apply(l, self.cost) else: updates = self.learningScenario.apply(l, self.cost) self.updates.extend(updates) self.train = MWRAP.TheanoFunction("train", self, [("score", self.cost)], {"targets": self.targets}, updates=self.updates, allow_input_downcast=True) self.test = MWRAP.TheanoFunction("test", self, [("score", self.testCost)], {"targets": self.targets}, allow_input_downcast=True)
def _setGetGradientsUpdatesFunctions(self):
    """Defines functions for retrieving gradients/updates"""
    layers = [self]
    layers.extend(self.dependencies.values())
    for l in layers:
        try:
            gradOuts = []
            upsOuts = []
            for k, v in l.getParameterDict().iteritems():
                if l.learningScenario is not None:
                    gradOuts.append((k, l.learningScenario.gradients[v]))
                    upsOuts.append((k, l.learningScenario.updates[v]))
                else:
                    gradOuts.append((k, self.learningScenario.gradients[v]))
                    upsOuts.append((k, self.learningScenario.updates[v]))
            setattr(self, "getGradients_%s" % l.name, MWRAP.TheanoFunction("getGradients", self, gradOuts, {"targets": self.targets}, allow_input_downcast=True, on_unused_input='ignore'))
            setattr(self, "getUpdates_%s" % l.name, MWRAP.TheanoFunction("getUpdates", self, upsOuts, {"targets": self.targets}, allow_input_downcast=True, on_unused_input='ignore'))
        except Exception:
            msg = "Warning! Unable to set up theano functions for retrieving updates and gradients for layer '%s'. Perhaps the current learning scenario does not keep them stored." % l.name
            self.network.logLayerEvent(self, msg, {})
            if MSET.VERBOSE:
                print(msg)
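# Hedged usage sketch (an assumption): the setattr calls above generate one pair
# of functions per layer, e.g. `getGradients_hidden1`/`getUpdates_hidden1` for a
# layer named "hidden1", each returning values keyed by parameter name:
#
#   grads = out.getGradients_hidden1(inputs=x, targets=y)  # hypothetical call
#   print(grads["W"])  # gradient the learning scenario stored for parameter W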
def setCustomTheanoFunctions(self): """defines:: * classify: return the argmax of the outputs applying all the decorators. * predict: return the argmax of the test outputs (some decorators may not be applied). * classificationAccuracy: returns the accuracy (between [0, 1]) of the model, computed on outputs. * predictionAccuracy: returns the accuracy (between [0, 1]) of the model, computed on test outputs. """ Output_ABC.setCustomTheanoFunctions(self) clas = tt.argmax(self.outputs, axis=1) pred = tt.argmax(self.outputs, axis=1) self.classify = MWRAP.TheanoFunction("classify", self, [("class", clas)], allow_input_downcast=True) self.predict = MWRAP.TheanoFunction("predict", self, [("class", pred)], allow_input_downcast=True) clasAcc = tt.mean(tt.eq(self.targets, clas)) predAcc = tt.mean(tt.eq(self.targets, pred)) self.classificationAccuracy = MWRAP.TheanoFunction( "accuracy", self, [("accuracy", clasAcc)], {"targets": self.targets}, allow_input_downcast=True) self.predictionAccuracy = MWRAP.TheanoFunction( "accuracy", self, [("accuracy", predAcc)], {"targets": self.targets}, allow_input_downcast=True) self.trainAndAccuracy = MWRAP.TheanoFunction("accuracy", self, [("score", self.cost), ("accuracy", clasAcc)], {"targets": self.targets}, updates=self.updates, allow_input_downcast=True) self.testAndAccuracy = MWRAP.TheanoFunction("accuracy", self, [("score", self.testCost), ("accuracy", predAcc)], {"targets": self.targets}, allow_input_downcast=True)
def setCustomTheanoFunctions(self): """defined theano_classify, that returns the argmax of the output""" self.classify = MWRAP.TheanoFunction("classify", self, [tt.argmax(self.test_outputs)], updates=self.updates_lastOutputs)