def learning_updates(self):
    """
    Return updates for training.
    """
    params = self.network.parameters
    gradients = T.grad(self.cost, params)
    return optimize_updates(params, gradients, self.config)
def optimization_updates(self, params, gradients):
    """
    Return updates from optimization.
    """
    updates, free_parameters = optimize_updates(params, gradients, self.config)
    self.network.free_parameters.extend(free_parameters)
    logging.info("Added %d free parameters for optimization" % len(free_parameters))
    return updates
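# Hedged sketch (not the actual optimize_updates implementation, whose internals are
# not shown here) illustrating the contract used above: the helper returns
# (updates, free_parameters), where `updates` is a list of (shared_variable, new_value)
# pairs suitable for theano.function(updates=...), and `free_parameters` holds any
# auxiliary shared state the optimizer allocates (e.g. momentum buffers). Plain SGD
# allocates none. The name `sgd_updates` and the learning_rate default are assumptions
# made for illustration only.
def sgd_updates(params, gradients, learning_rate=0.01):
    # One (param, param - lr * grad) pair per trainable parameter.
    updates = [(p, p - learning_rate * g) for p, g in zip(params, gradients)]
    # SGD keeps no extra shared variables, so there are no free parameters to report.
    free_parameters = []
    return updates, free_parameters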
def learning_updates(self):
    """
    Return updates for training.
    """
    params = self.network.parameters
    gradients = T.grad(self.cost, params)
    updates, free_parameters = optimize_updates(params, gradients, self.config)
    # Track the optimizer's auxiliary shared variables on the network.
    self.network.free_parameters.extend(free_parameters)
    logging.info("Added %d free parameters for optimization" % len(free_parameters))
    return updates
def learning_updates(self):
    """
    Return updates for training.
    """
    params = self.network.parameters
    # Freeze parameters: drop them before gradients are computed, so they never get updates.
    if self.config.freeze_params:
        logging.info("freeze parameters: %s" % ", ".join(map(str, self.config.freeze_params)))
        params = [p for p in params if p not in self.config.freeze_params]
    gradients = T.grad(self.cost, params)
    updates, free_parameters = optimize_updates(params, gradients, self.config)
    self.network.free_parameters.extend(free_parameters)
    logging.info("Added %d free parameters for optimization" % len(free_parameters))
    return updates
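# Hypothetical illustration of the freezing step above. The variable names, shapes,
# and the standalone setup are assumptions; only the filtering behaviour (parameters
# listed in config.freeze_params are excluded before T.grad is called) comes from the
# method itself.
import numpy as np
import theano

W = theano.shared(np.zeros((3, 3), dtype="float32"), name="W")
b = theano.shared(np.zeros(3, dtype="float32"), name="b")
params = [W, b]
freeze_params = [b]  # e.g. keep the bias fixed while fine-tuning W
trainable = [p for p in params if p not in freeze_params]
assert trainable == [W]  # only W would receive gradients and optimizer updates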