def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
    super(AdamOptimizer, self).__init__()
    self.updater = updaters.AdamUpdater(learning_rate, beta1, beta2, epsilon)
def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
             use_locking=False, name='Adam'):
    super(AdamOptimizer, self).__init__(use_locking, name)
    self.updater = updaters.AdamUpdater(learning_rate, beta1, beta2, epsilon)
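# Hypothetical usage sketch (not taken from the snippets above): this assumes
# the class follows the tf.train.Optimizer-style interface it subclasses,
# i.e. that a `minimize` method exists and that `loss` is a scalar tensor
# built elsewhere in the graph.
#
# optimizer = AdamOptimizer(learning_rate=0.001, beta1=0.9, beta2=0.999)
# train_op = optimizer.minimize(loss)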
def __init__(self, prototxt):
    super(AdamSolver, self).__init__(prototxt=prototxt)
    self._updater = updaters.AdamUpdater(**self._update_param)
    # Generates update targets
    for layer, blobs in self._net.params.items():
        self._lr_blobs.extend(blobs)
    for idx, blob in enumerate(self._lr_blobs):
        if self._net._lr_mults[idx] > 0:
            if blob.diff is None:
                continue
            self._updater.append((blob.data, blob.diff),
                                 self._net._lr_mults[idx],
                                 self._net._decay_mults[idx])
    self.train = self._net.function
    self.tests = [test_net.function for test_net in self._test_nets]
    self.update = function(updater=self._updater)
def __init__(
    self,
    learning_rate,
    decay=0.9,
    momentum=0.0,
    epsilon=1e-10,
    use_locking=False,
    centered=False,
    name='RMSProp',
):
    super(RMSPropOptimizer, self).__init__(use_locking, name)
    if momentum > 0.:
        # With momentum enabled, delegate to the Adam updater:
        # momentum is passed as beta1 and decay as beta2.
        self.updater = _updaters.AdamUpdater(
            learning_rate, momentum, decay, epsilon)
    else:
        self.updater = _updaters.RMSPropUpdater(
            learning_rate, decay, epsilon)
    self._set_dynamic_lr(learning_rate)
def __init__(self, prototxt):
    super(AdamSolver, self).__init__(prototxt=prototxt)
    self._optimizer = updaters.AdamUpdater(**self._update_param)
    self.BuildOptimizer()
def __init__(self, proto_txt):
    super(AdamSolver, self).__init__(proto_txt=proto_txt)
    self.optimizer = _updaters.AdamUpdater(**self._optimizer_arguments)
    self.BuildOptimizer()
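# Hypothetical usage sketch (names assumed, not from the snippets above):
# a Caffe-style solver like this is typically constructed from a solver
# prototxt and then stepped through training; the `step` call and the
# file name below are assumptions for illustration only.
#
# solver = AdamSolver('adam_solver.prototxt')
# solver.step(100)  # run 100 training iterations (assumed API)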