Example #1
 def __init__(self,
              params,
              initial_accum=0.1,
              learning_rate=0.001,
              lr_power=-0.5,
              l1=0.0,
              l2=0.0,
              use_locking=False,
              loss_scale=1.0,
              weight_decay=0.0):
     super(FTRL, self).__init__(learning_rate,
                                params,
                                loss_scale=loss_scale)
     if self.is_group:
         raise RuntimeError(
             f"The {self.cls_name} optimizer cannot support group setting.")
     _check_param(initial_accum, lr_power, l1, l2, use_locking,
                  weight_decay, self.cls_name)
     self.moments = self.parameters.clone(prefix="moments",
                                          init=initial_accum)
     self.linear = self.parameters.clone(prefix="linear", init='zeros')
     self.l1 = l1
     self.l2 = l2
     self.lr_power = lr_power
     self.weight_decay = weight_decay
     self.decay_tf = tuple((lambda: True)() for x in self.parameters)
     self.hyper_map = C.HyperMap()
     self.opt = P.ApplyFtrl(use_locking=use_locking)
     self.sparse_opt = P.SparseApplyFtrl(learning_rate,
                                         l1,
                                         l2,
                                         lr_power,
                                         use_locking=use_locking)
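For context, a minimal usage sketch of this constructor follows (not part of the original example). It assumes the public mindspore.nn.FTRL interface; the Dense network, loss function, and Model wrapper are illustrative placeholders.

import mindspore.nn as nn
from mindspore import Model

# Any Cell with trainable parameters would do; Dense is just a stand-in.
net = nn.Dense(16, 4)
loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')

# Constructing the optimizer runs the __init__ shown above: it clones the
# parameters into the "moments" and "linear" state tensors and builds the
# ApplyFtrl / SparseApplyFtrl primitives.
optim = nn.FTRL(params=net.trainable_params(), learning_rate=0.001,
                l1=1e-4, l2=1e-4)

model = Model(net, loss_fn=loss, optimizer=optim)
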
Example #2
 def __init__(self, params, initial_accum=0.1, learning_rate=0.001, lr_power=-0.5, l1=0.0, l2=0.0,
              use_locking=False, loss_scale=1.0, weight_decay=0.0):
     super(FTRL, self).__init__(learning_rate, params, weight_decay, loss_scale=loss_scale)
     if self.dynamic_lr or self.is_group_lr:
         raise ValueError('Dynamic learning rate or group learning rate is currently not supported.')
     _check_param(initial_accum, lr_power, l1, l2, use_locking, self.cls_name)
     self.moments = self.parameters.clone(prefix="moments", init=initial_accum)
     self.linear = self.parameters.clone(prefix="linear", init='zeros')
     self.l1 = l1
     self.l2 = l2
     self.lr = learning_rate
     self.lr_power = lr_power
     if not self.is_group:
         self.decay_flags = tuple((lambda: True)() for x in self.parameters)
     self.hyper_map = C.HyperMap()
     self.opt = P.ApplyFtrl(use_locking=use_locking)
     self.use_locking = use_locking
     self.sparse_opt = P.SparseApplyFtrl(learning_rate, l1, l2, lr_power, use_locking=use_locking)
     self._ps_pull = P.Pull()
     self._ps_push = P.Push("Ftrl", [0, 1, 2])
     self._ps_push.add_prim_attr("init_accum", initial_accum)
     self._ps_push.add_prim_attr("lr", learning_rate)
     self._ps_push.add_prim_attr("l1", l1)
     self._ps_push.add_prim_attr("l2", l2)
     self._ps_push.add_prim_attr("lr_power", lr_power)
Example #3
 def __init__(self, var, accum, linear):
     super(SparseApplyFtrlNet, self).__init__()
     self.sparse_apply_ftrl = P.SparseApplyFtrl(lr=0.01,
                                                l1=0.0,
                                                l2=0.0,
                                                lr_power=-0.5)
     self.var = Parameter(var, name="var")
     self.accum = Parameter(accum, name="accum")
     self.linear = Parameter(linear, name="linear")
Example #4
 def __init__(self):
     super(Net, self).__init__()
     self.sparse_apply_ftrl = P.SparseApplyFtrl(lr=0.001,
                                                l1=0.0,
                                                l2=0.0,
                                                lr_power=-0.5)
     self.var = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)),
                          name="var")
     self.accum = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)),
                            name="accum")
     self.linear = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)),
                             name="linear")
Example #5
    def target(self, value):
        """If the input value is set to "CPU", the parameters will be updated on the host using the Fused
           optimizer operation."""
        if not isinstance(value, str):
            raise TypeError("The value must be str type, but got value type is {}".format(type(value)))

        if value not in ('CPU', 'Ascend', 'GPU'):
            raise ValueError("The value must be 'CPU', 'Ascend' or 'GPU', but got value {}".format(value))

        if value == 'CPU':
            self.sparse_opt = P.FusedSparseFtrl(self.lr, self.l1, self.l2, self.lr_power, self.use_locking)
            self.sparse_opt.add_prim_attr("primitive_target", "CPU")
        else:
            self.sparse_opt = P.SparseApplyFtrl(self.lr, self.l1, self.l2, self.lr_power, self.use_locking)

        self._target = value
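
A short usage sketch of this setter follows (assumed usage; the Dense network is a placeholder). Assigning to the optimizer's target property selects where the sparse update runs.

import mindspore.nn as nn

net = nn.Dense(8, 2)
optim = nn.FTRL(params=net.trainable_params(), learning_rate=0.1)

# Triggers the setter above: 'CPU' swaps in FusedSparseFtrl so the sparse
# update runs on the host; 'Ascend' or 'GPU' keeps SparseApplyFtrl.
optim.target = "CPU"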