Example #1
 def __init__(self, var, accum):
     super().__init__()
     self.depend = P.Depend()
     self.sparse_apply_proximal_adagrad = P.SparseApplyProximalAdagrad()
     self.var = Parameter(var, name="var")
     self.accum = Parameter(accum, name="accum")
     self.const = Tensor(9999, mstype.float32)
Example #2
 def __init__(self, var, accum, lr, l1, l2):
     super(Net, self).__init__()
     self.sparse_apply_proximal_adagrad = P.SparseApplyProximalAdagrad()
     self.var = Parameter(var, name="var")
     self.accum = Parameter(accum, name="accum")
     self.lr = lr
     self.l1 = l1
     self.l2 = l2
Example #3
 def __init__(self):
     super(Net, self).__init__()
     self.sparse_apply_proximal_adagrad = P.SparseApplyProximalAdagrad()
     self.var = Parameter(Tensor(np.random.rand(7800, 80).astype(np.float32)), name="var")
     self.accum = Parameter(Tensor(np.random.rand(7800, 80).astype(np.float32)), name="accum")
     self.lr = 0.01
     self.l1 = 0.0
     self.l2 = 0.0
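
This excerpt stops at the constructor; a minimal sketch of the matching construct method, assuming the operator's input order is (var, accum, lr, l1, l2, grad, indices), might look like:

 def construct(self, grad, indices):
     # Feed the stored parameters and hyperparameters, together with the runtime
     # gradient and indices, into the sparse proximal Adagrad update.
     return self.sparse_apply_proximal_adagrad(self.var, self.accum, self.lr,
                                               self.l1, self.l2, grad, indices)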
Example #4
 def __init__(self, params, accum=0.1, learning_rate=0.001, l1=0.0, l2=0.0,
              use_locking=False, loss_scale=1.0, weight_decay=0.0):
     super(ProximalAdagrad, self).__init__(learning_rate, params, weight_decay, loss_scale)
     _check_param_value(accum, l1, l2, use_locking, self.cls_name)
     self.accum = self.parameters.clone(prefix="accum", init=accum)
     self.l1 = Tensor(l1, mstype.float32)
     self.l2 = Tensor(l2, mstype.float32)
     self.hyper_map = C.HyperMap()
     self.use_locking = use_locking
     self.opt = P.ApplyProximalAdagrad(use_locking=use_locking)
     self.sparse_opt = P.SparseApplyProximalAdagrad(use_locking=use_locking)
Example #5
    def target(self, value):
        """If the input value is set to "CPU", the parameters will be updated on the host using the Fused
           optimizer operation."""
        if not isinstance(value, str):
            raise TypeError("The value must be str type, but got value type is {}".format(type(value)))

        if value not in ('CPU', 'Ascend', 'GPU'):
            raise ValueError("The value must be 'CPU', 'Ascend' or 'GPU', but got value {}".format(value))

        if value == 'CPU':
            self.sparse_opt = P.FusedSparseProximalAdagrad(self.use_locking).add_prim_attr("primitive_target", "CPU")
        else:
            self.sparse_opt = P.SparseApplyProximalAdagrad(self.use_locking)

        self._target = value
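
The setter above is the target property of the ProximalAdagrad optimizer shown in Examples #4 and #6; a hedged usage sketch (the trainable_params() call and learning rate below are assumptions, not part of the original excerpt) is:

 opt = ProximalAdagrad(net.trainable_params(), learning_rate=0.01)
 opt.target = "CPU"  # sparse updates now run on the host via FusedSparseProximalAdagrad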
Example #6
 def __init__(self,
              params,
              accum=0.1,
              learning_rate=0.001,
              l1=0.0,
              l2=0.0,
              use_locking=False,
              loss_scale=1.0,
              weight_decay=0.0):
     super(ProximalAdagrad, self).__init__(learning_rate, params,
                                           weight_decay, loss_scale)
     if self.is_group:
         raise RuntimeError(
             f"The {self.cls_name} optimizer cannot support group setting.")
     _check_param_value(accum, l1, l2, use_locking, self.cls_name)
     self.accum = self.parameters.clone(prefix="accum", init=accum)
     self.l1 = Tensor(l1, mstype.float32)
     self.l2 = Tensor(l2, mstype.float32)
     self.weight_decay = weight_decay
     self.hyper_map = C.HyperMap()
     self.opt = P.ApplyProximalAdagrad(use_locking=use_locking)
     self.sparse_opt = P.SparseApplyProximalAdagrad(use_locking=use_locking)
Example #7
 def __init__(self, var, accum):
     super(SparseApplyProximalAdagradNet, self).__init__()
     self.sparse_apply_proximal_adagrad = P.SparseApplyProximalAdagrad()
     self.var = Parameter(var, name="var")
     self.accum = Parameter(accum, name="accum")
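
All of the excerpts above show only __init__; for context, here is a self-contained sketch of how such a network could be built and invoked. The imports, shapes, and construct signature below are assumptions for illustration, not part of the original snippets:

 import numpy as np
 import mindspore.nn as nn
 from mindspore import Tensor, Parameter
 from mindspore.common import dtype as mstype
 from mindspore.ops import operations as P

 class SparseApplyProximalAdagradNet(nn.Cell):
     def __init__(self, var, accum):
         super(SparseApplyProximalAdagradNet, self).__init__()
         self.sparse_apply_proximal_adagrad = P.SparseApplyProximalAdagrad()
         self.var = Parameter(var, name="var")
         self.accum = Parameter(accum, name="accum")

     def construct(self, lr, l1, l2, grad, indices):
         # Updates the rows of var/accum selected by indices and returns both.
         return self.sparse_apply_proximal_adagrad(self.var, self.accum, lr, l1, l2,
                                                   grad, indices)

 # Illustrative call: shapes and values are arbitrary.
 var = Tensor(np.random.rand(3, 3).astype(np.float32))
 accum = Tensor(np.random.rand(3, 3).astype(np.float32))
 net = SparseApplyProximalAdagradNet(var, accum)
 grad = Tensor(np.random.rand(2, 3).astype(np.float32))
 indices = Tensor(np.array([0, 2], dtype=np.int32))
 out = net(Tensor(0.01, mstype.float32), Tensor(0.0, mstype.float32),
           Tensor(0.0, mstype.float32), grad, indices)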