Example #1
def test_grad_refactor_13():
    class Net(nn.Cell):
        """ Net definition """
        def __init__(self):
            super(Net, self).__init__()
            self.z = Parameter(Tensor(np.ones([2]).astype(np.float32)),
                               name='z')

        def construct(self, x, y):
            return x * self.z * y

    net = Net()
    weights = ParameterTuple(net.trainable_params())
    C.grad_by_list(net, weights)(Tensor(np.ones([2]).astype(np.float32)),
                                 Tensor(np.zeros([2]).astype(np.float32)))
Example #2
def TrainWrap(net, loss_fn=None, optimizer=None, weights=None):
    """
    TrainWrap
    """
    if loss_fn is None:
        loss_fn = nn.SoftmaxCrossEntropyWithLogits(reduction='mean')
    loss_net = nn.WithLossCell(net, loss_fn)
    loss_net.set_train()
    if weights is None:
        weights = ParameterTuple(net.trainable_params())
    if optimizer is None:
        optimizer = nn.Adam(weights, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8,
                            use_locking=False, use_nesterov=False, weight_decay=0.0, loss_scale=1.0)
    train_net = nn.TrainOneStepCell(loss_net, optimizer)
    return train_net
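
A minimal usage sketch for TrainWrap, assuming `net` is any nn.Cell classifier with ten
output classes; the input shape and the one-hot labels below are illustrative
assumptions, not part of the original example:

train_net = TrainWrap(net)
data = Tensor(np.random.randn(32, 1, 32, 32).astype(np.float32))
# the default SoftmaxCrossEntropyWithLogits expects one-hot float labels
label = Tensor(np.zeros((32, 10)).astype(np.float32))
loss = train_net(data, label)  # one forward/backward/update step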
Example #3
def test_load_grad():
    class LoadNet(nn.Cell):
        def __init__(self):
            super().__init__()
            self.z = Parameter(Tensor(np.array([1.0], np.float32)), name='z')

        def construct(self, x, y):
            x = x * y * self.z
            return x

    x = Tensor(np.array([2.0], np.float32))
    y = Tensor(np.array([3.0], np.float32))
    load_net = LoadNet()
    grad_net = grad_all_list(load_net,
                             ParameterTuple(load_net.trainable_params()))
    print(grad_net(x, y))
Example #4
def test_switch_layer_with_single_prim():
    class SwitchLayerCell(nn.Cell):
        def __init__(self):
            super(SwitchLayerCell, self).__init__()
            self.layers = (nn.ReLU(), nn.ReLU())
            self.z3 = Parameter(
                Tensor(np.full([128, 96], 0.6, dtype=np.float32)), name='z3')

        def construct(self, index, x):
            ret = self.layers[index](x) * self.z3
            return ret

    index = Tensor(0, dtype=mstype.int32)
    net = SwitchLayerCell()
    net(index, Tensor(np.full([128, 96], 0.6, dtype=np.float32)))
    C.grad_by_list(net, ParameterTuple(net.trainable_params()))(index,
                                                                Tensor(np.full([128, 96], 0.6, dtype=np.float32)))
    C.grad_all(net)(index, Tensor(np.full([128, 96], 0.6, dtype=np.float32)))
Example #5
    def __init__(self, batch_size, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout):
        super(LstmNet, self).__init__()

        num_directions = 1
        if bidirectional:
            num_directions = 2

        self.lstm = StackLSTM(input_size, hidden_size, num_layers, has_bias, bidirectional, dropout)
        input_np = np.array([[[0.6755, -1.6607, 0.1367], [0.4276, -0.7850, -0.3758]],
                             [[-0.6424, -0.6095, 0.6639], [0.7918, 0.4147, -0.5089]],
                             [[-1.5612, 0.0120, -0.7289], [-0.6656, -0.6626, -0.5883]],
                             [[-0.9667, -0.6296, -0.7310], [0.1026, -0.6821, -0.4387]],
                             [[-0.4710, 0.6558, -0.3144], [-0.8449, -0.2184, -0.1806]]
                             ]).astype(np.float32)
        self.x = Tensor(input_np)

        self.h = Tensor(np.array([0., 0., 0., 0.]).reshape((num_directions, batch_size, hidden_size)).astype(
            np.float32))

        self.c = Tensor(np.array([0., 0., 0., 0.]).reshape((num_directions, batch_size, hidden_size)).astype(
            np.float32))
        self.h = tuple((self.h,))
        self.c = tuple((self.c,))
        wih = np.array([[3.4021e-01, -4.6622e-01, 4.5117e-01],
                        [-6.4257e-02, -2.4807e-01, 1.3550e-02],  # i
                        [-3.2140e-01, 5.5578e-01, 6.3589e-01],
                        [1.6547e-01, -7.9030e-02, -2.0045e-01],
                        [-6.9863e-01, 5.9773e-01, -3.9062e-01],
                        [-3.0253e-01, -1.9464e-01, 7.0591e-01],
                        [-4.0835e-01, 3.6751e-01, 4.7989e-01],
                        [-5.6894e-01, -5.0359e-01, 4.7491e-01]]).astype(np.float32).reshape([1, -1])
        whh = np.array([[-0.4820, -0.2350],
                        [-0.1195, 0.0519],
                        [0.2162, -0.1178],
                        [0.6237, 0.0711],
                        [0.4511, -0.3961],
                        [-0.5962, 0.0906],
                        [0.1867, -0.1225],
                        [0.1831, 0.0850]]).astype(np.float32).reshape([1, -1])
        bih = np.zeros((1, 8)).astype(np.float32)
        w_np = np.concatenate((wih, whh, bih), axis=1).reshape([-1, 1, 1])
        self.w = Parameter(initializer(Tensor(w_np), w_np.shape), name='w')
        self.lstm.weight = ParameterTuple((self.w,))
Example #6
    def __init__(self, network, optimizer, scale_update_cell=None):

        super(BertFinetuneCell, self).__init__(auto_prefix=False)
        self.network = network
        self.weights = ParameterTuple(network.trainable_params())
        self.optimizer = optimizer
        self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True)
        self.reducer_flag = False
        self.allreduce = P.AllReduce()
        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
        if self.parallel_mode in [
                ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL
        ]:
            self.reducer_flag = True
        self.grad_reducer = None
        if self.reducer_flag:
            mean = context.get_auto_parallel_context("mirror_mean")
            degree = get_group_size()
            self.grad_reducer = DistributedGradReducer(optimizer.parameters,
                                                       mean, degree)
        self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
        self.cast = P.Cast()
        self.gpu_target = False
        if context.get_context("device_target") == "GPU":
            self.gpu_target = True
            self.float_status = P.FloatStatus()
            self.addn = P.AddN()
            self.reshape = P.Reshape()
        else:
            self.alloc_status = P.NPUAllocFloatStatus()
            self.get_status = P.NPUGetFloatStatus()
            self.clear_before_grad = P.NPUClearFloatStatus()
        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.depend_parameter_use = P.ControlDepend(depend_mode=1)
        self.base = Tensor(1, mstype.float32)
        self.less_equal = P.LessEqual()
        self.hyper_map = C.HyperMap()
        self.loss_scale = None
        self.loss_scaling_manager = scale_update_cell
        if scale_update_cell:
            self.loss_scale = Parameter(Tensor(
                scale_update_cell.get_loss_scale(), dtype=mstype.float32),
                                        name="loss_scale")
Example #7
    def __init__(self, network, optimizer, sens=1.0):
        super(BertTrainOneStepCell, self).__init__(auto_prefix=False)
        self.network = network
        self.weights = ParameterTuple(network.trainable_params())
        self.optimizer = optimizer
        self.grad = C.GradOperation(get_by_list=True, sens_param=True)
        self.sens = sens
        self.reducer_flag = False
        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
        if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
            self.reducer_flag = True
        self.grad_reducer = None
        if self.reducer_flag:
            mean = context.get_auto_parallel_context("gradients_mean")
            degree = get_group_size()
            self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, degree)

        self.cast = P.Cast()
        self.hyper_map = C.HyperMap()
Example #8
    def __init__(self, network, optimizer, scale_update_cell=None):
        super(TransformerTrainOneStepWithLossScaleCell,
              self).__init__(auto_prefix=False)
        self.network = network
        self.network.set_grad()
        self.network.add_flags(defer_inline=True)
        self.weights = ParameterTuple(network.trainable_params())
        self.optimizer = optimizer
        self.grad = C.GradOperation(get_by_list=True, sens_param=True)
        self.reducer_flag = False
        self.allreduce = P.AllReduce()

        self.parallel_mode = _get_parallel_mode()
        if self.parallel_mode not in ParallelMode.MODE_LIST:
            raise ValueError("Parallel mode does not support: ", parallel_mode)
        if self.parallel_mode in [
                ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL
        ]:
            self.reducer_flag = True
        self.grad_reducer = None
        if self.reducer_flag:
            mean = _get_gradients_mean()
            degree = _get_device_num()
            self.grad_reducer = DistributedGradReducer(optimizer.parameters,
                                                       mean, degree)
        self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
        self.clip_gradients = ClipGradients()
        self.cast = P.Cast()
        self.alloc_status = P.NPUAllocFloatStatus()
        self.get_status = P.NPUGetFloatStatus()
        self.clear_before_grad = P.NPUClearFloatStatus()
        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.depend_parameter_use = P.ControlDepend(depend_mode=1)
        self.base = Tensor(1, mstype.float32)
        self.less_equal = P.LessEqual()
        self.hyper_map = C.HyperMap()

        self.loss_scale = None
        self.loss_scaling_manager = scale_update_cell
        if scale_update_cell:
            self.loss_scale = Parameter(Tensor(
                scale_update_cell.get_loss_scale(), dtype=mstype.float32),
                                        name="loss_scale")
Example #9
    def __init__(self, learning_rate, parameters):
        super(Optimizer, self).__init__()
        if isinstance(learning_rate, float):
            validator.check_number_range("learning rate", learning_rate, 0.0, float("inf"), Rel.INC_LEFT)
        elif isinstance(learning_rate, Iterable):
            learning_rate = Tensor(np.array(list(learning_rate)).astype(np.float32))
        elif isinstance(learning_rate, Tensor):
            if learning_rate.dim() > 1:
                raise ValueError("Learning rate should be a 0 or 1 dim `Tensor`,"
                                 f"but got {learning_rate.dim()}.")
        else:
            raise TypeError("Learning rate should be float, Tensor or Iterable.")

        if isinstance(learning_rate, Tensor) and learning_rate.dim() == 1 and learning_rate.size() < 2:
            logger.warning("If want to use the dynamic learning rate, please make sure that "
                           "the number of elements in the list, tuple or tensor passed is greater than 1.")
        self.learning_rate = Parameter(learning_rate, name="learning_rate")
        self.parameters = ParameterTuple(parameters)
        if not self.parameters:
            raise ValueError("optimizer got an empty parameter list.")
Example #10
    def __init__(self, network, optimizer, sens=1.0):
        super(TransformerTrainOneStepCell, self).__init__(auto_prefix=False)
        self.network = network
        self.weights = ParameterTuple(network.trainable_params())
        self.optimizer = optimizer
        self.grad = C.GradOperation(get_by_list=True, sens_param=True)
        self.sens = sens
        self.reducer_flag = False
        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
        if self.parallel_mode not in ParallelMode.MODE_LIST:
            raise ValueError("Parallel mode does not support: ", parallel_mode)
        if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
            self.reducer_flag = True
        self.grad_reducer = None
        if self.reducer_flag:
            mean = context.get_auto_parallel_context("mirror_mean")
            degree = get_group_size()
            self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, degree)

        self.clip_gradients = ClipGradients()
        self.cast = P.Cast()
Example #11
def test_index_to_switch_layer():
    class Layer1(nn.Cell):
        def __init__(self):
            super(Layer1, self).__init__()
            self.z1 = Parameter(Tensor(
                np.full([128, 96], 0.6, dtype=np.float32)),
                                name='z1')

        def construct(self, x):
            return x * self.z1

    class Layer2(nn.Cell):
        def __init__(self):
            super(Layer2, self).__init__()
            self.z2 = Parameter(Tensor(
                np.full([128, 96], 0.6, dtype=np.float32)),
                                name='z2')

        def construct(self, x):
            return x * self.z2

    class SwitchLayerCell(nn.Cell):
        def __init__(self):
            super(SwitchLayerCell, self).__init__()
            self.layers = (Layer1(), Layer2())
            self.z3 = Parameter(Tensor(
                np.full([128, 96], 0.6, dtype=np.float32)),
                                name='z3')

        def construct(self, index, x):
            ret = self.layers[index](x) * self.z3
            return ret

    index = Tensor(0, dtype=mstype.int32)
    net = SwitchLayerCell()
    net(index, Tensor(np.full([128, 96], 0.6, dtype=np.float32)))
    grad_by_list(net, ParameterTuple(net.trainable_params()))(
        index, Tensor(np.full([128, 96], 0.6, dtype=np.float32)))
    grad_all(net)(index, Tensor(np.full([128, 96], 0.6, dtype=np.float32)))
Example #12
    def __init__(self, network, optimizer, scale_update_cell=None):
        super(TrainOneStepWithLossScaleCell, self).__init__(auto_prefix=False)
        self.network = network
        self.network.add_flags(defer_inline=True)
        self.weights = ParameterTuple(network.trainable_params())
        self.optimizer = optimizer
        self.grad = C.GradOperation(get_by_list=True, sens_param=True)
        self.hyper_map = C.HyperMap()
        self.alloc_status = NPUAllocFloatStatus()
        self.get_status = NPUGetFloatStatus()
        self.clear_status = NPUClearFloatStatus()
        self.reduce_sum = ReduceSum(keep_dims=False)
        self.base = Tensor(1, mstype.float32)
        self.reducer_flag = False
        self.less_equal = LessEqual()
        self.depend_parameter_use = ControlDepend(depend_mode=1)
        self.allreduce = P.AllReduce()
        self.parallel_mode = _get_parallel_mode()
        self.grad_reducer = None
        parallel_mode = _get_parallel_mode()
        if parallel_mode in (ParallelMode.DATA_PARALLEL,
                             ParallelMode.HYBRID_PARALLEL):
            self.reducer_flag = True
        if self.reducer_flag:
            mean = _get_gradients_mean()
            degree = _get_device_num()
            self.grad_reducer = DistributedGradReducer(optimizer.parameters,
                                                       mean, degree)
        self.is_distributed = self.parallel_mode != ParallelMode.STAND_ALONE

        self.loss_scale = None
        self.loss_scaling_manager = scale_update_cell
        if scale_update_cell:
            self.loss_scale = Parameter(Tensor(
                scale_update_cell.get_loss_scale(), dtype=mstype.float32),
                                        name="loss_scale")
        self.add_flags(has_effect=True)
Example #13
 def __init__(self, net):
     super(GradNet, self).__init__()
     self.net = net
     self.weights = ParameterTuple(net.trainable_params())
Example #14
 def __init__(self, net):
     super(GradNet, self).__init__()
     self.weights = ParameterTuple(net.trainable_params())
     self.net = net
     self.sens = Parameter(Tensor(np.ones([3, 4, 5]), dtype=mstype.float32), name='sens', requires_grad=False)
     self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True)
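
 # A sketch of the construct method that would complete this wrapper (the original
 # snippet shows only __init__); it assumes self.net takes a single input x:
 def construct(self, x):
     return self.grad(self.net, self.weights)(x, self.sens)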
Example #15
 def __init__(self, net):
     super(GradNet, self).__init__()
     self.weights = ParameterTuple(net.trainable_params())
     self.net = net
     grad_op = C.GradOperation(get_all=True, get_by_list=False, sens_param=True)
     self.grad = Bprop(self.net, False, self.weights, grad_op)
Example #16
    def __init__(self,
                 params,
                 learning_rate,
                 momentum,
                 matrix_A,
                 matrix_G,
                 A_inv_max,
                 G_inv_max,
                 weight_decay=0.0,
                 loss_scale=1.0,
                 decay_filter=lambda x: x.name not in []):
        super(THOR, self).__init__(learning_rate, params, weight_decay,
                                   loss_scale)
        if isinstance(momentum, float) and momentum < 0.0:
            raise ValueError(
                "momentum should be at least 0.0, but got momentum {}".format(
                    momentum))
        self.momentum = Parameter(Tensor(momentum, mstype.float32),
                                  name="momentum")
        self.params = self.parameters
        self.moments = self.params.clone(prefix="moments", init='zeros')
        self.hyper_map = C.HyperMap()
        self.opt = P.ApplyMomentum()
        self.matrix_A = ParameterTuple(matrix_A)
        self.matrix_G = ParameterTuple(matrix_G)
        self.A_inv_max = ParameterTuple(A_inv_max)
        self.G_inv_max = ParameterTuple(G_inv_max)
        self.cube_matmul_left = P.CusMatMulCubeFraczLeftCast()
        self.cube_matmul_left_fc = P.CusMatMulCubeDenseLeft()
        self.cube_matmul_right_fc = P.CusMatMulCubeDenseRight()
        self.cube_matmul_right_mul = P.CusMatMulCubeFraczRightMul()
        self.transpose = P.Transpose()
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.mul = P.Mul()
        self.weight_idx = []
        for i in range(len(self.params)):
            if "conv" in self.params[i].name or "end_point" in self.params[
                    i].name:
                self.weight_idx.append(i)
        self.weight_idx.append(len(self.params))
        self.feature_map = [
            1.0 / 12544, 1.0 / 3136, 1.0 / 3136, 1.0 / 3136, 1.0 / 3136,
            1.0 / 3136, 1.0 / 3136, 1.0 / 3136, 1.0 / 3136, 1.0 / 3136,
            1.0 / 3136, 1.0 / 3136, 1.0 / 784, 1.0 / 784, 1.0 / 784, 1.0 / 784,
            1.0 / 784, 1.0 / 784, 1.0 / 784, 1.0 / 784, 1.0 / 784, 1.0 / 784,
            1.0 / 784, 1.0 / 784, 1.0 / 784, 1.0 / 196, 1.0 / 196, 1.0 / 196,
            1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196,
            1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196,
            1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 49, 1.0 / 49,
            1.0 / 49, 1.0 / 49, 1.0 / 49, 1.0 / 49, 1.0 / 49, 1.0 / 49,
            1.0 / 49, 1.0
        ]
        mean = _get_mirror_mean()
        degree = _get_device_num()
        self.grad_reducer_Amax = DistributedGradReducerThor(
            self.parameters, 2, mean, degree)
        self.grad_reducer_Gmax = DistributedGradReducerThor(
            self.parameters, 5, mean, degree)
        self.grad_reducer_A = DistributedGradReducerThor(
            self.parameters, 3, mean, degree)
        self.grad_reducer_G = DistributedGradReducerThor(
            self.parameters, 4, mean, degree)
        self.matrix_A_inv = ()
        self.matrix_G_inv = ()
        self.matrix_max_inv = ()

        for i in range(54):
            self.matrix_max_inv = self.matrix_max_inv + (Parameter(
                initializer(1, [1], mstype.float32),
                name="matrix_max" + str(i),
                requires_grad=False), )
        self.log = P.Log()
        self.exp = P.Exp()
        self.sqrt = P.Sqrt()
        self.matrix_max_inv = ParameterTuple(self.matrix_max_inv)
        self.assign = P.Assign()
        self.cast = P.Cast()
        self.thor = True
        self.weight_decay = weight_decay * loss_scale
        self.decay_flags = tuple(decay_filter(x) for x in self.parameters)
Example #17
    def __init__(self,
                 learning_rate,
                 parameters,
                 weight_decay=0.0,
                 loss_scale=1.0):
        super(Optimizer, self).__init__(auto_prefix=False)
        if parameters is not None and not isinstance(parameters, list):
            parameters = list(parameters)

        if not parameters:
            raise ValueError("Optimizer got an empty parameter list.")

        if not isinstance(parameters[0], (dict, Parameter)):
            raise TypeError(
                "Only a list of Parameter or dict can be supported.")

        if isinstance(loss_scale, int):
            loss_scale = float(loss_scale)
        validator.check_value_type("loss_scale", loss_scale, [float],
                                   self.cls_name)
        validator.check_positive_float(loss_scale, "loss_scale", self.cls_name)
        self.loss_scale = loss_scale

        weight_decay = self._preprocess_weight_decay(weight_decay)
        self.grad_centralization = False

        self._unique = True
        self._target = context.get_context("device_target")
        self.dynamic_lr = False
        self.assignadd = None
        self.global_step = None
        self.is_group = False
        self.is_group_lr = False
        self.is_group_params_ordered = False
        learning_rate = self._preprocess_single_lr(learning_rate)
        if isinstance(parameters[0], dict):
            self.is_group = True
            self.group_params = []
            self.group_lr = []
            self.group_weight_decay = []
            self.group_grad_centralization = []
            self._init_group_params(parameters, learning_rate, weight_decay,
                                    self.grad_centralization)

        # The final value of dynamic_lr can be determined after the process of parse_single_lr and init_group_params
        if self.dynamic_lr:
            self.assignadd = P.AssignAdd()
            self.global_step = Parameter(initializer(0, [1], mindspore.int32),
                                         name='global_step')

        if self.is_group_lr:
            self.learning_rate = CellList(
                self.group_lr) if self.dynamic_lr else ParameterTuple(
                    self.group_lr)
        else:
            self.learning_rate = self._build_single_lr(learning_rate,
                                                       'learning_rate')

        if self.is_group:
            self.parameters = ParameterTuple(self.group_params)
            self.weight_decay = tuple(self.group_weight_decay)
            self.weight_decay_tensor_tuple = tuple(
                Tensor(x, mstype.float32) for x in self.group_weight_decay)
            decay_filter = lambda x: x > 0
            self.decay_flags = tuple(
                decay_filter(x) for x in self.weight_decay)
            self.exec_weight_decay = any(self.decay_flags)
            self.grad_centralization_flags = tuple(
                self.group_grad_centralization)
        else:
            self.parameters = ParameterTuple(parameters)
            self.weight_decay = weight_decay * loss_scale
            self.weight_decay_tensor = Tensor(self.weight_decay,
                                              mstype.float32)
            decay_filter = lambda x: 'beta' not in x.name and 'gamma' not in x.name
            self.decay_flags = tuple(decay_filter(x) for x in self.parameters)
            self.exec_weight_decay = self.weight_decay > 0
        # when a parameter has been unique, there is no need do another unique in optimizer.
        for param in self.parameters:
            if param.unique:
                self._unique = False
                break
        ps_filter = lambda x: x.is_param_ps
        self.ps_parameters = tuple(ps_filter(x) for x in self.parameters)
        cache_filter = lambda x: x.cache_enable
        self.cache_enable = tuple(cache_filter(x) for x in self.parameters)
        self.reciprocal_scale = Tensor(1.0 / loss_scale, mstype.float32)
        self.need_scale = loss_scale != 1.0
        self.global_step_increase_tensor = Tensor(1, mstype.int32)
        self.param_length = len(self.parameters)
        self.map_ = C.Map()
        self._use_parallel_optimizer()
Example #18
 def __init__(self, net):
     super(GradNet, self).__init__()
     self.net = net
     self.parameter_tuple = ParameterTuple(self.trainable_params())
Example #19
 def __init__(self, net):
     super(GradNetWrap, self).__init__()
     self.net = net
     self.weights = ParameterTuple(net.get_parameters())
Example #20
 def __init__(self,
              input_size,
              hidden_size,
              num_layers=1,
              has_bias=True,
              batch_first=False,
              dropout=0,
              bidirectional=False):
     super(LSTM, self).__init__()
     self.input_size = input_size
     self.hidden_size = hidden_size
     self.num_layers = num_layers
     self.has_bias = has_bias
     self.batch_first = validator.check_value_type("batch_first",
                                                   batch_first, [bool],
                                                   self.cls_name)
     self.hidden_size = validator.check_integer("hidden_size", hidden_size,
                                                0, Rel.GT, self.cls_name)
     self.num_layers = validator.check_integer("num_layers", num_layers, 0,
                                               Rel.GT, self.cls_name)
     self.dropout = float(dropout)
     self.bidirectional = bidirectional
     if self.batch_first:
         self.transpose1 = P.Transpose()
         self.transpose2 = P.Transpose()
     num_directions = 2 if self.bidirectional else 1
     self.cpu_target = False
     enable_debug = context.get_context("enable_debug_runtime")
     if context.get_context("device_target") == "CPU" and not enable_debug:
         self.cpu_target = True
     if not self.cpu_target:
         self.lstm = P.LSTM(input_size=self.input_size,
                            hidden_size=self.hidden_size,
                            num_layers=self.num_layers,
                            has_bias=self.has_bias,
                            bidirectional=self.bidirectional,
                            dropout=self.dropout)
         weight_size = 0
         gate_size = 4 * self.hidden_size
         for layer in range(self.num_layers):
             input_layer_size = self.input_size if layer == 0 else self.hidden_size * num_directions
             increment_size = gate_size * input_layer_size
             increment_size += gate_size * self.hidden_size
             if self.has_bias:
                 increment_size += 2 * gate_size
             weight_size += increment_size * num_directions
         stdv = 1 / math.sqrt(hidden_size)
         w_np = np.random.uniform(-stdv, stdv,
                                  (weight_size, 1, 1)).astype(np.float32)
         self.weight = Parameter(initializer(Tensor(w_np),
                                             [weight_size, 1, 1]),
                                 name='weight')
     else:
         input_size_list = []
         input_size_list.append(self.input_size)
         for i in range(self.num_layers - 1):
             input_size_list.append(self.hidden_size * num_directions)
         weights = []
         layers = []
         bias_size = 0 if not self.has_bias else num_directions * self.hidden_size * 4
         stdv = 1 / math.sqrt(hidden_size)
         for i in range(num_layers):
             weight_size = (input_size_list[i] + self.hidden_size
                            ) * num_directions * self.hidden_size * 4
             if has_bias:
                 weight_size = weight_size + bias_size
             w_np = np.random.uniform(
                 -stdv, stdv, (weight_size, 1, 1)).astype(np.float32)
             weights.append(
                 Parameter(initializer(Tensor(w_np), w_np.shape),
                           name='weight' + str(i)))
             layers.append(
                 nn.LSTMCell(input_size=input_size_list[i],
                             hidden_size=self.hidden_size,
                             has_bias=self.has_bias,
                             bidirectional=self.bidirectional,
                             dropout=self.dropout))
         self.lstms = layers
         self.weight = ParameterTuple(tuple(weights))
     self.fill = P.Fill()
     self.shape = P.Shape()
Example #21
    def __init__(self, learning_rate, parameters, weight_decay=0.0, loss_scale=1.0):
        super(Optimizer, self).__init__(auto_prefix=False)
        if parameters is not None and not isinstance(parameters, list):
            parameters = list(parameters)

        if not parameters:
            raise ValueError("Optimizer got an empty parameter list.")

        if not isinstance(parameters[0], (dict, Parameter)):
            raise TypeError("Only a list of Parameter or dict can be supported.")

        if isinstance(loss_scale, int):
            loss_scale = float(loss_scale)
        validator.check_value_type("loss_scale", loss_scale, [float], self.cls_name)
        validator.check_number_range("loss_scale", loss_scale, 0.0, float("inf"), Rel.INC_NEITHER, self.cls_name)
        self.loss_scale = loss_scale

        weight_decay = self._preprocess_weight_decay(weight_decay)

        self.dynamic_lr = False
        self.assignadd = None
        self.global_step = None
        self.is_group = False
        self.is_group_lr = False
        self.is_group_params_ordered = False
        learning_rate = self._preprocess_single_lr(learning_rate)
        if isinstance(parameters[0], dict):
            self.is_group = True
            self.group_params = []
            self.group_lr = []
            self.group_weight_decay = []
            self._init_group_params(parameters, learning_rate, weight_decay)

        # The final value of dynamic_lr can be determined after the process of parse_single_lr and init_group_params
        if self.dynamic_lr:
            self.assignadd = P.AssignAdd()
            self.global_step = Parameter(initializer(0, [1], mindspore.int32), name='global_step')

        if self.is_group_lr:
            if self.dynamic_lr:
                self.learning_rate = CellList(self.group_lr)
            else:
                self.learning_rate = ParameterTuple(self.group_lr)
        else:
            self.learning_rate = self._build_single_lr(learning_rate, 'learning_rate')
        if self.is_group:
            self.parameters = ParameterTuple(self.group_params)
            self.weight_decay = tuple(self.group_weight_decay)
            decay_filter = lambda x: x > 0
            self.decay_flags = tuple(decay_filter(x) for x in self.weight_decay)
            self.exec_weight_decay = any(self.decay_flags)
        else:
            self.parameters = ParameterTuple(parameters)
            self.weight_decay = weight_decay * loss_scale
            decay_filter = lambda x: 'beta' not in x.name and 'gamma' not in x.name
            self.decay_flags = tuple(decay_filter(x) for x in self.parameters)
            self.exec_weight_decay = self.weight_decay > 0
        ps_filter = lambda x: x.is_param_ps
        self.ps_parameters = tuple(ps_filter(x) for x in self.parameters)
        self.reciprocal_scale = 1.0 / loss_scale
        self.param_length = len(self.parameters)
        self.map_ = C.Map()

        use_parallel = context.get_auto_parallel_context("enable_parallel_optimizer")
        self.use_parallel = use_parallel
        if use_parallel:
            if self.cls_name not in ["Lamb", "AdamWeightDecay"]:
                raise RuntimeError("Optimizer segmentation does not support optimizer {}".format(self.cls_name))
            if _get_parallel_mode() != ParallelMode.DATA_PARALLEL:
                raise RuntimeError("Optimizer segmentation does not support parallel mode {}".format
                                   (_get_parallel_mode()))
            self.dev_num = _get_device_num()
            if self.dev_num > self.param_length:
                raise RuntimeError("Optimizer segmentation can not be applied when the number of parameters {} is"
                                   " less than the number of devices {}".format(self.param_length, self.dev_num))
            self.param_rank = self._get_parameter_group_id()
            self.optim_filter = tuple(map(lambda x: x == _get_global_rank(), self.param_rank))
            self.param_names = []
            for param in self.parameters:
                self.param_names.append(param.name)

        else:
            self.optim_filter = (True,) * self.param_length
Example #22
    def __init__(self, seq_len, batch_size, input_size, hidden_size,
                 num_layers, has_bias, bidirectional, dropout):
        super(MultiLayerBiLstmNet, self).__init__()

        num_directions = 1
        if bidirectional:
            num_directions = 2

        self.lstm = nn.LSTM(input_size=input_size,
                            hidden_size=hidden_size,
                            num_layers=num_layers,
                            has_bias=has_bias,
                            bidirectional=bidirectional,
                            dropout=dropout)

        input_np = np.array([[[
            -0.1887, -0.4144, -0.0235, 0.7489, 0.7522, 0.5969, 0.3342, 1.2198,
            0.6786, -0.9404
        ],
                              [
                                  -0.8643, -1.6835, -2.4965, 2.8093, 0.1741,
                                  0.2707, 0.7387, -0.0939, -1.7990, 0.4765
                              ]],
                             [[
                                 -0.5963, -1.2598, -0.7226, 1.1365, -1.7320,
                                 -0.7302, 0.1221, -0.2111, -1.6173, -0.0706
                             ],
                              [
                                  0.8964, 0.1737, -1.0077, -0.1389, 0.4889,
                                  0.4391, 0.7911, 0.3614, -1.9533, -0.9936
                              ]],
                             [[
                                 0.3260, -1.3312, 0.0601, 1.0726, -1.6010,
                                 -1.8733, -1.5775, 1.1579, -0.8801, -0.5742
                             ],
                              [
                                  -2.2998, -0.6344, -0.5409, -0.9221, -0.6500,
                                  0.1206, 1.5215, 0.7517, 1.3691, 2.0021
                              ]],
                             [[
                                 -0.1245, -0.3690, 2.1193, 1.3852, -0.1841,
                                 -0.8899, -0.3646, -0.8575, -0.3131, 0.2026
                             ],
                              [
                                  1.0218, -1.4331, 0.1744, 0.5442, -0.7808,
                                  0.2527, 0.1566, 1.1484, -0.7766, -0.6747
                              ]],
                             [[
                                 -0.6752, 0.9906, -0.4973, 0.3471, -0.1202,
                                 -0.4213, 2.0213, 0.0441, 0.9016, 1.0365
                             ],
                              [
                                  1.2223, -1.3248, 0.1207, -0.8256, 0.1816,
                                  0.7057, -0.3105, 0.5713, 0.2804, -1.0685
                              ]]]).astype(np.float32)

        self.x = Parameter(initializer(Tensor(input_np),
                                       [seq_len, batch_size, input_size]),
                           name='x')

        self.h0 = Parameter(initializer(
            Tensor(
                np.ones((num_directions, batch_size,
                         hidden_size)).astype(np.float32)),
            [num_directions, batch_size, hidden_size]),
                            name='h0')
        self.c0 = Parameter(initializer(
            Tensor(
                np.ones((num_directions, batch_size,
                         hidden_size)).astype(np.float32)),
            [num_directions, batch_size, hidden_size]),
                            name='c0')
        self.h1 = Parameter(initializer(
            Tensor(
                np.ones((num_directions, batch_size,
                         hidden_size)).astype(np.float32)),
            [num_directions, batch_size, hidden_size]),
                            name='h1')
        self.c1 = Parameter(initializer(
            Tensor(
                np.ones((num_directions, batch_size,
                         hidden_size)).astype(np.float32)),
            [num_directions, batch_size, hidden_size]),
                            name='c1')
        self.h = ParameterTuple((self.h0, self.h1))
        self.c = ParameterTuple((self.c0, self.c1))
Example #23
BACKBONE = effnet(num_classes=1000)
load_checkpoint("efficient_net_b0.ckpt", BACKBONE)
HEAD = M.nn.Dense(1000, 10)
HEAD.weight.set_data(
    M.Tensor(
        np.random.normal(0, 0.1, HEAD.weight.data.shape).astype("float32")))
HEAD.bias.set_data(M.Tensor(np.zeros(HEAD.bias.data.shape, dtype="float32")))

n = TransferNet(BACKBONE, HEAD)

trainable_weights_list = []
trainable_weights_list.extend(n.head.trainable_params())
trainable_weights = ParameterTuple(trainable_weights_list)

M.context.set_context(mode=M.context.PYNATIVE_MODE,
                      device_target="GPU",
                      save_graphs=False)
BATCH_SIZE = 32
X = M.Tensor(np.ones((BATCH_SIZE, 3, 224, 224)), M.float32)
label = M.Tensor(np.zeros([BATCH_SIZE, 10]).astype(np.float32))

sgd = M.nn.SGD(trainable_weights,
               learning_rate=0.01,
               momentum=0.9,
               dampening=0.01,
               weight_decay=0.0,
               nesterov=False,
               loss_scale=1.0)
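
One head-only training step with the objects above might look like the following sketch;
it assumes `from mindspore.ops import composite as C` is in scope (as in the other
examples here) and is not part of the original snippet:

loss_fn = M.nn.SoftmaxCrossEntropyWithLogits(reduction='mean')
loss_net = M.nn.WithLossCell(n, loss_fn)
grad_fn = C.GradOperation(get_by_list=True)
grads = grad_fn(loss_net, trainable_weights)(X, label)  # gradients for the head only
sgd(grads)  # apply the SGD update to the head parameters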
Example #24
    def __init__(self,
                 learning_rate,
                 parameters,
                 weight_decay=0.0,
                 loss_scale=1.0):
        super(Optimizer, self).__init__(auto_prefix=False)
        if parameters is not None and not isinstance(parameters, list):
            parameters = list(parameters)

        if not parameters:
            raise ValueError("Optimizer got an empty parameter list.")

        if not isinstance(parameters[0], (dict, Parameter)):
            raise TypeError(
                "Only a list of Parameter or dict can be supported.")

        if isinstance(loss_scale, int):
            loss_scale = float(loss_scale)
        validator.check_value_type("loss_scale", loss_scale, [float],
                                   self.cls_name)
        validator.check_positive_float(loss_scale, "loss_scale", self.cls_name)
        self.loss_scale = loss_scale

        weight_decay = self._preprocess_weight_decay(weight_decay)

        self._unique = True
        self._target = context.get_context("device_target")
        self.dynamic_lr = False
        self.assignadd = None
        self.global_step = None
        self.is_group = False
        self.is_group_lr = False
        self.is_group_params_ordered = False
        learning_rate = self._preprocess_single_lr(learning_rate)
        if isinstance(parameters[0], dict):
            self.is_group = True
            self.group_params = []
            self.group_lr = []
            self.group_weight_decay = []
            self._init_group_params(parameters, learning_rate, weight_decay)

        # The final value of dynamic_lr can be determined after the process of parse_single_lr and init_group_params
        if self.dynamic_lr:
            self.assignadd = P.AssignAdd()
            self.global_step = Parameter(initializer(0, [1], mindspore.int32),
                                         name='global_step')

        if self.is_group_lr:
            if self.dynamic_lr:
                self.learning_rate = CellList(self.group_lr)
            else:
                self.learning_rate = ParameterTuple(self.group_lr)
        else:
            self.learning_rate = self._build_single_lr(learning_rate,
                                                       'learning_rate')
        if self.is_group:
            self.parameters = ParameterTuple(self.group_params)
            self.weight_decay = tuple(self.group_weight_decay)
            self.weight_decay_tensor_tuple = tuple(
                Tensor(x, mstype.float32) for x in self.group_weight_decay)
            decay_filter = lambda x: x > 0
            self.decay_flags = tuple(
                decay_filter(x) for x in self.weight_decay)
            self.exec_weight_decay = any(self.decay_flags)
        else:
            self.parameters = ParameterTuple(parameters)
            self.weight_decay = weight_decay * loss_scale
            self.weight_decay_tensor = Tensor(self.weight_decay,
                                              mstype.float32)
            decay_filter = lambda x: 'beta' not in x.name and 'gamma' not in x.name
            self.decay_flags = tuple(decay_filter(x) for x in self.parameters)
            self.exec_weight_decay = self.weight_decay > 0
        # when a parameter has been unique, there is no need do another unique in optimizer.
        for param in self.parameters:
            if param.unique:
                self._unique = False
                break
        ps_filter = lambda x: x.is_param_ps
        self.ps_parameters = tuple(ps_filter(x) for x in self.parameters)
        ps_cache_filter = lambda x: x.cache_enable
        self.cache_enable = tuple(ps_cache_filter(x) for x in self.parameters)
        self.reciprocal_scale = Tensor(1.0 / loss_scale, mstype.float32)
        self.need_scale = loss_scale != 1.0
        self.global_step_increase_tensor = Tensor(1, mstype.int32)
        self.param_length = len(self.parameters)
        self.map_ = C.Map()
        if context.get_auto_parallel_context("enable_parallel_optimizer"):
            if _get_parallel_mode(
            ) == ParallelMode.DATA_PARALLEL and context.get_context(
                    "device_target") == "Ascend":
                self.use_parallel = True
            elif _get_parallel_mode() == ParallelMode.DATA_PARALLEL \
                    and context.get_context("device_target") != "Ascend":
                raise RuntimeError(
                    "Parallel optimizer only supports Ascend in data parallel mode."
                )
            elif _get_parallel_mode() in (ParallelMode.STAND_ALONE,
                                          ParallelMode.HYBRID_PARALLEL):
                raise RuntimeError(
                    "Parallel optimizer is not supported in {}.".format(
                        _get_parallel_mode()))
            else:
                self.use_parallel = False
        else:
            self.use_parallel = False
        if self.use_parallel:
            if self.cls_name not in ["Lamb", "AdamWeightDecay"]:
                raise RuntimeError(
                    "Parallel optimizer does not support optimizer {}".format(
                        self.cls_name))
            self.dev_num = _get_device_num()
            if self.dev_num > self.param_length:
                raise RuntimeError(
                    "Parallel optimizer can not be applied when the number of parameters {} is"
                    " less than the number of devices {}".format(
                        self.param_length, self.dev_num))
            self.param_rank = self._get_parameter_group_id()
            self.optim_filter = tuple(
                map(lambda x: x == _get_global_rank(), self.param_rank))
            self.param_names = []
            for param in self.parameters:
                self.param_names.append(param.name)

        else:
            self.optim_filter = (True, ) * self.param_length
Example #25
 def __init__(self, network):
     super(GradWrap, self).__init__()
     self.network = network
     self.weights = ParameterTuple(
         filter(lambda x: x.requires_grad, network.get_parameters()))
Example #26
    def __init__(self,
                 params,
                 learning_rate,
                 momentum,
                 matrix_A,
                 matrix_G,
                 A_inv_max,
                 G_inv_max,
                 weight_decay=0.0,
                 loss_scale=1.0,
                 use_nesterov=False,
                 decay_filter=lambda x: x.name not in []):
        super(THOR_GPU, self).__init__(learning_rate, params, weight_decay,
                                       loss_scale)
        validator.check_value_type("momentum", momentum, [float],
                                   self.cls_name)
        if isinstance(momentum, float) and momentum < 0.0:
            raise ValueError(
                "momentum should be at least 0.0, but got momentum {}".format(
                    momentum))
        self.momentum = Parameter(Tensor(momentum, mstype.float32),
                                  name="momentum")
        self.params = self.parameters
        self.use_nesterov = check_bool(use_nesterov)
        self.moments = self.params.clone(prefix="moments", init='zeros')
        self.hyper_map = C.HyperMap()
        self.opt = P.ApplyMomentum(use_nesterov=self.use_nesterov)

        self.feature_map = [
            1.0 / 12544, 1.0 / 3136, 1.0 / 3136, 1.0 / 3136, 1.0 / 3136,
            1.0 / 3136, 1.0 / 3136, 1.0 / 3136, 1.0 / 3136, 1.0 / 3136,
            1.0 / 3136, 1.0 / 3136, 1.0 / 784, 1.0 / 784, 1.0 / 784, 1.0 / 784,
            1.0 / 784, 1.0 / 784, 1.0 / 784, 1.0 / 784, 1.0 / 784, 1.0 / 784,
            1.0 / 784, 1.0 / 784, 1.0 / 784, 1.0 / 196, 1.0 / 196, 1.0 / 196,
            1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196,
            1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196,
            1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 49, 1.0 / 49,
            1.0 / 49, 1.0 / 49, 1.0 / 49, 1.0 / 49, 1.0 / 49, 1.0 / 49,
            1.0 / 49, 1.0
        ]
        self.feature_map_new = [x**0.5 for x in self.feature_map]
        self.transpose = P.Transpose()
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.matmul = P.MatMul()
        self.matrix_A = ParameterTuple(matrix_A)
        self.matrix_G = ParameterTuple(matrix_G)
        self.A_inv_max = ParameterTuple(A_inv_max)
        self.G_inv_max = ParameterTuple(G_inv_max)
        self.assign = P.Assign()
        self.mul = P.Mul()

        mean = _get_gradients_mean()
        degree = _get_device_num()

        parameter_length = len(self.feature_map)
        self.grad_reducer_thorA = DistributedGradReducerThor(
            parameter_length, ((parameter_length, ), 0), mean, degree)
        self.grad_reducer_thorG = DistributedGradReducerThor(
            parameter_length, ((parameter_length, ), 0), mean, degree)
        self.weight_decay = weight_decay
        self.decay_flags = tuple(decay_filter(x) for x in self.parameters)
        self.update_gradient = P.UpdateThorGradient(split_dim=128)
Example #27
    def __init__(self,
                 input_size,
                 hidden_size,
                 num_layers=1,
                 has_bias=True,
                 batch_first=False,
                 dropout=0,
                 bidirectional=False):
        super(LSTM, self).__init__()
        validator.check_value_type("batch_first", batch_first, [bool],
                                   self.cls_name)
        validator.check_positive_int(hidden_size, "hidden_size", self.cls_name)
        validator.check_positive_int(num_layers, "num_layers", self.cls_name)
        self.is_ascend = context.get_context("device_target") == "Ascend"

        self.batch_first = batch_first
        self.transpose = P.Transpose()
        self.num_layers = num_layers
        self.bidirectional = bidirectional
        self.dropout = dropout
        self.lstm = P.LSTM(input_size=input_size,
                           hidden_size=hidden_size,
                           num_layers=num_layers,
                           has_bias=has_bias,
                           bidirectional=bidirectional,
                           dropout=float(dropout))

        weight_size = 0
        gate_size = 4 * hidden_size
        stdv = 1 / math.sqrt(hidden_size)
        num_directions = 2 if bidirectional else 1
        if self.is_ascend:
            self.reverse_seq = P.ReverseSequence(batch_dim=1, seq_dim=0)
            self.concat = P.Concat(axis=0)
            self.concat_2dim = P.Concat(axis=2)
            self.cast = P.Cast()
            self.shape = P.Shape()
            if dropout != 0:
                self.dropout_op = nn.Dropout(float(dropout))
            b0 = np.zeros(gate_size, dtype=np.float16)
            self.w_list = []
            self.b_list = []
            self.rnns_fw = P.DynamicRNN(forget_bias=0.0)
            self.rnns_bw = P.DynamicRNN(forget_bias=0.0)

            for layer in range(num_layers):
                w_shape = input_size if layer == 0 else (num_directions *
                                                         hidden_size)
                w_np = np.random.uniform(
                    -stdv, stdv,
                    (w_shape + hidden_size, gate_size)).astype(np.float16)
                self.w_list.append(
                    Parameter(initializer(Tensor(w_np),
                                          [w_shape + hidden_size, gate_size]),
                              name='weight_fw' + str(layer)))
                if has_bias:
                    b_np = np.random.uniform(-stdv, stdv,
                                             gate_size).astype(np.float16)
                    self.b_list.append(
                        Parameter(initializer(Tensor(b_np), [gate_size]),
                                  name='bias_fw' + str(layer)))
                else:
                    self.b_list.append(
                        Parameter(initializer(Tensor(b0), [gate_size]),
                                  name='bias_fw' + str(layer)))
                if bidirectional:
                    w_bw_np = np.random.uniform(
                        -stdv, stdv,
                        (w_shape + hidden_size, gate_size)).astype(np.float16)
                    self.w_list.append(
                        Parameter(
                            initializer(Tensor(w_bw_np),
                                        [w_shape + hidden_size, gate_size]),
                            name='weight_bw' + str(layer)))
                    b_bw_np = np.random.uniform(
                        -stdv, stdv,
                        (4 *
                         hidden_size)).astype(np.float16) if has_bias else b0
                    self.b_list.append(
                        Parameter(initializer(Tensor(b_bw_np), [gate_size]),
                                  name='bias_bw' + str(layer)))
            self.w_list = ParameterTuple(self.w_list)
            self.b_list = ParameterTuple(self.b_list)
        else:
            for layer in range(num_layers):
                input_layer_size = input_size if layer == 0 else hidden_size * num_directions
                increment_size = gate_size * input_layer_size
                increment_size += gate_size * hidden_size
                if has_bias:
                    increment_size += 2 * gate_size
                weight_size += increment_size * num_directions
            w_np = np.random.uniform(-stdv, stdv,
                                     (weight_size, 1, 1)).astype(np.float32)
            self.weight = Parameter(initializer(Tensor(w_np),
                                                [weight_size, 1, 1]),
                                    name='weight')
Example #28
 def __init__(self, network):
     super(GradWrap, self).__init__()
     self.network = network
     self.weights = ParameterTuple(network.trainable_params())
Example #29
 def __init__(self, network):
     super(GradWeight, self).__init__()
     self.network = network
     self.weights = ParameterTuple(network.trainable_params())
     self.grad = C.GradOperation(get_by_list=True, sens_param=True)
Example #30
 def __init__(self, net):
     super(NetGrad, self).__init__()
     self.grad_op = C.GradOperation(get_by_list=True, sens_param=False)
     self.net = net
     self.weights = ParameterTuple(self.net.trainable_params())
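
 # Similarly, a sketch of the missing construct method (no sens input here, since
 # sens_param=False); it assumes self.net takes a single input x:
 def construct(self, x):
     return self.grad_op(self.net, self.weights)(x)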