Exemple #1
0
 def __init__(self):
     """Build the cell: one Mul, one ReLU, and two all-ones parameters."""
     super(Net, self).__init__()
     # Element-wise primitives used by construct().
     self.relu = P.ReLU()
     self.mul = P.Mul()
     # Two trainable 8x8x8x8 float32 tensors, initialised to ones.
     self.param1 = Parameter(Tensor(np.ones([8, 8, 8, 8], dtype=np.float32)), name="wide")
     self.param2 = Parameter(Tensor(np.ones([8, 8, 8, 8], dtype=np.float32)), name="deep")
Exemple #2
0
 def __init__(self):
     """Set up a 6x7x8 arange parameter and an all-ones constant tensor."""
     super(TensorSetItemByTensorsWithTupleOfTensor, self).__init__()
     shape = (6, 7, 8)
     # Running values 0..335 reshaped to (6, 7, 8), held as a parameter "x".
     values = np.arange(6 * 7 * 8).reshape(shape)
     self.param = Parameter(Tensor(values, mstype.float32), name="x")
     # Constant all-ones tensor of the same shape.
     self.const = Tensor(np.ones(shape), mstype.float32)
Exemple #3
0
    # --- Fragment: training setup for Faster R-CNN (ResNet-50 backbone). ---
    # NOTE(review): this snippet is truncated — it begins mid-function and the
    # TrainOneStepCell(...) call at the bottom is cut off mid-argument-list.
    dataset_size = dataset.get_dataset_size()
    print("Create dataset done!")

    net = Faster_Rcnn_Resnet50(config=config)
    # set_train() presumably returns the cell itself — TODO confirm against
    # the Cell API; the return value is what is used below.
    net = net.set_train()

    # Optionally warm-start from a pre-trained checkpoint.
    load_path = args_opt.pre_trained
    if load_path != "":
        param_dict = load_checkpoint(load_path)
        # Keep only 'backbone'-prefixed weights; everything else is dropped
        # (iterates a copied key list so popping while looping is safe).
        for item in list(param_dict.keys()):
            if not item.startswith('backbone'):
                param_dict.pop(item)
        # On GPU, re-wrap every checkpoint value as a float32 Parameter
        # before loading it into the network.
        if args_opt.device_target == "GPU":
            for key, value in param_dict.items():
                tensor = value.asnumpy().astype(np.float32)
                param_dict[key] = Parameter(tensor, key)
        load_param_into_net(net, param_dict)

    loss = LossNet()
    # Learning-rate schedule precomputed for the full run from the config
    # and the dataset size.
    lr = Tensor(dynamic_lr(config, dataset_size), mstype.float32)

    opt = SGD(params=net.trainable_params(),
              learning_rate=lr,
              momentum=config.momentum,
              weight_decay=config.weight_decay,
              loss_scale=config.loss_scale)
    net_with_loss = WithLossCell(net, loss)
    if args_opt.run_distribute:
        net = TrainOneStepCell(net_with_loss,
                               net,
                               opt,
    def __init__(self, args, strategy):
        """Build a one-hot classification head under semi-auto parallel.

        Every primitive is annotated with an explicit parallel strategy
        pulled from the ``strategy`` bundle; ``args`` supplies the scalar
        hyper-parameters ``a``..``e`` and the classifier dimensions
        (``num_classes``, ``emb_size``).  Semantics of a..e are not visible
        from this snippet — TODO confirm against the caller.
        """
        super(SemiAutoOneHotNet, self).__init__()
        # Scalar hyper-parameters copied from args, then mirrored as
        # float32 Tensor constants below.
        self.a = args.a
        self.b = args.b
        self.c = args.c
        self.d = args.d
        self.e = args.e
        # Five Cast ops: three sharded over 2-D data, two over scalars.
        self.cast = P.Cast()
        self.cast.set_strategy(strategy=strategy.twod_strategy)
        self.cast1 = P.Cast()
        self.cast1.set_strategy(strategy=strategy.twod_strategy)
        self.cast2 = P.Cast()
        self.cast2.set_strategy(strategy=strategy.twod_strategy)
        self.cast3 = P.Cast()
        self.cast3.set_strategy(strategy=strategy.scalar_strategy)
        self.cast4 = P.Cast()
        self.cast4.set_strategy(strategy=strategy.scalar_strategy)
        # Hyper-parameters as float32 tensor constants.
        self.a_const = Tensor(self.a, dtype=mstype.float32)
        self.b_const = Tensor(self.b, dtype=mstype.float32)
        self.c_const = Tensor(self.c, dtype=mstype.float32)
        self.d_const = Tensor(self.d, dtype=mstype.float32)
        self.e_const = Tensor(self.e, dtype=mstype.float32)
        self.m_const_zero = Tensor(0, dtype=mstype.float32)
        self.a_const_one = Tensor(1, dtype=mstype.float32)
        self.onehot = P.OneHot()
        self.onehot.set_strategy(strategy=strategy.onehot_strategy)
        # Exp ops (e.g. for softmax-style computation), all 2-D sharded.
        self.exp = P.Exp()
        self.exp.set_strategy(strategy=strategy.twod_strategy)
        self.exp2 = P.Exp()
        self.exp2.set_strategy(strategy=strategy.twod_strategy)
        self.exp3 = P.Exp()
        self.exp3.set_strategy(strategy=strategy.twod_strategy)
        # NOTE(review): despite the "mul_const*" names, mul_const2 is a
        # TensorAdd and mul_const3/mul_const4 are Sub — names kept as-is.
        self.mul_const = P.Mul()
        self.mul_const.set_strategy(strategy=strategy.scalar_twod_strategy)
        self.mul_const2 = P.TensorAdd()
        self.mul_const2.set_strategy(strategy=strategy.scalar_twod_strategy)
        self.mul_const3 = P.Sub()
        self.mul_const3.set_strategy(strategy=strategy.twod_scalar_strategy)
        self.mul_const4 = P.Sub()
        self.mul_const4.set_strategy(strategy=strategy.scalar_twod_strategy)
        self.mul_const5 = P.Mul()
        self.mul_const5.set_strategy(strategy=strategy.twod_scalar_strategy)
        # Element-wise ops between activations; mul3 is an add, mul4 a sub,
        # mul5/mul8 are divisions (names kept as-is).
        self.mul = P.Mul()
        self.mul.set_strategy(strategy=strategy.twod_twod_strategy)
        self.mul2 = P.Mul()
        self.mul2.set_strategy(strategy=strategy.twod_twod_strategy)
        self.mul3 = P.TensorAdd()
        self.mul3.set_strategy(strategy=strategy.twod_twod_strategy)
        self.mul4 = P.Sub()
        self.mul4.set_strategy(strategy=strategy.twod_twodbc_strategy)
        self.mul5 = P.RealDiv()
        self.mul5.set_strategy(strategy=strategy.twod_twodbc_strategy)
        self.mul6 = P.Mul()
        self.mul6.set_strategy(strategy=strategy.twod_twod_strategy)
        self.mul7 = P.Mul()
        self.mul7.set_strategy(strategy=strategy.twod_scalar_strategy)
        self.mul8 = P.RealDiv()
        self.mul8.set_strategy(strategy=strategy.scalar_scalar_strategy)
        self.mul9 = P.TensorAdd()
        self.mul9.set_strategy(strategy=strategy.twod_scalar_strategy)

        # Max over the class axis keeps dims (for numerically-stable exp).
        self.reduce_max = P.ReduceMax(keep_dims=True)
        self.reduce_max.set_strategy(strategy=strategy.twod_strategy)

        # Three sum reductions; the last operates on 1-D data.
        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.reduce_sum.set_strategy(strategy=strategy.twod_strategy)
        self.reduce_sum_2 = P.ReduceSum(keep_dims=False)
        self.reduce_sum_2.set_strategy(strategy=strategy.twod_strategy)
        self.reduce_sum_3 = P.ReduceSum(keep_dims=False)
        self.reduce_sum_3.set_strategy(strategy=strategy.oned_strategy)

        self.reshape = P.Reshape()
        self.log = P.Log()
        self.log.set_strategy(strategy=strategy.twod_strategy)

        # One-hot on/off values and L2 normalisation along the feature axis.
        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
        self.normalize = P.L2Normalize(axis=1)
        self.normalize.set_strategy(strategy=strategy.twod_strategy_m)
        self.normalize2 = P.L2Normalize(axis=1)
        self.normalize2.set_strategy(strategy=strategy.twod_strategy_m)
        # Fully-connected classifier: MatMul against the transposed weight.
        self.fc = P.MatMul(transpose_b=True)
        self.fc.set_strategy(strategy=strategy.twodbc_twod_strategy)
        # Zero-initialised [num_classes, emb_size] classifier weight.
        weight_shape = [args.num_classes, args.emb_size]
        weight_np = np.zeros(weight_shape, np.float32)
        self.weight = Parameter(Tensor(weight_np),
                                name='model_parallel_weight')
 def __init__(self, strategy1, strategy2, weight):
     """Wire up a sharded MatMul (B transposed) followed by a sharded ReLU."""
     super().__init__()
     # Trainable weight registered under the name "w1".
     self.weight = Parameter(weight, name="w1")
     # MatMul against the transposed weight, parallelised per strategy1.
     matmul_op = P.MatMul(transpose_a=False, transpose_b=True)
     self.matmul = matmul_op.shard(strategy1)
     relu_op = P.ReLU()
     self.relu = relu_op.shard(strategy2)
Exemple #6
0
    def __init__(self,
                 input_size,
                 hidden_size,
                 num_layers=1,
                 has_bias=True,
                 batch_first=False,
                 dropout=0.0,
                 bidirectional=False):
        """Build a stacked (optionally bidirectional) LSTM for Ascend.

        Per-layer weights and biases are created here with uniform init in
        [-1/sqrt(hidden_size), 1/sqrt(hidden_size)]; the backing LSTM cell
        runs in float16.  NOTE(review): ``dropout`` is accepted but not used
        anywhere in this constructor — confirm it is consumed elsewhere.
        """
        super(StackLSTMAscend, self).__init__()
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.bidirectional = bidirectional
        self.transpose = P.Transpose()

        # Per-layer input sizes: first layer sees input_size, deeper layers
        # see hidden_size * 2 (concatenated directions).
        input_size_list = [input_size]
        for i in range(num_layers - 1):
            input_size_list.append(hidden_size * 2)

        # Weight, bias and layer initialisation.
        weights_fw = []
        weights_bw = []
        bias_fw = []
        bias_bw = []

        # Uniform-init bound 1/sqrt(hidden_size).
        stdv = 1 / math.sqrt(hidden_size)
        for i in range(num_layers):
            # Forward weight init: [(input + hidden), 4 * hidden] for the
            # four LSTM gates.
            w_np_fw = np.random.uniform(-stdv, stdv,
                                        (input_size_list[i] + hidden_size,
                                         hidden_size * 4)).astype(np.float32)
            w_fw = Parameter(initializer(Tensor(w_np_fw), w_np_fw.shape),
                             name="w_fw_layer" + str(i))
            weights_fw.append(w_fw)
            # Forward bias init: random when has_bias, zeros otherwise (a
            # zero bias Parameter is still created so shapes stay uniform).
            if has_bias:
                b_fw = np.random.uniform(-stdv, stdv,
                                         (hidden_size * 4)).astype(np.float32)
                b_fw = Parameter(initializer(Tensor(b_fw), b_fw.shape),
                                 name="b_fw_layer" + str(i))
            else:
                b_fw = np.zeros((hidden_size * 4)).astype(np.float32)
                b_fw = Parameter(initializer(Tensor(b_fw), b_fw.shape),
                                 name="b_fw_layer" + str(i))
            bias_fw.append(b_fw)

            # Backward direction only exists when bidirectional=True;
            # otherwise weights_bw / bias_bw stay empty.
            if bidirectional:
                # Backward weight init (same shape as forward).
                w_np_bw = np.random.uniform(
                    -stdv, stdv, (input_size_list[i] + hidden_size,
                                  hidden_size * 4)).astype(np.float32)
                w_bw = Parameter(initializer(Tensor(w_np_bw), w_np_bw.shape),
                                 name="w_bw_layer" + str(i))
                weights_bw.append(w_bw)

                # Backward bias init (random or zeros, as above).
                if has_bias:
                    b_bw = np.random.uniform(
                        -stdv, stdv, (hidden_size * 4)).astype(np.float32)
                    b_bw = Parameter(initializer(Tensor(b_bw), b_bw.shape),
                                     name="b_bw_layer" + str(i))
                else:
                    b_bw = np.zeros((hidden_size * 4)).astype(np.float32)
                    b_bw = Parameter(initializer(Tensor(b_bw), b_bw.shape),
                                     name="b_bw_layer" + str(i))
                bias_bw.append(b_bw)

        # Backing LSTM cell, computed in float16 on Ascend.
        self.lstm = LSTM_Ascend(bidirectional=bidirectional).to_float(
            mstype.float16)

        # Expose the per-layer parameters as ParameterTuples.
        self.weight_fw = ParameterTuple(tuple(weights_fw))
        self.weight_bw = ParameterTuple(tuple(weights_bw))
        self.bias_fw = ParameterTuple(tuple(bias_fw))
        self.bias_bw = ParameterTuple(tuple(bias_bw))
Exemple #7
0
 def __init__(self, strategy1, weight):
     """Register weight "w1" and a strategy-annotated MatMul (B transposed)."""
     super().__init__()
     # Trainable weight registered under the name "w1".
     self.weight = Parameter(weight, name="w1")
     # MatMul with the second operand transposed, parallelised via strategy1.
     matmul_op = P.MatMul(transpose_a=False, transpose_b=True)
     self.matmul = matmul_op.set_strategy(strategy1)
 def __init__(self):
     """Create one trainable all-ones parameter of shape (3, 4, 5)."""
     super(SecondNet, self).__init__()
     ones = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
     # Registered as "b2" and included in gradient computation.
     self.b2 = Parameter(ones, name="b2", requires_grad=True)
Exemple #9
0
 def __init__(self, mul_weight, strategy1=None, strategy2=None):
     """Mul followed by Neg, each sharded with its own (optional) strategy."""
     super().__init__()
     # Trainable multiplier registered as "w1".
     self.mul_weight = Parameter(mul_weight, name="w1")
     mul_op = P.Mul()
     self.mul = mul_op.shard(strategy1)
     neg_op = P.Neg()
     self.neg = neg_op.shard(strategy2)
 def __init__(self):
     """Hold a 2x3 parameter (float32) and a same-valued gradient (float16)."""
     super(Net, self).__init__()
     rows = [[1.1, 2.2, 3.3], [2.0, 3.0, 4.0]]
     # NOTE(review): dtypes intentionally differ (par is float32, grad is
     # float16) — presumably to exercise mixed-precision handling; confirm.
     self.par = Parameter(Tensor(np.array(rows, dtype=np.float32)), name="par")
     self.grad = Tensor(np.array(rows, dtype=np.float16))
Exemple #11
0
 def __init__(self):
     """Two all-ones weights plus a SparseGatherV2 gathering along axis 0."""
     super(NetWithSparseGatherV2, self).__init__()
     # Parameters registered in order: weight1 (3x1x2), then weight2 (2x1x2).
     self.weight1 = Parameter(Tensor(np.ones([3, 1, 2], dtype=np.float32)), name="weight1")
     self.weight2 = Parameter(Tensor(np.ones([2, 1, 2], dtype=np.float32)), name="weight2")
     # Gather along the leading dimension.
     self.axis = 0
     self.gather = P.SparseGatherV2()
 def __init__(self, strategy1, strategy2, weight, weight2):
     """Two trainable weights and two strategy-annotated MatMul ops."""
     super().__init__()
     # Parameters registered in order: "w1" then "w2"; both trainable.
     self.weight = Parameter(weight, name="w1", requires_grad=True)
     self.weight2 = Parameter(weight2, name="w2", requires_grad=True)
     first_mm = P.MatMul()
     self.matmul = first_mm.set_strategy(strategy1)
     second_mm = P.MatMul()
     self.matmul2 = second_mm.set_strategy(strategy2)
 def __init__(self):
     """MatMul plus two adds, with a 128x128 trainable weight of 0.01s."""
     super().__init__()
     # Graph primitives: one matrix multiply and two element-wise adds.
     self.matmul = P.MatMul()
     self.tadd1 = P.TensorAdd()
     self.tadd2 = P.TensorAdd()
     # Every entry starts at 0.01 (ones scaled down), float32.
     init = np.ones([128, 128]).astype(np.float32) * 0.01
     self.weight = Parameter(Tensor(init), name="w", requires_grad=True)
Exemple #14
0
 def __init__(self):
     """Single trainable all-ones parameter "b1" of shape (4, 4, 5)."""
     super(Net, self).__init__()
     # Float32 all-ones initial value.
     init_tensor = Tensor(np.ones([4, 4, 5]), dtype=mstype.float32)
     self.weight = Parameter(init_tensor, name="b1", requires_grad=True)
Exemple #15
0
 def __init__(self, mul_weight, strategy1=None, strategy2=None):
     """Mul feeding a sigmoid cross-entropy loss, each with its own strategy."""
     super().__init__()
     mul_op = P.Mul()
     self.mul = mul_op.set_strategy(strategy1)
     loss_op = P.SigmoidCrossEntropyWithLogits()
     self.loss = loss_op.set_strategy(strategy2)
     # Trainable multiplier registered as "w1".
     self.mul_weight = Parameter(mul_weight, name="w1")
 def __init__(self):
     """One trainable length-32 parameter filled with 2.0."""
     super().__init__()
     # 32-element vector of twos, stored as float32.
     twos = np.ones([32]) * 2
     self.z1 = Parameter(Tensor(twos, dtype=mstype.float32), name='z1')
Exemple #17
0
 def __init__(self):
     """Trainable all-ones (3, 4, 5) parameter registered as "w2"."""
     super(AddNet, self).__init__()
     # Float32 all-ones initial value.
     ones = np.ones((3, 4, 5), np.float32)
     self.w = Parameter(Tensor(ones), name="w2", requires_grad=True)