Example #1
 def __init__(self, is_training=None, num_labels=None, config=None):
     super(CrossEntropyCalculationWithMask, self).__init__()
     self.onehot = P.OneHot()
     self.on_value = Tensor(1.0, mstype.float32)
     self.off_value = Tensor(0.0, mstype.float32)
     self.reduce_sum = P.ReduceSum()
     self.reduce_mean = P.ReduceMean()
     self.reshape = P.Reshape()
     self.last_idx = (-1, )
     self.neg = P.Neg()
     self.cast = P.Cast()
     self.is_training = is_training
     self.num_labels = num_labels
     if config is not None:
         # for PPL calculation in evaluation
         self.input_mask_length = Tensor(
             config.batch_size * (config.seq_length - 1), mstype.float32)
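These scraped examples only show __init__; the recurring pattern is that P.OneHot expands integer labels into a matrix filled with on_value/off_value at call time. A minimal, self-contained sketch of that call signature (my own illustration, not part of the original example):

import numpy as np
import mindspore.common.dtype as mstype
from mindspore import Tensor
from mindspore.ops import operations as P

onehot = P.OneHot()
on_value = Tensor(1.0, mstype.float32)
off_value = Tensor(0.0, mstype.float32)

labels = Tensor(np.array([1, 0, 2], dtype=np.int32))
# P.OneHot is called as onehot(indices, depth, on_value, off_value);
# depth=3 yields a (3, 3) matrix with on_value at each label's index.
print(onehot(labels, 3, on_value, off_value))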
Example #2
 def __init__(self, sparse=False):
     super(SoftmaxCrossEntropyExpand, self).__init__()
     self.exp = P.Exp()
     self.reduce_sum = P.ReduceSum(keep_dims=True)
     self.onehot = P.OneHot()
     self.on_value = Tensor(1.0, mstype.float32)
     self.off_value = Tensor(0.0, mstype.float32)
     self.div = P.Div()
     self.log = P.Log()
     self.sum_cross_entropy = P.ReduceSum(keep_dims=False)
     self.mul = P.Mul()
     self.mul2 = P.Mul()
     self.cast = P.Cast()
     self.reduce_mean = P.ReduceMean(keep_dims=False)
     self.sparse = sparse
     self.reduce_max = P.ReduceMax(keep_dims=True)
     self.sub = P.Sub()
Example #3
 def __init__(self, config):
     super(TransformerTrainingLoss, self).__init__(auto_prefix=False)
     self.vocab_size = config.vocab_size
     self.onehot = P.OneHot()
     self.on_value = Tensor(float(1 - config.label_smoothing),
                            mstype.float32)
     self.off_value = Tensor(
         config.label_smoothing / float(self.vocab_size - 1),
         mstype.float32)
     self.reduce_sum = P.ReduceSum()
     self.reduce_mean = P.ReduceMean()
     self.reshape = P.Reshape()
     self.last_idx = (-1, )
     self.flatten = P.Flatten()
     self.neg = P.Neg()
     self.cast = P.Cast()
     self.batch_size = config.batch_size
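The on_value/off_value pair above implements label smoothing: the true class receives 1 - label_smoothing and the remaining vocab_size - 1 classes share the rest. A tiny worked check with hypothetical numbers (label_smoothing = 0.1, vocab_size = 4; illustration only):

label_smoothing, vocab_size = 0.1, 4
on_value = 1.0 - label_smoothing                  # 0.9 for the true class
off_value = label_smoothing / (vocab_size - 1)    # ~0.0333 for every other class
assert abs(on_value + (vocab_size - 1) * off_value - 1.0) < 1e-9  # each row sums to 1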
Example #4
    def __init__(self,
                 batch_size:int,
                 vocab_size:int,
                 k:int=0,
                 p:float=1.0,
                 temperature:float=1.0,
                 min_tokens_to_keep:int=1,
                 fp16:bool=False):
        super(TopKTopP_Filter, self).__init__()

        self.topK = P.TopK(sorted=True)
        self.batch_size = batch_size
        self.vocab_size = vocab_size
        self.min_tokens_to_keep = min_tokens_to_keep
        self.k = k
        self.p = p
        self.temp = temperature
        self.fp16 = fp16

        self.cumsum = P.CumSum()
        
        self.onehot = P.OneHot()
        self.cast = P.Cast()
        self.expand_dims = P.ExpandDims()
        self.device_target = get_context("device_target")
        self.mask = Tensor(
            np.zeros((batch_size, vocab_size)), dtype=mstype.float32)
        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
        self.softmax = P.Softmax()
        self.safety_mask_left = np.zeros(
            (batch_size, min_tokens_to_keep), dtype=float)
        self.safety_mask_right = np.ones((batch_size, 
                                         vocab_size-min_tokens_to_keep), 
                                         dtype=float)
        self.safety_mask = Tensor(np.concatenate((self.safety_mask_left, 
                                                 self.safety_mask_right), axis=1), 
                                                 dtype=mstype.float32)
        self.NINF = float(-1e6)
        
        assert self.temp > 0.0, 'temperature must be positive'
        assert self.k >= 0, 'top_k must be non-negative'
        if self.k > 0:
            assert self.min_tokens_to_keep <= self.k, \
                'k must be greater than or equal to min_tokens_to_keep for top-p sampling'
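The safety_mask built above guarantees that the min_tokens_to_keep highest-probability tokens survive the cumulative-probability (top-p) cutoff. A simplified NumPy sketch of that rule as I read it (the original construct is not shown; this illustration assumes min_keep >= 1 and descending-sorted probabilities):

import numpy as np

def top_p_keep_mask(sorted_probs, p, min_keep):
    # sorted_probs: probabilities sorted in descending order along the last axis
    cumulative = np.cumsum(sorted_probs, axis=-1)
    remove = cumulative > p             # drop the low-probability tail past the p mass
    remove[..., :min_keep] = False      # the safety mask: never drop the head tokens
    return ~remove                      # True = keep; filtered logits get NINF instead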
Example #5
 def __init__(self, config):
     super(LabelSmoothedCrossEntropyCriterion, self).__init__()
     self.vocab_size = config.vocab_size
     self.onehot = P.OneHot()
     self.on_value = Tensor(float(1 - config.label_smoothing),
                            mstype.float32)
     self.off_value = Tensor(
         config.label_smoothing / float(self.vocab_size - 1),
         mstype.float32)
     self.reduce_sum = P.ReduceSum()
     self.reduce_mean = P.ReduceMean()
     self.reshape = P.Reshape()
     self.last_idx = (-1, )
     self.flatten = P.Flatten()
     self.neg = P.Neg()
     self.cast = P.Cast()
     self.flat_shape = (config.batch_size * config.seq_length, )
     self.get_shape = P.Shape()
Example #6
 def __init__(self, vocab_size, embedding_size, use_one_hot=False, embedding_table='normal', dtype=mstype.float32):
     super(Embedding, self).__init__()
     validator.check_subclass("dtype", dtype, mstype.number_type)
     self.vocab_size = vocab_size
     self.embedding_size = embedding_size
     self.use_one_hot = use_one_hot
     self.embedding_table = Parameter(initializer(embedding_table, [vocab_size, embedding_size]),
                                      name='embedding_table')
     self.dtype = dtype
     self.expand = P.ExpandDims()
     self.reshape_flat = P.Reshape()
     self.shp_flat = (-1,)
     self.gather = P.GatherV2()
     self.one_hot = P.OneHot()
     self.on_value = Tensor(1.0, self.dtype)
     self.off_value = Tensor(0.0, self.dtype)
     self.array_mul = P.MatMul()
     self.reshape = P.Reshape()
     self.get_shp = P.Shape()
Example #7
 def __init__(self,
              vocab_size,
              embedding_size,
              use_one_hot=False,
              embedding_table='normal',
              dtype=mstype.float32,
              padding_idx=None):
     super(Embedding, self).__init__()
     self.vocab_size = validator.check_value_type('vocab_size', vocab_size,
                                                  [int], self.cls_name)
     self.embedding_size = validator.check_value_type(
         'embedding_size', embedding_size, [int], self.cls_name)
     validator.check_value_type('use_one_hot', use_one_hot, [bool],
                                self.cls_name)
     validator.check_subclass("dtype", dtype, mstype.number_type,
                              self.cls_name)
     self.use_one_hot = use_one_hot
     self.dtype = dtype
     self.init_tensor = initializer(embedding_table,
                                    [vocab_size, embedding_size])
     self.padding_idx = padding_idx
     if padding_idx is not None:
         self.padding_idx = validator.check_int_range(
             padding_idx, 0, vocab_size, Rel.INC_BOTH, "padding_idx",
             self.cls_name)
         if isinstance(self.init_tensor,
                       Tensor) and self.init_tensor.init is not None:
             self.init_tensor = self.init_tensor.init_data()
         self.init_tensor = self.init_tensor.asnumpy()
         self.init_tensor[self.padding_idx] = 0
         self.init_tensor = Tensor(self.init_tensor)
     self.embedding_table = Parameter(self.init_tensor,
                                      name='embedding_table')
     self.expand = P.ExpandDims()
     self.reshape_flat = P.Reshape()
     self.shp_flat = (-1, )
     self.gather = P.Gather()
     self.one_hot = P.OneHot()
     self.on_value = Tensor(1.0, self.dtype)
     self.off_value = Tensor(0.0, self.dtype)
     self.array_mul = P.MatMul()
     self.reshape = P.Reshape()
     self.get_shp = P.Shape()
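When use_one_hot is set, the lookup is performed as a one-hot matmul rather than a gather: flatten the ids, one-hot them to (N, vocab_size), and multiply by the embedding table. A NumPy sketch showing that the two paths agree (illustrative; the example's construct is not shown):

import numpy as np

vocab_size, embedding_size = 5, 3
table = np.random.randn(vocab_size, embedding_size).astype(np.float32)
ids = np.array([[1, 4], [0, 2]], dtype=np.int32)

flat = ids.reshape(-1)                            # shp_flat = (-1,)
one_hot = np.eye(vocab_size, dtype=np.float32)[flat]
by_matmul = one_hot @ table                       # the use_one_hot path (array_mul)
by_gather = table[flat]                           # the gather path

assert np.allclose(by_matmul, by_gather)          # the two paths are equivalent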
Example #8
    def __init__(self,
                 tag_to_index,
                 batch_size=1,
                 seq_length=128,
                 is_training=True):

        super(CRF, self).__init__()
        self.target_size = len(tag_to_index)
        self.is_training = is_training
        self.tag_to_index = tag_to_index
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.START_TAG = "<START>"
        self.STOP_TAG = "<STOP>"
        self.START_VALUE = Tensor(self.target_size - 2, dtype=mstype.int32)
        self.STOP_VALUE = Tensor(self.target_size - 1, dtype=mstype.int32)
        transitions = np.random.normal(size=(self.target_size,
                                             self.target_size)).astype(
                                                 np.float32)
        transitions[tag_to_index[self.START_TAG], :] = -10000
        transitions[:, tag_to_index[self.STOP_TAG]] = -10000
        self.transitions = Parameter(Tensor(transitions),
                                     name="transition_matrix")
        self.cat = P.Concat(axis=-1)
        self.argmax = P.ArgMaxWithValue(axis=-1)
        self.log = P.Log()
        self.exp = P.Exp()
        self.sum = P.ReduceSum()
        self.tile = P.Tile()
        self.reduce_sum = P.ReduceSum(keep_dims=True)
        self.reshape = P.Reshape()
        self.expand = P.ExpandDims()
        self.mean = P.ReduceMean()
        init_alphas = np.ones(shape=(self.batch_size,
                                     self.target_size)) * -10000.0
        init_alphas[:, self.tag_to_index[self.START_TAG]] = 0.
        self.init_alphas = Tensor(init_alphas, dtype=mstype.float32)
        self.cast = P.Cast()
        self.reduce_max = P.ReduceMax(keep_dims=True)
        self.on_value = Tensor(1.0, dtype=mstype.float32)
        self.off_value = Tensor(0.0, dtype=mstype.float32)
        self.onehot = P.OneHot()
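init_alphas above forces every path to begin at the START tag (score 0 there, -10000 elsewhere), and each forward step of the CRF is a numerically stable log-sum-exp over the previous tags plus transition and emission scores. A NumPy sketch of one such step, assuming the convention transitions[i, j] = score of moving from tag i to tag j (my illustration, not the example's construct):

import numpy as np

def crf_forward_step(alphas, transitions, emissions):
    # alphas: (batch, tags); transitions: (tags, tags); emissions: (batch, tags)
    scores = alphas[:, :, None] + transitions[None, :, :] + emissions[:, None, :]
    m = scores.max(axis=1, keepdims=True)          # ReduceMax, for numerical stability
    return (m + np.log(np.exp(scores - m).sum(axis=1, keepdims=True)))[:, 0, :]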
Example #9
    def __init__(self, vocab_size, embedding_size, use_one_hot=False, embedding_table='normal', dtype=mstype.float32):
        super(Embedding2, self).__init__()
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.use_one_hot = use_one_hot
        np.random.seed(1)  # fix the seed so the generated parameters are reproducible for comparison studies; comment this line out if fixed parameters are not needed
        self.embedding_table = Parameter(Tensor(np.random.uniform(0, 1, (vocab_size, embedding_size)),mindspore.float32),
                                         name='embedding_table')

        self.dtype = dtype
        self.expand = P.ExpandDims()
        self.reshape_flat = P.Reshape()
        self.shp_flat = (-1,)
        self.gather = P.GatherV2()
        self.one_hot = P.OneHot()
        self.on_value = Tensor(1.0, self.dtype)
        self.off_value = Tensor(0.0, self.dtype)
        self.array_mul = P.MatMul()
        self.reshape = P.Reshape()
        self.get_shp = P.Shape()
Example #10
 def __init__(self, sparse=False, stra_list=None):
     super(SoftmaxCrossEntropyExpand, self).__init__()
     if stra_list is None:
         stra_list = []
     if len(stra_list) < 11:
         stra_list = [None] * 11
     self.exp = P.Exp()
     self.reduce_sum = P.ReduceSum(keep_dims=True).shard(strategy=stra_list[1])
     self.onehot = P.OneHot().shard(strategy=stra_list[2])
     self.on_value = Tensor(1.0, mstype.float32)
     self.off_value = Tensor(0.0, mstype.float32)
     self.div = P.Div().shard(strategy=stra_list[3])
     self.log = P.Log().shard(strategy=stra_list[4])
     self.sum_cross_entropy = P.ReduceSum(keep_dims=False).shard(strategy=stra_list[5])
     self.mul = P.Mul().shard(strategy=stra_list[6])
     self.mul2 = P.Mul().shard(strategy=stra_list[7])
     self.cast = P.Cast()
     self.reduce_mean = P.ReduceMean(keep_dims=False).shard(strategy=stra_list[8])
     self.sparse = sparse
     self.reduce_max = P.ReduceMax(keep_dims=True).shard(strategy=stra_list[9])
     self.sub = P.Sub().shard(strategy=stra_list[10])
Example #11
 def __init__(self,
              vocab_size,
              embedding_size,
              embedding_shape,
              use_one_hot_embeddings=False,
              initializer_range=0.02):
     super(EmbeddingLookup, self).__init__()
     self.vocab_size = vocab_size
     self.use_one_hot_embeddings = use_one_hot_embeddings
     self.embedding_table = Parameter(initializer(
         TruncatedNormal(initializer_range), [vocab_size, embedding_size]),
                                      name='embedding_table')
     self.expand = P.ExpandDims()
     self.shape_flat = (-1, )
     self.gather = P.GatherV2()
     self.one_hot = P.OneHot()
     self.on_value = Tensor(1.0, mstype.float32)
     self.off_value = Tensor(0.0, mstype.float32)
     self.array_mul = P.MatMul()
     self.reshape = P.Reshape()
     self.shape = tuple(embedding_shape)
Example #12
    def __init__(self,
                 vocab_size,
                 embedding_dim,
                 use_one_hot_embeddings=False):
        super(EmbeddingLookup, self).__init__()
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.use_one_hot_embeddings = use_one_hot_embeddings
        self.embedding_table = Parameter(normal_weight(
            [vocab_size, embedding_dim], embedding_dim),
                                         name='embedding_table')

        self.expand = P.ExpandDims()
        self.shape_flat = (-1, )
        self.gather = P.GatherV2()
        self.one_hot = P.OneHot()
        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
        self.array_mul = P.MatMul()
        self.reshape = P.Reshape()
        self.shape = P.Shape()
Example #13
    def __init__(self,
                 is_grad=True,
                 sparse=False,
                 reduction=None,
                 smooth_factor=0,
                 num_classes=2):
        super(SoftmaxCrossEntropyWithLogits, self).__init__(reduction)
        self.is_grad = is_grad
        self.sparse = sparse
        validator.check_integer("num_classes", num_classes, 1, Rel.GT, self.cls_name)
        validator.check_number_range("smooth_factor", smooth_factor, 0, 1, Rel.INC_BOTH, self.cls_name)
        self.smooth_factor = smooth_factor
        self.num_classes = num_classes
        self.softmax_cross_entropy = P.SoftmaxCrossEntropyWithLogits()
        self.one_hot = P.OneHot()
        self.on_value = Tensor(1.0 - self.smooth_factor, mstype.float32)
        self.off_value = Tensor(1.0 * self.smooth_factor / (self.num_classes - 1), mstype.float32)
        self.is_cpugpu = context.get_context('device_target') in ["CPU", "GPU"]

        if self.is_cpugpu:
            self.sparse_softmax_cross_entropy = P.SparseSoftmaxCrossEntropyWithLogits(is_grad=self.is_grad)
Example #14
 def __init__(self,
              use_relative_positions,
              embedding_size,
              embedding_shape,
              use_token_type=False,
              token_type_vocab_size=16,
              use_one_hot_embeddings=False,
              initializer_range=0.02,
              max_position_embeddings=512,
              dropout_prob=0.1):
     super(EmbeddingPostprocessor, self).__init__()
     self.use_token_type = use_token_type
     self.token_type_vocab_size = token_type_vocab_size
     self.use_one_hot_embeddings = use_one_hot_embeddings
     self.max_position_embeddings = max_position_embeddings
     self.token_type_embedding = nn.Embedding(
         vocab_size=token_type_vocab_size,
         embedding_size=embedding_size,
         use_one_hot=use_one_hot_embeddings)
     self.shape_flat = (-1, )
     self.one_hot = P.OneHot()
     self.on_value = Tensor(1.0, mstype.float32)
     self.off_value = Tensor(0.1, mstype.float32)
     self.array_mul = P.MatMul()
     self.reshape = P.Reshape()
     self.shape = tuple(embedding_shape)
     self.dropout = nn.Dropout(1 - dropout_prob)
     self.gather = P.Gather()
     self.use_relative_positions = use_relative_positions
     self.slice = P.StridedSlice()
     _, seq, _ = self.shape
     self.full_position_embedding = nn.Embedding(
         vocab_size=max_position_embeddings,
         embedding_size=embedding_size,
         use_one_hot=False)
     self.layernorm = nn.LayerNorm((embedding_size, ))
     self.position_ids = Tensor(
         np.arange(seq).reshape(-1, seq).astype(np.int32))
     self.add = P.Add()
Example #15
 def __init__(self,
              length,
              depth,
              max_relative_position,
              initializer_range,
              use_one_hot_embeddings=False):
     super(RelaPosEmbeddingsGenerator, self).__init__()
     self.depth = depth
     self.vocab_size = max_relative_position * 2 + 1
     self.use_one_hot_embeddings = use_one_hot_embeddings
     self.embeddings_table = Parameter(
         initializer(TruncatedNormal(initializer_range),
                     [self.vocab_size, self.depth]))
     self.relative_positions_matrix = RelaPosMatrixGenerator(
         length=length, max_relative_position=max_relative_position)
     self.reshape = P.Reshape()
     self.one_hot = P.OneHot()
     self.on_value = Tensor(1.0, mstype.float32)
     self.off_value = Tensor(0.0, mstype.float32)
     self.shape = P.Shape()
     self.gather = P.Gather()  # index_select
     self.matmul = P.BatchMatMul()
Example #16
    def __init__(self,
                 embedding_size,
                 embedding_shape,
                 use_relative_positions=False,
                 use_token_type=False,
                 token_type_vocab_size=16,
                 use_one_hot_embeddings=False,
                 initializer_range=0.02,
                 max_position_embeddings=512,
                 dropout_prob=0.1):
        super(EmbeddingPostprocessor, self).__init__()
        self.use_token_type = use_token_type
        self.token_type_vocab_size = token_type_vocab_size
        self.use_one_hot_embeddings = use_one_hot_embeddings
        self.max_position_embeddings = max_position_embeddings
        self.embedding_table = Parameter(initializer
                                         (TruncatedNormal(initializer_range),
                                          [token_type_vocab_size,
                                           embedding_size]),
                                         name='embedding_table')

        self.shape_flat = (-1,)
        self.one_hot = P.OneHot()
        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.1, mstype.float32)
        self.array_mul = P.MatMul()
        self.reshape = P.Reshape()
        self.shape = tuple(embedding_shape)
        self.layernorm = nn.LayerNorm((embedding_size,))
        self.dropout = nn.Dropout(1 - dropout_prob)
        self.gather = P.GatherV2()
        self.use_relative_positions = use_relative_positions
        self.slice = P.StridedSlice()
        self.full_position_embeddings = Parameter(initializer
                                                  (TruncatedNormal(initializer_range),
                                                   [max_position_embeddings,
                                                    embedding_size]),
                                                  name='full_position_embeddings')
Example #17
 def __init__(self, embbeding_size=128, classnum=270762, s=32, a=1.0, m=0.3, b=0.2):
     super(CombineMarginFC, self).__init__()
     weight_shape = [classnum, embbeding_size]
     weight_init = initializer(me_init.ReidXavierUniform(), weight_shape)
     self.weight = Parameter(weight_init, name='weight')
     self.m = m
     self.s = s
     self.a = a
     self.b = b
     self.m_const = Tensor(self.m, dtype=mstype.float32)
     self.a_const = Tensor(self.a, dtype=mstype.float32)
     self.b_const = Tensor(self.b, dtype=mstype.float32)
     self.s_const = Tensor(self.s, dtype=mstype.float32)
     self.m_const_zero = Tensor(0.0, dtype=mstype.float32)
     self.a_const_one = Tensor(1.0, dtype=mstype.float32)
     self.normalize = P.L2Normalize(axis=1)
     self.fc = P.MatMul(transpose_b=True)
     self.onehot = P.OneHot()
     self.transpose = P.Transpose()
     self.acos = P.ACos()
     self.cos = P.Cos()
     self.cast = P.Cast()
     self.on_value = Tensor(1.0, mstype.float32)
     self.off_value = Tensor(0.0, mstype.float32)
Example #18
    def __init__(self,
                 config,
                 batch_size,
                 num_classes,
                 target_means=(0., 0., 0., 0.),
                 target_stds=(0.1, 0.1, 0.2, 0.2)):
        super(RcnnMask, self).__init__()
        cfg = config
        self.rcnn_loss_mask_fb_weight = Tensor(
            np.array(cfg.rcnn_loss_mask_fb_weight).astype(np.float16))
        self.rcnn_mask_out_channels = cfg.rcnn_mask_out_channels
        self.target_means = target_means
        self.target_stds = target_stds
        self.num_classes = num_classes
        self.in_channels = cfg.rcnn_in_channels

        self.fpn_mask = FpnMask(self.in_channels, self.rcnn_mask_out_channels,
                                self.num_classes)

        self.logicaland = P.LogicalAnd()
        self.loss_mask = P.SigmoidCrossEntropyWithLogits()
        self.onehot = P.OneHot()
        self.greater = P.Greater()
        self.cast = P.Cast()
        self.sum_loss = P.ReduceSum()
        self.tile = P.Tile()
        self.expandims = P.ExpandDims()

        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)

        self.num_bboxes = cfg.num_expected_pos_stage2 * batch_size
        rmv_first = np.ones((self.num_bboxes, self.num_classes))
        rmv_first[:, 0] = np.zeros((self.num_bboxes, ))
        self.rmv_first_tensor = Tensor(rmv_first.astype(np.float16))
        self.mean_loss = P.ReduceMean()
Example #19
 def __init__(self, args):
     super(CombineMarginFC, self).__init__()
     weight_shape = [args.num_classes, args.emb_size]
     weight_init = initializer(me_init.ReidXavierUniform(), weight_shape)
     self.weight = Parameter(weight_init, name='weight')
     self.m = args.margin_m
     self.s = args.margin_s
     self.a = args.margin_a
     self.b = args.margin_b
     self.m_const = Tensor(self.m, dtype=mstype.float16)
     self.a_const = Tensor(self.a, dtype=mstype.float16)
     self.b_const = Tensor(self.b, dtype=mstype.float16)
     self.s_const = Tensor(self.s, dtype=mstype.float16)
     self.m_const_zero = Tensor(0, dtype=mstype.float16)
     self.a_const_one = Tensor(1, dtype=mstype.float16)
     self.normalize = P.L2Normalize(axis=1)
     self.fc = P.MatMul(transpose_b=True)
     self.onehot = P.OneHot()
     self.transpose = P.Transpose()
     self.acos = P.ACos()
     self.cos = P.Cos()
     self.cast = P.Cast()
     self.on_value = Tensor(1.0, mstype.float32)
     self.off_value = Tensor(0.0, mstype.float32)
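The margin constants above parameterize a combined angular margin; my reading (an assumption, since construct is not shown) is that for the target class the cosine logit cos(theta) becomes cos(a * theta + m) - b, scaled by s, with the one-hot selecting which column to modify. A NumPy sketch under that assumption:

import numpy as np

def combined_margin_logits(cosine, labels, s=64.0, a=1.0, m=0.3, b=0.2):
    # cosine: (batch, num_classes) logits of L2-normalized features; labels: (batch,)
    theta = np.arccos(np.clip(cosine, -1.0, 1.0))
    modified = np.cos(a * theta + m) - b           # margin-adjusted target logit
    one_hot = np.eye(cosine.shape[1], dtype=bool)[labels]
    return s * np.where(one_hot, modified, cosine)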
Example #20
# ============================================================================
from mindspore.ops import operations as P
from mindspore.ops.operations import _grad_ops as G
from mindspore.ops import Primitive
import mindspore as ms
import mindspore.common.dtype as mstype
from mindspore.common.tensor import Tensor

make_tuple = Primitive('make_tuple')
reshape = P.Reshape()
backend_reshape = Primitive('Reshape')
cast = P.Cast()
backend_cast = Primitive('Cast')
transpose = P.Transpose()
backend_transpose = Primitive('Transpose')
onehot1 = P.OneHot()
onehot2 = P.OneHot()
backend_onehot1 = Primitive('OneHot')
backend_onehot2 = Primitive('OneHot')
stridedslicegrad = G.StridedSliceGrad()
backend_stridedslicegrad = Primitive('StridedSliceGrad')
on_value = Tensor(1.0, mstype.float32)
off_value = Tensor(0.0, mstype.float32)
depth = Tensor(2, mstype.int32)
shape = (2, 4, 2, 2)
dropout_gen_mask = P.DropoutGenMask()


class FnDict:
    def __init__(self):
        self.fnDict = {}
Example #21
    def __init__(self, config):
        super(Mask_Rcnn_Resnet50, self).__init__()
        self.train_batch_size = config.batch_size
        self.num_classes = config.num_classes
        self.anchor_scales = config.anchor_scales
        self.anchor_ratios = config.anchor_ratios
        self.anchor_strides = config.anchor_strides
        self.target_means = tuple(config.rcnn_target_means)
        self.target_stds = tuple(config.rcnn_target_stds)

        # Anchor generator
        anchor_base_sizes = None
        self.anchor_base_sizes = list(
            self.anchor_strides) if anchor_base_sizes is None else anchor_base_sizes

        self.anchor_generators = []
        for anchor_base in self.anchor_base_sizes:
            self.anchor_generators.append(
                AnchorGenerator(anchor_base, self.anchor_scales, self.anchor_ratios))

        self.num_anchors = len(self.anchor_ratios) * len(self.anchor_scales)

        featmap_sizes = config.feature_shapes
        assert len(featmap_sizes) == len(self.anchor_generators)

        self.anchor_list = self.get_anchors(featmap_sizes)

        # Backbone resnet50
        self.backbone = ResNetFea(ResidualBlockUsing,
                                  config.resnet_block,
                                  config.resnet_in_channels,
                                  config.resnet_out_channels,
                                  False)

        # Fpn
        self.fpn_ncek = FeatPyramidNeck(config.fpn_in_channels,
                                        config.fpn_out_channels,
                                        config.fpn_num_outs)

        # Rpn and rpn loss
        self.gt_labels_stage1 = Tensor(np.ones((self.train_batch_size, config.num_gts)).astype(np.uint8))
        self.rpn_with_loss = RPN(config,
                                 self.train_batch_size,
                                 config.rpn_in_channels,
                                 config.rpn_feat_channels,
                                 config.num_anchors,
                                 config.rpn_cls_out_channels)

        # Proposal
        self.proposal_generator = Proposal(config,
                                           self.train_batch_size,
                                           config.activate_num_classes,
                                           config.use_sigmoid_cls)
        self.proposal_generator.set_train_local(config, True)
        self.proposal_generator_test = Proposal(config,
                                                config.test_batch_size,
                                                config.activate_num_classes,
                                                config.use_sigmoid_cls)
        self.proposal_generator_test.set_train_local(config, False)

        # Assign and sampler stage two
        self.bbox_assigner_sampler_for_rcnn = BboxAssignSampleForRcnn(config, self.train_batch_size,
                                                                      config.num_bboxes_stage2, True)
        self.decode = P.BoundingBoxDecode(max_shape=(768, 1280), means=self.target_means, \
                                          stds=self.target_stds)

        # Roi
        self.roi_align = SingleRoIExtractor(config,
                                            config.roi_layer,
                                            config.roi_align_out_channels,
                                            config.roi_align_featmap_strides,
                                            self.train_batch_size,
                                            config.roi_align_finest_scale,
                                            mask=False)
        self.roi_align.set_train_local(config, True)

        self.roi_align_mask = SingleRoIExtractor(config,
                                                 config.roi_layer,
                                                 config.roi_align_out_channels,
                                                 config.roi_align_featmap_strides,
                                                 self.train_batch_size,
                                                 config.roi_align_finest_scale,
                                                 mask=True)
        self.roi_align_mask.set_train_local(config, True)

        self.roi_align_test = SingleRoIExtractor(config,
                                                 config.roi_layer,
                                                 config.roi_align_out_channels,
                                                 config.roi_align_featmap_strides,
                                                 1,
                                                 config.roi_align_finest_scale,
                                                 mask=False)
        self.roi_align_test.set_train_local(config, False)

        self.roi_align_mask_test = SingleRoIExtractor(config,
                                                      config.roi_layer,
                                                      config.roi_align_out_channels,
                                                      config.roi_align_featmap_strides,
                                                      1,
                                                      config.roi_align_finest_scale,
                                                      mask=True)
        self.roi_align_mask_test.set_train_local(config, False)

        # Rcnn
        self.rcnn_cls = RcnnCls(config, self.train_batch_size, self.num_classes)
        self.rcnn_mask = RcnnMask(config, self.train_batch_size, self.num_classes)

        # Op declare
        self.squeeze = P.Squeeze()
        self.cast = P.Cast()

        self.concat = P.Concat(axis=0)
        self.concat_1 = P.Concat(axis=1)
        self.concat_2 = P.Concat(axis=2)
        self.reshape = P.Reshape()
        self.select = P.Select()
        self.greater = P.Greater()
        self.transpose = P.Transpose()

        # Test mode
        self.test_batch_size = config.test_batch_size
        self.split = P.Split(axis=0, output_num=self.test_batch_size)
        self.split_shape = P.Split(axis=0, output_num=4)
        self.split_scores = P.Split(axis=1, output_num=self.num_classes)
        self.split_fb_mask = P.Split(axis=1, output_num=self.num_classes)
        self.split_cls = P.Split(axis=0, output_num=self.num_classes-1)
        self.tile = P.Tile()
        self.gather = P.GatherNd()

        self.rpn_max_num = config.rpn_max_num

        self.zeros_for_nms = Tensor(np.zeros((self.rpn_max_num, 3)).astype(np.float16))
        self.ones_mask = np.ones((self.rpn_max_num, 1)).astype(bool)  # np.bool is removed in NumPy >= 1.24
        self.zeros_mask = np.zeros((self.rpn_max_num, 1)).astype(bool)
        self.bbox_mask = Tensor(np.concatenate((self.ones_mask, self.zeros_mask,
                                                self.ones_mask, self.zeros_mask), axis=1))
        self.nms_pad_mask = Tensor(np.concatenate((self.ones_mask, self.ones_mask,
                                                   self.ones_mask, self.ones_mask, self.zeros_mask), axis=1))

        self.test_score_thresh = Tensor(np.ones((self.rpn_max_num, 1)).astype(np.float16) * config.test_score_thr)
        self.test_score_zeros = Tensor(np.ones((self.rpn_max_num, 1)).astype(np.float16) * 0)
        self.test_box_zeros = Tensor(np.ones((self.rpn_max_num, 4)).astype(np.float16) * -1)
        self.test_iou_thr = Tensor(np.ones((self.rpn_max_num, 1)).astype(np.float16) * config.test_iou_thr)
        self.test_max_per_img = config.test_max_per_img
        self.nms_test = P.NMSWithMask(config.test_iou_thr)
        self.softmax = P.Softmax(axis=1)
        self.logicand = P.LogicalAnd()
        self.oneslike = P.OnesLike()
        self.test_topk = P.TopK(sorted=True)
        self.test_num_proposal = self.test_batch_size * self.rpn_max_num

        # Improve speed
        self.concat_start = min(self.num_classes - 2, 55)
        self.concat_end = (self.num_classes - 1)

        # Init tensor
        roi_align_index = [np.array(np.ones((config.num_expected_pos_stage2 + config.num_expected_neg_stage2, 1)) * i,
                                    dtype=np.float16) for i in range(self.train_batch_size)]

        roi_align_index_test = [np.array(np.ones((config.rpn_max_num, 1)) * i, dtype=np.float16) \
                                for i in range(self.test_batch_size)]

        self.roi_align_index_tensor = Tensor(np.concatenate(roi_align_index))
        self.roi_align_index_test_tensor = Tensor(np.concatenate(roi_align_index_test))

        roi_align_index_pos = [np.array(np.ones((config.num_expected_pos_stage2, 1)) * i,
                                        dtype=np.float16) for i in range(self.train_batch_size)]
        self.roi_align_index_tensor_pos = Tensor(np.concatenate(roi_align_index_pos))

        self.rcnn_loss_cls_weight = Tensor(np.array(config.rcnn_loss_cls_weight).astype(np.float16))
        self.rcnn_loss_reg_weight = Tensor(np.array(config.rcnn_loss_reg_weight).astype(np.float16))
        self.rcnn_loss_mask_fb_weight = Tensor(np.array(config.rcnn_loss_mask_fb_weight).astype(np.float16))

        self.argmax_with_value = P.ArgMaxWithValue(axis=1)
        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
        self.onehot = P.OneHot()
        self.reducesum = P.ReduceSum()
        self.sigmoid = P.Sigmoid()
        self.expand_dims = P.ExpandDims()
        self.test_mask_fb_zeros = Tensor(np.zeros((self.rpn_max_num, 28, 28)).astype(np.float16))
        self.value = Tensor(1.0, mstype.float16)
Example #22
    def __init__(self,
                 config,
                 representation_size,
                 batch_size,
                 num_classes,
                 target_means=(0., 0., 0., 0.),
                 target_stds=(0.1, 0.1, 0.2, 0.2)):
        super(Rcnn, self).__init__()
        cfg = config
        self.rcnn_loss_cls_weight = Tensor(
            np.array(cfg.rcnn_loss_cls_weight).astype(np.float16))
        self.rcnn_loss_reg_weight = Tensor(
            np.array(cfg.rcnn_loss_reg_weight).astype(np.float16))
        self.rcnn_fc_out_channels = cfg.rcnn_fc_out_channels
        self.target_means = target_means
        self.target_stds = target_stds
        self.num_classes = num_classes
        self.in_channels = cfg.rcnn_in_channels
        self.train_batch_size = batch_size
        self.test_batch_size = cfg.test_batch_size
        self.use_ambigous_sample = cfg.use_ambigous_sample

        shape_0 = (self.rcnn_fc_out_channels, representation_size)
        weights_0 = initializer("XavierUniform",
                                shape=shape_0[::-1],
                                dtype=mstype.float16).to_tensor()
        shape_1 = (self.rcnn_fc_out_channels, self.rcnn_fc_out_channels)
        weights_1 = initializer("XavierUniform",
                                shape=shape_1[::-1],
                                dtype=mstype.float16).to_tensor()
        self.shared_fc_0 = DenseNoTranpose(representation_size,
                                           self.rcnn_fc_out_channels,
                                           weights_0)
        self.shared_fc_1 = DenseNoTranpose(self.rcnn_fc_out_channels,
                                           self.rcnn_fc_out_channels,
                                           weights_1)

        cls_weight = initializer(
            'Normal',
            shape=[num_classes, self.rcnn_fc_out_channels][::-1],
            dtype=mstype.float16).to_tensor()
        reg_weight = initializer(
            'Normal',
            shape=[num_classes * 4, self.rcnn_fc_out_channels][::-1],
            dtype=mstype.float16).to_tensor()
        self.cls_scores = DenseNoTranpose(self.rcnn_fc_out_channels,
                                          num_classes, cls_weight)
        self.reg_scores = DenseNoTranpose(self.rcnn_fc_out_channels,
                                          num_classes * 4, reg_weight)

        self.flatten = P.Flatten()
        self.relu = P.ReLU()
        self.logicaland = P.LogicalAnd()
        self.loss_cls = P.SoftmaxCrossEntropyWithLogits()
        self.loss_bbox = P.SmoothL1Loss(beta=1.0)
        self.reshape = P.Reshape()
        self.onehot = P.OneHot()
        self.greater = P.Greater()
        self.equal = P.Equal()
        self.cast = P.Cast()
        self.sum_loss = P.ReduceSum()
        self.tile = P.Tile()
        self.expandims = P.ExpandDims()

        self.gather = P.GatherNd()
        self.argmax = P.ArgMaxWithValue(axis=1)

        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
        self.value = Tensor(1.0, mstype.float16)

        self.num_bboxes = (cfg.num_expected_pos_stage2 +
                           cfg.num_expected_neg_stage2) * batch_size
        if self.use_ambigous_sample:
            self.num_bboxes = (cfg.num_expected_pos_stage2 +
                               cfg.num_expected_amb_stage2 +
                               cfg.num_expected_neg_stage2) * batch_size

        rmv_first = np.ones((self.num_bboxes, self.num_classes))
        rmv_first[:, 0] = np.zeros((self.num_bboxes, ))
        self.rmv_first_tensor = Tensor(rmv_first.astype(np.float16))

        self.num_bboxes_test = cfg.rpn_max_num * cfg.test_batch_size

        range_max = np.arange(self.num_bboxes_test).astype(np.int32)
        self.range_max = Tensor(range_max)
Example #23
    def __init__(
        self,
        decoder,
        config=None,
        batch_size=None,
        tokenizer=None,
        generate_length=1,
        topk_num=0,
        topp_prob=1.0,
        temperature=1.0,
        min_tokens_to_keep=1,
        early_stop=False,
        demo_mode=False,
        return_ids=False,
        return_last_token_logits=False,
        append_eos=False,
    ):

        assert config is not None, 'config is required for sampling.'

        self.decoder = decoder
        self.config = config
        self.tokenizer = tokenizer
        self.generate_length = generate_length
        self.topk_num = topk_num
        self.topp_prob = topp_prob
        self.temperature = temperature
        self.min_tokens_to_keep = min_tokens_to_keep
        self.early_stop = early_stop
        self.demo_mode = demo_mode
        self.return_ids = return_ids
        self.return_last_token_logits = return_last_token_logits
        self.append_eos = append_eos

        self.seq_length = config.seq_length
        self.batch_size = config.batch_size if batch_size is None else batch_size
        self.vocab_size = config.vocab_size

        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
        self.reshape = P.Reshape()
        self.cumsum = P.CumSum()
        self.onehot = P.OneHot()
        self.cast = P.Cast()
        self.concat = P.Concat()
        self.sample_function = P.RandomCategorical(mstype.int32)
        self.filter_distribution = TopKTopP_Filter(
            batch_size=self.batch_size,
            vocab_size=self.vocab_size,
            k=self.topk_num,
            p=self.topp_prob,
            temperature=self.temperature,
            min_tokens_to_keep=self.min_tokens_to_keep)

        if self.tokenizer is not None:
            self.eos_id = self.tokenizer.eos_token_id
        else:
            self.eos_id = config.vocab_size - 1

        if self.tokenizer is not None:
            self.eos_text = self.tokenizer.eos_token
        else:
            self.eos_text = "<|endoftext|>"

        if self.demo_mode is True:
            assert self.batch_size == 1, 'Demo mode requires batch_size equal to 1, but got batch_size={}'.format(
                self.batch_size)
Example #24
    def __init__(self,
                 decoder:Model,
                 model_config=None,
                 generate_length:int=1,
                 tokenizer:Optional[GPT2Tokenizer]=None,
                 topk_num:int=0,
                 topp_prob:float=1.0,
                 temperature:float=1.0,
                 min_tokens_to_keep:int=1,
                 early_stop:bool=False,
                 demo_mode:bool=False,
                 return_ids:bool=False,
                 return_last_token_logits:bool=False,
                 append_eos:bool=False):

       
        assert model_config is not None, 'model_config is required for sampling.'
        
        self.model_config = model_config
        self.topk_num = topk_num
        self.topp_prob = topp_prob
        self.temperature = temperature
        self.min_tokens_to_keep = min_tokens_to_keep
        
        self.decoder = decoder
        self.tokenizer = tokenizer
        self.reshape = P.Reshape()
        self.cumsum = P.CumSum()
        self.onehot = P.OneHot()
        self.generate_length = generate_length
        self.seq_length = model_config.seq_length
        self.batch_size = model_config.batch_size
        self.vocab_size = model_config.vocab_size
        
        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
        self.cast = P.Cast()
        self.concat = P.Concat()
        self.early_stop = early_stop
        self.demo_mode = demo_mode
        self.return_ids = return_ids
        self.return_last_token_logits = return_last_token_logits
        self.append_eos = append_eos
        self.device_target = get_context("device_target")

        # choose the sampling primitive that matches the device target
        if self.device_target == "GPU":
            self.sample_function = P.Multinomial(seed=1)
        elif self.device_target == "Ascend":
            self.sample_function = P.RandomCategorical(mstype.int32)
        else:
            raise NotImplementedError("Device Target {} not supported.".format(self.device_target))

        self.filter_distribution = TopKTopP_Filter(self.batch_size,
                                                   self.vocab_size,
                                                   k=self.topk_num,
                                                   p=self.topp_prob,
                                                   temperature=self.temperature,
                                                   min_tokens_to_keep=self.min_tokens_to_keep)

        if self.tokenizer is not None:
            self.eos_id = self.tokenizer.eos_token_id
        else:
            self.eos_id = model_config.vocab_size-1

        if self.tokenizer is not None:
            self.eos_text = self.tokenizer.eos_token
        else:
            self.eos_text = "<|endoftext|>"

        if self.demo_mode is True:
            assert self.batch_size == 1, 'Demo mode requires batch_size equal to 1, but got batch_size={}'.format(
                self.batch_size)
Example #25
    def __init__(
        self,
        vocab_size,
        embedding_size,
        embedding_shape,
        use_one_hot_embeddings=False,
        initializer_range=0.02,
        name='embedding_table',
        batch_size=12,
        damping=0.03,
        loss_scale=1,
        frequency=100,
    ):
        super(Embedding_Thor, self).__init__()
        self.vocab_size = vocab_size
        self.use_one_hot_embeddings = use_one_hot_embeddings
        self.embedding_table = Parameter(initializer(
            TruncatedNormal(initializer_range), [vocab_size, embedding_size]),
                                         name=name)
        self.thor = True
        self.expand = P.ExpandDims()
        self.shape_flat = (-1, )
        self.gather = P.GatherV2()
        self.one_hot = P.OneHot()
        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
        self.array_mul = P.MatMul()
        self.reshape = P.Reshape()
        self.em_shape = tuple(embedding_shape)
        self.shape = P.Shape()
        self.loss_scale = Tensor(1 / loss_scale, mstype.float16)

        self.matrix_A_inv = Parameter(Tensor(
            np.zeros([vocab_size]).astype(np.float16)),
                                      name='matrix_A_inv',
                                      requires_grad=False)
        self.matrix_G_inv = Parameter(Tensor(
            np.zeros([embedding_size, embedding_size]).astype(np.float16)),
                                      name="matrix_G_inv",
                                      requires_grad=False)
        self.fake_G = Tensor(
            np.zeros([embedding_size, embedding_size]).astype(np.float16))
        self.dampingA = Tensor(np.ones([vocab_size]).astype(np.float32))
        self.dampingG = Tensor(np.identity(embedding_size), mstype.float32)
        self.cov_step = Parameter(initializer(0, [1], mstype.int32),
                                  name="cov_step",
                                  requires_grad=False)
        self.freq = Tensor(frequency, mstype.int32)
        self.axis = 0
        self.damping = damping
        self.sqrt = P.Sqrt()
        self.mul = P.Mul()
        self.cast = P.Cast()
        self.cube_matmul = P.CusMatMulCube(transpose_a=True)
        self.vector_matmul = P.CusBatchMatMul()
        self.cholesky = P.CusCholeskyTrsm()
        self.matrix_combine = P.CusMatrixCombine()
        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.inv = P.Inv()
        self.getG = P.InsertGradientOf(self.save_gradient)
        self.batch_size = batch_size
Example #26
 ('ArgmaxNet', {
     'block': ArgmaxNet(),
     'desc_inputs': [Tensor(np.array([[128, 32, 32, 64],[128, 32, 32, 64]]).astype(np.float16))],
     'desc_bprop': [Tensor(np.array([[128, 32, 32, 64],[128, 32, 32, 64]]).astype(np.float16))],
     'skip': ['backward']}),
 ('ArgminNet', {
     'block': ArgminNet(),
     'desc_inputs': [Tensor(np.array([[128, 32, 32, 64],[128, 32, 32, 64]]).astype(np.float16))],
     'desc_bprop': [Tensor(np.array([[128, 32, 32, 64],[128, 32, 32, 64]]).astype(np.float16))],
     'skip': ['backward']}),
 ('CumSumNet', {
     'block': CumSumNet(),
     'desc_const': [0],
     'desc_inputs': [Tensor(np.array([[3, 4, 6, 10],[1, 6, 7, 9],[4, 3, 8, 7],[1, 3, 7, 9]]).astype(np.float16))],
     'desc_bprop': [Tensor(np.array([[3, 4, 6, 10],[1, 6, 7, 9],[4, 3, 8, 7],[1, 3, 7, 9]]).astype(np.float16))]}),
 ('OneHot', {
     'block': P.OneHot(),
     'desc_const': [3, Tensor(1.0, mstype.float32), Tensor(0.0, mstype.float32)],
     'desc_inputs': [Tensor(np.array([64]).astype(np.int32))],
     'desc_bprop': [[64, 2]]}),
 ('ReduceProd_0', {
     'block': P.ReduceProd(),
     'desc_const': [0],
     'desc_inputs': [[3, 2]],
     'desc_bprop': [[2]]}),
 ('ReduceProd_1', {
     'block': P.ReduceProd(keep_dims=True),
     'desc_const': [0],
     'desc_inputs': [[3, 2]],
     'desc_bprop': [[1, 2]]}),
 ('CumProd', {
     'block': P.CumProd(),
Example #27
 ('CumSumNet', {
     'block':
     CumSumNet(),
     'desc_const': [0],
     'desc_inputs': [
         Tensor(
             np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7],
                       [1, 3, 7, 9]]).astype(np.float16))
     ],
     'desc_bprop': [
         Tensor(
             np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7],
                       [1, 3, 7, 9]]).astype(np.float16))
     ]
 }),
 ('OneHot', {
     'block':
     P.OneHot(),
     'desc_const':
     [3, Tensor(1.0, mstype.float32),
      Tensor(0.0, mstype.float32)],
     'desc_inputs': [Tensor(np.array([64]).astype(np.int32))],
     'desc_bprop': [[64, 2]]
 }),
 ('ReduceProd_0', {
     'block': P.ReduceProd(),
     'desc_const': [0],
     'desc_inputs': [[3, 2]],
     'desc_bprop': [[2]]
 }),
 ('ReduceProd_1', {
     'block': P.ReduceProd(keep_dims=True),
     'desc_const': [0],
Example #28
    def __init__(self, args, strategy):
        super(SemiAutoOneHotNet, self).__init__()
        self.a = args.a
        self.b = args.b
        self.c = args.c
        self.d = args.d
        self.e = args.e
        self.cast = P.Cast()
        self.cast.set_strategy(strategy=strategy.twod_strategy)
        self.cast1 = P.Cast()
        self.cast1.set_strategy(strategy=strategy.twod_strategy)
        self.cast2 = P.Cast()
        self.cast2.set_strategy(strategy=strategy.twod_strategy)
        self.cast3 = P.Cast()
        self.cast3.set_strategy(strategy=strategy.scalar_strategy)
        self.cast4 = P.Cast()
        self.cast4.set_strategy(strategy=strategy.scalar_strategy)
        self.a_const = Tensor(self.a, dtype=mstype.float32)
        self.b_const = Tensor(self.b, dtype=mstype.float32)
        self.c_const = Tensor(self.c, dtype=mstype.float32)
        self.d_const = Tensor(self.d, dtype=mstype.float32)
        self.e_const = Tensor(self.e, dtype=mstype.float32)
        self.m_const_zero = Tensor(0, dtype=mstype.float32)
        self.a_const_one = Tensor(1, dtype=mstype.float32)
        self.onehot = P.OneHot()
        self.onehot.set_strategy(strategy=strategy.onehot_strategy)
        self.exp = P.Exp()
        self.exp.set_strategy(strategy=strategy.twod_strategy)
        self.exp2 = P.Exp()
        self.exp2.set_strategy(strategy=strategy.twod_strategy)
        self.exp3 = P.Exp()
        self.exp3.set_strategy(strategy=strategy.twod_strategy)
        self.mul_const = P.Mul()
        self.mul_const.set_strategy(strategy=strategy.scalar_twod_strategy)
        self.mul_const2 = P.TensorAdd()
        self.mul_const2.set_strategy(strategy=strategy.scalar_twod_strategy)
        self.mul_const3 = P.Sub()
        self.mul_const3.set_strategy(strategy=strategy.twod_scalar_strategy)
        self.mul_const4 = P.Sub()
        self.mul_const4.set_strategy(strategy=strategy.scalar_twod_strategy)
        self.mul_const5 = P.Mul()
        self.mul_const5.set_strategy(strategy=strategy.twod_scalar_strategy)
        self.mul = P.Mul()
        self.mul.set_strategy(strategy=strategy.twod_twod_strategy)
        self.mul2 = P.Mul()
        self.mul2.set_strategy(strategy=strategy.twod_twod_strategy)
        self.mul3 = P.TensorAdd()
        self.mul3.set_strategy(strategy=strategy.twod_twod_strategy)
        self.mul4 = P.Sub()
        self.mul4.set_strategy(strategy=strategy.twod_twodbc_strategy)
        self.mul5 = P.RealDiv()
        self.mul5.set_strategy(strategy=strategy.twod_twodbc_strategy)
        self.mul6 = P.Mul()
        self.mul6.set_strategy(strategy=strategy.twod_twod_strategy)
        self.mul7 = P.Mul()
        self.mul7.set_strategy(strategy=strategy.twod_scalar_strategy)
        self.mul8 = P.RealDiv()
        self.mul8.set_strategy(strategy=strategy.scalar_scalar_strategy)
        self.mul9 = P.TensorAdd()
        self.mul9.set_strategy(strategy=strategy.twod_scalar_strategy)

        self.reduce_max = P.ReduceMax(keep_dims=True)
        self.reduce_max.set_strategy(strategy=strategy.twod_strategy)

        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.reduce_sum.set_strategy(strategy=strategy.twod_strategy)
        self.reduce_sum_2 = P.ReduceSum(keep_dims=False)
        self.reduce_sum_2.set_strategy(strategy=strategy.twod_strategy)
        self.reduce_sum_3 = P.ReduceSum(keep_dims=False)
        self.reduce_sum_3.set_strategy(strategy=strategy.oned_strategy)

        self.reshape = P.Reshape()
        self.log = P.Log()
        self.log.set_strategy(strategy=strategy.twod_strategy)

        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
        self.normalize = P.L2Normalize(axis=1)
        self.normalize.set_strategy(strategy=strategy.twod_strategy_m)
        self.normalize2 = P.L2Normalize(axis=1)
        self.normalize2.set_strategy(strategy=strategy.twod_strategy_m)
        self.fc = P.MatMul(transpose_b=True)
        self.fc.set_strategy(strategy=strategy.twodbc_twod_strategy)
        weight_shape = [args.num_classes, args.emb_size]
        weight_np = np.zeros(weight_shape, np.float32)
        self.weight = Parameter(Tensor(weight_np),
                                name='model_parallel_weight')
Example #29
 def __init__(self, axis=-1, depth=1, on_value=1.0, off_value=0.0, dtype=mstype.float32):
     super(OneHot, self).__init__()
     self.onehot = P.OneHot(axis)
     self.depth = depth
     self.on_value = Tensor(on_value, dtype)
     self.off_value = Tensor(off_value, dtype)
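The example omits the forward pass; a hedged sketch of the matching construct, mirroring how nn.OneHot behaves (assumed, not part of the scraped source):

 def construct(self, indices):
     # expand integer indices into depth-wide rows of on_value/off_value
     return self.onehot(indices, self.depth, self.on_value, self.off_value)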
Example #30
 def __init__(self, reduction='mean'):
     super(NLLLoss, self).__init__(reduction)
     self.one_hot = P.OneHot()
     self.reduce_sum = P.ReduceSum()
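A hedged sketch of how these two ops typically combine into a negative log-likelihood (the example's construct and reduction handling are not shown; this illustrates the per-sample term only):

import numpy as np
import mindspore.common.dtype as mstype
from mindspore import Tensor
from mindspore.ops import operations as P

one_hot = P.OneHot()
reduce_sum = P.ReduceSum()
on_value = Tensor(1.0, mstype.float32)
off_value = Tensor(0.0, mstype.float32)

log_probs = Tensor(np.log(np.array([[0.7, 0.2, 0.1],
                                    [0.1, 0.8, 0.1]], dtype=np.float32)))
targets = Tensor(np.array([0, 1], dtype=np.int32))

# per-sample NLL: -log p(target); reduction='mean' would then average these
nll = -reduce_sum(one_hot(targets, 3, on_value, off_value) * log_probs, 1)
print(nll)  # approximately [0.357, 0.223]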