Example 1
0
    def __init__(self, config):
        super(DeepFMModel, self).__init__()

        self.batch_size = config.batch_size
        self.field_size = config.data_field_size
        self.vocab_size = config.data_vocab_size
        self.emb_dim = config.data_emb_dim
        self.deep_layer_dims_list, self.deep_layer_act = config.deep_layer_args
        self.init_args = config.init_args
        self.weight_bias_init = config.weight_bias_init
        self.keep_prob = config.keep_prob
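        # First-order (linear) weight vector and the feature embedding table used by the FM part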
        init_acts = [('W_l2', [self.vocab_size, 1], 'normal'),
                     ('V_l2', [self.vocab_size, self.emb_dim], 'normal')]
        var_map = init_var_dict(self.init_args, init_acts)
        self.fm_w = var_map["W_l2"]
        self.embedding_table = var_map["V_l2"]
        # Deep layers
        self.deep_input_dims = self.field_size * self.emb_dim
        self.all_dim_list = [self.deep_input_dims
                             ] + self.deep_layer_dims_list + [1]
        self.dense_layer_1 = DenseLayer(self.all_dim_list[0],
                                        self.all_dim_list[1],
                                        self.weight_bias_init,
                                        self.deep_layer_act,
                                        self.keep_prob,
                                        convert_dtype=True)
        self.dense_layer_2 = DenseLayer(self.all_dim_list[1],
                                        self.all_dim_list[2],
                                        self.weight_bias_init,
                                        self.deep_layer_act,
                                        self.keep_prob,
                                        convert_dtype=True)
        self.dense_layer_3 = DenseLayer(self.all_dim_list[2],
                                        self.all_dim_list[3],
                                        self.weight_bias_init,
                                        self.deep_layer_act,
                                        self.keep_prob,
                                        convert_dtype=True)
        self.dense_layer_4 = DenseLayer(self.all_dim_list[3],
                                        self.all_dim_list[4],
                                        self.weight_bias_init,
                                        self.deep_layer_act,
                                        self.keep_prob,
                                        convert_dtype=True)
        self.dense_layer_5 = DenseLayer(self.all_dim_list[4],
                                        self.all_dim_list[5],
                                        self.weight_bias_init,
                                        self.deep_layer_act,
                                        self.keep_prob,
                                        convert_dtype=True,
                                        use_act=False)
        # FM, linear layers
        self.Gatherv2 = P.GatherV2()
        self.Mul = P.Mul()
        self.ReduceSum = P.ReduceSum(keep_dims=False)
        self.Reshape = P.Reshape()
        self.Square = P.Square()
        self.Shape = P.Shape()
        self.Tile = P.Tile()
        self.Concat = P.Concat(axis=1)
        self.Cast = P.Cast()
Example 2
0
    def __init__(self, config, batch_size, num_bboxes, add_gt_as_proposals):
        super(BboxAssignSampleForRcnn, self).__init__()
        cfg = config
        self.use_ambigous_sample = cfg.use_ambigous_sample
        self.batch_size = batch_size
        self.neg_iou_thr = cfg.neg_iou_thr_stage2
        self.pos_iou_thr = cfg.pos_iou_thr_stage2
        self.min_pos_iou = cfg.min_pos_iou_stage2
        self.num_gts = cfg.num_gts
        self.num_bboxes = num_bboxes
        self.num_expected_pos = cfg.num_expected_pos_stage2
        self.num_expected_amb = cfg.num_expected_amb_stage2
        self.num_expected_neg = cfg.num_expected_neg_stage2
        self.num_expected_total = cfg.num_expected_total_stage2

        self.add_gt_as_proposals = add_gt_as_proposals
        self.label_inds = Tensor(np.arange(1, self.num_gts + 1).astype(np.int32))
        self.add_gt_as_proposals_valid = Tensor(np.array(self.add_gt_as_proposals * np.ones(self.num_gts),
                                                         dtype=np.int32))

        self.concat = P.Concat(axis=0)
        self.max_gt = P.ArgMaxWithValue(axis=0)
        self.max_anchor = P.ArgMaxWithValue(axis=1)
        self.sum_inds = P.ReduceSum()
        self.iou = P.IOU()
        self.greaterequal = P.GreaterEqual()
        self.greater = P.Greater()
        self.select = P.Select()
        self.gatherND = P.GatherNd()
        self.gatherV2 = P.GatherV2()
        self.squeeze = P.Squeeze()
        self.cast = P.Cast()
        self.logicaland = P.LogicalAnd()
        self.less = P.Less()
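        # Random samplers that draw the expected numbers of positive / ambiguous / negative proposals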
        self.random_choice_with_mask_pos = P.RandomChoiceWithMask(self.num_expected_pos)
        self.random_choice_with_mask_amb = P.RandomChoiceWithMask(self.num_expected_amb)
        self.random_choice_with_mask_neg = P.RandomChoiceWithMask(self.num_expected_neg)
        self.reshape = P.Reshape()
        self.equal = P.Equal()
        self.bounding_box_encode = P.BoundingBoxEncode(means=(0.0, 0.0, 0.0, 0.0), stds=(0.1, 0.1, 0.2, 0.2))
        self.concat_axis1 = P.Concat(axis=1)
        self.logicalnot = P.LogicalNot()
        self.tile = P.Tile()

        # Check
        self.check_gt_one = Tensor(np.array(-1 * np.ones((self.num_gts, 4)), dtype=np.float16))
        self.check_anchor_two = Tensor(np.array(-2 * np.ones((self.num_bboxes, 4)), dtype=np.float16))

        # Init tensor
        self.assigned_gt_inds = Tensor(np.array(-1 * np.ones(num_bboxes), dtype=np.int32))
        self.assigned_gt_zeros = Tensor(np.array(np.zeros(num_bboxes), dtype=np.int32))
        self.assigned_gt_ones = Tensor(np.array(np.ones(num_bboxes), dtype=np.int32))
        self.assigned_amb = Tensor(np.array(-3 * np.ones(num_bboxes), dtype=np.int32))
        self.assigned_gt_ignores = Tensor(np.array(-1 * np.ones(num_bboxes), dtype=np.int32))
        self.assigned_pos_ones = Tensor(np.array(np.ones(self.num_expected_pos), dtype=np.int32))

        self.gt_ignores = Tensor(np.array(-1 * np.ones(self.num_gts), dtype=np.int32))
        self.range_pos_size = Tensor(np.arange(self.num_expected_pos).astype(np.float16))
        self.range_amb_size = Tensor(np.arange(self.num_expected_amb).astype(np.float16))
        self.check_neg_mask = Tensor(np.array(np.ones(self.num_expected_neg - self.num_expected_pos), dtype=np.bool_))
        if self.use_ambigous_sample:
            self.check_neg_mask = Tensor(
                np.array(np.ones(self.num_expected_neg - self.num_expected_pos - self.num_expected_amb), dtype=np.bool_))
        check_neg_mask_ignore_end = np.array(np.ones(self.num_expected_neg), dtype=np.bool_)
        check_neg_mask_ignore_end[-1] = False
        self.check_neg_mask_ignore_end = Tensor(check_neg_mask_ignore_end)
        self.bboxs_neg_mask = Tensor(np.zeros((self.num_expected_neg, 4), dtype=np.float16))

        self.bboxs_amb_mask = Tensor(np.zeros((self.num_expected_amb, 4), dtype=np.float16))
        self.labels_neg_mask = Tensor(np.array(np.zeros(self.num_expected_neg), dtype=np.uint8))
        self.labels_amb_mask = Tensor(np.array(np.zeros(self.num_expected_amb) + 2, dtype=np.uint8))

        self.reshape_shape_pos = (self.num_expected_pos, 1)
        self.reshape_shape_amb = (self.num_expected_amb, 1)
        self.reshape_shape_neg = (self.num_expected_neg, 1)

        self.scalar_zero = Tensor(0.0, dtype=mstype.float16)
        self.scalar_neg_iou_thr = Tensor(self.neg_iou_thr, dtype=mstype.float16)
        self.scalar_pos_iou_thr = Tensor(self.pos_iou_thr, dtype=mstype.float16)
        self.scalar_min_pos_iou = Tensor(self.min_pos_iou, dtype=mstype.float16)
Example 3
0
    def __init__(self, config):
        super(WideDeepModel, self).__init__()
        self.batch_size = config.batch_size
        host_device_mix = bool(config.host_device_mix)
        parameter_server = bool(config.parameter_server)
        parallel_mode = context.get_auto_parallel_context("parallel_mode")
        is_auto_parallel = parallel_mode in (ParallelMode.SEMI_AUTO_PARALLEL,
                                             ParallelMode.AUTO_PARALLEL)
        if is_auto_parallel:
            self.batch_size = self.batch_size * get_group_size()
        is_field_slice = config.field_slice
        sparse = config.sparse
        self.field_size = config.field_size
        self.vocab_size = config.vocab_size
        self.vocab_cache_size = config.vocab_cache_size
        self.emb_dim = config.emb_dim
        self.deep_layer_dims_list = config.deep_layer_dim
        self.deep_layer_act = config.deep_layer_act
        self.init_args = config.init_args
        self.weight_init, self.bias_init = config.weight_bias_init
        self.weight_bias_init = config.weight_bias_init
        self.emb_init = config.emb_init
        self.drop_out = config.dropout_flag
        self.keep_prob = config.keep_prob
        self.deep_input_dims = self.field_size * self.emb_dim
        self.layer_dims = self.deep_layer_dims_list + [1]
        self.all_dim_list = [self.deep_input_dims] + self.layer_dims

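        # Bias term of the wide (linear) part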
        init_acts = [('Wide_b', [1], self.emb_init)]
        var_map = init_var_dict(self.init_args, init_acts)
        self.wide_b = var_map["Wide_b"]
        self.dense_layer_1 = DenseLayer(self.all_dim_list[0],
                                        self.all_dim_list[1],
                                        self.weight_bias_init,
                                        self.deep_layer_act,
                                        convert_dtype=True,
                                        drop_out=config.dropout_flag)
        self.dense_layer_2 = DenseLayer(self.all_dim_list[1],
                                        self.all_dim_list[2],
                                        self.weight_bias_init,
                                        self.deep_layer_act,
                                        convert_dtype=True,
                                        drop_out=config.dropout_flag)
        self.dense_layer_3 = DenseLayer(self.all_dim_list[2],
                                        self.all_dim_list[3],
                                        self.weight_bias_init,
                                        self.deep_layer_act,
                                        convert_dtype=True,
                                        drop_out=config.dropout_flag)
        self.dense_layer_4 = DenseLayer(self.all_dim_list[3],
                                        self.all_dim_list[4],
                                        self.weight_bias_init,
                                        self.deep_layer_act,
                                        convert_dtype=True,
                                        drop_out=config.dropout_flag)
        self.dense_layer_5 = DenseLayer(self.all_dim_list[4],
                                        self.all_dim_list[5],
                                        self.weight_bias_init,
                                        self.deep_layer_act,
                                        use_activation=False,
                                        convert_dtype=True,
                                        drop_out=config.dropout_flag)
        self.wide_mul = P.Mul()
        self.deep_mul = P.Mul()
        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.reshape = P.Reshape()
        self.deep_reshape = P.Reshape()
        self.square = P.Square()
        self.shape = P.Shape()
        self.tile = P.Tile()
        self.concat = P.Concat(axis=1)
        self.cast = P.Cast()
        self.unique = P.Unique().shard(((1, ), ))
        self.wide_gatherv2 = P.Gather()
        self.deep_gatherv2 = P.Gather()
        if is_auto_parallel and sparse and not is_field_slice and not parameter_server:
            target = 'DEVICE'
            if host_device_mix:
                target = 'CPU'
            self.wide_embeddinglookup = nn.EmbeddingLookup(
                self.vocab_size,
                1,
                target=target,
                slice_mode=nn.EmbeddingLookup.TABLE_ROW_SLICE)
            if config.deep_table_slice_mode == "column_slice":
                self.deep_embeddinglookup = nn.EmbeddingLookup(
                    self.vocab_size,
                    self.emb_dim,
                    target=target,
                    slice_mode=nn.EmbeddingLookup.TABLE_COLUMN_SLICE)
                self.dense_layer_1.dropout.dropout.shard(
                    ((1, get_group_size()), ))
                self.dense_layer_1.matmul.shard(
                    ((1, get_group_size()), (get_group_size(), 1)))
                self.dense_layer_1.matmul.add_prim_attr(
                    "field_size", self.field_size)
                self.deep_mul.shard(((1, 1, get_group_size()), (1, 1, 1)))
                self.deep_reshape.add_prim_attr("skip_redistribution", True)
            else:
                self.deep_embeddinglookup = nn.EmbeddingLookup(
                    self.vocab_size,
                    self.emb_dim,
                    target=target,
                    slice_mode=nn.EmbeddingLookup.TABLE_ROW_SLICE)
            self.reduce_sum.add_prim_attr("cross_batch", True)
            self.embedding_table = self.deep_embeddinglookup.embedding_table
        elif is_auto_parallel and host_device_mix and is_field_slice and config.full_batch and config.manual_shape:
            manual_shapes = tuple((s[0] for s in config.manual_shape))
            self.deep_embeddinglookup = nn.EmbeddingLookup(
                self.vocab_size,
                self.emb_dim,
                slice_mode=nn.EmbeddingLookup.FIELD_SLICE,
                manual_shapes=manual_shapes)
            self.wide_embeddinglookup = nn.EmbeddingLookup(
                self.vocab_size,
                1,
                slice_mode=nn.EmbeddingLookup.FIELD_SLICE,
                manual_shapes=manual_shapes)
            self.deep_mul.shard(
                ((1, get_group_size(), 1), (1, get_group_size(), 1)))
            self.wide_mul.shard(
                ((1, get_group_size(), 1), (1, get_group_size(), 1)))
            self.reduce_sum.shard(((1, get_group_size(), 1), ))
            self.dense_layer_1.dropout.dropout.shard(((1, get_group_size()), ))
            self.dense_layer_1.matmul.shard(
                ((1, get_group_size()), (get_group_size(), 1)))
            self.embedding_table = self.deep_embeddinglookup.embedding_table
        elif parameter_server:
            cache_enable = self.vocab_cache_size > 0
            target = 'DEVICE' if cache_enable else 'CPU'
            if not cache_enable:
                sparse = True
            if is_auto_parallel and config.full_batch and cache_enable:
                self.deep_embeddinglookup = nn.EmbeddingLookup(
                    self.vocab_size,
                    self.emb_dim,
                    target=target,
                    slice_mode=nn.EmbeddingLookup.TABLE_ROW_SLICE,
                    sparse=sparse,
                    vocab_cache_size=self.vocab_cache_size)
                self.wide_embeddinglookup = nn.EmbeddingLookup(
                    self.vocab_size,
                    1,
                    target=target,
                    slice_mode=nn.EmbeddingLookup.TABLE_ROW_SLICE,
                    sparse=sparse,
                    vocab_cache_size=self.vocab_cache_size)
            else:
                self.deep_embeddinglookup = nn.EmbeddingLookup(
                    self.vocab_size,
                    self.emb_dim,
                    target=target,
                    sparse=sparse,
                    vocab_cache_size=self.vocab_cache_size)
                self.wide_embeddinglookup = nn.EmbeddingLookup(
                    self.vocab_size,
                    1,
                    target=target,
                    sparse=sparse,
                    vocab_cache_size=self.vocab_cache_size)
            self.embedding_table = self.deep_embeddinglookup.embedding_table
            self.deep_embeddinglookup.embedding_table.set_param_ps()
            self.wide_embeddinglookup.embedding_table.set_param_ps()
        else:
            self.deep_embeddinglookup = nn.EmbeddingLookup(
                self.vocab_size,
                self.emb_dim,
                target='DEVICE',
                sparse=sparse,
                vocab_cache_size=self.vocab_cache_size)
            self.wide_embeddinglookup = nn.EmbeddingLookup(
                self.vocab_size,
                1,
                target='DEVICE',
                sparse=sparse,
                vocab_cache_size=self.vocab_cache_size)
            self.embedding_table = self.deep_embeddinglookup.embedding_table
Example 4
0
 def __init__(self):
     super(MSELoss, self).__init__()
     self.reduce_sum = P.ReduceSum()
     self.square = P.Square()
     self.reduce_mean = P.ReduceMean()
Example 5
0
 def construct(self, logits, label):
     label = self.one_hot(label,
                          F.shape(logits)[1], self.on_value, self.off_value)
     loss = self.cross_entropy(logits, label)[0]
     loss = P.RealDiv()(P.ReduceSum()(loss, -1), self.num)
     return loss
Example 6
0
 def __init__(self, network):
     super(FeatureCollectCell, self).__init__(auto_prefix=False)
     self._network = network
     self.shape = P.Shape()
     self.sum = P.ReduceSum()
Example 7
0
 def __init__(self):
     super(AxisListDefaultNet, self).__init__()
     self.reduce_sum = P.ReduceSum()
Example 8
0
def test_exec():
    context.set_context(mode=context.GRAPH_MODE)
    return test_exec_case


raise_set = [
    ('Squeeze_1_Error', {
        'block': (lambda x: P.Squeeze(axis=1.2), {
            'exception': ValueError
        }),
        'desc_inputs': [Tensor(np.ones(shape=[3, 1, 5]))]
    }),
    ('Squeeze_2_Error', {
        'block': (lambda x: P.Squeeze(axis=((1.2, 1.3))), {
            'exception': ValueError
        }),
        'desc_inputs': [Tensor(np.ones(shape=[3, 1, 5]))]
    }),
    ('ReduceSum_Error', {
        'block': (lambda x: P.ReduceSum(keep_dims=1), {
            'exception': ValueError
        }),
        'desc_inputs': [Tensor(np.ones(shape=[3, 1, 5]))]
    }),
]


@mindspore_test(pipeline_for_verify_exception_for_case_by_case_config)
def test_check_exception():
    return raise_set
Example 9
0
 def __init__(self):
     super(CustNet3, self).__init__()
     self.op = P.ReduceSum()
     self.t1 = Tensor(np.ones([2, 2]), dtype=ms.int32)
     self.t2 = Tensor(np.ones([1, 5]), dtype=ms.float16)
     self.t2 = 1
Example 10
0
 def __init__(self):
     super(SquaredLoss, self).__init__()
     self.reshape = P.Reshape()
     self.shape = P.Shape()
     self.two = Tensor(np.array([2.0]).astype(np.float32))
     self.reduce_sum = P.ReduceSum()
Example 11
0
    def __init__(self,
                 batch_size,
                 input_size,
                 hidden_size,
                 num_layers,
                 bidirectional=False,
                 batch_norm=False,
                 rnn_type='LSTM',
                 device_target="GPU"):
        super(BatchRNN, self).__init__()
        self.batch_size = batch_size
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.rnn_type = rnn_type
        self.bidirectional = bidirectional
        self.has_bias = True
        self.is_batch_norm = batch_norm
        self.num_directions = 2 if bidirectional else 1
        self.reshape_op = P.Reshape()
        self.shape_op = P.Shape()
        self.sum_op = P.ReduceSum()

        input_size_list = [input_size]
        for i in range(num_layers - 1):
            input_size_list.append(hidden_size)
        layers = []

        for i in range(num_layers):
            layers.append(
                nn.LSTMCell(input_size=input_size_list[i],
                            hidden_size=hidden_size,
                            bidirectional=bidirectional,
                            has_bias=self.has_bias))

        weights = []
        for i in range(num_layers):
            weight_size = (input_size_list[i] +
                           hidden_size) * hidden_size * self.num_directions * 4
            if self.has_bias:
                if device_target == "GPU":
                    bias_size = self.num_directions * hidden_size * 4 * 2
                else:
                    bias_size = self.num_directions * hidden_size * 4
                weight_size = weight_size + bias_size

            stdv = 1 / math.sqrt(hidden_size)
            w_np = np.random.uniform(-stdv, stdv,
                                     (weight_size, 1, 1)).astype(np.float32)

            weights.append(
                Parameter(initializer(Tensor(w_np), w_np.shape),
                          name="weight" + str(i)))

        self.h, self.c = self.stack_lstm_default_state(
            batch_size,
            hidden_size,
            num_layers=num_layers,
            bidirectional=bidirectional)
        self.lstms = layers
        self.weight = ParameterTuple(tuple(weights))

        if batch_norm:
            batch_norm_layer = []
            for i in range(num_layers - 1):
                batch_norm_layer.append(nn.BatchNorm1d(hidden_size))
            self.batch_norm_list = batch_norm_layer
Example 12
0
 def __init__(self, strategy0, strategy1):
     super().__init__()
     self.fc_nobias = P.MatMul(transpose_b=True).shard(strategy0)
     self.reduce_sum = P.ReduceSum(keep_dims=False).shard(strategy1)
Example 13
0
    def __init__(self,
                 batch_size,
                 seq_length,
                 vocab_size,
                 decoder,
                 beam_width=4,
                 decoder_layers_nums=4,
                 length_penalty_weight=0.6,
                 cov_penalty_factor=0.1,
                 hidden_size=1024,
                 max_decode_length=64,
                 sos_id=2,
                 eos_id=3,
                 is_using_while=True,
                 compute_type=mstype.float32):
        super(BeamSearchDecoder, self).__init__()

        self.encoder_length = seq_length
        self.hidden_size = hidden_size
        self.batch_size = batch_size
        self.vocab_size = vocab_size
        self.beam_width = beam_width
        self.decoder_layers_nums = decoder_layers_nums
        self.length_penalty_weight = length_penalty_weight
        self.cov_penalty_factor = cov_penalty_factor
        self.max_decode_length = max_decode_length
        self.decoder = decoder
        self.is_using_while = is_using_while

        self.add = P.Add()
        self.expand = P.ExpandDims()
        self.reshape = P.Reshape()
        self.shape_flat = (-1, )
        self.shape = P.Shape()

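        # Constant [batch_size, beam_width] tensors of zeros and -INF used when updating beam scores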
        self.zero_tensor = Tensor(np.zeros([batch_size, beam_width]),
                                  mstype.float32)
        self.ninf_tensor = Tensor(np.full([batch_size, beam_width], -INF),
                                  mstype.float32)

        self.select = P.Select()
        self.flat_shape = (batch_size, beam_width * vocab_size)
        self.topk = P.TopK(sorted=True)
        self.floor_div = P.FloorDiv()
        self.vocab_size_tensor = Tensor(self.vocab_size, mstype.int32)
        self.real_div = P.RealDiv()
        self.mod = Mod()
        self.equal = P.Equal()
        self.eos_ids = Tensor(np.full([batch_size, beam_width], eos_id),
                              mstype.int32)

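        # Index helpers: beam_ids enumerates beams within each batch entry; batch_ids maps flattened rows back to their batch index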
        beam_ids = np.tile(
            np.arange(beam_width).reshape((1, beam_width)), [batch_size, 1])
        self.beam_ids = Tensor(beam_ids, mstype.int32)

        batch_ids = np.arange(batch_size * beam_width).reshape(
            (batch_size, beam_width)) // beam_width
        self.batch_ids = Tensor(batch_ids, mstype.int32)

        self.concat = P.Concat(axis=-1)
        self.gather_nd = P.GatherNd()

        self.start_ids = Tensor(np.full([batch_size * beam_width, 1], sos_id),
                                mstype.int32)
        if self.is_using_while:
            self.start = Tensor(0, dtype=mstype.int32)
            self.init_seq = Tensor(
                np.full([batch_size, beam_width, self.max_decode_length + 1],
                        sos_id), mstype.int32)
        else:
            self.init_seq = Tensor(
                np.full([batch_size, beam_width, 1], sos_id), mstype.int32)

        init_scores = np.tile(np.array([[0.] + [-INF] * (beam_width - 1)]),
                              [batch_size, 1])
        self.init_scores = Tensor(init_scores, mstype.float32)
        self.init_finished = Tensor(
            np.zeros([batch_size, beam_width], dtype=np.bool_))
        self.init_length = Tensor(
            np.zeros([batch_size, beam_width], dtype=np.int32))

        self.length_penalty = LengthPenalty(weight=length_penalty_weight)

        self.one = Tensor(1, mstype.int32)
        self.prob_concat = P.Concat(axis=1)
        self.cast = P.Cast()
        self.decoder_hidden_state = Tensor(
            np.zeros([
                self.decoder_layers_nums, 2, self.batch_size * self.beam_width,
                hidden_size
            ]), mstype.float32)

        self.zeros_scores = Tensor(
            np.zeros([batch_size, beam_width], dtype=np.float64))
        self.active_index = Tensor(
            np.ones([batch_size, beam_width], dtype=np.int32))
        self.init_zeros = Tensor(
            np.zeros([batch_size, beam_width], dtype=np.int32))
        self.init_ones = Tensor(
            np.ones([batch_size, beam_width], dtype=np.float32))

        self.accu_attn_scores = Tensor(
            np.zeros([batch_size, beam_width, self.encoder_length],
                     dtype=np.float32))

        self.zeros = Tensor([0], mstype.int32)
        self.eos_tensor = Tensor(
            np.full([batch_size, beam_width, beam_width], eos_id),
            mstype.int32)

        self.ones_3d = Tensor(
            np.full([batch_size, beam_width, self.encoder_length], 1),
            mstype.float32)
        self.neg_inf_3d = Tensor(
            np.full([batch_size, beam_width, self.encoder_length], -INF),
            mstype.float32)
        self.zeros_3d = Tensor(
            np.full([batch_size, beam_width, self.encoder_length], 0),
            mstype.float32)
        self.zeros_2d = Tensor(
            np.full([batch_size * beam_width, self.encoder_length], 0),
            mstype.int32)
        self.argmin = P.ArgMinWithValue(axis=1)
        self.reducesum = P.ReduceSum()
        self.div = P.Div()
        self.shape_op = P.Shape()
        self.mul = P.Mul()
        self.log = P.Log()
        self.less = P.Less()
        self.tile = P.Tile()
        self.noteq = P.Neg()
        self.zeroslike = P.ZerosLike()
        self.greater_equal = P.GreaterEqual()
        self.sub = P.Sub()
Example 14
0
 def __init__(
     self,
     vocab_size,
     embedding_size,
     embedding_shape,
     use_one_hot_embeddings=False,
     initializer_range=0.02,
     name='embedding_table',
     is_expand=False,
     batch_size=12,
     damping=0.03,
     loss_scale=1,
     frequency=10,
 ):
     super(Embedding_Thor, self).__init__()
     self.vocab_size = vocab_size
     self.use_one_hot_embeddings = use_one_hot_embeddings
     self.embedding_table = Parameter(initializer(
         TruncatedNormal(initializer_range), [vocab_size, embedding_size]),
                                      name=name)
     self.thor = True
     self.is_expand = is_expand
     self.expand = P.ExpandDims()
     self.shape_flat = (-1, )
     self.gather = P.GatherV2()
     self.one_hot = P.OneHot()
     self.on_value = Tensor(1.0, mstype.float32)
     self.off_value = Tensor(0.0, mstype.float32)
     self.array_mul = P.MatMul()
     self.reshape = P.Reshape()
     self.em_shape = tuple(embedding_shape)
     self.shape = P.Shape()
     self.loss_scale = Tensor(1 / loss_scale, mstype.float16)
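      # Buffers for THOR second-order statistics: the inverse input factor A (length vocab_size) and the inverse gradient factor G (embedding_size x embedding_size)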
     self.matrix_A_inv = Parameter(Tensor(
         np.zeros([vocab_size]).astype(np.float32)),
                                   name='matrix_A_inv',
                                   requires_grad=False)
     self.matrix_G_inv = Parameter(Tensor(
         np.zeros([embedding_size, embedding_size]).astype(np.float16)),
                                   name="matrix_G_inv",
                                   requires_grad=False)
     self.A_inv_max = Parameter(initializer(0, [1], mstype.float32),
                                name="A_inv_max",
                                requires_grad=False)
     self.G_inv_max = Parameter(initializer(0, [1], mstype.float32),
                                name="G_inv_max",
                                requires_grad=False)
     self.fused_abs_max = P.CusFusedAbsMax1()
     self.fake_G = Tensor(
         np.zeros([embedding_size, embedding_size]).astype(np.float16))
     self.dampingA = Tensor(np.ones([vocab_size]).astype(np.float32))
     self.dampingG = Tensor(np.identity(embedding_size), mstype.float32)
     self.cov_step = Parameter(initializer(0, [1], mstype.int32),
                               name="cov_step",
                               requires_grad=False)
     self.freq = Tensor(frequency, mstype.int32)
     self.axis = 0
     self.damping = damping
     self.gather = P.GatherV2()
     self.sqrt = P.Sqrt()
     self.mul = P.Mul()
     self.cast = P.Cast()
     self.cube_matmul = P.CusMatMulCube(transpose_a=True)
     self.vector_matmul = P.CusBatchMatMul()
     self.cholesky = P.CusCholeskyTrsm()
     self.matrix_combine = P.CusMatrixCombine()
     self.reduce_sum = P.ReduceSum(keep_dims=False)
     self.inv = P.Inv()
     self.getG = P.InsertGradientOf(self.save_gradient)
     self.batch_size = batch_size
Example 15
0
    def __init__(self):
        super().__init__()
        self.normal_dist = msd.Normal(dtype=mstype.float32)
        self.bernoulli_dist = msd.Bernoulli(dtype=mstype.float32)

        self.reduce_sum = P.ReduceSum(keep_dims=True)
Example 16
0
    def __init__(
            self,
            in_channels,
            out_channels,
            activation=None,
            has_bias=True,
            weight_prior_fn=NormalPrior,
            weight_posterior_fn=lambda name, shape: NormalPosterior(name=name, shape=shape),
            bias_prior_fn=NormalPrior,
            bias_posterior_fn=lambda name, shape: NormalPosterior(name=name, shape=shape)):
        super(_DenseVariational, self).__init__()
        self.in_channels = check_int_positive(in_channels)
        self.out_channels = check_int_positive(out_channels)
        self.has_bias = check_bool(has_bias)

        if isinstance(weight_prior_fn, Cell):
            self.weight_prior = weight_prior_fn
        else:
            self.weight_prior = weight_prior_fn()
        for prior_name, prior_dist in self.weight_prior.name_cells().items():
            if prior_name != 'normal':
                raise TypeError("The type of distribution of `weight_prior_fn` should be `normal`")
            if not (isinstance(getattr(prior_dist, '_mean_value'), Tensor) and
                    isinstance(getattr(prior_dist, '_sd_value'), Tensor)):
                raise TypeError("The input form of `weight_prior_fn` is incorrect")

        try:
            self.weight_posterior = weight_posterior_fn(shape=[self.out_channels, self.in_channels], name='bnn_weight')
        except TypeError:
            raise TypeError('The type of `weight_posterior_fn` should be `NormalPosterior`')
        for posterior_name, _ in self.weight_posterior.name_cells().items():
            if posterior_name != 'normal':
                raise TypeError("The type of distribution of `weight_posterior_fn` should be `normal`")

        if self.has_bias:
            if isinstance(bias_prior_fn, Cell):
                self.bias_prior = bias_prior_fn
            else:
                self.bias_prior = bias_prior_fn()
            for prior_name, prior_dist in self.bias_prior.name_cells().items():
                if prior_name != 'normal':
                    raise TypeError("The type of distribution of `bias_prior_fn` should be `normal`")
                if not (isinstance(getattr(prior_dist, '_mean_value'), Tensor) and
                        isinstance(getattr(prior_dist, '_sd_value'), Tensor)):
                    raise TypeError("The input form of `bias_prior_fn` is incorrect")

            try:
                self.bias_posterior = bias_posterior_fn(shape=[self.out_channels], name='bnn_bias')
            except TypeError:
                raise TypeError('The type of `bias_posterior_fn` should be `NormalPosterior`')
            for posterior_name, _ in self.bias_posterior.name_cells().items():
                if posterior_name != 'normal':
                    raise TypeError("The type of distribution of `bias_posterior_fn` should be `normal`")

        self.activation = activation
        if not self.activation:
            self.activation_flag = False
        else:
            self.activation_flag = True
            if isinstance(self.activation, str):
                self.activation = get_activation(activation)
            elif isinstance(self.activation, Cell):
                self.activation = activation
            else:
                raise ValueError('The type of `activation` is wrong.')

        self.matmul = P.MatMul(transpose_b=True)
        self.bias_add = P.BiasAdd()
        self.sum = P.ReduceSum()
Example 17
0
 def __init__(self, axis=-1):
     super(Softmax, self).__init__()
     self.max = P.ArgMaxWithValue(axis=axis, keep_dims=True)
     self.sum = P.ReduceSum(keep_dims=True)
     self.axis = axis
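A minimal sketch, not taken from the original source, of how a construct() for this cell could combine the ops registered above into a numerically stable softmax (P.Exp() is assumed to be available as in the other examples):

 def construct(self, x):
     # ArgMaxWithValue returns (indices, values); keep_dims=True lets the max broadcast
     _, x_max = self.max(x)
     # subtract the per-axis maximum before exponentiating for numerical stability
     exp_x = P.Exp()(x - x_max)
     return exp_x / self.sum(exp_x, self.axis)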
Example 18
0
     'desc_inputs': [[128, 5]],
     'desc_bprop': [[128, 5], [128], [128]],
     'skip': ['backward']
 }), ('Abs', {
     'block': P.Abs(),
     'desc_inputs': [[4]],
     'desc_bprop': [[4]]
 }),
 ('CumSum', {
     'block': P.CumSum(),
     'desc_const': [0],
     'desc_inputs': [Tensor(np.array([[3, 4], [1, 6]]).astype(np.float16))],
     'desc_bprop': [Tensor(np.array([[3, 4], [4, 10]]).astype(np.float16))]
 }),
 ('ReduceSum_3', {
     'block': P.ReduceSum(),
     'desc_const': [0],
     'desc_inputs': [[3, 2]],
     'desc_bprop': [[2]]
 }),
 ('ReduceSum_4', {
     'block': P.ReduceSum(keep_dims=True),
     'desc_const': [0],
     'desc_inputs': [[3, 2]],
     'desc_bprop': [[1, 2]]
 }),
 ('ReduceSum_5', {
     'block': P.ReduceSum(keep_dims=True),
     'desc_inputs': [[2, 3, 4]],
     'desc_bprop': [[1, 1, 1]]
 }),
Example 19
0
 def __init__(self):
     super(AxisListEmptyNet, self).__init__()
     self.reduce_sum = P.ReduceSum()
     self.axis = []
Example 20
0
 def __init__(self):
     super().__init__()
     self.mul1 = P.Mul()
     self.reduce_sum = P.ReduceSum(keep_dims=False)
     self.mul2 = P.Mul()
Example 21
0
def _get_square_sum(x):
    norm = P.ReduceSum(False)(F.square(x), ())
    norm = F.expand_dims(F.cast(norm, mstype.float32), 0)
    return norm
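A hedged sketch, not part of the original file, of how these per-tensor square sums are typically combined into a global L2 norm (for example for gradient clipping); `grads` is a hypothetical iterable of tensors, and F.addn / F.sqrt are the functional AddN and Sqrt ops:

def _global_norm(grads):
    # each element is the 1-element float32 square sum produced by _get_square_sum
    square_sums = [_get_square_sum(g) for g in grads]
    # add them up and take the square root to obtain the global L2 norm
    return F.sqrt(F.addn(square_sums))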
Example 22
0
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_prior_fn=NormalPrior,
                 weight_posterior_fn=lambda name, shape: NormalPosterior(
                     name=name, shape=shape),
                 bias_prior_fn=NormalPrior,
                 bias_posterior_fn=lambda name, shape: NormalPosterior(
                     name=name, shape=shape)):
        kernel_size = twice(kernel_size)
        stride = twice(stride)
        dilation = twice(dilation)
        super(_ConvVariational, self).__init__(in_channels,
                                               out_channels,
                                               kernel_size,
                                               stride,
                                               pad_mode,
                                               padding,
                                               dilation,
                                               group,
                                               has_bias,
                                               weight_init='normal',
                                               bias_init='zeros')
        if pad_mode not in ('valid', 'same', 'pad'):
            raise ValueError(
                'Attr \'pad_mode\' of \'Conv2d\' Op passed ' + str(pad_mode) +
                ', should be one of values in \'valid\', \'same\', \'pad\'.')

        # convolution args
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.pad_mode = pad_mode
        self.padding = padding
        self.dilation = dilation
        self.group = group
        self.has_bias = has_bias

        # distribution trainable parameters
        self.shape = [
            self.out_channels, self.in_channels // self.group,
            *self.kernel_size
        ]

        self.weight.requires_grad = False

        if isinstance(weight_prior_fn, Cell):
            if weight_prior_fn.__class__.__name__ != 'NormalPrior':
                raise TypeError(
                    'The type of `weight_prior_fn` should be `NormalPrior`')
            self.weight_prior = weight_prior_fn
        else:
            if weight_prior_fn.__name__ != 'NormalPrior':
                raise TypeError(
                    'The type of `weight_prior_fn` should be `NormalPrior`')
            self.weight_prior = weight_prior_fn()

        try:
            self.weight_posterior = weight_posterior_fn(shape=self.shape,
                                                        name='bnn_weight')
        except TypeError:
            raise TypeError(
                'The type of `weight_posterior_fn` should be `NormalPosterior`'
            )

        if self.has_bias:
            self.bias.requires_grad = False

            if isinstance(bias_prior_fn, Cell):
                if bias_prior_fn.__class__.__name__ != 'NormalPrior':
                    raise TypeError(
                        'The type of `bias_prior_fn` should be `NormalPrior`')
                self.bias_prior = bias_prior_fn
            else:
                if bias_prior_fn.__name__ != 'NormalPrior':
                    raise TypeError(
                        'The type of `bias_prior_fn` should be `NormalPrior`')
                self.bias_prior = bias_prior_fn()

            try:
                self.bias_posterior = bias_posterior_fn(
                    shape=[self.out_channels], name='bnn_bias')
            except TypeError:
                raise TypeError(
                    'The type of `bias_posterior_fn` should be `NormalPosterior`'
                )

        # mindspore operations
        self.bias_add = P.BiasAdd()
        self.conv2d = P.Conv2D(out_channel=self.out_channels,
                               kernel_size=self.kernel_size,
                               mode=1,
                               pad_mode=self.pad_mode,
                               pad=self.padding,
                               stride=self.stride,
                               dilation=self.dilation,
                               group=self.group)

        self.log = P.Log()
        self.sum = P.ReduceSum()
Example 23
0
    def __init__(self, config, batch_size, num_bboxes, add_gt_as_proposals):
        super(BboxAssignSampleForRcnn, self).__init__()
        cfg = config
        self.batch_size = batch_size
        self.neg_iou_thr = cfg.neg_iou_thr_stage2
        self.pos_iou_thr = cfg.pos_iou_thr_stage2
        self.min_pos_iou = cfg.min_pos_iou_stage2
        self.num_gts = cfg.num_gts
        self.num_bboxes = num_bboxes
        self.num_expected_pos = cfg.num_expected_pos_stage2
        self.num_expected_neg = cfg.num_expected_neg_stage2
        self.num_expected_total = cfg.num_expected_total_stage2

        self.add_gt_as_proposals = add_gt_as_proposals
        self.label_inds = Tensor(
            np.arange(1, self.num_gts + 1).astype(np.int32))
        self.add_gt_as_proposals_valid = Tensor(
            np.array(self.add_gt_as_proposals * np.ones(self.num_gts),
                     dtype=np.int32))

        self.concat = P.Concat(axis=0)
        self.max_gt = P.ArgMaxWithValue(axis=0)
        self.max_anchor = P.ArgMaxWithValue(axis=1)
        self.sum_inds = P.ReduceSum()
        self.iou = P.IOU()
        self.greaterequal = P.GreaterEqual()
        self.greater = P.Greater()
        self.select = P.Select()
        self.gatherND = P.GatherNd()
        self.squeeze = P.Squeeze()
        self.cast = P.Cast()
        self.logicaland = P.LogicalAnd()
        self.less = P.Less()
        self.random_choice_with_mask_pos = P.RandomChoiceWithMask(
            self.num_expected_pos)
        self.random_choice_with_mask_neg = P.RandomChoiceWithMask(
            self.num_expected_neg)
        self.reshape = P.Reshape()
        self.equal = P.Equal()
        self.bounding_box_encode = P.BoundingBoxEncode(means=(0.0, 0.0, 0.0,
                                                              0.0),
                                                       stds=(0.1, 0.1, 0.2,
                                                             0.2))
        self.concat_axis1 = P.Concat(axis=1)
        self.logicalnot = P.LogicalNot()
        self.tile = P.Tile()

        # Check
        self.check_gt_one = Tensor(
            np.array(-1 * np.ones((self.num_gts, 4)), dtype=np.float16))
        self.check_anchor_two = Tensor(
            np.array(-2 * np.ones((self.num_bboxes, 4)), dtype=np.float16))

        # Init tensor
        self.assigned_gt_inds = Tensor(
            np.array(-1 * np.ones(num_bboxes), dtype=np.int32))
        self.assigned_gt_zeros = Tensor(
            np.array(np.zeros(num_bboxes), dtype=np.int32))
        self.assigned_gt_ones = Tensor(
            np.array(np.ones(num_bboxes), dtype=np.int32))
        self.assigned_gt_ignores = Tensor(
            np.array(-1 * np.ones(num_bboxes), dtype=np.int32))
        self.assigned_pos_ones = Tensor(
            np.array(np.ones(self.num_expected_pos), dtype=np.int32))

        self.gt_ignores = Tensor(
            np.array(-1 * np.ones(self.num_gts), dtype=np.int32))
        self.range_pos_size = Tensor(
            np.arange(self.num_expected_pos).astype(np.float16))
        self.check_neg_mask = Tensor(
            np.array(np.ones(self.num_expected_neg - self.num_expected_pos),
                     dtype=np.bool_))
        self.bboxs_neg_mask = Tensor(
            np.zeros((self.num_expected_neg, 4), dtype=np.float16))
        self.labels_neg_mask = Tensor(
            np.array(np.zeros(self.num_expected_neg), dtype=np.uint8))

        self.reshape_shape_pos = (self.num_expected_pos, 1)
        self.reshape_shape_neg = (self.num_expected_neg, 1)

        self.scalar_zero = Tensor(0.0, dtype=mstype.float16)
        self.scalar_neg_iou_thr = Tensor(self.neg_iou_thr,
                                         dtype=mstype.float16)
        self.scalar_pos_iou_thr = Tensor(self.pos_iou_thr,
                                         dtype=mstype.float16)
        self.scalar_min_pos_iou = Tensor(self.min_pos_iou,
                                         dtype=mstype.float16)

        self.expand_dims = P.ExpandDims()
        self.split = P.Split(axis=1, output_num=4)
        self.concat_last_axis = P.Concat(axis=-1)
        self.round = P.Round()
        self.image_h_w = Tensor(
            [cfg.img_height, cfg.img_width, cfg.img_height, cfg.img_width],
            dtype=mstype.float16)
        self.range = nn.Range(start=0, limit=cfg.num_expected_pos_stage2)
        self.crop_and_resize = P.CropAndResize(method="bilinear_v2")
        self.mask_shape = (cfg.mask_shape[0], cfg.mask_shape[1])
        self.squeeze_mask_last = P.Squeeze(axis=-1)
Example 24
0
 def __init__(self, network, output_num):
     super(OutputReduceSumCell, self).__init__()
     self.output_num = output_num
     self.network = network
     self.reduce_sum = P.ReduceSum()
Example 25
0
    def __init__(self, config):
        super(WideDeepModel, self).__init__()
        self.batch_size = config.batch_size
        host_device_mix = bool(config.host_device_mix)
        parameter_server = bool(config.parameter_server)
        parallel_mode = _get_parallel_mode()
        is_auto_parallel = parallel_mode in (ParallelMode.SEMI_AUTO_PARALLEL,
                                             ParallelMode.AUTO_PARALLEL)
        if is_auto_parallel:
            self.batch_size = self.batch_size * get_group_size()
        self.field_size = config.field_size
        self.vocab_size = config.vocab_size
        self.emb_dim = config.emb_dim
        self.deep_layer_dims_list = config.deep_layer_dim
        self.deep_layer_act = config.deep_layer_act
        self.init_args = config.init_args
        self.weight_init, self.bias_init = config.weight_bias_init
        self.weight_bias_init = config.weight_bias_init
        self.emb_init = config.emb_init
        self.drop_out = config.dropout_flag
        self.keep_prob = config.keep_prob
        self.deep_input_dims = self.field_size * self.emb_dim
        self.layer_dims = self.deep_layer_dims_list + [1]
        self.all_dim_list = [self.deep_input_dims] + self.layer_dims

        init_acts = [('Wide_b', [1], self.emb_init)]
        var_map = init_var_dict(self.init_args, init_acts)
        self.wide_b = var_map["Wide_b"]
        self.dense_layer_1 = DenseLayer(self.all_dim_list[0],
                                        self.all_dim_list[1],
                                        self.weight_bias_init,
                                        self.deep_layer_act,
                                        convert_dtype=True,
                                        drop_out=config.dropout_flag)
        self.dense_layer_2 = DenseLayer(self.all_dim_list[1],
                                        self.all_dim_list[2],
                                        self.weight_bias_init,
                                        self.deep_layer_act,
                                        convert_dtype=True,
                                        drop_out=config.dropout_flag)
        self.dense_layer_3 = DenseLayer(self.all_dim_list[2],
                                        self.all_dim_list[3],
                                        self.weight_bias_init,
                                        self.deep_layer_act,
                                        convert_dtype=True,
                                        drop_out=config.dropout_flag)
        self.dense_layer_4 = DenseLayer(self.all_dim_list[3],
                                        self.all_dim_list[4],
                                        self.weight_bias_init,
                                        self.deep_layer_act,
                                        convert_dtype=True,
                                        drop_out=config.dropout_flag)
        self.dense_layer_5 = DenseLayer(self.all_dim_list[4],
                                        self.all_dim_list[5],
                                        self.weight_bias_init,
                                        self.deep_layer_act,
                                        use_activation=False,
                                        convert_dtype=True,
                                        drop_out=config.dropout_flag)
        self.wide_mul = P.Mul()
        self.deep_mul = P.Mul()
        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.reshape = P.Reshape()
        self.deep_reshape = P.Reshape()
        self.square = P.Square()
        self.shape = P.Shape()
        self.tile = P.Tile()
        self.concat = P.Concat(axis=1)
        self.cast = P.Cast()
        if is_auto_parallel and host_device_mix:
            self.dense_layer_1.dropout.dropout_do_mask.set_strategy(
                ((1, get_group_size()), ))
            self.dense_layer_1.matmul.set_strategy(
                ((1, get_group_size()), (get_group_size(), 1)))
            self.deep_embeddinglookup = nn.EmbeddingLookup(
                self.vocab_size,
                self.emb_dim,
                slice_mode=nn.EmbeddingLookUpSplitMode.TABLE_COLUMN_SLICE)
            self.wide_embeddinglookup = nn.EmbeddingLookup(
                self.vocab_size,
                1,
                slice_mode=nn.EmbeddingLookUpSplitMode.TABLE_ROW_SLICE)
            self.deep_mul.set_strategy(((1, 1, get_group_size()), (1, 1, 1)))
            self.deep_reshape.add_prim_attr("skip_redistribution", True)
            self.reduce_sum.add_prim_attr("cross_batch", True)
            self.embedding_table = self.deep_embeddinglookup.embedding_table
        elif parameter_server:
            self.deep_embeddinglookup = nn.EmbeddingLookup(
                self.vocab_size, self.emb_dim)
            self.wide_embeddinglookup = nn.EmbeddingLookup(self.vocab_size, 1)
            self.embedding_table = self.deep_embeddinglookup.embedding_table
            self.deep_embeddinglookup.embedding_table.set_param_ps()
            self.wide_embeddinglookup.embedding_table.set_param_ps()
        else:
            self.deep_embeddinglookup = nn.EmbeddingLookup(self.vocab_size,
                                                           self.emb_dim,
                                                           target='DEVICE')
            self.wide_embeddinglookup = nn.EmbeddingLookup(self.vocab_size,
                                                           1,
                                                           target='DEVICE')
            self.embedding_table = self.deep_embeddinglookup.embedding_table
Example 26
0
    def __init__(self,
                 config,
                 representation_size,
                 batch_size,
                 num_classes,
                 target_means=(0., 0., 0., 0.),
                 target_stds=(0.1, 0.1, 0.2, 0.2)
                 ):
        super(Rcnn, self).__init__()
        cfg = config
        self.rcnn_loss_cls_weight = Tensor(np.array(cfg.rcnn_loss_cls_weight).astype(np.float16))
        self.rcnn_loss_reg_weight = Tensor(np.array(cfg.rcnn_loss_reg_weight).astype(np.float16))
        self.rcnn_fc_out_channels = cfg.rcnn_fc_out_channels
        self.target_means = target_means
        self.target_stds = target_stds
        self.num_classes = num_classes
        self.in_channels = cfg.rcnn_in_channels
        self.train_batch_size = batch_size
        self.test_batch_size = cfg.test_batch_size

        shape_0 = (self.rcnn_fc_out_channels, representation_size)
        weights_0 = initializer("XavierUniform", shape=shape_0[::-1], dtype=mstype.float16).to_tensor()
        shape_1 = (self.rcnn_fc_out_channels, self.rcnn_fc_out_channels)
        weights_1 = initializer("XavierUniform", shape=shape_1[::-1], dtype=mstype.float16).to_tensor()
        self.shared_fc_0 = DenseNoTranpose(representation_size, self.rcnn_fc_out_channels, weights_0)
        self.shared_fc_1 = DenseNoTranpose(self.rcnn_fc_out_channels, self.rcnn_fc_out_channels, weights_1)

        cls_weight = initializer('Normal', shape=[num_classes, self.rcnn_fc_out_channels][::-1],
                                 dtype=mstype.float16).to_tensor()
        reg_weight = initializer('Normal', shape=[num_classes * 4, self.rcnn_fc_out_channels][::-1],
                                 dtype=mstype.float16).to_tensor()
        self.cls_scores = DenseNoTranpose(self.rcnn_fc_out_channels, num_classes, cls_weight)
        self.reg_scores = DenseNoTranpose(self.rcnn_fc_out_channels, num_classes * 4, reg_weight)

        self.flatten = P.Flatten()
        self.relu = P.ReLU()
        self.logicaland = P.LogicalAnd()
        self.loss_cls = P.SoftmaxCrossEntropyWithLogits()
        self.loss_bbox = P.SmoothL1Loss(beta=1.0)
        self.reshape = P.Reshape()
        self.onehot = P.OneHot()
        self.greater = P.Greater()
        self.cast = P.Cast()
        self.sum_loss = P.ReduceSum()
        self.tile = P.Tile()
        self.expandims = P.ExpandDims()

        self.gather = P.GatherNd()
        self.argmax = P.ArgMaxWithValue(axis=1)

        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
        self.value = Tensor(1.0, mstype.float16)

        self.num_bboxes = (cfg.num_expected_pos_stage2 + cfg.num_expected_neg_stage2) * batch_size

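        # Mask that is 1 for every class except column 0 (the background class)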
        rmv_first = np.ones((self.num_bboxes, self.num_classes))
        rmv_first[:, 0] = np.zeros((self.num_bboxes,))
        self.rmv_first_tensor = Tensor(rmv_first.astype(np.float16))

        self.num_bboxes_test = cfg.rpn_max_num * cfg.test_batch_size

        range_max = np.arange(self.num_bboxes_test).astype(np.int32)
        self.range_max = Tensor(range_max)
Example 27
0
 def __init__(self, network):
     super(NetWithLoss, self).__init__()
     self.sum = P.ReduceSum()
     self.mean = P.ReduceMean()
     self.net = network
Example 28
0
 def __init__(self):
     super().__init__()
     self.max = P.ReduceMax()
     self.param = Parameter(Tensor(np.arange(2 * 2 * 2).reshape((2, 2, 2)), ms.float32), name="weight")
     self.zero = Tensor(np.zeros(([2, 2, 2])), ms.float32)
     self.reduce = P.ReduceSum()
Example 29
0
from mindspore.ops.primitive import constexpr, PrimitiveWithInfer, prim_attr_register
from mindspore.ops._grad.grad_base import bprop_getters
from mindspore import Tensor, RowTensor, context
from mindspore.common.parameter import Parameter, ParameterTuple
from mindspore.common import dtype as mstype
from mindspore._checkparam import Validator as validator
from mindspore._checkparam import Rel
from mindspore.nn import Optimizer
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.nn.optim import Momentum
from mindspore.train import Model
from ....dataset_mock import MindData

context.set_context(mode=context.GRAPH_MODE, enable_sparse=True)

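# Module-level primitive instances reused by the code below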
reduce_sum = P.ReduceSum()
unsorted_segment_sum = P.UnsortedSegmentSum()
transpose = P.Transpose()
shape_op = P.Shape()
reshape = P.Reshape()
size_op = P.Size()
invert_permutation = P.InvertPermutation()
logical_and = P.LogicalAnd()


def get_axis(x):
    shape = shape_op(x)
    length = F.tuple_len(shape)
    perm = F.make_range(0, length)
    return perm
Example 30
0
 def __init__(self, margin=0.0, reduction="mean"):
     super(CosineEmbeddingLoss, self).__init__(reduction)
     self.reduce_sum = P.ReduceSum()
     self.maximum = P.Maximum()
     validator.check_value_type("margin", margin, [float], self.cls_name)
     self.margin = validator.check_number_range("margin", margin, -1.0, 1.0, Rel.INC_BOTH, self.cls_name)