def construct(self, grads):
    global_norm = self.global_norm(grads)
    cond = P.GreaterEqual()(global_norm, self.clip_norm)
    global_norm = F.select(cond, global_norm, self.clip_norm)
    grads = self.hyper_map(
        F.partial(apply_global_norm, self.clip_norm, global_norm), grads)
    return grads
def __init__(self, clip_norm=1.0, use_norm=None):
    super(_ClipByGlobalNorm, self).__init__()
    # Add interface. This parameter is not used at present
    if use_norm is not None:
        validator.check_number("use_norm", use_norm, 0.0, Rel.GE, self.cls_name)
    validator.check_number("clip_norm", clip_norm, 0.0, Rel.GT, self.cls_name)
    self.clip_norm = Tensor([clip_norm], mstype.float32)
    self.hyper_map = C.HyperMap()
    self.greater_equal = P.GreaterEqual()
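# A hedged, self-contained sketch of the same clip-by-global-norm idea applied to a
# tuple of gradients. The helper name `clip_grads` is illustrative only and is not
# part of the snippets above; it assumes MindSpore's PyNative mode so the ops can be
# called eagerly.
import numpy as np
from mindspore import Tensor, context
from mindspore.common import dtype as mstype
from mindspore.ops import operations as P

context.set_context(mode=context.PYNATIVE_MODE)

def clip_grads(grads, clip_norm=1.0):
    """Scale each gradient by clip_norm / max(global_norm, clip_norm)."""
    clip_norm_t = Tensor(clip_norm, mstype.float32)
    # global_norm = sqrt(sum_i ||g_i||^2), computed over the whole gradient tuple.
    global_norm = P.Sqrt()(P.AddN()([P.ReduceSum()(g * g) for g in grads]))
    cond = P.GreaterEqual()(global_norm, clip_norm_t)
    # Rescale only when the global norm exceeds the threshold, as in construct() above.
    denom = P.Select()(cond, global_norm, clip_norm_t)
    return tuple(g * clip_norm_t / denom for g in grads)

grads = (Tensor(np.array([3.0], np.float32)), Tensor(np.array([4.0], np.float32)))
print(clip_grads(grads))  # global norm 5.0 -> roughly ([0.6], [0.8])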
def __init__(self, config, batch_size, num_bboxes, add_gt_as_proposals):
    super(BboxAssignSample, self).__init__()
    cfg = config

    self.batch_size = batch_size
    self.neg_iou_thr = Tensor(cfg.neg_iou_thr, mstype.float16)
    self.pos_iou_thr = Tensor(cfg.pos_iou_thr, mstype.float16)
    self.min_pos_iou = Tensor(cfg.min_pos_iou, mstype.float16)
    self.zero_thr = Tensor(0.0, mstype.float16)
    self.num_bboxes = num_bboxes
    self.num_gts = cfg.num_gts
    self.num_expected_pos = cfg.num_expected_pos
    self.num_expected_neg = cfg.num_expected_neg
    self.add_gt_as_proposals = add_gt_as_proposals

    if self.add_gt_as_proposals:
        self.label_inds = Tensor(np.arange(1, self.num_gts + 1))

    self.concat = P.Concat(axis=0)
    self.max_gt = P.ArgMaxWithValue(axis=0)
    self.max_anchor = P.ArgMaxWithValue(axis=1)
    self.sum_inds = P.ReduceSum()
    self.iou = P.IOU()
    self.greaterequal = P.GreaterEqual()
    self.greater = P.Greater()
    self.select = P.Select()
    self.gatherND = P.GatherNd()
    self.squeeze = P.Squeeze()
    self.cast = P.Cast()
    self.logicaland = P.LogicalAnd()
    self.less = P.Less()
    self.random_choice_with_mask_pos = P.RandomChoiceWithMask(self.num_expected_pos)
    self.random_choice_with_mask_neg = P.RandomChoiceWithMask(self.num_expected_neg)
    self.reshape = P.Reshape()
    self.equal = P.Equal()
    self.bounding_box_encode = BoundingBoxEncode()
    self.scatterNdUpdate = P.ScatterNdUpdate()
    self.scatterNd = P.ScatterNd()
    self.logicalnot = P.LogicalNot()
    self.tile = P.Tile()
    self.zeros_like = P.ZerosLike()

    self.assigned_gt_inds = Tensor(np.array(-1 * np.ones(num_bboxes), dtype=np.int32))
    self.assigned_gt_zeros = Tensor(np.array(np.zeros(num_bboxes), dtype=np.int32))
    self.assigned_gt_ones = Tensor(np.array(np.ones(num_bboxes), dtype=np.int32))
    self.assigned_gt_ignores = Tensor(np.array(-1 * np.ones(num_bboxes), dtype=np.int32))
    self.assigned_pos_ones = Tensor(np.array(np.ones(self.num_expected_pos), dtype=np.int32))
    self.check_neg_mask = Tensor(np.array(np.ones(self.num_expected_neg - self.num_expected_pos), dtype=np.bool))
    self.range_pos_size = Tensor(np.arange(self.num_expected_pos).astype(np.float16))
    self.check_gt_one = Tensor(np.array(-1 * np.ones((self.num_gts, 4)), dtype=np.float16))
    self.check_anchor_two = Tensor(np.array(-2 * np.ones((self.num_bboxes, 4)), dtype=np.float16))
    self.print = P.Print()
def test_float32():
    op = P.GreaterEqual()
    op_wrapper = OpNetWrapper(op)

    input_x = Tensor(np.array([1, 2, -1]).astype(np.float32))
    input_y = Tensor(np.array([-3, 2, -1]).astype(np.float32))
    outputs = op_wrapper(input_x, input_y)
    print(outputs)

    assert outputs.shape == (3,)
    assert np.allclose(outputs.asnumpy(), [True, True, True])
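# An illustrative companion test (not from the original file), assuming the same
# OpNetWrapper helper as above: GreaterEqual also broadcasts, so a (2, 3) tensor can
# be compared against a single-element tensor.
def test_broadcast_int32():
    op = P.GreaterEqual()
    op_wrapper = OpNetWrapper(op)

    input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32))
    input_y = Tensor(np.array([3]).astype(np.int32))  # broadcast against (2, 3)
    outputs = op_wrapper(input_x, input_y)

    assert outputs.shape == (2, 3)
    assert np.allclose(outputs.asnumpy(),
                       [[False, False, True], [True, True, True]])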
def __init__(self,
             batch_size,
             seq_length,
             vocab_size,
             decoder,
             beam_width=4,
             length_penalty_weight=1.0,
             max_decode_length=128,
             sos_id=1,
             eos_id=2,
             compute_type=mstype.float32):
    super(BeamSearchDecoder, self).__init__(auto_prefix=False)
    self.seq_length = seq_length
    self.batch_size = batch_size
    self.vocab_size = vocab_size
    self.beam_width = beam_width
    self.length_penalty_weight = length_penalty_weight
    self.max_decode_length = max_decode_length
    self.decoder = decoder

    self.add = P.TensorAdd()
    self.expand = P.ExpandDims()
    self.reshape = P.Reshape()
    self.shape_flat = (-1,)
    self.shape = P.Shape()

    self.zero_tensor = Tensor(np.zeros([batch_size, beam_width]), mstype.float32)
    self.ninf_tensor = Tensor(np.full([batch_size, beam_width], -INF), mstype.float32)

    self.select = P.Select()
    self.flat_shape = (batch_size, beam_width * vocab_size)
    self.topk = P.TopK(sorted=True)
    self.floor_div = P.FloorDiv()
    self.vocab_size_tensor = Tensor(self.vocab_size, mstype.int32)
    self.real_div = P.RealDiv()
    self.mod = Mod()
    self.equal = P.Equal()
    self.eos_ids = Tensor(np.full([batch_size, beam_width], eos_id), mstype.int32)

    beam_ids = np.tile(np.arange(beam_width).reshape((1, beam_width)), [batch_size, 1])
    self.beam_ids = Tensor(beam_ids, mstype.int32)
    batch_ids = np.arange(batch_size * beam_width).reshape((batch_size, beam_width)) // beam_width
    self.batch_ids = Tensor(batch_ids, mstype.int32)
    self.concat = P.Concat(axis=-1)
    self.gather_nd = P.GatherNd()

    self.greater_equal = P.GreaterEqual()
    self.sub = P.Sub()
    self.cast = P.Cast()
    self.zeroslike = P.ZerosLike()

    # init inputs and states
    self.start_ids = Tensor(np.full([batch_size * beam_width, 1], sos_id), mstype.int32)
    self.init_seq = Tensor(np.full([batch_size, beam_width, 1], sos_id), mstype.int32)
    init_scores = np.tile(np.array([[0.] + [-INF] * (beam_width - 1)]), [batch_size, 1])
    self.init_scores = Tensor(init_scores, mstype.float32)
    self.init_finished = Tensor(np.zeros([batch_size, beam_width], dtype=np.bool))
    self.init_length = Tensor(np.zeros([batch_size, beam_width], dtype=np.int32))
    self.length_penalty = LengthPenalty(weight=length_penalty_weight)
    self.one = Tensor(1, mstype.int32)
def __init__(self):
    super(NetGreaterEqual, self).__init__()
    self.GreaterEqual = P.GreaterEqual()
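# The wrapper above only stores the operator; its construct would typically just
# forward both inputs (a sketch, not the original class body):
def construct(self, x, y):
    # Element-wise x >= y, returned as a bool tensor of the broadcast shape.
    return self.GreaterEqual(x, y)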
def __init__(self, alpha=0.2):
    super(LeakyReLU, self).__init__()
    self.greater_equal = P.GreaterEqual()
    self.mul = P.Mul()
    self.alpha = alpha
def __init__(self, alpha=0.2):
    super(LeakyReLU, self).__init__()
    validator.check_value_type('alpha', alpha, [float, int], self.cls_name)
    self.greater_equal = P.GreaterEqual()
    self.mul = P.Mul()
    self.alpha = alpha
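# A minimal sketch of how construct() could combine these operators into the leaky
# ReLU formula max(x, alpha * x) for 0 < alpha < 1. This is an assumption about the
# cell body (only __init__ is shown above) and relies on F = mindspore.ops.functional.
def construct(self, x):
    # mask is 1.0 where x >= 0 and 0.0 elsewhere.
    mask = F.cast(self.greater_equal(x, F.zeros_like(x)), mstype.float32)
    # Positive part passes through unchanged; negative part is scaled by alpha.
    return self.mul(x, mask) + self.mul(x, (1.0 - mask) * self.alpha)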
    'desc_bprop': [Tensor(np.zeros((3, 4, 5), np.bool_))]}),
('NotEqual', {
    'block': P.NotEqual(),
    'desc_inputs': [[4, 1], [2, 3, 4, 5]],
    'desc_bprop': [Tensor(np.ones((2, 3, 4, 5), np.bool_))]}),
('NotEqual_0', {
    'block': P.NotEqual(),
    'desc_inputs': [1, [2, 3, 4, 5]],
    'desc_bprop': [Tensor(np.ones((2, 3, 4, 5), np.bool_))],
    'skip': ['backward']}),
('Greater', {
    'block': P.Greater(),
    'desc_inputs': [[2, 3, 4, 1], [4, 5]],
    'desc_bprop': [Tensor(np.ones((2, 3, 4, 5), np.bool_))]}),
('GreaterEqual', {
    'block': P.GreaterEqual(),
    'desc_inputs': [[2, 3, 4, 1], [4, 5]],
    'desc_bprop': [Tensor(np.ones((2, 3, 4, 5), np.bool_))]}),
('LogicalNot', {
    'block': P.LogicalNot(),
    'desc_inputs': [Tensor(np.zeros((3, 4, 5), np.bool_))],
    'desc_bprop': [Tensor(np.ones((3, 4, 5), np.bool_))]}),
('LogicalAnd', {
    'block': P.LogicalAnd(),
    'desc_inputs': [Tensor(np.zeros((2, 3, 4), np.bool_)), Tensor(np.ones((1), np.bool_))],
    'desc_bprop': [Tensor(np.zeros((2, 3, 4), np.bool_))]}),
('LogicalOr', {
    'block': P.LogicalOr(),
    'desc_inputs': [Tensor(np.zeros((3, 4, 5), np.bool_)), Tensor(np.ones((3, 1, 1), np.bool_))],
    'desc_bprop': [Tensor(np.zeros((3, 4, 5), np.bool_))]}),
('NpuAllocFloatStatus', {
def __init__(self,
             batch_size,
             seq_length,
             vocab_size,
             decoder,
             beam_width=4,
             decoder_layers_nums=4,
             length_penalty_weight=0.6,
             cov_penalty_factor=0.1,
             hidden_size=1024,
             max_decode_length=64,
             sos_id=2,
             eos_id=3,
             compute_type=mstype.float32):
    super(BeamSearchDecoder, self).__init__()

    self.encoder_length = seq_length
    self.hidden_size = hidden_size
    self.batch_size = batch_size
    self.vocab_size = vocab_size
    self.beam_width = beam_width
    self.decoder_layers_nums = decoder_layers_nums
    self.length_penalty_weight = length_penalty_weight
    self.cov_penalty_factor = cov_penalty_factor
    self.max_decode_length = max_decode_length
    self.decoder = decoder

    self.add = P.TensorAdd()
    self.expand = P.ExpandDims()
    self.reshape = P.Reshape()
    self.shape_flat = (-1,)
    self.shape = P.Shape()

    self.zero_tensor = Tensor(np.zeros([batch_size, beam_width]), mstype.float32)
    self.ninf_tensor = Tensor(np.full([batch_size, beam_width], -INF), mstype.float32)

    self.select = P.Select()
    self.flat_shape = (batch_size, beam_width * vocab_size)
    self.topk = P.TopK(sorted=True)
    self.floor_div = P.FloorDiv()
    self.vocab_size_tensor = Tensor(self.vocab_size, mstype.int32)
    self.real_div = P.RealDiv()
    self.mod = Mod()
    self.equal = P.Equal()
    self.eos_ids = Tensor(np.full([batch_size, beam_width], eos_id), mstype.int32)

    beam_ids = np.tile(np.arange(beam_width).reshape((1, beam_width)), [batch_size, 1])
    self.beam_ids = Tensor(beam_ids, mstype.int32)
    batch_ids = np.arange(batch_size * beam_width).reshape((batch_size, beam_width)) // beam_width
    self.batch_ids = Tensor(batch_ids, mstype.int32)
    self.concat = P.Concat(axis=-1)
    self.gather_nd = P.GatherNd()

    self.start = Tensor(0, dtype=mstype.int32)
    self.start_ids = Tensor(np.full([batch_size * beam_width, 1], sos_id), mstype.int32)
    self.init_seq = Tensor(np.full([batch_size, beam_width, self.max_decode_length], sos_id), mstype.int32)

    init_scores = np.tile(np.array([[0.] + [-INF] * (beam_width - 1)]), [batch_size, 1])
    self.init_scores = Tensor(init_scores, mstype.float32)
    self.init_finished = Tensor(np.zeros([batch_size, beam_width], dtype=np.bool))
    self.init_length = Tensor(np.zeros([batch_size, beam_width], dtype=np.int32))

    self.length_penalty = LengthPenalty(weight=length_penalty_weight)

    self.one = Tensor(1, mstype.int32)
    self.prob_concat = P.Concat(axis=1)
    self.cast = P.Cast()
    self.decoder_hidden_state = Tensor(np.zeros([self.decoder_layers_nums, 2,
                                                 self.batch_size * self.beam_width,
                                                 hidden_size]), mstype.float32)

    self.zeros_scores = Tensor(np.zeros([batch_size, beam_width], dtype=np.float))
    self.active_index = Tensor(np.ones([batch_size, beam_width], dtype=np.int32))
    self.init_zeros = Tensor(np.zeros([batch_size, beam_width], dtype=np.int32))
    self.init_ones = Tensor(np.ones([batch_size, beam_width], dtype=np.float32))

    self.accu_attn_scores = Tensor(np.zeros([batch_size, beam_width, self.encoder_length], dtype=np.float32))

    self.zeros = Tensor([0], mstype.int32)
    self.eos_tensor = Tensor(np.full([batch_size, beam_width, beam_width], eos_id), mstype.int32)

    self.ones_3d = Tensor(np.full([batch_size, beam_width, self.encoder_length], 1), mstype.float32)
    self.neg_inf_3d = Tensor(np.full([batch_size, beam_width, self.encoder_length], -INF), mstype.float32)
    self.zeros_3d = Tensor(np.full([batch_size, beam_width, self.encoder_length], 0), mstype.float32)
    self.zeros_2d = Tensor(np.full([batch_size * beam_width, self.encoder_length], 0), mstype.int32)
    self.argmin = P.ArgMinWithValue(axis=1)
    self.reducesum = P.ReduceSum()
    self.div = P.Div()
    self.shape_op = P.Shape()
    self.mul = P.Mul()
    self.log = P.Log()
    self.less = P.Less()
    self.tile = P.Tile()
    self.noteq = P.Neg()
    self.zeroslike = P.ZerosLike()
    self.greater_equal = P.GreaterEqual()
    self.sub = P.Sub()
def __init__(self, config, batch_size, num_bboxes, add_gt_as_proposals):
    super(BboxAssignSampleForRcnn, self).__init__()
    cfg = config
    self.dtype = np.float32
    self.ms_type = mstype.float32
    self.batch_size = batch_size
    self.neg_iou_thr = cfg.neg_iou_thr_stage2
    self.pos_iou_thr = cfg.pos_iou_thr_stage2
    self.min_pos_iou = cfg.min_pos_iou_stage2
    self.num_gts = cfg.num_gts
    self.num_bboxes = num_bboxes
    self.num_expected_pos = cfg.num_expected_pos_stage2
    self.num_expected_neg = cfg.num_expected_neg_stage2
    self.num_expected_total = cfg.num_expected_total_stage2

    self.add_gt_as_proposals = add_gt_as_proposals
    self.label_inds = Tensor(np.arange(1, self.num_gts + 1).astype(np.int32))
    self.add_gt_as_proposals_valid = Tensor(np.full(self.num_gts, self.add_gt_as_proposals, dtype=np.int32))

    self.concat = P.Concat(axis=0)
    self.max_gt = P.ArgMaxWithValue(axis=0)
    self.max_anchor = P.ArgMaxWithValue(axis=1)
    self.sum_inds = P.ReduceSum()
    self.iou = P.IOU()
    self.greaterequal = P.GreaterEqual()
    self.greater = P.Greater()
    self.select = P.Select()
    self.gatherND = P.GatherNd()
    self.squeeze = P.Squeeze()
    self.cast = P.Cast()
    self.logicaland = P.LogicalAnd()
    self.less = P.Less()
    self.random_choice_with_mask_pos = P.RandomChoiceWithMask(self.num_expected_pos)
    self.random_choice_with_mask_neg = P.RandomChoiceWithMask(self.num_expected_neg)
    self.reshape = P.Reshape()
    self.equal = P.Equal()
    self.bounding_box_encode = P.BoundingBoxEncode(means=(0.0, 0.0, 0.0, 0.0),
                                                   stds=(0.1, 0.1, 0.2, 0.2))
    self.concat_axis1 = P.Concat(axis=1)
    self.logicalnot = P.LogicalNot()
    self.tile = P.Tile()

    # Check
    self.check_gt_one = Tensor(np.full((self.num_gts, 4), -1, dtype=self.dtype))
    self.check_anchor_two = Tensor(np.full((self.num_bboxes, 4), -2, dtype=self.dtype))

    # Init tensor
    self.assigned_gt_inds = Tensor(np.full(num_bboxes, -1, dtype=np.int32))
    self.assigned_gt_zeros = Tensor(np.array(np.zeros(num_bboxes), dtype=np.int32))
    self.assigned_gt_ones = Tensor(np.array(np.ones(num_bboxes), dtype=np.int32))
    self.assigned_gt_ignores = Tensor(np.full(num_bboxes, -1, dtype=np.int32))
    self.assigned_pos_ones = Tensor(np.array(np.ones(self.num_expected_pos), dtype=np.int32))
    self.gt_ignores = Tensor(np.full(self.num_gts, -1, dtype=np.int32))
    self.range_pos_size = Tensor(np.arange(self.num_expected_pos).astype(self.dtype))
    self.check_neg_mask = Tensor(np.array(np.ones(self.num_expected_neg - self.num_expected_pos), dtype=np.bool))
    self.bboxs_neg_mask = Tensor(np.zeros((self.num_expected_neg, 4), dtype=self.dtype))
    self.labels_neg_mask = Tensor(np.array(np.zeros(self.num_expected_neg), dtype=np.uint8))

    self.reshape_shape_pos = (self.num_expected_pos, 1)
    self.reshape_shape_neg = (self.num_expected_neg, 1)

    self.scalar_zero = Tensor(0.0, dtype=self.ms_type)
    self.scalar_neg_iou_thr = Tensor(self.neg_iou_thr, dtype=self.ms_type)
    self.scalar_pos_iou_thr = Tensor(self.pos_iou_thr, dtype=self.ms_type)
    self.scalar_min_pos_iou = Tensor(self.min_pos_iou, dtype=self.ms_type)
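# Inside the assigner's construct, GreaterEqual / Less are what turn the per-anchor
# IoU values into positive and negative sample masks. A simplified sketch of that
# step (not the model's complete assignment logic); `max_overlaps` stands for each
# anchor's best IoU against any ground-truth box:
pos_sample_iou_mask = self.greaterequal(max_overlaps, self.scalar_pos_iou_thr)
neg_sample_iou_mask = self.logicaland(
    self.greaterequal(max_overlaps, self.scalar_zero),
    self.less(max_overlaps, self.scalar_neg_iou_thr))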
    'desc_inputs': [5.0, Tensor(np.ones([3, 4]).astype(np.float32))],
    'skip': ['backward']}),
# type of x and y not match
('Greater1', {
    'block': (P.Greater(), {'exception': TypeError, 'error_keywords': ['Greater']}),
    'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
    'skip': ['backward']}),
# shape of x and y not match
('Greater2', {
    'block': (P.Greater(), {'exception': ValueError, 'error_keywords': ['Greater']}),
    'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32)), Tensor(np.ones([3, 2]).astype(np.float32))],
    'skip': ['backward']}),
# input is not tensor
('GreaterEqual0', {
    'block': (P.GreaterEqual(), {'exception': TypeError, 'error_keywords': ['GreaterEqual']}),
    'desc_inputs': [5.0, Tensor(np.ones([3, 4]).astype(np.float32))],
    'skip': ['backward']}),
# type of x and y not match
('GreaterEqual1', {
    'block': (P.GreaterEqual(), {'exception': TypeError, 'error_keywords': ['GreaterEqual']}),
    'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
    'skip': ['backward']}),
# shape of x and y not match
('GreaterEqual2', {
    'block': (P.GreaterEqual(), {'exception': ValueError, 'error_keywords': ['GreaterEqual']}),
    'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32)), Tensor(np.ones([3, 2]).astype(np.float32))],
    'skip': ['backward']}),
# input is not tensor
('Less0', {
# shape of x and y not match
('Greater2', {
    'block': (P.Greater(), {
        'exception': ValueError,
        'error_keywords': ['Greater']
    }),
    'desc_inputs': [
        Tensor(np.ones([3, 4]).astype(np.float32)),
        Tensor(np.ones([3, 2]).astype(np.float32))
    ],
    'skip': ['backward']
}),
# shape of x and y not match
('GreaterEqual2', {
    'block': (P.GreaterEqual(), {
        'exception': ValueError,
        'error_keywords': ['GreaterEqual']
    }),
    'desc_inputs': [
        Tensor(np.ones([3, 4]).astype(np.float32)),
        Tensor(np.ones([3, 2]).astype(np.float32))
    ],
    'skip': ['backward']
}),
# shape of x and y not match
('Less2', {
    'block': (P.Less(), {
        'exception': ValueError,
        'error_keywords': ['Less']
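# These registry entries declare that non-broadcastable input shapes should raise
# ValueError. An illustrative standalone equivalent outside the test framework
# (assumes PyNative mode and pytest; not part of the original suite):
import pytest

def test_greater_equal_shape_mismatch():
    x = Tensor(np.ones([3, 4]).astype(np.float32))
    y = Tensor(np.ones([3, 2]).astype(np.float32))
    with pytest.raises(ValueError):
        P.GreaterEqual()(x, y)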
def __init__(self, strategy1, strategy2):
    super().__init__()
    self.matmul = P.MatMul().set_strategy(strategy1)
    self.greaterEqual = P.GreaterEqual().set_strategy(strategy2)
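# The parallel test net above only assigns shard strategies; its construct would
# typically chain the two sharded operators, e.g. (a sketch of the usual pattern in
# these auto-parallel tests, not the original body):
def construct(self, x, y, b):
    out = self.matmul(x, y)
    out = self.greaterEqual(out, b)
    return out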
def __init__(self):
    super(GreaterEqualNet, self).__init__()
    self.ops = P.GreaterEqual()
def __init__(self, config, batch_size, num_bboxes, add_gt_as_proposals):
    super(BboxAssignSampleForRcnn, self).__init__()
    cfg = config
    self.batch_size = batch_size
    self.neg_iou_thr = cfg.neg_iou_thr_stage2
    self.pos_iou_thr = cfg.pos_iou_thr_stage2
    self.min_pos_iou = cfg.min_pos_iou_stage2
    self.num_gts = cfg.num_gts
    self.num_bboxes = num_bboxes
    self.num_expected_pos = cfg.num_expected_pos_stage2
    self.num_expected_neg = cfg.num_expected_neg_stage2
    self.num_expected_total = cfg.num_expected_total_stage2

    self.add_gt_as_proposals = add_gt_as_proposals
    self.label_inds = Tensor(np.arange(1, self.num_gts + 1).astype(np.int32))
    self.add_gt_as_proposals_valid = Tensor(np.array(self.add_gt_as_proposals * np.ones(self.num_gts), dtype=np.int32))

    self.concat = P.Concat(axis=0)
    self.max_gt = P.ArgMaxWithValue(axis=0)
    self.max_anchor = P.ArgMaxWithValue(axis=1)
    self.sum_inds = P.ReduceSum()
    self.iou = P.IOU()
    self.greaterequal = P.GreaterEqual()
    self.greater = P.Greater()
    self.select = P.Select()
    self.gatherND = P.GatherNd()
    self.squeeze = P.Squeeze()
    self.cast = P.Cast()
    self.logicaland = P.LogicalAnd()
    self.less = P.Less()
    self.random_choice_with_mask_pos = P.RandomChoiceWithMask(self.num_expected_pos)
    self.random_choice_with_mask_neg = P.RandomChoiceWithMask(self.num_expected_neg)
    self.reshape = P.Reshape()
    self.equal = P.Equal()
    self.bounding_box_encode = P.BoundingBoxEncode(means=(0.0, 0.0, 0.0, 0.0),
                                                   stds=(0.1, 0.1, 0.2, 0.2))
    self.concat_axis1 = P.Concat(axis=1)
    self.logicalnot = P.LogicalNot()
    self.tile = P.Tile()

    # Check
    self.check_gt_one = Tensor(np.array(-1 * np.ones((self.num_gts, 4)), dtype=np.float16))
    self.check_anchor_two = Tensor(np.array(-2 * np.ones((self.num_bboxes, 4)), dtype=np.float16))

    # Init tensor
    self.assigned_gt_inds = Tensor(np.array(-1 * np.ones(num_bboxes), dtype=np.int32))
    self.assigned_gt_zeros = Tensor(np.array(np.zeros(num_bboxes), dtype=np.int32))
    self.assigned_gt_ones = Tensor(np.array(np.ones(num_bboxes), dtype=np.int32))
    self.assigned_gt_ignores = Tensor(np.array(-1 * np.ones(num_bboxes), dtype=np.int32))
    self.assigned_pos_ones = Tensor(np.array(np.ones(self.num_expected_pos), dtype=np.int32))
    self.gt_ignores = Tensor(np.array(-1 * np.ones(self.num_gts), dtype=np.int32))
    self.range_pos_size = Tensor(np.arange(self.num_expected_pos).astype(np.float16))
    self.check_neg_mask = Tensor(np.array(np.ones(self.num_expected_neg - self.num_expected_pos), dtype=np.bool))
    self.bboxs_neg_mask = Tensor(np.zeros((self.num_expected_neg, 4), dtype=np.float16))
    self.labels_neg_mask = Tensor(np.array(np.zeros(self.num_expected_neg), dtype=np.uint8))

    self.reshape_shape_pos = (self.num_expected_pos, 1)
    self.reshape_shape_neg = (self.num_expected_neg, 1)

    self.scalar_zero = Tensor(0.0, dtype=mstype.float16)
    self.scalar_neg_iou_thr = Tensor(self.neg_iou_thr, dtype=mstype.float16)
    self.scalar_pos_iou_thr = Tensor(self.pos_iou_thr, dtype=mstype.float16)
    self.scalar_min_pos_iou = Tensor(self.min_pos_iou, dtype=mstype.float16)

    self.expand_dims = P.ExpandDims()
    self.split = P.Split(axis=1, output_num=4)
    self.concat_last_axis = P.Concat(axis=-1)
    self.round = P.Round()
    self.image_h_w = Tensor([cfg.img_height, cfg.img_width, cfg.img_height, cfg.img_width], dtype=mstype.float16)
    self.range = nn.Range(start=0, limit=cfg.num_expected_pos_stage2)
    self.crop_and_resize = P.CropAndResize(method="bilinear_v2")
    self.mask_shape = (cfg.mask_shape[0], cfg.mask_shape[1])
    self.squeeze_mask_last = P.Squeeze(axis=-1)