def __init__(self, low=None, high=None, seed=0, dtype=mstype.float32, name="Uniform"):
    """
    Constructor of Uniform distribution.
    """
    param = dict(locals())
    super(Uniform, self).__init__(dtype, name, param)
    if low is not None and high is not None:
        self._low = convert_to_batch(low, self._broadcast_shape, dtype)
        self._high = convert_to_batch(high, self._broadcast_shape, dtype)
        check_greater(self.low, self.high, "low value", "high value")
    else:
        self._low = low
        self._high = high

    # ops needed for the class
    self.const = P.ScalarToArray()
    self.dtypeop = P.DType()
    self.exp = P.Exp()
    self.fill = P.Fill()
    self.less = P.Less()
    self.lessequal = P.LessEqual()
    self.log = P.Log()
    self.logicaland = P.LogicalAnd()
    self.select = P.Select()
    self.shape = P.Shape()
    self.sq = P.Square()
    self.sqrt = P.Sqrt()
    self.uniform = P.UniformReal(seed=seed)
    self.zeroslike = P.ZerosLike()
def __init__(self, batch_size=4):
    super(DiceLoss, self).__init__()
    self.threshold0 = Tensor(0.5, mstype.float32)
    self.zero_float32 = Tensor(0.0, mstype.float32)
    self.k = int(640 * 640)
    self.negative_one_int32 = Tensor(-1, mstype.int32)
    self.batch_size = batch_size
    self.concat = P.Concat()
    self.less_equal = P.LessEqual()
    self.greater = P.Greater()
    self.reduce_sum = P.ReduceSum()
    self.reduce_sum_keep_dims = P.ReduceSum(keep_dims=True)
    self.reduce_mean = P.ReduceMean()
    self.reduce_min = P.ReduceMin()
    self.cast = P.Cast()
    self.minimum = P.Minimum()
    self.expand_dims = P.ExpandDims()
    self.select = P.Select()
    self.fill = P.Fill()
    self.topk = P.TopK(sorted=True)
    self.shape = P.Shape()
    self.sigmoid = P.Sigmoid()
    self.reshape = P.Reshape()
    self.slice = P.Slice()
    self.logical_and = P.LogicalAnd()
    self.logical_or = P.LogicalOr()
    self.equal = P.Equal()
    self.zeros_like = P.ZerosLike()
    self.add = P.TensorAdd()
    self.gather = P.Gather()
def __init__(self):
    super(IGamma, self).__init__()
    # const numbers
    # If more data types are supported, this float max value needs to be selected per dtype.
    self.log_maxfloat32 = Tensor(np.log(np.finfo(np.float32).max), mstype.float32)
    # operations
    self.logicaland = P.LogicalAnd()
    self.logicalor = P.LogicalOr()
    self.logicalnot = P.LogicalNot()
    self.equal = P.Equal()
    self.greater = P.Greater()
    self.less = P.Less()
    self.neg = P.Neg()
    self.log = P.Log()
    self.exp = P.Exp()
    self.select = P.Select()
    self.zeroslike = P.ZerosLike()
    self.fill = P.Fill()
    self.shape = P.Shape()
    self.dtype = P.DType()
    self.lgamma = LGamma()
    self.const = P.ScalarToArray()
    self.cast = P.Cast()
def __init__(self):
    super(DiGamma, self).__init__()
    # const numbers
    self.k_lanczos_gamma = 7
    self.k_base_lanczos_coeff = 0.99999999999980993227684700473478
    self.k_lanczos_coefficients = [676.520368121885098567009190444019,
                                   -1259.13921672240287047156078755283,
                                   771.3234287776530788486528258894,
                                   -176.61502916214059906584551354,
                                   12.507343278686904814458936853,
                                   -0.13857109526572011689554707,
                                   9.984369578019570859563e-6,
                                   1.50563273514931155834e-7]
    self.nan = np.nan
    self.pi = np.pi
    self.lanczos_gamma_plus_one_half = self.k_lanczos_gamma + 0.5
    self.log_lanczos_gamma_plus_one_half = np.log(self.lanczos_gamma_plus_one_half)
    # operations
    self.log1p = P.Log1p()
    self.abs = P.Abs()
    self.shape = P.Shape()
    self.dtype = P.DType()
    self.fill = P.Fill()
    self.floor = P.Floor()
    self.equal = P.Equal()
    self.less = P.Less()
    self.select = P.Select()
    self.sin = P.Sin()
    self.cos = P.Cos()
    self.logicaland = P.LogicalAnd()
def __init__(self, config, batch_size, num_classes,
             target_means=(0., 0., 0., 0.),
             target_stds=(0.1, 0.1, 0.2, 0.2)):
    super(RcnnMask, self).__init__()
    cfg = config
    self.rcnn_loss_mask_fb_weight = Tensor(np.array(cfg.rcnn_loss_mask_fb_weight).astype(np.float16))
    self.rcnn_mask_out_channels = cfg.rcnn_mask_out_channels
    self.target_means = target_means
    self.target_stds = target_stds
    self.num_classes = num_classes
    self.in_channels = cfg.rcnn_in_channels

    self.fpn_mask = FpnMask(self.in_channels, self.rcnn_mask_out_channels, self.num_classes)

    self.logicaland = P.LogicalAnd()
    self.loss_mask = P.SigmoidCrossEntropyWithLogits()
    self.onehot = P.OneHot()
    self.greater = P.Greater()
    self.cast = P.Cast()
    self.sum_loss = P.ReduceSum()
    self.tile = P.Tile()
    self.expandims = P.ExpandDims()

    self.on_value = Tensor(1.0, mstype.float32)
    self.off_value = Tensor(0.0, mstype.float32)

    self.num_bboxes = cfg.num_expected_pos_stage2 * batch_size
    rmv_first = np.ones((self.num_bboxes, self.num_classes))
    rmv_first[:, 0] = np.zeros((self.num_bboxes,))
    self.rmv_first_tensor = Tensor(rmv_first.astype(np.float16))
    self.mean_loss = P.ReduceMean()
def __init__(self):
    super(MathBinaryNet2, self).__init__()
    self.less_equal = P.LessEqual()
    self.greater = P.Greater()
    self.logic_or = P.LogicalOr()
    self.logic_and = P.LogicalAnd()
    self.number = 3
    self.flag = True
def __init__(self, probs=None, seed=None, dtype=mstype.int32, name="Categorical"):
    param = dict(locals())
    param['param_dict'] = {'probs': probs}
    valid_dtype = mstype.uint_type + mstype.int_type + mstype.float_type
    Validator.check_type_name("dtype", dtype, valid_dtype, type(self).__name__)
    super(Categorical, self).__init__(seed, dtype, name, param)

    self._probs = self._add_parameter(probs, 'probs')
    if self.probs is not None:
        check_rank(self.probs)
        check_prob(self.probs)
        check_sum_equal_one(probs)

        # update is_scalar_batch and broadcast_shape
        # drop one dimension
        if self.probs.shape[:-1] == ():
            self._is_scalar_batch = True
        self._broadcast_shape = self._broadcast_shape[:-1]

    self.argmax = P.ArgMaxWithValue(axis=-1)
    self.broadcast = broadcast_to
    self.cast = P.Cast()
    self.clip_by_value = C.clip_by_value
    self.concat = P.Concat(-1)
    self.cumsum = P.CumSum()
    self.dtypeop = P.DType()
    self.exp = exp_generic
    self.expand_dim = P.ExpandDims()
    self.fill = P.Fill()
    self.gather = P.GatherNd()
    self.greater = P.Greater()
    self.issubclass = P.IsSubClass()
    self.less = P.Less()
    self.log = log_generic
    self.log_softmax = P.LogSoftmax()
    self.logicor = P.LogicalOr()
    self.logicand = P.LogicalAnd()
    self.multinomial = P.Multinomial(seed=self.seed)
    self.reshape = P.Reshape()
    self.reduce_sum = P.ReduceSum(keep_dims=True)
    self.select = P.Select()
    self.shape = P.Shape()
    self.softmax = P.Softmax()
    self.squeeze = P.Squeeze()
    self.squeeze_first_axis = P.Squeeze(0)
    self.squeeze_last_axis = P.Squeeze(-1)
    self.square = P.Square()
    self.transpose = P.Transpose()
    self.is_nan = P.IsNan()

    self.index_type = mstype.int32
    self.nan = np.nan
def test_logicaland():
    op = P.LogicalAnd()
    op_wrapper = OpNetWrapper(op)

    input_x = Tensor(np.array([True, False, False]))
    input_y = Tensor(np.array([True, True, False]))
    outputs = op_wrapper(input_x, input_y)

    assert np.allclose(outputs.asnumpy(), (True, False, False))
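# A minimal companion sketch (not from the original test file): P.LogicalAnd
# broadcasts its inputs elementwise like the other binary comparison ops, so a
# length-1 tensor combines with a longer one. Assumes a default MindSpore context.
import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P


def demo_logicaland_broadcast():
    op = P.LogicalAnd()
    x = Tensor(np.array([True, False, True]))
    y = Tensor(np.array([True]))  # shape (1,), broadcast against x
    assert np.allclose(op(x, y).asnumpy(), (True, False, True))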
def __init__(self, config, batch_size, num_bboxes, add_gt_as_proposals):
    super(BboxAssignSample, self).__init__()
    cfg = config
    self.batch_size = batch_size

    self.neg_iou_thr = Tensor(cfg.neg_iou_thr, mstype.float16)
    self.pos_iou_thr = Tensor(cfg.pos_iou_thr, mstype.float16)
    self.min_pos_iou = Tensor(cfg.min_pos_iou, mstype.float16)
    self.zero_thr = Tensor(0.0, mstype.float16)

    self.num_bboxes = num_bboxes
    self.num_gts = cfg.num_gts
    self.num_expected_pos = cfg.num_expected_pos
    self.num_expected_neg = cfg.num_expected_neg
    self.add_gt_as_proposals = add_gt_as_proposals

    if self.add_gt_as_proposals:
        self.label_inds = Tensor(np.arange(1, self.num_gts + 1))

    self.concat = P.Concat(axis=0)
    self.max_gt = P.ArgMaxWithValue(axis=0)
    self.max_anchor = P.ArgMaxWithValue(axis=1)
    self.sum_inds = P.ReduceSum()
    self.iou = P.IOU()
    self.greaterequal = P.GreaterEqual()
    self.greater = P.Greater()
    self.select = P.Select()
    self.gatherND = P.GatherNd()
    self.squeeze = P.Squeeze()
    self.cast = P.Cast()
    self.logicaland = P.LogicalAnd()
    self.less = P.Less()
    self.random_choice_with_mask_pos = P.RandomChoiceWithMask(self.num_expected_pos)
    self.random_choice_with_mask_neg = P.RandomChoiceWithMask(self.num_expected_neg)
    self.reshape = P.Reshape()
    self.equal = P.Equal()
    self.bounding_box_encode = BoundingBoxEncode()
    self.scatterNdUpdate = P.ScatterNdUpdate()
    self.scatterNd = P.ScatterNd()
    self.logicalnot = P.LogicalNot()
    self.tile = P.Tile()
    self.zeros_like = P.ZerosLike()

    self.assigned_gt_inds = Tensor(np.array(-1 * np.ones(num_bboxes), dtype=np.int32))
    self.assigned_gt_zeros = Tensor(np.array(np.zeros(num_bboxes), dtype=np.int32))
    self.assigned_gt_ones = Tensor(np.array(np.ones(num_bboxes), dtype=np.int32))
    self.assigned_gt_ignores = Tensor(np.array(-1 * np.ones(num_bboxes), dtype=np.int32))
    self.assigned_pos_ones = Tensor(np.array(np.ones(self.num_expected_pos), dtype=np.int32))
    self.check_neg_mask = Tensor(np.array(np.ones(self.num_expected_neg - self.num_expected_pos),
                                          dtype=np.bool_))
    self.range_pos_size = Tensor(np.arange(self.num_expected_pos).astype(np.float16))
    self.check_gt_one = Tensor(np.array(-1 * np.ones((self.num_gts, 4)), dtype=np.float16))
    self.check_anchor_two = Tensor(np.array(-2 * np.ones((self.num_bboxes, 4)), dtype=np.float16))
    self.print = P.Print()
def __init__(self, concentration1=None, concentration0=None, seed=None,
             dtype=mstype.float32, name="Beta"):
    """
    Constructor of Beta.
    """
    param = dict(locals())
    param['param_dict'] = {'concentration1': concentration1, 'concentration0': concentration0}
    valid_dtype = mstype.float_type
    Validator.check_type_name("dtype", dtype, valid_dtype, type(self).__name__)

    # As some operators can't accept scalar input, check the type here
    if isinstance(concentration0, float):
        raise TypeError("Input concentration0 can't be scalar")
    if isinstance(concentration1, float):
        raise TypeError("Input concentration1 can't be scalar")

    super(Beta, self).__init__(seed, dtype, name, param)

    self._concentration1 = self._add_parameter(concentration1, 'concentration1')
    self._concentration0 = self._add_parameter(concentration0, 'concentration0')
    if self._concentration1 is not None:
        check_greater_zero(self._concentration1, "concentration1")
    if self._concentration0 is not None:
        check_greater_zero(self._concentration0, "concentration0")

    # ops needed for the class
    self.log = log_generic
    self.log1p = P.Log1p()
    self.neg = P.Neg()
    self.pow = P.Pow()
    self.squeeze = P.Squeeze(0)
    self.cast = P.Cast()
    self.fill = P.Fill()
    self.shape = P.Shape()
    self.select = P.Select()
    self.logicaland = P.LogicalAnd()
    self.greater = P.Greater()
    self.digamma = nn.DiGamma()
    self.lbeta = nn.LBeta()
def __init__(self, config, batch_size, num_classes,
             target_means=(0., 0., 0., 0.),
             target_stds=(0.1, 0.1, 0.2, 0.2)):
    super(RcnnCls, self).__init__()
    cfg = config
    self.rcnn_loss_cls_weight = Tensor(np.array(cfg.rcnn_loss_cls_weight).astype(np.float16))
    self.rcnn_loss_reg_weight = Tensor(np.array(cfg.rcnn_loss_reg_weight).astype(np.float16))
    self.rcnn_fc_out_channels = cfg.rcnn_fc_out_channels
    self.target_means = target_means
    self.target_stds = target_stds
    self.num_classes = num_classes
    self.in_channels = cfg.rcnn_in_channels
    self.train_batch_size = batch_size
    self.test_batch_size = cfg.test_batch_size

    self.fpn_cls = FpnCls(self.in_channels, self.rcnn_fc_out_channels,
                          self.num_classes, cfg.roi_layer["out_size"])
    self.relu = P.ReLU()
    self.logicaland = P.LogicalAnd()
    self.loss_cls = P.SoftmaxCrossEntropyWithLogits()
    self.loss_bbox = P.SmoothL1Loss(beta=1.0)
    self.loss_mask = P.SigmoidCrossEntropyWithLogits()
    self.reshape = P.Reshape()
    self.onehot = P.OneHot()
    self.greater = P.Greater()
    self.cast = P.Cast()
    self.sum_loss = P.ReduceSum()
    self.tile = P.Tile()
    self.expandims = P.ExpandDims()
    self.gather = P.GatherNd()
    self.argmax = P.ArgMaxWithValue(axis=1)

    self.on_value = Tensor(1.0, mstype.float32)
    self.off_value = Tensor(0.0, mstype.float32)
    self.value = Tensor(1.0, mstype.float16)

    self.num_bboxes = (cfg.num_expected_pos_stage2 + cfg.num_expected_neg_stage2) * batch_size

    rmv_first = np.ones((self.num_bboxes, self.num_classes))
    rmv_first[:, 0] = np.zeros((self.num_bboxes,))
    self.rmv_first_tensor = Tensor(rmv_first.astype(np.float16))

    self.num_bboxes_test = cfg.rpn_max_num * cfg.test_batch_size
def __init__(self, low=None, high=None, seed=None, dtype=mstype.float32, name="Uniform"): """ Constructor of Uniform distribution. """ param = dict(locals()) valid_dtype = mstype.float_type check_type(dtype, valid_dtype, type(self).__name__) super(Uniform, self).__init__(seed, dtype, name, param) self.parameter_type = set_param_type({ 'low': low, 'high': high }, self.dtype) if low is not None and high is not None: self._low = cast_to_tensor(low, self.parameter_type) self._high = cast_to_tensor(high, self.parameter_type) check_greater(self.low, self.high, "low value", "high value") else: self._low = low if low is None else cast_to_tensor( low, self.parameter_type) self._high = high if high is None else cast_to_tensor( high, self.parameter_type) self.default_parameters = [self.low, self.high] self.parameter_names = ['low', 'high'] # ops needed for the class self.exp = exp_generic self.log = log_generic self.squeeze = P.Squeeze(0) self.cast = P.Cast() self.const = P.ScalarToArray() self.dtypeop = P.DType() self.fill = P.Fill() self.less = P.Less() self.lessequal = P.LessEqual() self.logicaland = P.LogicalAnd() self.select = P.Select() self.shape = P.Shape() self.sq = P.Square() self.sqrt = P.Sqrt() self.zeroslike = P.ZerosLike() self.uniform = C.uniform self.sametypeshape = P.SameTypeShape()
def _IgammaSeries(ax, x, a, enabled):
    """Helper function for computing Igamma using a power series."""
    logicaland = P.LogicalAnd()
    greater = P.Greater()
    fill = P.Fill()
    shape = P.Shape()
    dtype = P.DType()
    select = P.Select()

    if dtype(ax) == mstype.float16:
        epsilon = eps_fp16
    else:
        epsilon = eps_fp32

    def cond(vals):
        enabled = vals[0]
        return enabled

    def body(vals):
        enabled = vals[0]
        r = vals[1]
        c = vals[2]
        ans = vals[3]
        x = vals[4]
        dc_da = vals[5]
        dans_da = vals[6]
        r = r + 1
        dc_da = dc_da * (x / r) + (-1 * c * x) / (r * r)
        dans_da = dans_da + dc_da
        c = c * (x / r)
        ans = ans + c
        conditional = logicaland(enabled, greater(c / ans, epsilon))
        return (conditional,
                select(enabled, r, vals[1]),
                select(enabled, c, vals[2]),
                select(enabled, ans, vals[3]),
                select(enabled, x, vals[4]),
                select(enabled, dc_da, vals[5]),
                select(enabled, dans_da, vals[6]))

    ones = fill(dtype(a), shape(a), 1)
    zeros = fill(dtype(a), shape(a), 0)
    vals = (enabled, a, ones, ones, x, zeros, zeros)
    vals = _while_helper_func(cond, body, vals)
    ans = vals[3]
    return (ans * ax) / a
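# For intuition, a scalar stdlib reference of the same power series (an
# illustrative sketch, not part of the original module): with
# ax = x**a * exp(-x) / Gamma(a), the loop below mirrors the r, c, ans updates
# and the c / ans > epsilon stopping rule of the vectorized body above.
from math import exp, lgamma, log


def _igamma_series_scalar(a, x, eps=1e-15):
    """Lower regularized incomplete gamma P(a, x); assumes a > 0 and x > 0."""
    ax = exp(a * log(x) - x - lgamma(a))
    r, c, ans = a, 1.0, 1.0
    while c / ans > eps:       # same stopping rule as the graph version
        r += 1.0
        c *= x / r
        ans += c
    return ans * ax / a        # matches the (ans * ax) / a return above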
def __init__(self, config):
    super(ETSNet, self).__init__()
    self.kernel_num = config.KERNEL_NUM
    self.inference = config.INFERENCE
    if config.INFERENCE:
        self.long_size = config.INFER_LONG_SIZE
    else:
        self.long_size = config.TRAIN_LONG_SIZE

    # backbone
    self.feature_extractor = ResNet(ResidualBlock,
                                    config.BACKBONE_LAYER_NUMS,
                                    config.BACKBONE_IN_CHANNELS,
                                    config.BACKBONE_OUT_CHANNELS)

    # neck
    self.feature_fusion = FPN(config.BACKBONE_OUT_CHANNELS,
                              config.NECK_OUT_CHANNEL,
                              self.long_size)

    # head
    self.conv1 = _conv(4 * config.NECK_OUT_CHANNEL,
                       config.NECK_OUT_CHANNEL,
                       kernel_size=3, stride=1, has_bias=True)
    self.bn1 = _bn(config.NECK_OUT_CHANNEL)
    self.relu1 = nn.ReLU()
    self.conv2 = _conv(config.NECK_OUT_CHANNEL, config.KERNEL_NUM,
                       kernel_size=1, has_bias=True)
    self._upsample = P.ResizeBilinear((self.long_size, self.long_size), align_corners=True)

    if self.inference:
        self.one_float32 = Tensor(1.0, mstype.float32)
        self.sigmoid = P.Sigmoid()
        self.greater = P.Greater()
        self.logic_and = P.LogicalAnd()

    print('ETSNet initialized!')
def test_mode_init(self, config):
    self.test_batch_size = config.test_batch_size
    self.split = P.Split(axis=0, output_num=self.test_batch_size)
    self.split_shape = P.Split(axis=0, output_num=4)
    self.split_scores = P.Split(axis=1, output_num=self.num_classes)
    self.split_cls = P.Split(axis=0, output_num=self.num_classes - 1)
    self.tile = P.Tile()
    self.gather = P.GatherNd()

    self.rpn_max_num = config.rpn_max_num

    self.zeros_for_nms = Tensor(np.zeros((self.rpn_max_num, 3)).astype(self.dtype))
    self.ones_mask = np.ones((self.rpn_max_num, 1)).astype(np.bool_)
    self.zeros_mask = np.zeros((self.rpn_max_num, 1)).astype(np.bool_)
    self.bbox_mask = Tensor(np.concatenate((self.ones_mask, self.zeros_mask,
                                            self.ones_mask, self.zeros_mask), axis=1))
    self.nms_pad_mask = Tensor(np.concatenate((self.ones_mask, self.ones_mask,
                                               self.ones_mask, self.ones_mask,
                                               self.zeros_mask), axis=1))

    self.test_score_thresh = Tensor(np.ones((self.rpn_max_num, 1)).astype(self.dtype) * config.test_score_thr)
    self.test_score_zeros = Tensor(np.ones((self.rpn_max_num, 1)).astype(self.dtype) * 0)
    self.test_box_zeros = Tensor(np.ones((self.rpn_max_num, 4)).astype(self.dtype) * -1)
    self.test_iou_thr = Tensor(np.ones((self.rpn_max_num, 1)).astype(self.dtype) * config.test_iou_thr)
    self.test_max_per_img = config.test_max_per_img
    self.nms_test = P.NMSWithMask(config.test_iou_thr)
    self.softmax = P.Softmax(axis=1)
    self.logicand = P.LogicalAnd()
    self.oneslike = P.OnesLike()
    self.test_topk = P.TopK(sorted=True)
    self.test_num_proposal = self.test_batch_size * self.rpn_max_num
def __init__(self, low=None, high=None, seed=None, dtype=mstype.float32, name="Uniform"): """ Constructor of Uniform distribution. """ param = dict(locals()) param['param_dict'] = {'low': low, 'high': high} valid_dtype = mstype.float_type Validator.check_type_name("dtype", dtype, valid_dtype, type(self).__name__) super(Uniform, self).__init__(seed, dtype, name, param) self._low = self._add_parameter(low, 'low') self._high = self._add_parameter(high, 'high') if self.low is not None and self.high is not None: check_greater(self.low, self.high, 'low', 'high') # ops needed for the class self.exp = exp_generic self.log = log_generic self.squeeze = P.Squeeze(0) self.cast = P.Cast() self.const = P.ScalarToArray() self.dtypeop = P.DType() self.fill = P.Fill() self.less = P.Less() self.lessequal = P.LessEqual() self.logicaland = P.LogicalAnd() self.select = P.Select() self.shape = P.Shape() self.sq = P.Square() self.zeroslike = P.ZerosLike() self.uniform = C.uniform
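# A hedged usage sketch for the constructor above (assuming this class is the
# one exposed as mindspore.nn.probability.distribution.Uniform): prob() returns
# 1 / (high - low) inside the support and 0 outside it.
import mindspore.common.dtype as mstype
import mindspore.nn.probability.distribution as msd
from mindspore import Tensor

u = msd.Uniform(low=0.0, high=2.0, dtype=mstype.float32)
print(u.prob(Tensor([0.5, 1.0, 3.0], mstype.float32)))  # expected: [0.5 0.5 0. ]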
def __init__(self, config, representation_size, batch_size, num_classes,
             target_means=(0., 0., 0., 0.),
             target_stds=(0.1, 0.1, 0.2, 0.2)):
    super(Rcnn, self).__init__()
    cfg = config
    self.rcnn_loss_cls_weight = Tensor(np.array(cfg.rcnn_loss_cls_weight).astype(np.float16))
    self.rcnn_loss_reg_weight = Tensor(np.array(cfg.rcnn_loss_reg_weight).astype(np.float16))
    self.rcnn_fc_out_channels = cfg.rcnn_fc_out_channels
    self.target_means = target_means
    self.target_stds = target_stds
    self.num_classes = num_classes
    self.in_channels = cfg.rcnn_in_channels
    self.train_batch_size = batch_size
    self.test_batch_size = cfg.test_batch_size
    self.use_ambigous_sample = cfg.use_ambigous_sample

    shape_0 = (self.rcnn_fc_out_channels, representation_size)
    weights_0 = initializer("XavierUniform", shape=shape_0[::-1], dtype=mstype.float16).to_tensor()
    shape_1 = (self.rcnn_fc_out_channels, self.rcnn_fc_out_channels)
    weights_1 = initializer("XavierUniform", shape=shape_1[::-1], dtype=mstype.float16).to_tensor()
    self.shared_fc_0 = DenseNoTranpose(representation_size, self.rcnn_fc_out_channels, weights_0)
    self.shared_fc_1 = DenseNoTranpose(self.rcnn_fc_out_channels, self.rcnn_fc_out_channels, weights_1)

    cls_weight = initializer('Normal', shape=[num_classes, self.rcnn_fc_out_channels][::-1],
                             dtype=mstype.float16).to_tensor()
    reg_weight = initializer('Normal', shape=[num_classes * 4, self.rcnn_fc_out_channels][::-1],
                             dtype=mstype.float16).to_tensor()
    self.cls_scores = DenseNoTranpose(self.rcnn_fc_out_channels, num_classes, cls_weight)
    self.reg_scores = DenseNoTranpose(self.rcnn_fc_out_channels, num_classes * 4, reg_weight)

    self.flatten = P.Flatten()
    self.relu = P.ReLU()
    self.logicaland = P.LogicalAnd()
    self.loss_cls = P.SoftmaxCrossEntropyWithLogits()
    self.loss_bbox = P.SmoothL1Loss(beta=1.0)
    self.reshape = P.Reshape()
    self.onehot = P.OneHot()
    self.greater = P.Greater()
    self.equal = P.Equal()
    self.cast = P.Cast()
    self.sum_loss = P.ReduceSum()
    self.tile = P.Tile()
    self.expandims = P.ExpandDims()
    self.gather = P.GatherNd()
    self.argmax = P.ArgMaxWithValue(axis=1)

    self.on_value = Tensor(1.0, mstype.float32)
    self.off_value = Tensor(0.0, mstype.float32)
    self.value = Tensor(1.0, mstype.float16)

    self.num_bboxes = (cfg.num_expected_pos_stage2 + cfg.num_expected_neg_stage2) * batch_size
    if self.use_ambigous_sample:
        self.num_bboxes = (cfg.num_expected_pos_stage2 + cfg.num_expected_amb_stage2 +
                           cfg.num_expected_neg_stage2) * batch_size

    rmv_first = np.ones((self.num_bboxes, self.num_classes))
    rmv_first[:, 0] = np.zeros((self.num_bboxes,))
    self.rmv_first_tensor = Tensor(rmv_first.astype(np.float16))

    self.num_bboxes_test = cfg.rpn_max_num * cfg.test_batch_size

    range_max = np.arange(self.num_bboxes_test).astype(np.int32)
    self.range_max = Tensor(range_max)
def __init__(self):
    super(NetAnd, self).__init__()
    self.logicaland = P.LogicalAnd()
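# The snippet above shows only the constructor. A typical wrapper of this kind
# pairs it with a construct method like the following (a hedged sketch, since
# the original construct is not shown here; the class name is hypothetical).
import mindspore.nn as nn
from mindspore.ops import operations as P


class NetAndSketch(nn.Cell):
    def __init__(self):
        super(NetAndSketch, self).__init__()
        self.logicaland = P.LogicalAnd()

    def construct(self, input_x, input_y):
        # apply the stored primitive elementwise to the two inputs
        return self.logicaland(input_x, input_y)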
def __init__(self, config, batch_size, num_bboxes, add_gt_as_proposals):
    super(BboxAssignSampleForRcnn, self).__init__()
    cfg = config
    self.batch_size = batch_size
    self.neg_iou_thr = cfg.neg_iou_thr_stage2
    self.pos_iou_thr = cfg.pos_iou_thr_stage2
    self.min_pos_iou = cfg.min_pos_iou_stage2
    self.num_gts = cfg.num_gts
    self.num_bboxes = num_bboxes
    self.num_expected_pos = cfg.num_expected_pos_stage2
    self.num_expected_neg = cfg.num_expected_neg_stage2
    self.num_expected_total = cfg.num_expected_total_stage2

    self.add_gt_as_proposals = add_gt_as_proposals
    self.label_inds = Tensor(np.arange(1, self.num_gts + 1).astype(np.int32))
    self.add_gt_as_proposals_valid = Tensor(np.array(self.add_gt_as_proposals * np.ones(self.num_gts),
                                                     dtype=np.int32))

    self.concat = P.Concat(axis=0)
    self.max_gt = P.ArgMaxWithValue(axis=0)
    self.max_anchor = P.ArgMaxWithValue(axis=1)
    self.sum_inds = P.ReduceSum()
    self.iou = P.IOU()
    self.greaterequal = P.GreaterEqual()
    self.greater = P.Greater()
    self.select = P.Select()
    self.gatherND = P.GatherNd()
    self.squeeze = P.Squeeze()
    self.cast = P.Cast()
    self.logicaland = P.LogicalAnd()
    self.less = P.Less()
    self.random_choice_with_mask_pos = P.RandomChoiceWithMask(self.num_expected_pos)
    self.random_choice_with_mask_neg = P.RandomChoiceWithMask(self.num_expected_neg)
    self.reshape = P.Reshape()
    self.equal = P.Equal()
    self.bounding_box_encode = P.BoundingBoxEncode(means=(0.0, 0.0, 0.0, 0.0),
                                                   stds=(0.1, 0.1, 0.2, 0.2))
    self.concat_axis1 = P.Concat(axis=1)
    self.logicalnot = P.LogicalNot()
    self.tile = P.Tile()

    # Check
    self.check_gt_one = Tensor(np.array(-1 * np.ones((self.num_gts, 4)), dtype=np.float16))
    self.check_anchor_two = Tensor(np.array(-2 * np.ones((self.num_bboxes, 4)), dtype=np.float16))

    # Init tensor
    self.assigned_gt_inds = Tensor(np.array(-1 * np.ones(num_bboxes), dtype=np.int32))
    self.assigned_gt_zeros = Tensor(np.array(np.zeros(num_bboxes), dtype=np.int32))
    self.assigned_gt_ones = Tensor(np.array(np.ones(num_bboxes), dtype=np.int32))
    self.assigned_gt_ignores = Tensor(np.array(-1 * np.ones(num_bboxes), dtype=np.int32))
    self.assigned_pos_ones = Tensor(np.array(np.ones(self.num_expected_pos), dtype=np.int32))
    self.gt_ignores = Tensor(np.array(-1 * np.ones(self.num_gts), dtype=np.int32))
    self.range_pos_size = Tensor(np.arange(self.num_expected_pos).astype(np.float16))
    self.check_neg_mask = Tensor(np.array(np.ones(self.num_expected_neg - self.num_expected_pos),
                                          dtype=np.bool_))
    self.bboxs_neg_mask = Tensor(np.zeros((self.num_expected_neg, 4), dtype=np.float16))
    self.labels_neg_mask = Tensor(np.array(np.zeros(self.num_expected_neg), dtype=np.uint8))

    self.reshape_shape_pos = (self.num_expected_pos, 1)
    self.reshape_shape_neg = (self.num_expected_neg, 1)

    self.scalar_zero = Tensor(0.0, dtype=mstype.float16)
    self.scalar_neg_iou_thr = Tensor(self.neg_iou_thr, dtype=mstype.float16)
    self.scalar_pos_iou_thr = Tensor(self.pos_iou_thr, dtype=mstype.float16)
    self.scalar_min_pos_iou = Tensor(self.min_pos_iou, dtype=mstype.float16)

    self.expand_dims = P.ExpandDims()
    self.split = P.Split(axis=1, output_num=4)
    self.concat_last_axis = P.Concat(axis=-1)
    self.round = P.Round()
    self.image_h_w = Tensor([cfg.img_height, cfg.img_width, cfg.img_height, cfg.img_width],
                            dtype=mstype.float16)
    self.range = nn.Range(start=0, limit=cfg.num_expected_pos_stage2)
    self.crop_and_resize = P.CropAndResize(method="bilinear_v2")
    self.mask_shape = (cfg.mask_shape[0], cfg.mask_shape[1])
    self.squeeze_mask_last = P.Squeeze(axis=-1)
    'desc_bprop': [Tensor(np.ones((2, 3, 4, 5), np.bool_))],
    'skip': ['backward']}),
('Greater', {
    'block': P.Greater(),
    'desc_inputs': [[2, 3, 4, 1], [4, 5]],
    'desc_bprop': [Tensor(np.ones((2, 3, 4, 5), np.bool_))]}),
('GreaterEqual', {
    'block': P.GreaterEqual(),
    'desc_inputs': [[2, 3, 4, 1], [4, 5]],
    'desc_bprop': [Tensor(np.ones((2, 3, 4, 5), np.bool_))]}),
('LogicalNot', {
    'block': P.LogicalNot(),
    'desc_inputs': [Tensor(np.zeros((3, 4, 5), np.bool_))],
    'desc_bprop': [Tensor(np.ones((3, 4, 5), np.bool_))]}),
('LogicalAnd', {
    'block': P.LogicalAnd(),
    'desc_inputs': [Tensor(np.zeros((2, 3, 4), np.bool_)), Tensor(np.ones((1), np.bool_))],
    'desc_bprop': [Tensor(np.zeros((2, 3, 4), np.bool_))]}),
('LogicalOr', {
    'block': P.LogicalOr(),
    'desc_inputs': [Tensor(np.zeros((3, 4, 5), np.bool_)), Tensor(np.ones((3, 1, 1), np.bool_))],
    'desc_bprop': [Tensor(np.zeros((3, 4, 5), np.bool_))]}),
('NpuAllocFloatStatus', {
    'block': P.NPUAllocFloatStatus(),
    'desc_inputs': [],
    'add_fack_input': True,
    'fack_input_type': np.float32,
    'desc_bprop': [Tensor(np.zeros([8]).astype(np.float32))],
    'skip': ['backward']}),
('NpuGetFloatStatus', {
    'block': P.NPUGetFloatStatus(),
    'desc_inputs': [[2, 3, 4, 1], [4, 5]],
    'desc_bprop': [Tensor(np.ones((2, 3, 4, 5), np.bool_))]
}),
('GreaterEqual', {
    'block': P.GreaterEqual(),
    'desc_inputs': [[2, 3, 4, 1], [4, 5]],
    'desc_bprop': [Tensor(np.ones((2, 3, 4, 5), np.bool_))]
}),
('LogicalNot', {
    'block': P.LogicalNot(),
    'desc_inputs': [Tensor(np.zeros((3, 4, 5), np.bool_))],
    'desc_bprop': [Tensor(np.ones((3, 4, 5), np.bool_))]
}),
('LogicalAnd', {
    'block': P.LogicalAnd(),
    'desc_inputs': [
        Tensor(np.zeros((2, 3, 4), np.bool_)),
        Tensor(np.ones((1), np.bool_))
    ],
    'desc_bprop': [Tensor(np.zeros((2, 3, 4), np.bool_))]
}),
('LogicalOr', {
    'block': P.LogicalOr(),
    'desc_inputs': [
        Tensor(np.zeros((3, 4, 5), np.bool_)),
        Tensor(np.ones((3, 1, 1), np.bool_))
    ],
    'desc_bprop': [Tensor(np.zeros((3, 4, 5), np.bool_))]
}),
def __init__(self, config):
    super(Deeptext_VGG16, self).__init__()
    self.train_batch_size = config.batch_size
    self.num_classes = config.num_classes
    self.anchor_scales = config.anchor_scales
    self.anchor_ratios = config.anchor_ratios
    self.anchor_strides = config.anchor_strides
    self.target_means = tuple(config.rcnn_target_means)
    self.target_stds = tuple(config.rcnn_target_stds)

    # Anchor generator
    anchor_base_sizes = None
    self.anchor_base_sizes = list(self.anchor_strides) if anchor_base_sizes is None else anchor_base_sizes

    self.anchor_generators = []
    for anchor_base in self.anchor_base_sizes:
        self.anchor_generators.append(AnchorGenerator(anchor_base, self.anchor_scales, self.anchor_ratios))

    self.num_anchors = len(self.anchor_ratios) * len(self.anchor_scales)

    featmap_sizes = config.feature_shapes
    assert len(featmap_sizes) == len(self.anchor_generators)

    self.anchor_list = self.get_anchors(featmap_sizes)

    # Rpn and rpn loss
    self.gt_labels_stage1 = Tensor(np.ones((self.train_batch_size, config.num_gts)).astype(np.uint8))
    self.rpn_with_loss = RPN(config,
                             self.train_batch_size,
                             config.rpn_in_channels,
                             config.rpn_feat_channels,
                             config.num_anchors,
                             config.rpn_cls_out_channels)

    # Proposal
    self.proposal_generator = Proposal(config,
                                       self.train_batch_size,
                                       config.activate_num_classes,
                                       config.use_sigmoid_cls)
    self.proposal_generator.set_train_local(config, True)
    self.proposal_generator_test = Proposal(config,
                                            config.test_batch_size,
                                            config.activate_num_classes,
                                            config.use_sigmoid_cls)
    self.proposal_generator_test.set_train_local(config, False)

    # Assign and sampler stage two
    self.bbox_assigner_sampler_for_rcnn = BboxAssignSampleForRcnn(config, self.train_batch_size,
                                                                  config.num_bboxes_stage2, True)
    self.decode = P.BoundingBoxDecode(max_shape=(576, 960), means=self.target_means,
                                      stds=self.target_stds)

    # Rcnn
    self.rcnn = Rcnn(config,
                     config.rcnn_in_channels * config.roi_layer['out_size'] * config.roi_layer['out_size'],
                     self.train_batch_size, self.num_classes)

    # Op declare
    self.squeeze = P.Squeeze()
    self.cast = P.Cast()
    self.concat = P.Concat(axis=0)
    self.concat_1 = P.Concat(axis=1)
    self.concat_2 = P.Concat(axis=2)
    self.reshape = P.Reshape()
    self.select = P.Select()
    self.greater = P.Greater()
    self.transpose = P.Transpose()

    # Test mode
    self.test_batch_size = config.test_batch_size
    self.split = P.Split(axis=0, output_num=self.test_batch_size)
    self.split_shape = P.Split(axis=0, output_num=4)
    self.split_scores = P.Split(axis=1, output_num=self.num_classes)
    self.split_cls = P.Split(axis=0, output_num=self.num_classes - 1)
    self.tile = P.Tile()
    self.gather = P.GatherNd()

    self.rpn_max_num = config.rpn_max_num

    self.zeros_for_nms = Tensor(np.zeros((self.rpn_max_num, 3)).astype(np.float32))
    self.ones_mask = np.ones((self.rpn_max_num, 1)).astype(np.bool_)
    self.zeros_mask = np.zeros((self.rpn_max_num, 1)).astype(np.bool_)
    self.bbox_mask = Tensor(np.concatenate((self.ones_mask, self.zeros_mask,
                                            self.ones_mask, self.zeros_mask), axis=1))
    self.nms_pad_mask = Tensor(np.concatenate((self.ones_mask, self.ones_mask,
                                               self.ones_mask, self.ones_mask,
                                               self.zeros_mask), axis=1))

    self.test_score_thresh = Tensor(np.ones((self.rpn_max_num, 1)).astype(np.float32) * config.test_score_thr)
    self.test_score_zeros = Tensor(np.ones((self.rpn_max_num, 1)).astype(np.float32) * 0)
    self.test_box_zeros = Tensor(np.ones((self.rpn_max_num, 4)).astype(np.float32) * -1)
    self.test_iou_thr = Tensor(np.ones((self.rpn_max_num, 1)).astype(np.float32) * config.test_iou_thr)
    self.test_max_per_img = config.test_max_per_img
    self.nms_test = P.NMSWithMask(config.test_iou_thr)
    self.softmax = P.Softmax(axis=1)
    self.logicand = P.LogicalAnd()
    self.oneslike = P.OnesLike()
    self.test_topk = P.TopK(sorted=True)
    self.test_num_proposal = self.test_batch_size * self.rpn_max_num

    # Improve speed
    self.concat_start = (self.num_classes - 2)
    self.concat_end = (self.num_classes - 1)

    # Init tensor
    self.use_ambigous_sample = config.use_ambigous_sample
    roi_align_index = [np.array(np.ones((config.num_expected_pos_stage2 +
                                         config.num_expected_neg_stage2, 1)) * i, dtype=np.float32)
                       for i in range(self.train_batch_size)]
    if self.use_ambigous_sample:
        roi_align_index = [np.array(np.ones((config.num_expected_pos_stage2 +
                                             config.num_expected_amb_stage2 +
                                             config.num_expected_neg_stage2, 1)) * i, dtype=np.float32)
                           for i in range(self.train_batch_size)]

    roi_align_index_test = [np.array(np.ones((config.rpn_max_num, 1)) * i, dtype=np.float32)
                            for i in range(self.test_batch_size)]

    self.roi_align_index_tensor = Tensor(np.concatenate(roi_align_index))
    self.roi_align_index_test_tensor = Tensor(np.concatenate(roi_align_index_test))

    self.roi_align4 = P.ROIAlign(pooled_width=7, pooled_height=7, spatial_scale=0.125)
    self.roi_align5 = P.ROIAlign(pooled_width=7, pooled_height=7, spatial_scale=0.0625)

    self.concat1 = P.Concat(axis=1)
    self.roi_align_fuse = _conv(in_channels=1024, out_channels=512, kernel_size=1, padding=0, stride=1)
    self.vgg16_feature_extractor = VGG16FeatureExtraction()
def __init__(self):
    super(NetConditionLackBranch, self).__init__()
    self.logicaland = P.LogicalAnd()
    self.logicalor = P.LogicalOr()
from mindspore.nn import Optimizer
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.nn.optim import Momentum
from mindspore.train import Model
from ....dataset_mock import MindData

context.set_context(mode=context.GRAPH_MODE, enable_sparse=True)

reduce_sum = P.ReduceSum()
unsorted_segment_sum = P.UnsortedSegmentSum()
transpose = P.Transpose()
shape_op = P.Shape()
reshape = P.Reshape()
size_op = P.Size()
invert_permutation = P.InvertPermutation()
logical_and = P.LogicalAnd()


def get_axis(x):
    shape = shape_op(x)
    length = F.tuple_len(shape)
    perm = F.make_range(0, length)
    return perm


class MSELoss(nn.Cell):
    def __init__(self):
        super(MSELoss, self).__init__()
        self.reduce_sum = P.ReduceSum()
        self.square = P.Square()
        self.reduce_mean = P.ReduceMean()
    'skip': ['backward']
}),
# input x is not Tensor(bool)
('LogicalNot1', {
    'block': (P.LogicalNot(), {
        'exception': TypeError,
        'error_keywords': ['LogicalNot']
    }),
    'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.int32))],
    'skip': ['backward']
}),
# type of x and y not match
('LogicalAnd1', {
    'block': (P.LogicalAnd(), {
        'exception': TypeError,
        'error_keywords': ['LogicalAnd']
    }),
    'desc_inputs': [
        Tensor(np.ones([3, 4]).astype(np.int32)),
        Tensor(np.ones([3, 4]).astype(np.bool_))
    ],
    'skip': ['backward']
}),
# shape of x and y not match
('LogicalAnd2', {
    'block': (P.LogicalAnd(), {
        'exception': ValueError,
        'error_keywords': ['LogicalAnd']
    }),
def __init__(self, config):
    super(Mask_Rcnn_Resnet50, self).__init__()
    self.train_batch_size = config.batch_size
    self.num_classes = config.num_classes
    self.anchor_scales = config.anchor_scales
    self.anchor_ratios = config.anchor_ratios
    self.anchor_strides = config.anchor_strides
    self.target_means = tuple(config.rcnn_target_means)
    self.target_stds = tuple(config.rcnn_target_stds)

    # Anchor generator
    anchor_base_sizes = None
    self.anchor_base_sizes = list(self.anchor_strides) if anchor_base_sizes is None else anchor_base_sizes

    self.anchor_generators = []
    for anchor_base in self.anchor_base_sizes:
        self.anchor_generators.append(AnchorGenerator(anchor_base, self.anchor_scales, self.anchor_ratios))

    self.num_anchors = len(self.anchor_ratios) * len(self.anchor_scales)

    featmap_sizes = config.feature_shapes
    assert len(featmap_sizes) == len(self.anchor_generators)

    self.anchor_list = self.get_anchors(featmap_sizes)

    # Backbone resnet50
    self.backbone = ResNetFea(ResidualBlockUsing,
                              config.resnet_block,
                              config.resnet_in_channels,
                              config.resnet_out_channels,
                              False)

    # Fpn
    self.fpn_ncek = FeatPyramidNeck(config.fpn_in_channels,
                                    config.fpn_out_channels,
                                    config.fpn_num_outs)

    # Rpn and rpn loss
    self.gt_labels_stage1 = Tensor(np.ones((self.train_batch_size, config.num_gts)).astype(np.uint8))
    self.rpn_with_loss = RPN(config,
                             self.train_batch_size,
                             config.rpn_in_channels,
                             config.rpn_feat_channels,
                             config.num_anchors,
                             config.rpn_cls_out_channels)

    # Proposal
    self.proposal_generator = Proposal(config,
                                       self.train_batch_size,
                                       config.activate_num_classes,
                                       config.use_sigmoid_cls)
    self.proposal_generator.set_train_local(config, True)
    self.proposal_generator_test = Proposal(config,
                                            config.test_batch_size,
                                            config.activate_num_classes,
                                            config.use_sigmoid_cls)
    self.proposal_generator_test.set_train_local(config, False)

    # Assign and sampler stage two
    self.bbox_assigner_sampler_for_rcnn = BboxAssignSampleForRcnn(config, self.train_batch_size,
                                                                  config.num_bboxes_stage2, True)
    self.decode = P.BoundingBoxDecode(max_shape=(768, 1280), means=self.target_means,
                                      stds=self.target_stds)

    # Roi
    self.roi_align = SingleRoIExtractor(config,
                                        config.roi_layer,
                                        config.roi_align_out_channels,
                                        config.roi_align_featmap_strides,
                                        self.train_batch_size,
                                        config.roi_align_finest_scale,
                                        mask=False)
    self.roi_align.set_train_local(config, True)

    self.roi_align_mask = SingleRoIExtractor(config,
                                             config.roi_layer,
                                             config.roi_align_out_channels,
                                             config.roi_align_featmap_strides,
                                             self.train_batch_size,
                                             config.roi_align_finest_scale,
                                             mask=True)
    self.roi_align_mask.set_train_local(config, True)

    self.roi_align_test = SingleRoIExtractor(config,
                                             config.roi_layer,
                                             config.roi_align_out_channels,
                                             config.roi_align_featmap_strides,
                                             1,
                                             config.roi_align_finest_scale,
                                             mask=False)
    self.roi_align_test.set_train_local(config, False)

    self.roi_align_mask_test = SingleRoIExtractor(config,
                                                  config.roi_layer,
                                                  config.roi_align_out_channels,
                                                  config.roi_align_featmap_strides,
                                                  1,
                                                  config.roi_align_finest_scale,
                                                  mask=True)
    self.roi_align_mask_test.set_train_local(config, False)

    # Rcnn
    self.rcnn_cls = RcnnCls(config, self.train_batch_size, self.num_classes)
    self.rcnn_mask = RcnnMask(config, self.train_batch_size, self.num_classes)

    # Op declare
    self.squeeze = P.Squeeze()
    self.cast = P.Cast()
    self.concat = P.Concat(axis=0)
    self.concat_1 = P.Concat(axis=1)
    self.concat_2 = P.Concat(axis=2)
    self.reshape = P.Reshape()
    self.select = P.Select()
    self.greater = P.Greater()
    self.transpose = P.Transpose()

    # Test mode
    self.test_batch_size = config.test_batch_size
    self.split = P.Split(axis=0, output_num=self.test_batch_size)
    self.split_shape = P.Split(axis=0, output_num=4)
    self.split_scores = P.Split(axis=1, output_num=self.num_classes)
    self.split_fb_mask = P.Split(axis=1, output_num=self.num_classes)
    self.split_cls = P.Split(axis=0, output_num=self.num_classes - 1)
    self.tile = P.Tile()
    self.gather = P.GatherNd()

    self.rpn_max_num = config.rpn_max_num

    self.zeros_for_nms = Tensor(np.zeros((self.rpn_max_num, 3)).astype(np.float16))
    self.ones_mask = np.ones((self.rpn_max_num, 1)).astype(np.bool_)
    self.zeros_mask = np.zeros((self.rpn_max_num, 1)).astype(np.bool_)
    self.bbox_mask = Tensor(np.concatenate((self.ones_mask, self.zeros_mask,
                                            self.ones_mask, self.zeros_mask), axis=1))
    self.nms_pad_mask = Tensor(np.concatenate((self.ones_mask, self.ones_mask,
                                               self.ones_mask, self.ones_mask,
                                               self.zeros_mask), axis=1))

    self.test_score_thresh = Tensor(np.ones((self.rpn_max_num, 1)).astype(np.float16) * config.test_score_thr)
    self.test_score_zeros = Tensor(np.ones((self.rpn_max_num, 1)).astype(np.float16) * 0)
    self.test_box_zeros = Tensor(np.ones((self.rpn_max_num, 4)).astype(np.float16) * -1)
    self.test_iou_thr = Tensor(np.ones((self.rpn_max_num, 1)).astype(np.float16) * config.test_iou_thr)
    self.test_max_per_img = config.test_max_per_img
    self.nms_test = P.NMSWithMask(config.test_iou_thr)
    self.softmax = P.Softmax(axis=1)
    self.logicand = P.LogicalAnd()
    self.oneslike = P.OnesLike()
    self.test_topk = P.TopK(sorted=True)
    self.test_num_proposal = self.test_batch_size * self.rpn_max_num

    # Improve speed
    self.concat_start = min(self.num_classes - 2, 55)
    self.concat_end = (self.num_classes - 1)

    # Init tensor
    roi_align_index = [np.array(np.ones((config.num_expected_pos_stage2 +
                                         config.num_expected_neg_stage2, 1)) * i, dtype=np.float16)
                       for i in range(self.train_batch_size)]
    roi_align_index_test = [np.array(np.ones((config.rpn_max_num, 1)) * i, dtype=np.float16)
                            for i in range(self.test_batch_size)]

    self.roi_align_index_tensor = Tensor(np.concatenate(roi_align_index))
    self.roi_align_index_test_tensor = Tensor(np.concatenate(roi_align_index_test))

    roi_align_index_pos = [np.array(np.ones((config.num_expected_pos_stage2, 1)) * i, dtype=np.float16)
                           for i in range(self.train_batch_size)]
    self.roi_align_index_tensor_pos = Tensor(np.concatenate(roi_align_index_pos))

    self.rcnn_loss_cls_weight = Tensor(np.array(config.rcnn_loss_cls_weight).astype(np.float16))
    self.rcnn_loss_reg_weight = Tensor(np.array(config.rcnn_loss_reg_weight).astype(np.float16))
    self.rcnn_loss_mask_fb_weight = Tensor(np.array(config.rcnn_loss_mask_fb_weight).astype(np.float16))

    self.argmax_with_value = P.ArgMaxWithValue(axis=1)
    self.on_value = Tensor(1.0, mstype.float32)
    self.off_value = Tensor(0.0, mstype.float32)
    self.onehot = P.OneHot()
    self.reducesum = P.ReduceSum()
    self.sigmoid = P.Sigmoid()
    self.expand_dims = P.ExpandDims()
    self.test_mask_fb_zeros = Tensor(np.zeros((self.rpn_max_num, 28, 28)).astype(np.float16))
    self.value = Tensor(1.0, mstype.float16)
def _IgammacContinuedFraction(ax, x, a, enabled):
    """Helper function for computing Igammac using a continued fraction."""
    abs_x = P.Abs()
    logicaland = P.LogicalAnd()
    greater = P.Greater()
    less = P.Less()
    notequal = P.NotEqual()
    fill = P.Fill()
    shape = P.Shape()
    dtype = P.DType()
    select = P.Select()

    if dtype(ax) == mstype.float16:
        epsilon = eps_fp16
    else:
        epsilon = eps_fp32

    def cond(vals):
        enabled = vals[0]
        c = vals[5]
        return logicaland(less(c, 2000), enabled)

    def body(vals):
        enabled = vals[0]
        ans = vals[1]
        t = vals[2]
        y = vals[3]
        z = vals[4]
        c = vals[5]
        pkm1 = vals[6]
        qkm1 = vals[7]
        pkm2 = vals[8]
        qkm2 = vals[9]
        dpkm2_da = vals[10]
        dqkm2_da = vals[11]
        dpkm1_da = vals[12]
        dqkm1_da = vals[13]
        dans_da = vals[14]
        c = c + 1
        y = y + 1
        z = z + 2
        yc = y * c
        pk = pkm1 * z - pkm2 * yc
        qk = qkm1 * z - qkm2 * yc
        qk_is_nonzero = notequal(qk, 0)
        r = pk / qk
        t = select(qk_is_nonzero, abs_x((ans - r) / r), fill(dtype(t), shape(t), 1))
        ans = select(qk_is_nonzero, r, ans)
        dpk_da = dpkm1_da * z - pkm1 - dpkm2_da * yc + pkm2 * c
        dqk_da = dqkm1_da * z - qkm1 - dqkm2_da * yc + qkm2 * c
        dans_da_new = select(qk_is_nonzero, (dpk_da - ans * dqk_da) / qk, dans_da)
        grad_conditional = select(qk_is_nonzero,
                                  abs_x(dans_da_new - dans_da),
                                  fill(dtype(dans_da), shape(dans_da), 1))
        pkm2 = pkm1
        pkm1 = pk
        qkm2 = qkm1
        qkm1 = qk
        dpkm2_da = dpkm1_da
        dqkm2_da = dqkm1_da
        dpkm1_da = dpk_da
        dqkm1_da = dqk_da
        rescale = greater(abs_x(pk), 1 / epsilon)
        pkm2 = select(rescale, pkm2 * epsilon, pkm2)
        pkm1 = select(rescale, pkm1 * epsilon, pkm1)
        qkm2 = select(rescale, qkm2 * epsilon, qkm2)
        qkm1 = select(rescale, qkm1 * epsilon, qkm1)
        dpkm2_da = select(rescale, dpkm2_da * epsilon, dpkm2_da)
        dqkm2_da = select(rescale, dqkm2_da * epsilon, dqkm2_da)
        dpkm1_da = select(rescale, dpkm1_da * epsilon, dpkm1_da)
        dqkm1_da = select(rescale, dqkm1_da * epsilon, dqkm1_da)
        conditional = logicaland(enabled, greater(grad_conditional, epsilon))
        return (conditional,
                select(enabled, ans, vals[1]),
                select(enabled, t, vals[2]),
                select(enabled, y, vals[3]),
                select(enabled, z, vals[4]),
                c,
                select(enabled, pkm1, vals[6]),
                select(enabled, qkm1, vals[7]),
                select(enabled, pkm2, vals[8]),
                select(enabled, qkm2, vals[9]),
                select(enabled, dpkm2_da, vals[10]),
                select(enabled, dqkm2_da, vals[11]),
                select(enabled, dpkm1_da, vals[12]),
                select(enabled, dqkm1_da, vals[13]),
                select(enabled, dans_da_new, vals[14]))

    y = 1 - a
    z = x + y + 1
    c = fill(dtype(x), shape(x), 0)
    pkm2 = fill(dtype(x), shape(x), 1)
    qkm2 = x
    pkm1 = x + 1
    qkm1 = z * x
    ans = pkm1 / qkm1
    t = fill(dtype(x), shape(x), 1)
    dpkm2_da = fill(dtype(x), shape(x), 0)
    dqkm2_da = fill(dtype(x), shape(x), 0)
    dpkm1_da = fill(dtype(x), shape(x), 0)
    dqkm1_da = -x
    dans_da = (dpkm1_da - ans * dqkm1_da) / qkm1
    vals = (enabled, ans, t, y, z, c, pkm1, qkm1, pkm2, qkm2,
            dpkm2_da, dqkm2_da, dpkm1_da, dqkm1_da, dans_da)
    vals = _while_helper_func(cond, body, vals)
    ans = vals[1]
    return ans * ax
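# A scalar cross-check of the same Cephes-style recurrence (an illustrative
# sketch only; it drops the derivative bookkeeping that the vectorized version
# carries for autodiff). With ax = x**a * exp(-x) / Gamma(a), this evaluates
# the upper regularized incomplete gamma Q(a, x); assumes a > 0 and x > 0.
from math import exp, lgamma, log


def _igammac_cf_scalar(a, x, eps=1e-15):
    ax = exp(a * log(x) - x - lgamma(a))
    y = 1.0 - a
    z = x + y + 1.0
    c = 0.0
    pkm2, qkm2 = 1.0, x
    pkm1, qkm1 = x + 1.0, z * x
    ans = pkm1 / qkm1
    for _ in range(2000):          # same 2000-iteration cap as cond() above
        c += 1.0
        y += 1.0
        z += 2.0
        yc = y * c
        pk = pkm1 * z - pkm2 * yc
        qk = qkm1 * z - qkm2 * yc
        if qk != 0:
            r = pk / qk
            t = abs((ans - r) / r)
            ans = r
        else:
            t = 1.0
        pkm2, pkm1, qkm2, qkm1 = pkm1, pk, qkm1, qk
        if abs(pk) > 1.0 / eps:    # rescale to avoid overflow, as in the graph version
            pkm2 *= eps
            pkm1 *= eps
            qkm2 *= eps
            qkm1 *= eps
        if t <= eps:
            break
    return ans * ax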
def __init__(self, config, batch_size, num_bboxes, add_gt_as_proposals):
    super(BboxAssignSampleForRcnn, self).__init__()
    cfg = config
    self.dtype = np.float32
    self.ms_type = mstype.float32
    self.batch_size = batch_size
    self.neg_iou_thr = cfg.neg_iou_thr_stage2
    self.pos_iou_thr = cfg.pos_iou_thr_stage2
    self.min_pos_iou = cfg.min_pos_iou_stage2
    self.num_gts = cfg.num_gts
    self.num_bboxes = num_bboxes
    self.num_expected_pos = cfg.num_expected_pos_stage2
    self.num_expected_neg = cfg.num_expected_neg_stage2
    self.num_expected_total = cfg.num_expected_total_stage2

    self.add_gt_as_proposals = add_gt_as_proposals
    self.label_inds = Tensor(np.arange(1, self.num_gts + 1).astype(np.int32))
    self.add_gt_as_proposals_valid = Tensor(np.full(self.num_gts, self.add_gt_as_proposals, dtype=np.int32))

    self.concat = P.Concat(axis=0)
    self.max_gt = P.ArgMaxWithValue(axis=0)
    self.max_anchor = P.ArgMaxWithValue(axis=1)
    self.sum_inds = P.ReduceSum()
    self.iou = P.IOU()
    self.greaterequal = P.GreaterEqual()
    self.greater = P.Greater()
    self.select = P.Select()
    self.gatherND = P.GatherNd()
    self.squeeze = P.Squeeze()
    self.cast = P.Cast()
    self.logicaland = P.LogicalAnd()
    self.less = P.Less()
    self.random_choice_with_mask_pos = P.RandomChoiceWithMask(self.num_expected_pos)
    self.random_choice_with_mask_neg = P.RandomChoiceWithMask(self.num_expected_neg)
    self.reshape = P.Reshape()
    self.equal = P.Equal()
    self.bounding_box_encode = P.BoundingBoxEncode(means=(0.0, 0.0, 0.0, 0.0),
                                                   stds=(0.1, 0.1, 0.2, 0.2))
    self.concat_axis1 = P.Concat(axis=1)
    self.logicalnot = P.LogicalNot()
    self.tile = P.Tile()

    # Check
    self.check_gt_one = Tensor(np.full((self.num_gts, 4), -1, dtype=self.dtype))
    self.check_anchor_two = Tensor(np.full((self.num_bboxes, 4), -2, dtype=self.dtype))

    # Init tensor
    self.assigned_gt_inds = Tensor(np.full(num_bboxes, -1, dtype=np.int32))
    self.assigned_gt_zeros = Tensor(np.array(np.zeros(num_bboxes), dtype=np.int32))
    self.assigned_gt_ones = Tensor(np.array(np.ones(num_bboxes), dtype=np.int32))
    self.assigned_gt_ignores = Tensor(np.full(num_bboxes, -1, dtype=np.int32))
    self.assigned_pos_ones = Tensor(np.array(np.ones(self.num_expected_pos), dtype=np.int32))
    self.gt_ignores = Tensor(np.full(self.num_gts, -1, dtype=np.int32))
    self.range_pos_size = Tensor(np.arange(self.num_expected_pos).astype(self.dtype))
    self.check_neg_mask = Tensor(np.array(np.ones(self.num_expected_neg - self.num_expected_pos),
                                          dtype=np.bool_))
    self.bboxs_neg_mask = Tensor(np.zeros((self.num_expected_neg, 4), dtype=self.dtype))
    self.labels_neg_mask = Tensor(np.array(np.zeros(self.num_expected_neg), dtype=np.uint8))

    self.reshape_shape_pos = (self.num_expected_pos, 1)
    self.reshape_shape_neg = (self.num_expected_neg, 1)

    self.scalar_zero = Tensor(0.0, dtype=self.ms_type)
    self.scalar_neg_iou_thr = Tensor(self.neg_iou_thr, dtype=self.ms_type)
    self.scalar_pos_iou_thr = Tensor(self.pos_iou_thr, dtype=self.ms_type)
    self.scalar_min_pos_iou = Tensor(self.min_pos_iou, dtype=self.ms_type)
# shape of x and y not match
('LessEqual2', {
    'block': (P.LessEqual(), {'exception': ValueError, 'error_keywords': ['LessEqual']}),
    'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32)),
                    Tensor(np.ones([3, 2]).astype(np.float32))],
    'skip': ['backward']}),
# input x is not Tensor(bool)
('LogicalNot1', {
    'block': (P.LogicalNot(), {'exception': TypeError, 'error_keywords': ['LogicalNot']}),
    'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.int32))],
    'skip': ['backward']}),
# type of x and y not match
('LogicalAnd1', {
    'block': (P.LogicalAnd(), {'exception': TypeError, 'error_keywords': ['LogicalAnd']}),
    'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)),
                    Tensor(np.ones([3, 4]).astype(np.bool_))],
    'skip': ['backward']}),
# shape of x and y not match
('LogicalAnd2', {
    'block': (P.LogicalAnd(), {'exception': ValueError, 'error_keywords': ['LogicalAnd']}),
    'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.bool_)),
                    Tensor(np.ones([3, 2]).astype(np.bool_))],
    'skip': ['backward']}),
# type of x and y not match
('LogicalOr1', {
    'block': (P.LogicalOr(), {'exception': TypeError, 'error_keywords': ['LogicalOr']}),
    'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)),
                    Tensor(np.ones([3, 4]).astype(np.bool_))],
    'skip': ['backward']}),
# shape of x and y not match
('LogicalOr2', {
def __init__(self, strategy1, strategy2):
    super().__init__()
    self.matmul = P.MatMul().shard(strategy1)
    self.equal = P.Equal().shard(strategy2)
    self.notequal = P.NotEqual().shard(strategy2)
    self.logical = P.LogicalAnd().shard(strategy2)
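# A hedged instantiation sketch for the cell above (the class name `Net` is
# assumed, and the 4-device split counts are illustrative): each strategy is a
# tuple of per-input partition tuples, consumed under semi-auto-parallel mode,
# and the product of each strategy's splits must match the device number.
strategy1 = ((2, 1), (1, 2))   # MatMul: split left input by rows, right by columns
strategy2 = ((4, 1), (4, 1))   # elementwise ops: split both inputs along axis 0
net = Net(strategy1, strategy2)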