Example No. 1
def probs_to_logits(probs, is_binary=False):
    """
    converts probabilities into logits.
        Args:
        probs (Tensor)
        is_binary (bool)
    """
    ps_clamped = clamp_probs(probs)
    if is_binary:
        return P.Log()(ps_clamped) - P.Log()(1-ps_clamped)
    return P.Log()(ps_clamped)
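
For intuition, a minimal NumPy sketch of the same transform. NumPy stands in for the MindSpore ops, and the clamping epsilon is an assumption, since clamp_probs is not shown above:

import numpy as np

def probs_to_logits_np(probs, is_binary=False, eps=1e-7):
    # stands in for clamp_probs: keep probabilities away from 0 and 1
    ps_clamped = np.clip(probs, eps, 1.0 - eps)
    if is_binary:
        # binary case: log-odds log(p / (1 - p))
        return np.log(ps_clamped) - np.log(1.0 - ps_clamped)
    return np.log(ps_clamped)

print(probs_to_logits_np(np.array([0.1, 0.5, 0.9]), is_binary=True))
# [-2.1972246  0.         2.1972246]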
Example No. 2
    def __init__(self,
                 probs=None,
                 seed=0,
                 dtype=mstype.int32,
                 name="Bernoulli"):
        """
        Constructor of Bernoulli distribution.
        """
        param = dict(locals())
        valid_dtype = mstype.int_type + mstype.uint_type
        check_type(dtype, valid_dtype, "Bernoulli")
        super(Bernoulli, self).__init__(seed, dtype, name, param)
        self.parameter_type = mstype.float32
        if probs is not None:
            self._probs = cast_to_tensor(probs, mstype.float32)
            check_prob(self.probs)
        else:
            self._probs = probs

        # ops needed for the class
        self.squeeze = P.Squeeze(0)
        self.cast = P.Cast()
        self.const = P.ScalarToArray()
        self.dtypeop = P.DType()
        self.erf = P.Erf()
        self.exp = P.Exp()
        self.floor = P.Floor()
        self.fill = P.Fill()
        self.log = P.Log()
        self.less = P.Less()
        self.shape = P.Shape()
        self.select = P.Select()
        self.sq = P.Square()
        self.sqrt = P.Sqrt()
        self.uniform = C.uniform
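
The Log op registered here is what a Bernoulli distribution typically uses for its log-pmf. As a hedged illustration (the class's actual method is not shown), the formula in NumPy:

import numpy as np

def bernoulli_log_prob(k, p):
    # log P(X = k) = k * log(p) + (1 - k) * log(1 - p), for k in {0, 1}
    return k * np.log(p) + (1 - k) * np.log1p(-p)

print(bernoulli_log_prob(np.array([0, 1]), 0.3))  # [log(0.7), log(0.3)]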
Example No. 3
    def __init__(self,
                 mean=None,
                 sd=None,
                 seed=0,
                 dtype=mstype.float32,
                 name="Normal"):
        """
        Constructor of normal distribution.
        """
        param = dict(locals())
        super(Normal, self).__init__(dtype, name, param)
        if mean is not None and sd is not None:
            self._mean_value = convert_to_batch(mean, self._broadcast_shape,
                                                dtype)
            self._sd_value = convert_to_batch(sd, self._broadcast_shape, dtype)
            check_greater_equal_zero(self._sd_value, "Standard deviation")
        else:
            self._mean_value = mean
            self._sd_value = sd

        # ops needed for the class
        self.exp = P.Exp()
        self.add = P.TensorAdd()
        self.mul = P.Mul()
        self.sq = P.Square()
        self.log = P.Log()
        self.sqrt = P.Sqrt()
        self.realdiv = P.RealDiv()
        self.expm1 = (P.Expm1() if get_context('device_target') == 'Ascend'
                      else self._expm1_by_step)
        self.normal = P.Normal(seed=seed)
        self.shape = P.Shape()
        self.zeroslike = P.ZerosLike()
        self.const = P.ScalarToArray()
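
The registered ops (sq, log, sqrt, exp) are the building blocks of the normal log-density. A NumPy sketch of that formula, offered for intuition only since the class's methods are not shown:

import numpy as np

def normal_log_prob(x, mean, sd):
    # log N(x; mean, sd) = -0.5 * ((x - mean) / sd)^2 - log(sd) - 0.5 * log(2 * pi)
    return (-0.5 * np.square((x - mean) / sd)
            - np.log(sd) - 0.5 * np.log(2 * np.pi))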
Example No. 4
 def __init__(self, num_classes, num_boxes, neg_pre_positive, batch_size):
     super(MultiBoxLoss, self).__init__()
     self.num_classes = num_classes
     self.num_boxes = num_boxes
     self.neg_pre_positive = neg_pre_positive
     self.notequal = P.NotEqual()
     self.less = P.Less()
     self.tile = P.Tile()
     self.reduce_sum = P.ReduceSum()
     self.reduce_mean = P.ReduceMean()
     self.expand_dims = P.ExpandDims()
     self.smooth_l1_loss = P.SmoothL1Loss()
     self.cross_entropy = SoftmaxCrossEntropyWithLogits()
     self.maximum = P.Maximum()
     self.minimum = P.Minimum()
     self.sort_descend = P.TopK(True)
     self.sort = P.TopK(True)
     self.gather = P.GatherNd()
     self.max = P.ReduceMax()
     self.log = P.Log()
     self.exp = P.Exp()
     self.concat = P.Concat(axis=1)
     self.reduce_sum2 = P.ReduceSum(keep_dims=True)
     self.idx = Tensor(
         np.reshape(np.arange(batch_size * num_boxes), (-1, 1)), ms.int32)
Example No. 5
    def __init__(self,
                 mean=None,
                 sd=None,
                 seed=0,
                 dtype=mstype.float32,
                 name="Normal"):
        """
        Constructor of normal distribution.
        """
        param = dict(locals())
        super(Normal, self).__init__(dtype, name, param)
        if mean is not None and sd is not None:
            self._mean_value = convert_to_batch(mean, self._broadcast_shape, dtype)
            self._sd_value = convert_to_batch(sd, self._broadcast_shape, dtype)
            check_greater_equal_zero(self._sd_value, "Standard deviation")
        else:
            self._mean_value = mean
            self._sd_value = sd
        self.seed = seed

        # ops needed for the class
        self.const = P.ScalarToArray()
        self.erf = P.Erf()
        self.exp = P.Exp()
        self.expm1 = self._expm1_by_step
        self.fill = P.Fill()
        self.log = P.Log()
        self.shape = P.Shape()
        self.sq = P.Square()
        self.sqrt = P.Sqrt()
        self.zeroslike = P.ZerosLike()
Example No. 6
    def __init__(self,
                 probs=None,
                 seed=0,
                 dtype=mstype.int32,
                 name="Geometric"):
        """
        Constructor of Geometric distribution.
        """
        param = dict(locals())
        super(Geometric, self).__init__(dtype, name, param)
        if probs is not None:
            self._probs = cast_to_tensor(probs, dtype=mstype.float32)
            check_prob(self._probs)
        else:
            self._probs = probs

        self.minval = np.finfo(np.float64).tiny

        # ops needed for the class
        self.const = P.ScalarToArray()
        self.dtypeop = P.DType()
        self.fill = P.Fill()
        self.floor = P.Floor()
        self.issubclass = P.IsSubClass()
        self.less = P.Less()
        self.log = P.Log()
        self.pow = P.Pow()
        self.select = P.Select()
        self.shape = P.Shape()
        self.sq = P.Square()
        self.sqrt = P.Sqrt()
        self.uniform = P.UniformReal(seed=seed)
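
The op set here (uniform, log, floor) matches the standard inverse-CDF sampler for the geometric distribution, and pow/log cover the pmf. A hedged NumPy sketch of both, under the convention that the distribution counts failures before the first success:

import numpy as np

def geometric_log_prob(k, p):
    # log P(X = k) = k * log(1 - p) + log(p)
    return k * np.log1p(-p) + np.log(p)

def geometric_sample(p, shape, minval=np.finfo(np.float64).tiny):
    # inverse-CDF: floor(log(U) / log(1 - p)), with U kept away from 0
    u = np.random.uniform(low=minval, high=1.0, size=shape)
    return np.floor(np.log(u) / np.log1p(-p))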
Example No. 7
 def __init__(self, num_sampled, num_classes, num_true=1,
              sampled_values=None, remove_accidental_hits=True, seed=0,
              reduction='none'):
     super(SampledSoftmaxLoss, self).__init__()
     self.num_sampled = num_sampled
     self.num_classes = num_classes
     self.num_true = num_true
     self.sampled_values = sampled_values
     self.remove_accidental_hits = remove_accidental_hits
     self.seed = seed
     self.sampler = P.UniformSampler(
         num_true,
         num_sampled,
         True,
         num_classes,
         seed,
         remove_accidental_hits)
     self.cast = P.Cast()
     self.reshape = P.Reshape()
     self.shape = P.Shape()
     self.exp = P.Exp()
     self.log = P.Log()
     self.slice_op = P.Slice()
     self.matmul = P.MatMul(False, True)
     self.gather_v2 = P.GatherV2()
     self.reduce_max_true = P.ReduceMax(True)
     self.reduce_sum = P.ReduceSum()
     self.reduce_sum_true = P.ReduceSum(True)
     self.concat_dim0 = P.Concat(0)
     self.concat_dim1 = P.Concat(1)
     self.ones_like = P.OnesLike()
     self.zeros_like = P.ZerosLike()
     self.mul = P.Mul()
     self.expand_dims = P.ExpandDims()
Example No. 8
    def __init__(self,
                 rate=None,
                 seed=0,
                 dtype=mstype.float32,
                 name="Exponential"):
        """
        Constructor of Exponential distribution.
        """
        param = dict(locals())
        valid_dtype = mstype.float_type
        check_type(dtype, valid_dtype, "Exponential")
        super(Exponential, self).__init__(seed, dtype, name, param)
        if rate is not None:
            self._rate = cast_to_tensor(rate, dtype)
            check_greater_zero(self._rate, "rate")
        else:
            self._rate = rate

        self.minval = np.finfo(np.float64).tiny

        # ops needed for the class
        self.cast = P.Cast()
        self.const = P.ScalarToArray()
        self.dtypeop = P.DType()
        self.exp = P.Exp()
        self.fill = P.Fill()
        self.less = P.Less()
        self.log = P.Log()
        self.select = P.Select()
        self.shape = P.Shape()
        self.sqrt = P.Sqrt()
        self.sq = P.Square()
        self.uniform = C.uniform
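
minval = np.finfo(...).tiny hints at inverse-CDF sampling: the uniform draw is kept away from zero so that log never sees 0. A NumPy sketch of that sampler, as an assumption about how the class uses these ops:

import numpy as np

def exponential_sample(rate, shape, minval=np.finfo(np.float64).tiny):
    # inverse CDF of Exp(rate): x = -log(U) / rate, with U in (0, 1]
    u = np.random.uniform(low=minval, high=1.0, size=shape)
    return -np.log(u) / rate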
Example No. 9
 def __init__(self, sparse=False, stra_list=None):
     super(SoftmaxCrossEntropyExpand, self).__init__()
     if stra_list is None:
         stra_list = []
     if len(stra_list) < 11:
         stra_list = [None] * 11
     self.exp = P.Exp()
     self.reduce_sum = P.ReduceSum(keep_dims=True).set_strategy(
         strategy=stra_list[1])
     self.onehot = P.OneHot().set_strategy(strategy=stra_list[2])
     self.on_value = Tensor(1.0, mstype.float32)
     self.off_value = Tensor(0.0, mstype.float32)
     self.div = P.Div().set_strategy(strategy=stra_list[3])
     self.log = P.Log().set_strategy(strategy=stra_list[4])
     self.sum_cross_entropy = P.ReduceSum(keep_dims=False).set_strategy(
         strategy=stra_list[5])
     self.mul = P.Mul().set_strategy(strategy=stra_list[6])
     self.mul2 = P.Mul().set_strategy(strategy=stra_list[7])
     self.cast = P.Cast()
     self.reduce_mean = P.ReduceMean(keep_dims=False).set_strategy(
         strategy=stra_list[8])
     self.sparse = sparse
     self.reduce_max = P.ReduceMax(keep_dims=True).set_strategy(
         strategy=stra_list[9])
     self.sub = P.Sub().set_strategy(strategy=stra_list[10])
Example No. 10
    def __init__(self, tag_to_index, batch_size=1, seq_length=128, is_training=True):
        super(CRF, self).__init__()
        self.target_size = len(tag_to_index)
        self.is_training = is_training
        self.tag_to_index = tag_to_index
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.START_TAG = "<START>"
        self.STOP_TAG = "<STOP>"
        self.START_VALUE = Tensor(self.target_size-2, dtype=mstype.int32)
        self.STOP_VALUE = Tensor(self.target_size-1, dtype=mstype.int32)
        transitions = np.random.normal(size=(self.target_size, self.target_size)).astype(np.float32)
        transitions[tag_to_index[self.START_TAG], :] = -10000
        transitions[:, tag_to_index[self.STOP_TAG]] = -10000
        self.transitions = Parameter(Tensor(transitions), name="transition_matrix")
        self.cat = P.Concat(axis=-1)
        self.argmax = P.ArgMaxWithValue(axis=-1)
        self.log = P.Log()
        self.exp = P.Exp()
        self.sum = P.ReduceSum()
        self.tile = P.Tile()
        self.reduce_sum = P.ReduceSum(keep_dims=True)
        self.reshape = P.Reshape()
        self.expand = P.ExpandDims()
        self.mean = P.ReduceMean()
        init_alphas = np.ones(shape=(self.batch_size, self.target_size)) * -10000.0
        init_alphas[:, self.tag_to_index[self.START_TAG]] = 0.
        self.init_alphas = Tensor(init_alphas, dtype=mstype.float32)
        self.cast = P.Cast()
        self.reduce_max = P.ReduceMax(keep_dims=True)
        self.on_value = Tensor(1.0, dtype=mstype.float32)
        self.off_value = Tensor(0.0, dtype=mstype.float32)
        self.onehot = P.OneHot()
Example No. 11
    def __init__(self,
                 config,
                 roi_layer,
                 out_channels,
                 featmap_strides,
                 batch_size=1,
                 finest_scale=56,
                 mask=False):
        super(SingleRoIExtractor, self).__init__()
        cfg = config
        self.train_batch_size = batch_size
        self.out_channels = out_channels
        self.featmap_strides = featmap_strides
        self.num_levels = len(self.featmap_strides)
        self.out_size = roi_layer['mask_out_size'] if mask else roi_layer['out_size']
        self.mask = mask
        self.sample_num = roi_layer['sample_num']
        self.roi_layers = self.build_roi_layers(self.featmap_strides)
        self.roi_layers = L.CellList(self.roi_layers)

        self.sqrt = P.Sqrt()
        self.log = P.Log()
        self.finest_scale_ = finest_scale
        self.clamp = C.clip_by_value

        self.cast = P.Cast()
        self.equal = P.Equal()
        self.select = P.Select()

        _mode_16 = False
        self.dtype = np.float16 if _mode_16 else np.float32
        self.ms_dtype = mstype.float16 if _mode_16 else mstype.float32
        self.set_train_local(cfg, training=True)
Example No. 12
def log_generic(input_x):
    """
    Log op on Ascend is calculated as log(abs(x)).
    Fix this with putting negative values as nan.
    And log op on Ascend doesn't supprot int types.
    Fix this with casting the type.
    """
    log = P.Log()
    less = P.Less()
    lessequal = P.LessEqual()
    fill = P.Fill()
    cast = P.Cast()
    dtype = P.DType()
    shape = P.Shape()
    select = P.Select()
    checktype = P.IsSubClass()

    if not checktype(dtype(input_x), mstype.float_):
        input_x = cast(input_x, mstype.float32)
    nan = fill(dtype(input_x), shape(input_x), np.nan)
    inf = fill(dtype(input_x), shape(input_x), np.inf)
    neg_x = less(input_x, 0.0)
    nonpos_x = lessequal(input_x, 0.0)
    log_x = log(input_x)
    result = select(nonpos_x, -inf, log_x)
    return select(neg_x, nan, result)
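
A NumPy sketch of the same selection logic, useful for checking the intended behavior (nan for negative inputs, -inf at zero):

import numpy as np

def log_generic_np(x):
    x = np.asarray(x, dtype=np.float32)
    with np.errstate(divide='ignore', invalid='ignore'):
        log_x = np.log(np.abs(x))  # mimics Ascend's log(abs(x))
    result = np.where(x <= 0.0, -np.inf, log_x)  # x == 0 -> -inf
    return np.where(x < 0.0, np.nan, result)     # x < 0  -> nan

print(log_generic_np([-1.0, 0.0, np.e]))  # [nan, -inf, 1.0]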
Example No. 13
    def __init__(self, config):
        super(CrossEntropyLoss, self).__init__()
        self.mean = P.ReduceMean()
        self.sum = P.ReduceSum().shard(((config.dp, config.mp),))
        self.onehot = P.OneHot().shard(((config.dp, config.mp), (), ()))
        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
        self.vocab_size = config.vocab_size
        self.max = P.ArgMaxWithValue(axis=-1, keep_dims=True).shard(
            ((config.dp, config.mp),))
        self.eps_const = Tensor(1e-24, mstype.float32)
        self.sub = P.Sub().shard(((config.dp, config.mp), (config.dp, 1)))
        self.exp = P.Exp().shard(((config.dp, config.mp),))
        self.div = P.RealDiv().shard(((config.dp, config.mp), (config.dp, 1)))
        self.log = P.Log().shard(((config.dp, config.mp),))
        self.add = P.TensorAdd().shard(((config.dp, config.mp), ()))
        self.mul = P.Mul().shard(
            ((config.dp, config.mp), (config.dp, config.mp)))
        self.neg = P.Neg().shard(((config.dp, config.mp),))
        self.sum2 = P.ReduceSum().shard(((1,),))

        self.mul2 = P.Mul().shard(((1,), (1,)))
        self.add2 = P.TensorAdd()
        self.div2 = P.RealDiv()
        if config.stage_num > 1:
            self.div2.add_prim_attr("end", 0)
Example No. 14
    def __init__(self, params, learning_rate, momentum, matrix_A, matrix_G, A_inv_max, G_inv_max, weight_decay=0.0,
                 loss_scale=1.0,
                 decay_filter=lambda x: x.name not in []):
        super(THOR, self).__init__(learning_rate, params, weight_decay, loss_scale)
        if isinstance(momentum, float) and momentum < 0.0:
            raise ValueError("momentum should be at least 0.0, but got momentum {}".format(momentum))
        self.momentum = Parameter(Tensor(momentum, mstype.float32))
        self.params = self.parameters
        self.moments = self.params.clone(prefix="moments", init='zeros')
        self.hyper_map = C.HyperMap()
        self.opt = P.ApplyMomentum()
        self.matrix_A = ParameterTuple(matrix_A)
        self.matrix_G = ParameterTuple(matrix_G)
        self.A_inv_max = ParameterTuple(A_inv_max)
        self.G_inv_max = ParameterTuple(G_inv_max)
        self.cube_matmul_left = P.CusMatMulCubeFraczLeftCast()
        self.cube_matmul_left_fc = P.CusMatMulCubeDenseLeft()
        self.cube_matmul_right_fc = P.CusMatMulCubeDenseRight()
        self.cube_matmul_right_mul = P.CusMatMulCubeFraczRightMul()
        self.transpose = P.Transpose()
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.mul = P.Mul()
        self.weight_idx = []
        for i in range(len(self.params)):
            if "conv" in self.params[i].name or "end_point" in self.params[i].name:
                self.weight_idx.append(i)
        self.weight_idx.append(len(self.params))
        self.feature_map = [1.0 / 12544, 1.0 / 3136, 1.0 / 3136, 1.0 / 3136, 1.0 / 3136, 1.0 / 3136, 1.0 / 3136,
                            1.0 / 3136, 1.0 / 3136, 1.0 / 3136, 1.0 / 3136, 1.0 / 3136,
                            1.0 / 784, 1.0 / 784, 1.0 / 784, 1.0 / 784, 1.0 / 784, 1.0 / 784, 1.0 / 784, 1.0 / 784,
                            1.0 / 784, 1.0 / 784, 1.0 / 784, 1.0 / 784, 1.0 / 784,
                            1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196,
                            1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196,
                            1.0 / 196, 1.0 / 196, 1.0 / 196,
                            1.0 / 49, 1.0 / 49, 1.0 / 49, 1.0 / 49, 1.0 / 49, 1.0 / 49, 1.0 / 49, 1.0 / 49, 1.0 / 49,
                            1.0]
        mean = _get_gradients_mean()
        degree = _get_device_num()
        parameter_length = len(self.feature_map)
        self.grad_reducer_Amax = DistributedGradReducerThor(parameter_length, ((27,), 2), mean, degree)
        self.grad_reducer_Gmax = DistributedGradReducerThor(parameter_length, ((27,), 4), mean, degree)
        self.grad_reducer_A = DistributedGradReducerThor(parameter_length, ((27,), 6), mean, degree)
        self.grad_reducer_G = DistributedGradReducerThor(parameter_length, ((27,), 8), mean, degree)
        self.matrix_A_inv = ()
        self.matrix_G_inv = ()
        self.matrix_max_inv = ()

        for i in range(54):
            self.matrix_max_inv = self.matrix_max_inv + (
                Parameter(initializer(1, [1], mstype.float32), name="matrix_max" + str(i), requires_grad=False),)
        self.log = P.Log()
        self.exp = P.Exp()
        self.sqrt = P.Sqrt()
        self.matrix_max_inv = ParameterTuple(self.matrix_max_inv)
        self.assign = P.Assign()
        self.cast = P.Cast()
        self.thor = True
        self.weight_decay = weight_decay * loss_scale
        self.decay_flags = tuple(decay_filter(x) for x in self.parameters)
Example No. 15
    def __init__(self,
                 probs=None,
                 seed=0,
                 dtype=mstype.int32,
                 name="Bernoulli"):
        """
        Constructor of Bernoulli distribution.
        """
        param = dict(locals())
        super(Bernoulli, self).__init__(dtype, name, param)
        if probs is not None:
            self._probs = cast_to_tensor(probs, dtype=mstype.float32)
            check_prob(self.probs)
        else:
            self._probs = probs
        self.seed = seed

        # ops needed for the class
        self.cast = P.Cast()
        self.const = P.ScalarToArray()
        self.dtypeop = P.DType()
        self.erf = P.Erf()
        self.fill = P.Fill()
        self.log = P.Log()
        self.less = P.Less()
        self.shape = P.Shape()
        self.select = P.Select()
        self.sq = P.Square()
        self.sqrt = P.Sqrt()
        self.uniform = P.UniformReal(seed=seed)
Example No. 16
    def __init__(self):
        super(IGamma, self).__init__()
        # const numbers
        # If more data types are supported, this float max value needs to be selected accordingly.
        self.log_maxfloat32 = Tensor(np.log(np.finfo(np.float32).max),
                                     mstype.float32)

        # operations
        self.logicaland = P.LogicalAnd()
        self.logicalor = P.LogicalOr()
        self.logicalnot = P.LogicalNot()
        self.equal = P.Equal()
        self.greater = P.Greater()
        self.less = P.Less()
        self.neg = P.Neg()
        self.log = P.Log()
        self.exp = P.Exp()
        self.select = P.Select()
        self.zeroslike = P.ZerosLike()
        self.fill = P.Fill()
        self.shape = P.Shape()
        self.dtype = P.DType()
        self.lgamma = LGamma()
        self.const = P.ScalarToArray()
        self.cast = P.Cast()
Example No. 17
 def __init__(self):
     super(LogSigmoid, self).__init__()
     self.mul = P.Mul()
     self.exp = P.Exp()
     self.add = P.TensorAdd()
     self.rec = P.Reciprocal()
     self.log = P.Log()
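
This op set (Mul, Exp, TensorAdd, Reciprocal, Log) is exactly what the naive log-sigmoid needs; the construct is not shown, so here is a hedged NumPy sketch, with a note on the numerically stable alternative:

import numpy as np

def log_sigmoid(x):
    # log(1 / (1 + exp(-x))); exp(-x) overflows for very negative x,
    # so the stable form -np.logaddexp(0.0, -x) is preferable in practice
    return np.log(1.0 / (1.0 + np.exp(-x)))

print(log_sigmoid(np.array([0.0])))  # [-0.6931...] = log(0.5)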
Example No. 18
    def __init__(self,
                 probs=None,
                 seed=0,
                 dtype=mstype.int32,
                 name="Bernoulli"):
        """
        Constructor of Bernoulli distribution.
        """
        param = dict(locals())
        super(Bernoulli, self).__init__(dtype, name, param)
        if probs is not None:
            self._probs = cast_to_tensor(probs)
            check_prob(self._probs)
        else:
            self._probs = probs

        # ops needed for the class
        self.log = P.Log()
        self.add = P.TensorAdd()
        self.mul = P.Mul()
        self.sqrt = P.Sqrt()
        self.realdiv = P.RealDiv()
        self.shape = P.Shape()
        self.const = P.ScalarToArray()
        self.less = P.Less()
        self.cast = P.Cast()
        self.normal = P.Normal(seed=seed)
        self.erf = P.Erf()
Example No. 19
 def __init__(self):
     super(BoundingBoxEncode, self).__init__()
     self.split = P.Split(axis=1, output_num=4)
     self.ones = 1.0
     self.half = 0.5
     self.log = P.Log()
     self.concat = P.Concat(axis=1)
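
The Split/Log/Concat ops suggest the standard box-delta encoding, where the width and height ratios go through a log. The exact formula of this class is an assumption; a NumPy sketch of the usual encoding:

import numpy as np

def encode_boxes(anchors, gt):
    # boxes as (x1, y1, x2, y2) columns; deltas as (dx, dy, dw, dh)
    aw, ah = anchors[:, 2] - anchors[:, 0], anchors[:, 3] - anchors[:, 1]
    ax, ay = anchors[:, 0] + 0.5 * aw, anchors[:, 1] + 0.5 * ah
    gw, gh = gt[:, 2] - gt[:, 0], gt[:, 3] - gt[:, 1]
    gx, gy = gt[:, 0] + 0.5 * gw, gt[:, 1] + 0.5 * gh
    dx, dy = (gx - ax) / aw, (gy - ay) / ah
    dw, dh = np.log(gw / aw), np.log(gh / ah)
    return np.stack([dx, dy, dw, dh], axis=1)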
Example No. 20
    def __init__(self,
                 low=None,
                 high=None,
                 seed=0,
                 dtype=mstype.float32,
                 name="Uniform"):
        """
        Constructor of Uniform distribution.
        """
        param = dict(locals())
        super(Uniform, self).__init__(dtype, name, param)
        if low is not None and high is not None:
            self._low = convert_to_batch(low, self._broadcast_shape, dtype)
            self._high = convert_to_batch(high, self._broadcast_shape, dtype)
            check_greater(self.low, self.high, "low value", "high value")
        else:
            self._low = low
            self._high = high

        # ops needed for the class
        self.const = P.ScalarToArray()
        self.dtypeop = P.DType()
        self.exp = P.Exp()
        self.fill = P.Fill()
        self.less = P.Less()
        self.lessequal = P.LessEqual()
        self.log = P.Log()
        self.logicaland = P.LogicalAnd()
        self.select = P.Select()
        self.shape = P.Shape()
        self.sq = P.Square()
        self.sqrt = P.Sqrt()
        self.uniform = P.UniformReal(seed=seed)
        self.zeroslike = P.ZerosLike()
Example No. 21
 def __init__(self, axis, keep_dims=False):
     super(ReduceLogSumExp, self).__init__()
     validator.check_value_type('axis', axis, [int, list, tuple], self.cls_name)
     validator.check_value_type('keep_dims', keep_dims, [bool], self.cls_name)
     self.axis = axis
     self.exp = P.Exp()
     self.sum = P.ReduceSum(keep_dims)
     self.log = P.Log()
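
Given only Exp, ReduceSum, and Log, the construct is presumably the direct composition below (a sketch, not the class's actual code); subtracting the max first is the usual trick to avoid overflow:

import numpy as np

def reduce_logsumexp(x, axis, keep_dims=False):
    # naive form: log(sum(exp(x))); overflows once x is large
    return np.log(np.sum(np.exp(x), axis=axis, keepdims=keep_dims))

def reduce_logsumexp_stable(x, axis, keep_dims=False):
    # shift by the max so exp never overflows
    m = np.max(x, axis=axis, keepdims=True)
    out = np.log(np.sum(np.exp(x - m), axis=axis, keepdims=True)) + m
    return out if keep_dims else np.squeeze(out, axis=axis)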
Example No. 22
    def construct(self, prediction, pred_xy, pred_wh, y_true, gt_box, input_shape):
        """
        prediction : origin output from yolo
        pred_xy: (sigmoid(xy)+grid)/grid_size
        pred_wh: (exp(wh)*anchors)/input_shape
        y_true : after normalize
        gt_box: [batch, maxboxes, xyhw] after normalize
        """
        object_mask = y_true[:, :, :, :, 4:5]
        class_probs = y_true[:, :, :, :, 5:]
        true_boxes = y_true[:, :, :, :, :4]

        grid_shape = P.Shape()(prediction)[1:3]
        grid_shape = P.Cast()(F.tuple_to_array(grid_shape[::-1]), ms.float32)

        pred_boxes = self.concat((pred_xy, pred_wh))
        true_wh = y_true[:, :, :, :, 2:4]
        true_wh = P.Select()(P.Equal()(true_wh, 0.0),
                             P.Fill()(P.DType()(true_wh),
                                      P.Shape()(true_wh), 1.0),
                             true_wh)
        true_wh = P.Log()(true_wh / self.anchors * input_shape)
        # box_loss_scale = 2 - w*h: larger boxes get a smaller weight, since small objects need more precise regression
        box_loss_scale = 2 - y_true[:, :, :, :, 2:3] * y_true[:, :, :, :, 3:4]

        gt_shape = P.Shape()(gt_box)
        gt_box = P.Reshape()(gt_box, (gt_shape[0], 1, 1, 1, gt_shape[1], gt_shape[2]))

        # add one more dimension for broadcast
        iou = self.iou(P.ExpandDims()(pred_boxes, -2), gt_box)
        # gt_box is x,y,h,w after normalize
        # [batch, grid[0], grid[1], num_anchor, num_gt]
        best_iou = self.reduce_max(iou, -1)
        # [batch, grid[0], grid[1], num_anchor]

        # ignore predictions whose best IOU with any ground-truth box is too small
        ignore_mask = best_iou < self.ignore_threshold
        ignore_mask = P.Cast()(ignore_mask, ms.float32)
        ignore_mask = P.ExpandDims()(ignore_mask, -1)
        # backpropagating through ignore_mask would spend a lot of time in
        # MaximumGrad and MinimumGrad, so we turn off its gradient
        ignore_mask = F.stop_gradient(ignore_mask)

        confidence_loss = self.confidence_loss(object_mask, prediction[:, :, :, :, 4:5], ignore_mask)
        class_loss = self.class_loss(object_mask, prediction[:, :, :, :, 5:], class_probs)

        object_mask_me = P.Reshape()(object_mask, (-1, 1))  # [8, 72, 72, 3, 1]
        box_loss_scale_me = P.Reshape()(box_loss_scale, (-1, 1))
        pred_boxes_me = xywh2x1y1x2y2(pred_boxes)
        pred_boxes_me = P.Reshape()(pred_boxes_me, (-1, 4))
        true_boxes_me = xywh2x1y1x2y2(true_boxes)
        true_boxes_me = P.Reshape()(true_boxes_me, (-1, 4))
        ciou = self.giou(pred_boxes_me, true_boxes_me)
        ciou_loss = object_mask_me * box_loss_scale_me * (1 - ciou)
        ciou_loss_me = self.reduce_sum(ciou_loss, ())
        loss = ciou_loss_me * 10 + confidence_loss + class_loss
        batch_size = P.Shape()(prediction)[0]
        return loss / batch_size
Example No. 23
    def __init__(self,
                 num_sampled,
                 num_classes,
                 num_true=1,
                 sampled_values=None,
                 remove_accidental_hits=True,
                 seed=0,
                 reduction='none'):
        super(SampledSoftmaxLoss, self).__init__(reduction)

        if num_true < 1:
            raise ValueError(f"num_true {num_true} is less than 1.")
        if seed < 0:
            raise ValueError(f"seed {seed} is less than 0.")
        if num_sampled > num_classes:
            raise ValueError(
                f"num_sampled {num_sampled} is greater than num_classes {num_classes}."
            )
        if num_true > num_classes:
            raise ValueError(
                f"num_true {num_true} is greater than num_classes {num_classes}."
            )
        if sampled_values is not None:
            if not isinstance(sampled_values, (list, tuple)):
                raise TypeError(
                    f"sampled_values {sampled_values} is not a list or tuple.")
            if len(sampled_values) != 3:
                raise ValueError(
                    f"sampled_values size {len(sampled_values)} is not 3.")

        self.num_sampled = num_sampled
        self.num_classes = num_classes
        self.num_true = num_true
        self.sampled_values = sampled_values
        self.remove_accidental_hits = remove_accidental_hits
        self.seed = seed
        self.sampler = P.LogUniformCandidateSampler(num_true, num_sampled,
                                                    True, num_classes, seed)
        self.cast = P.Cast()
        self.reshape = P.Reshape()
        self.shape = P.Shape()
        self.exp = P.Exp()
        self.log = P.Log()
        self.slice_op = P.Slice()
        self.matmul = P.MatMul(False, True)
        self.gather_v2 = P.Gather()
        self.reduce_max_true = P.ReduceMax(True)
        self.reduce_sum = P.ReduceSum()
        self.reduce_sum_true = P.ReduceSum(True)
        self.concat_dim0 = P.Concat(0)
        self.concat_dim1 = P.Concat(1)
        self.ones_like = P.OnesLike()
        self.zeros_like = P.ZerosLike()
        self.mul = P.Mul()
        self.expand_dims = P.ExpandDims()
        self.dtype = P.DType()
        self.compute_accidental_hits = P.ComputeAccidentalHits(num_true)
        self.scatter_nd = P.ScatterNd()
Example No. 24
 def __init__(self, power=0, name='PowerTransform', param=None):
     param = dict(locals()) if param is None else param
     super(PowerTransform, self).__init__(name=name, param=param)
     validator.check_value_type('power', power, [int, float], self.name)
     self._power = power
     self.pow = P.Pow()
     self.exp = P.Exp()
     self.log = P.Log()
     self.log1p = self._log1p_by_step
     self.expm1 = self._expm1_by_step
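
For context, a hedged sketch of what a power transform of this shape usually computes. This is an assumption about the bijector's forward map, which is not shown: Y = exp(X) when power == 0, and Y = (1 + X * power)^(1 / power) otherwise.

import numpy as np

def power_transform_forward(x, power=0):
    # assumed forward map; the power == 0 branch is the exp limit
    if power == 0:
        return np.exp(x)
    return np.power(1.0 + x * power, 1.0 / power)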
Example No. 25
    def construct(self, img1, img2):
        max_val = _convert_img_dtype_to_float32(self.max_val, self.max_val)
        img1 = _convert_img_dtype_to_float32(img1, self.max_val)
        img2 = _convert_img_dtype_to_float32(img2, self.max_val)

        mse = P.ReduceMean()(F.square(img1 - img2), (-3, -2, -1))
        # 10*log_10(max_val^2/MSE)
        psnr = 10 * P.Log()(F.square(max_val) / mse) / F.scalar_log(10.0)

        return psnr
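
The same computation in NumPy, as a quick sanity check of the formula 10 * log10(max_val^2 / MSE):

import numpy as np

def psnr(img1, img2, max_val=1.0):
    # MSE over the last three axes (the image dimensions here)
    mse = np.mean(np.square(img1 - img2), axis=(-3, -2, -1))
    return 10.0 * np.log10(np.square(max_val) / mse)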
Example No. 26
 def __init__(self, weight_angle=10):
     super(LossFunc, self).__init__()
     self.split = P.Split(1, 5)
     self.min = P.Minimum()
     self.log = P.Log()
     self.cos = P.Cos()
     self.mean = P.ReduceMean()
     #self.flatten = P.Flatten()
     self.sum = P.ReduceSum()
     self.weight_angle = weight_angle
     self.max = P.Maximum()
     self.print = P.Print()
Example No. 27
    def __init__(self):
        super(CrossEntropy, self).__init__()

        self.exp = P.Exp()
        self.sum = P.ReduceSum()
        self.reshape = P.Reshape()
        self.log = P.Log()
        self.cast = P.Cast()
        self.eps_const = Tensor(eps, dtype=mstype.float32)
        self.onehot = P.OneHot()
        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
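
The registered ops and eps_const suggest a hand-rolled softmax cross-entropy of the form -sum(onehot(label) * log(softmax(logit) + eps)). The construct is not shown, so this NumPy sketch is an assumption; the eps value is borrowed from the similar class in Example No. 13:

import numpy as np

def cross_entropy(logits, labels, num_classes, eps=1e-24):
    # softmax along the class axis, shifted by the max for stability
    e = np.exp(logits - np.max(logits, axis=-1, keepdims=True))
    softmax = e / np.sum(e, axis=-1, keepdims=True)
    onehot = np.eye(num_classes)[labels]
    # batch mean of -sum(onehot * log(p + eps))
    return np.mean(-np.sum(onehot * np.log(softmax + eps), axis=-1))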
Example No. 28
    def construct(self, img1, img2):
        _check_input_4d(F.shape(img1), "img1", self.cls_name)
        _check_input_4d(F.shape(img2), "img2", self.cls_name)
        P.SameTypeShape()(img1, img2)
        max_val = _convert_img_dtype_to_float32(self.max_val, self.max_val)
        img1 = _convert_img_dtype_to_float32(img1, self.max_val)
        img2 = _convert_img_dtype_to_float32(img2, self.max_val)

        mse = P.ReduceMean()(F.square(img1 - img2), (-3, -2, -1))
        psnr = 10 * P.Log()(F.square(max_val) / mse) / F.scalar_log(10.0)

        return psnr
Example No. 29
 def __init__(self, temp=0.1):
     super(LossNet, self).__init__()
     self.concat = P.Concat()
     self.exp = P.Exp()
     self.t = P.Transpose()
     self.diag_part = P.DiagPart()
     self.matmul = P.MatMul()
     self.sum = P.ReduceSum()
     self.sum_keep_dim = P.ReduceSum(keep_dims=True)
     self.log = P.Log()
     self.mean = P.ReduceMean()
     self.shape = P.Shape()
     self.eye = P.Eye()
     self.temp = temp
Example No. 30
    def construct(self, grid, prediction, pred_xy, pred_wh, y_true, gt_box):
        object_mask = y_true[:, :, :, :, 4:5]
        class_probs = y_true[:, :, :, :, 5:]

        grid_shape = P.Shape()(prediction)[1:3]
        grid_shape = P.Cast()(F.tuple_to_array(grid_shape[::-1]), ms.float32)

        pred_boxes = self.concat((pred_xy, pred_wh))
        true_xy = y_true[:, :, :, :, :2] * grid_shape - grid
        true_wh = y_true[:, :, :, :, 2:4]
        true_wh = P.Select()(P.Equal()(true_wh, 0.0),
                             P.Fill()(P.DType()(true_wh),
                                      P.Shape()(true_wh), 1.0),
                             true_wh)
        true_wh = P.Log()(true_wh / self.anchors * self.input_shape)
        box_loss_scale = 2 - y_true[:, :, :, :, 2:3] * y_true[:, :, :, :, 3:4]

        gt_shape = P.Shape()(gt_box)
        gt_box = P.Reshape()(gt_box,
                             (gt_shape[0], 1, 1, 1, gt_shape[1], gt_shape[2]))

        # [batch, grid[0], grid[1], num_anchor, num_gt]
        iou = self.iou(P.ExpandDims()(pred_boxes, -2), gt_box)
        # [batch, grid[0], grid[1], num_anchor]
        best_iou = self.reduce_max(iou, -1)
        ignore_mask = best_iou < self.ignore_threshold
        ignore_mask = P.Cast()(ignore_mask, ms.float32)
        ignore_mask = P.ExpandDims()(ignore_mask, -1)
        ignore_mask = F.stop_gradient(ignore_mask)

        xy_loss = object_mask * box_loss_scale * self.cross_entropy(
            prediction[:, :, :, :, :2], true_xy)
        wh_loss = object_mask * box_loss_scale * 0.5 * P.Square()(
            true_wh - prediction[:, :, :, :, 2:4])
        confidence_loss = self.cross_entropy(prediction[:, :, :, :, 4:5],
                                             object_mask)
        confidence_loss = object_mask * confidence_loss + (
            1 - object_mask) * confidence_loss * ignore_mask
        class_loss = object_mask * self.cross_entropy(
            prediction[:, :, :, :, 5:], class_probs)

        # reduce each loss term over all dimensions
        xy_loss = self.reduce_sum(xy_loss, ())
        wh_loss = self.reduce_sum(wh_loss, ())
        confidence_loss = self.reduce_sum(confidence_loss, ())
        class_loss = self.reduce_sum(class_loss, ())

        loss = xy_loss + wh_loss + confidence_loss + class_loss
        return loss / P.Shape()(prediction)[0]