Example #1
def erf_generic(x):
    select = P.Select()
    less = P.Less()
    abs_cal = P.Abs()

    return select(less(abs_cal(x), 1), erf_f32_generic(x),
                  1 - erfc_f32_generic(x))
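For |x| < 1 this snippet takes the series branch (erf_f32_generic) and otherwise computes 1 - erfc; P.Select then keeps the right branch per element. A minimal sketch of that same Abs/Less/Select branching pattern, with hypothetical stand-in branches instead of the helper functions above (assumes a standard MindSpore install and PyNative/eager execution):

# Hedged sketch of the element-wise branch selection used by erf_generic.
# The two branches (x * 2.0, x * 0.5) are placeholders, not the real erf helpers.
import numpy as np
import mindspore as ms
from mindspore import Tensor
from mindspore.ops import operations as P

def select_by_magnitude(x):
    select = P.Select()
    less = P.Less()
    abs_cal = P.Abs()
    # Both branches are computed; Select keeps the first where |x| < 1, the second elsewhere.
    return select(less(abs_cal(x), 1.0), x * 2.0, x * 0.5)

x = Tensor(np.array([-2.0, -0.5, 0.5, 2.0]), ms.float32)
print(select_by_magnitude(x))  # [-1. -1.  1.  1.]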
Example #2
    def __init__(self,
                 scale=1.0,
                 shift=0.0,
                 name='ScalarAffine'):
        """
        Constructor of ScalarAffine Bijector.
        """
        param = dict(locals())
        validator.check_value_type(
            'scale', scale, [int, float], type(self).__name__)
        validator.check_value_type(
            'shift', shift, [int, float], type(self).__name__)
        super(ScalarAffine, self).__init__(
            is_constant_jacobian=True,
            is_injective=True,
            name=name,
            dtype=None,
            param=param)

        self._scale = cast_to_tensor(scale)
        self._shift = cast_to_tensor(shift)

        self.abs = P.Abs()
        self.oneslike = P.OnesLike()
        self.dtypeop = P.DType()
        self.cast = P.Cast()
        self.log = log_generic
Example #3
def erfc_generic(x):
    select = P.Select()
    greater = P.Greater()
    abs_cal = P.Abs()

    return select(greater(abs_cal(x), 1), erfc_f32_generic(x),
                  1 - erf_f32_generic(x))
Example #4
    def __init__(self, sharpness=1.0, name='Softplus'):
        """
        Constructor of Softplus Bijector.
        """
        param = dict(locals())
        param['param_dict'] = {'sharpness': sharpness}
        super(Softplus, self).__init__(name=name, dtype=None, param=param)
        self._sharpness = self._add_parameter(sharpness, 'sharpness')

        self.exp = exp_generic
        self.log = log_generic
        self.expm1 = P.Expm1()
        self.abs = P.Abs()
        self.dtypeop = P.DType()
        self.cast = P.Cast()
        self.fill = P.Fill()
        self.greater = P.Greater()
        self.less = P.Less()
        self.log_sigmoid = LogSigmoid()
        self.logicalor = P.LogicalOr()
        self.select = P.Select()
        self.shape = P.Shape()
        self.sigmoid = P.Sigmoid()
        self.softplus = self._softplus
        self.inverse_softplus = self._inverse_softplus

        self.threshold = np.log(np.finfo(np.float32).eps) + 1
        self.tiny = np.exp(self.threshold)
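Here threshold = log(float32 eps) + 1 (about -15.9) and tiny = exp(threshold); beyond those cutoffs, log(1 + exp(x)) is numerically just exp(x) (very negative x) or x itself (large x). A rough NumPy illustration of that thresholding idea, not the bijector's actual _softplus:

# Hedged NumPy sketch of the threshold trick behind a numerically stable softplus.
import numpy as np

def softplus_stable(x):
    x = np.asarray(x, dtype=np.float32)
    threshold = np.log(np.finfo(np.float32).eps) + 1
    too_small = x < threshold          # softplus(x) ~= exp(x)
    too_large = x > -threshold         # softplus(x) ~= x
    safe = np.where(too_small | too_large, 1.0, x)   # keep exp() away from overflow
    mid = np.log1p(np.exp(safe))
    return np.where(too_small, np.exp(x), np.where(too_large, x, mid))

print(softplus_stable([-50.0, 0.0, 50.0]))  # roughly [1.9e-22, 0.693, 50.0]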
Example #5
def erfc_f32_generic(x):
    """
    Calculate erfc for dtype of f32
    """
    k_maxlog = 88.72283905206835
    k_erfc_pcoefficient = [
        +2.326819970068386e-2, -1.387039388740657e-1, +3.687424674597105e-1,
        -5.824733027278666e-1, +6.210004621745983e-1, -4.944515323274145e-1,
        +3.404879937665872e-1, -2.741127028184656e-1, +5.638259427386472e-1
    ]
    k_erfc_rcoefficient = [
        -1.047766399936249e+1, +1.297719955372516e+1, -7.495518717768503e+0,
        +2.921019019210786e+0, -1.015265279202700e+0, +4.218463358204948e-1,
        -2.820767439740514e-1, +5.641895067754075e-1
    ]
    abs_cal = P.Abs()
    select = P.Select()
    less = P.Less()
    fill = P.Fill()
    dtype = P.DType()
    shape = P.Shape()

    abs_x = abs_cal(x)
    z = exp_generic(-x * x)
    q = 1 / abs_x
    y = q * q
    poly1 = _evaluate_polynomial(y, k_erfc_pcoefficient)
    poly2 = _evaluate_polynomial(y, k_erfc_rcoefficient)
    p = select(less(abs_x, 2.0), poly1, poly2)
    y = z * q * p
    zeros = fill(dtype(x), shape(x), 0)
    y_clamp = select(less(z, -k_maxlog), zeros, y)
    return select(less(x, 0), 2.0 - y_clamp, y_clamp)
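erfc_f32_generic (and the f64 variant further down) rely on an _evaluate_polynomial helper that is not shown here. Given that the coefficient lists are ordered from highest to lowest degree, it is presumably a Horner-style evaluation; a hedged sketch of such a helper, using the same primitives as the snippet, could be:

# Hedged sketch of the missing _evaluate_polynomial helper (assumed Horner scheme).
# For coefficients [a, b, c] it returns a*x**2 + b*x + c, element-wise.
from mindspore.ops import operations as P

def evaluate_polynomial(x, coefficients):
    fill = P.Fill()
    dtype = P.DType()
    shape = P.Shape()
    poly = fill(dtype(x), shape(x), 0.0)
    for co in coefficients:
        poly = poly * x + co   # Horner step
    return poly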
Example #6
    def __init__(self, sharpness=1.0, name='Softplus'):
        """
        Constructor of Softplus Bijector.
        """
        param = dict(locals())
        validator.check_value_type('sharpness', sharpness, [int, float],
                                   type(self).__name__)
        super(Softplus, self).__init__(name=name, param=param)
        self._sharpness = cast_to_tensor(sharpness)

        self.exp = exp_generic
        self.log = log_generic
        self.expm1 = expm1_generic
        self.abs = P.Abs()
        self.dtypeop = P.DType()
        self.fill = P.Fill()
        self.greater = P.Greater()
        self.less = P.Less()
        self.log_sigmoid = LogSigmoid()
        self.logicalor = P.LogicalOr()
        self.select = P.Select()
        self.shape = P.Shape()
        self.sigmoid = P.Sigmoid()
        self.softplus = self._softplus
        self.inverse_softplus = self._inverse_softplus

        self.threshold = np.log(np.finfo(np.float32).eps) + 1
        self.tiny = np.exp(self.threshold)
Example #7
    def __init__(self):
        super(DiGamma, self).__init__()
        # const numbers
        self.k_lanczos_gamma = 7
        self.k_base_lanczos_coeff = 0.99999999999980993227684700473478
        self.k_lanczos_coefficients = [676.520368121885098567009190444019,
                                       -1259.13921672240287047156078755283,
                                       771.3234287776530788486528258894,
                                       -176.61502916214059906584551354,
                                       12.507343278686904814458936853,
                                       -0.13857109526572011689554707,
                                       9.984369578019570859563e-6,
                                       1.50563273514931155834e-7]
        self.nan = np.nan
        self.pi = np.pi
        self.lanczos_gamma_plus_one_half = self.k_lanczos_gamma + 0.5
        self.log_lanczos_gamma_plus_one_half = np.log(self.lanczos_gamma_plus_one_half)

        # operations
        self.log1p = P.Log1p()
        self.abs = P.Abs()
        self.shape = P.Shape()
        self.dtype = P.DType()
        self.fill = P.Fill()
        self.floor = P.Floor()
        self.equal = P.Equal()
        self.less = P.Less()
        self.select = P.Select()
        self.sin = P.Sin()
        self.cos = P.Cos()
        self.logicaland = P.LogicalAnd()
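The constants above are the g = 7 Lanczos coefficients; digamma is then log(t) - g/t plus the logarithmic derivative of the Lanczos series, with a reflection step (handled elsewhere in the class) for x < 0.5. A hedged NumPy sketch of that core formula for x > 0.5, using the same coefficients:

# Hedged NumPy sketch of the Lanczos-based digamma core (valid for x > 0.5; no reflection).
import numpy as np

LANCZOS_G = 7
LANCZOS_C0 = 0.99999999999980993227684700473478
LANCZOS_COEFFS = [676.520368121885098567009190444019,
                  -1259.13921672240287047156078755283,
                  771.3234287776530788486528258894,
                  -176.61502916214059906584551354,
                  12.507343278686904814458936853,
                  -0.13857109526572011689554707,
                  9.984369578019570859563e-6,
                  1.50563273514931155834e-7]

def digamma_lanczos(x):
    z = x - 1.0
    num, denom = 0.0, LANCZOS_C0
    for k, c in enumerate(LANCZOS_COEFFS):
        denom += c / (z + k + 1)
        num -= c / (z + k + 1) ** 2      # derivative of the series term
    t = z + LANCZOS_G + 0.5
    return np.log(t) + num / denom - LANCZOS_G / t

print(digamma_lanczos(1.0))  # about -0.5772 (negative Euler-Mascheroni constant)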
Example #8
def _abs_max(gradients):
    """
    Transform gradients to saliency through abs then take max along
    channels.
    """
    gradients = op.Abs()(gradients)
    saliency = op.ReduceMax(keep_dims=True)(gradients, axis=1)
    return saliency
Example #9
def erfc_f64_generic(x):
    """
    Calculate erfc for dtype of f64
    """
    k_maxlog = 7.09782712893383996843e2
    k_erfc_pcoefficient = [
        2.46196981473530512524e-10, 5.64189564831068821977e-1,
        7.46321056442269912687e0, 4.86371970985681366614e1,
        1.96520832956077098242e2, 5.26445194995477358631e2,
        9.34528527171957607540e2, 1.02755188689515710272e3,
        5.57535335369399327526e2
    ]
    k_erfc_qcoefficient = [
        1.00000000000000000000e0, 1.32281951154744992508e1,
        8.67072140885989742329e1, 3.54937778887819891062e2,
        9.75708501743205489753e2, 1.82390916687909736289e3,
        2.24633760818710981792e3, 1.65666309194161350182e3,
        5.57535340817727675546e2
    ]
    k_erfc_rcoefficient = [
        5.64189583547755073984e-1, 1.27536670759978104416e0,
        5.01905042251180477414e0, 6.16021097993053585195e0,
        7.40974269950448939160e0, 2.97886665372100240670e0
    ]
    k_erfc_scoefficient = [
        1.00000000000000000000e0, 2.26052863220117276590e0,
        9.39603524938001434673e0, 1.20489539808096656605e1,
        1.70814450747565897222e1, 9.60896809063285878198e0,
        3.36907645100081516050e02
    ]
    abs_cal = P.Abs()
    select = P.Select()
    less = P.Less()
    fill = P.Fill()
    dtype = P.DType()
    shape = P.Shape()

    abs_x = abs_cal(x)
    z = -x * x
    exp_z = exp_generic(z)

    temp1 = exp_z * _evaluate_polynomial(
        abs_x, k_erfc_pcoefficient) / _evaluate_polynomial(
            abs_x, k_erfc_qcoefficient)
    temp2 = exp_z * _evaluate_polynomial(
        abs_x, k_erfc_rcoefficient) / _evaluate_polynomial(
            abs_x, k_erfc_scoefficient)
    y = select(less(abs_x, 8.0), temp1, temp2)
    zeros = fill(dtype(x), shape(x), 0)
    y_clamp = select(less(z, -k_maxlog), zeros, y)
    return select(less(x, 0), 2.0 - y_clamp, y_clamp)
Example #10
    def __init__(self, scale):
        super(L1Regularizer, self).__init__()
        Validator.check_value_type("scale", scale, [int, float], self.cls_name)
        if scale <= 0:
            raise ValueError("scale should be a number which greater than 0")
        if math.isinf(scale) or math.isnan(scale):
            raise ValueError("scale can not be INF or NAN")
        self.abs = P.Abs()
        self.reduce_sum = P.ReduceSum()
        self.scale = Tensor(scale, dtype=mstype.float32)
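The constructor above only stores P.Abs, P.ReduceSum and the scale; the penalty itself is scale * sum(|w|). A hedged, self-contained sketch of that computation with the same operators (l1_penalty is an illustrative helper, not the cell's actual construct):

# Hedged sketch of the L1 penalty computed by L1Regularizer: scale * sum(|weights|).
# Assumes PyNative (eager) execution so the primitives can be called directly.
import numpy as np
import mindspore as ms
from mindspore import Tensor
from mindspore.ops import operations as P

def l1_penalty(weights, scale=0.5):
    abs_op = P.Abs()
    reduce_sum = P.ReduceSum()       # no axis given: reduces over all elements
    return Tensor(scale, ms.float32) * reduce_sum(abs_op(weights))

w = Tensor(np.array([[-1.0, 2.0], [3.0, -4.0]]), ms.float32)
print(l1_penalty(w))  # 0.5 * (1 + 2 + 3 + 4) = 5.0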
Example #11
    def construct(self, x):
        """
        Construct method.
        """
        output_hm, output_wh, output_off, output_kps = self.centerface_network(x)
        output_hm_nms, _ = self.maxpool2d(output_hm)
        abs_error = P.Abs()(output_hm - output_hm_nms)
        abs_out = P.Abs()(output_hm)
        error = abs_error / (abs_out + 1e-12)

        # cannot use P.Equal()(output_hm, output_hm_nms), since maxpooling output has 0.1% error
        keep = P.Select()(P.LessEqual()(error, 1e-3), \
           P.Fill()(ms.float32, P.Shape()(error), 1.0), \
           P.Fill()(ms.float32, P.Shape()(error), 0.0))
        output_hm = output_hm * keep

        # get topK and index
        scores = self.reshape(output_hm, (self.test_batch, -1))
        topk_scores, topk_inds = self.topk(scores, self.k)
        return topk_scores, output_wh, output_off, output_kps, topk_inds
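The keep mask is the usual heatmap NMS trick: a location survives only if it (approximately) equals the local maximum produced by max pooling, so duplicate detections around a peak are zeroed out. A hedged NumPy sketch of the same idea on a tiny single-channel heatmap:

# Hedged NumPy sketch of maxpool-style NMS: keep only cells that equal their own 3x3 maximum.
import numpy as np

def maxpool3x3(hm):
    padded = np.pad(hm, 1, mode='constant', constant_values=-np.inf)
    out = np.empty_like(hm)
    for i in range(hm.shape[0]):
        for j in range(hm.shape[1]):
            out[i, j] = padded[i:i + 3, j:j + 3].max()
    return out

hm = np.array([[0.9, 0.2, 0.1],
               [0.3, 0.4, 0.2],
               [0.0, 0.2, 0.8]], dtype=np.float32)
keep = np.isclose(hm, maxpool3x3(hm), rtol=1e-3)  # tolerance mirrors the 1e-3 check above
print(hm * keep)  # only the two local peaks (0.9 and 0.8) remain non-zero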
Example #12
def abs_max(gradients):
    """
    Transform gradients to saliency through abs then take max along channels.

    Args:
        gradients (_Tensor): Gradients which will be transformed to saliency map.

    Returns:
        _Tensor, saliency map integrated from gradients.
    """
    gradients = op.Abs()(gradients)
    saliency = op.ReduceMax(keep_dims=True)(gradients, axis=1)
    return saliency
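For gradients of shape (batch, channel, H, W), ReduceMax over axis 1 with keep_dims=True collapses the channel axis, so the saliency map comes out as (batch, 1, H, W). A hedged usage sketch, assuming op is mindspore.ops.operations and eager execution:

# Hedged usage sketch for abs_max: absolute value, then per-pixel max across channels.
import numpy as np
import mindspore as ms
from mindspore import Tensor
from mindspore.ops import operations as op

grads = Tensor(np.random.randn(2, 3, 4, 4), ms.float32)   # (batch, channel, H, W)
saliency = op.ReduceMax(keep_dims=True)(op.Abs()(grads), 1)
print(saliency.shape)  # (2, 1, 4, 4)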
Example #13
    def __init__(self, scale=1.0, shift=0.0, name='ScalarAffine'):
        """
        Constructor of scalar affine bijector.
        """
        param = dict(locals())
        validator.check_value_type('scale', scale, [int, float], name)
        validator.check_value_type('shift', shift, [int, float], name)
        self._scale = cast_to_tensor(scale)
        self._shift = cast_to_tensor(shift)
        super(ScalarAffine, self).__init__(is_constant_jacobian=True,
                                           is_injective=True,
                                           name=name,
                                           dtype=None,
                                           param=param)

        self.abs = P.Abs()
        self.log = log_by_step

        self.checktensor = CheckTensor()
Example #14
    def __init__(self,
                 num_bits=2,
                 compute_type=mstype.float32,
                 clip_value=1.0,
                 per_channel=False):
        self.num_bits = num_bits
        self.compute_type = compute_type
        self.clip_value = clip_value
        self.per_channel = per_channel

        self.clamp = C.clip_by_value
        self.abs = P.Abs()
        self.sum = P.ReduceSum()
        self.nelement = F.size
        self.div = P.Div()
        self.cast = P.Cast()
        self.max = P.ReduceMax()
        self.min = P.ReduceMin()
        self.floor = P.Floor()
Example #15
    def __init__(self,
                 num_bits=8,
                 compute_type=mstype.float32,
                 clip_value=1.0,
                 per_channel=False):
        super(QuantizeWeightCell, self).__init__()
        self.num_bits = num_bits
        self.compute_type = compute_type
        self.clip_value = clip_value
        self.per_channel = per_channel

        self.clamp = C.clip_by_value
        self.abs = P.Abs()
        self.sum = P.ReduceSum()
        self.nelement = F.size
        self.div = P.Div()
        self.cast = P.Cast()
        self.max = P.ReduceMax()
        self.min = P.ReduceMin()
        self.round = P.Round()
Example #16
    def __init__(self, scale=1.0, shift=0.0, name='ScalarAffine'):
        """
        Constructor of ScalarAffine Bijector.
        """
        param = dict(locals())
        param['param_dict'] = {'scale': scale, 'shift': shift}
        super(ScalarAffine, self).__init__(is_constant_jacobian=True,
                                           is_injective=True,
                                           name=name,
                                           dtype=None,
                                           param=param)

        self._scale = self._add_parameter(scale, 'scale')
        self._shift = self._add_parameter(shift, 'shift')

        self.abs = P.Abs()
        self.oneslike = P.OnesLike()
        self.dtypeop = P.DType()
        self.cast = P.Cast()
        self.log = log_generic
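ScalarAffine maps x to scale * x + shift, so its inverse is (y - shift) / scale and the log-determinant of the Jacobian is log|scale|, a constant in x (hence is_constant_jacobian=True and the Abs/log_generic operators above). A hedged NumPy sketch of those three mappings, kept separate from the bijector class:

# Hedged NumPy sketch of the ScalarAffine transform y = scale * x + shift.
import numpy as np

def affine_forward(x, scale=2.0, shift=1.0):
    return scale * np.asarray(x) + shift

def affine_inverse(y, scale=2.0, shift=1.0):
    return (np.asarray(y) - shift) / scale

def affine_forward_log_jacobian(x, scale=2.0, shift=1.0):
    # constant in x, which is why the bijector declares is_constant_jacobian=True
    return np.full(np.shape(x), np.log(np.abs(scale)))

x = np.array([0.0, 1.0, -3.0])
assert np.allclose(affine_inverse(affine_forward(x)), x)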
Example #17
    def __init__(self, reduction='mean'):
        super().__init__()
        self.abs = P.Abs()
        self.squeeze = P.Squeeze(-1)
Example #18
def tensor_abs(inputs):
    """Apply abs function."""
    return P.Abs()(inputs)
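A hedged usage note for the one-liner above (assuming P is mindspore.ops.operations and eager execution):

# Hedged usage sketch for tensor_abs.
import numpy as np
import mindspore as ms
from mindspore import Tensor

print(tensor_abs(Tensor(np.array([-1.5, 0.0, 2.0]), ms.float32)))  # [1.5 0.  2. ]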
Example #19
def _IgammacContinuedFraction(ax, x, a, enabled):
    """Helper function for computing Igammac using a continued fraction."""

    abs_x = P.Abs()
    logicaland = P.LogicalAnd()
    greater = P.Greater()
    less = P.Less()
    notequal = P.NotEqual()
    fill = P.Fill()
    shape = P.Shape()
    dtype = P.DType()
    select = P.Select()

    if dtype(ax) == mstype.float16:
        epsilon = eps_fp16
    else:
        epsilon = eps_fp32

    def cond(vals):
        enabled = vals[0]
        c = vals[5]
        return logicaland(less(c, 2000), enabled)

    def body(vals):
        enabled = vals[0]
        ans = vals[1]
        t = vals[2]
        y = vals[3]
        z = vals[4]
        c = vals[5]
        pkm1 = vals[6]
        qkm1 = vals[7]
        pkm2 = vals[8]
        qkm2 = vals[9]

        dpkm2_da = vals[10]
        dqkm2_da = vals[11]
        dpkm1_da = vals[12]
        dqkm1_da = vals[13]
        dans_da = vals[14]

        c = c + 1
        y = y + 1
        z = z + 2

        yc = y * c
        pk = pkm1 * z - pkm2 * yc
        qk = qkm1 * z - qkm2 * yc
        qk_is_nonzero = notequal(qk, 0)
        r = pk / qk

        t = select(qk_is_nonzero, abs_x((ans - r) / r), fill(dtype(t), shape(t), 1))
        ans = select(qk_is_nonzero, r, ans)

        dpk_da = dpkm1_da * z - pkm1 - dpkm2_da * yc + pkm2 * c
        dqk_da = dqkm1_da * z - qkm1 - dqkm2_da * yc + qkm2 * c
        dans_da_new = select(qk_is_nonzero, (dpk_da - ans * dqk_da) / qk, dans_da)
        grad_conditional = select(qk_is_nonzero,
                                  abs_x(dans_da_new - dans_da),
                                  fill(dtype(dans_da), shape(dans_da), 1))

        pkm2 = pkm1
        pkm1 = pk
        qkm2 = qkm1
        qkm1 = qk

        dpkm2_da = dpkm1_da
        dqkm2_da = dqkm1_da
        dpkm1_da = dpk_da
        dqkm1_da = dqk_da

        rescale = greater(abs_x(pk), 1 / epsilon)
        pkm2 = select(rescale, pkm2 * epsilon, pkm2)
        pkm1 = select(rescale, pkm1 * epsilon, pkm1)
        qkm2 = select(rescale, qkm2 * epsilon, qkm2)
        qkm1 = select(rescale, qkm1 * epsilon, qkm1)

        dpkm2_da = select(rescale, dpkm2_da * epsilon, dpkm2_da)
        dqkm2_da = select(rescale, dqkm2_da * epsilon, dqkm2_da)
        dpkm1_da = select(rescale, dpkm1_da * epsilon, dpkm1_da)
        dqkm1_da = select(rescale, dqkm1_da * epsilon, dqkm1_da)

        conditional = logicaland(enabled, greater(grad_conditional, epsilon))

        return (conditional, select(enabled, ans, vals[1]), select(enabled, t, vals[2]),
                select(enabled, y, vals[3]), select(enabled, z, vals[4]),
                c, select(enabled, pkm1, vals[6]),
                select(enabled, qkm1, vals[7]), select(enabled, pkm2, vals[8]),
                select(enabled, qkm2, vals[9]), select(enabled, dpkm2_da, vals[10]),
                select(enabled, dqkm2_da, vals[11]), select(enabled, dpkm1_da, vals[12]),
                select(enabled, dqkm1_da, vals[13]), select(enabled, dans_da_new, vals[14]))

    y = 1 - a
    z = x + y + 1
    c = fill(dtype(x), shape(x), 0)
    pkm2 = fill(dtype(x), shape(x), 1)
    qkm2 = x
    pkm1 = x + 1
    qkm1 = z * x
    ans = pkm1 / qkm1
    t = fill(dtype(x), shape(x), 1)
    dpkm2_da = fill(dtype(x), shape(x), 0)
    dqkm2_da = fill(dtype(x), shape(x), 0)
    dpkm1_da = fill(dtype(x), shape(x), 0)
    dqkm1_da = -x
    dans_da = (dpkm1_da - ans * dqkm1_da) / qkm1
    vals = (enabled, ans, t, y, z, c, pkm1, qkm1, pkm2, qkm2, dpkm2_da, dqkm2_da, dpkm1_da, dqkm1_da, dans_da)
    vals = _while_helper_func(cond, body, vals)
    ans = vals[1]
    return ans * ax
Example #20
    def __init__(self,
                 vocab_size,
                 embedding_size,
                 field_size,
                 param_init='normal',
                 target='CPU',
                 slice_mode='batch_slice',
                 feature_num_list=None,
                 max_norm=None,
                 sparse=True,
                 operator='SUM'):
        super(MultiFieldEmbeddingLookup,
              self).__init__(vocab_size, embedding_size, param_init, target,
                             slice_mode, feature_num_list, max_norm, sparse)
        self.field_size = validator.check_positive_int(field_size,
                                                       'field_size')
        self.operator = operator

        self.mul = P.Mul()
        self.inf_mask_mul = P.Mul()
        self.bias_add = P.Add()
        self.inf_add = P.Add()
        self.merge_op = None
        self.count_op = P.UnsortedSegmentSum()
        self.abs = P.Abs()
        self.equal = P.Equal()
        self.add = P.Add()
        self.cast = P.Cast()
        self.div_no_nan = P.DivNoNan()
        self.expand = P.ExpandDims()
        self.max_mask_mul = P.Mul()
        self.max_no_equal = P.NotEqual()

        if operator == MultiFieldEmbeddingLookup.OPERATOR_SUM:
            self.merge_op = P.UnsortedSegmentSum()
        elif operator == MultiFieldEmbeddingLookup.OPERATOR_MAX:
            self.merge_op = P.UnsortedSegmentMax()
        elif operator == MultiFieldEmbeddingLookup.OPERATOR_MEAN:
            self.merge_op = P.UnsortedSegmentSum()
        else:
            raise ValueError(
                "The operator supports ['SUM', 'MAX', 'MEAN'], but found: " +
                str(operator))

        parallel_mode = _get_parallel_mode()
        is_auto_parallel = parallel_mode in (ParallelMode.SEMI_AUTO_PARALLEL,
                                             ParallelMode.AUTO_PARALLEL)
        if slice_mode in ["table_row_slice", "batch_slice"
                          ] and is_auto_parallel:
            self.merge_op.shard(
                ((get_group_size(), 1, 1), (get_group_size(), 1)))
            self.expand.shard(((get_group_size(), ), ))
            self.bias_add.shard(((1, 1), (1, 1)))
            self.mul.shard(
                ((get_group_size(), 1, 1), (get_group_size(), 1, 1)))
            self.count_op.shard(((get_group_size(), 1), (get_group_size(), 1)))
            self.add.shard(((get_group_size(), ), (get_group_size(), )))
            self.div_no_nan.shard(
                ((get_group_size(), 1), (get_group_size(), 1)))
            self.max_mask_mul.shard(
                ((get_group_size(), 1), (get_group_size(), 1)))
            self.max_no_equal.shard(((1, ), ()))
            if operator == MultiFieldEmbeddingLookup.OPERATOR_MAX:
                self.equal.shard(((get_group_size(), 1, 1), ()))
                self.inf_mask_mul.shard(((get_group_size(), 1, 1), ()))
                self.merge_op.shard(
                    ((get_group_size(), 1), (get_group_size(), )))
                self.count_op.shard(
                    ((get_group_size(), ), (get_group_size(), )))
                self.inf_add.shard(
                    ((get_group_size(), 1, 1), (get_group_size(), 1, 1)))
        elif slice_mode == "table_column_slice" and is_auto_parallel:
            self.merge_op.shard(((1, 1, get_group_size()), (1, 1)))
            self.div_no_nan.shard(((1, get_group_size()), (1, 1)))
            self.bias_add.shard(((1, 1), (1, 1)))
            self.mul.shard(((1, 1, 1), (1, 1, get_group_size())))
            self.count_op.shard(((1, 1), (1, 1)))
            self.add.shard(((1, ), (1, )))
            self.max_mask_mul.shard(((1, get_group_size()), (1, 1)))
            self.expand.shard(((1, ), ))
            self.max_no_equal.shard(((1, ), ()))
            if operator == MultiFieldEmbeddingLookup.OPERATOR_MAX:
                self.equal.shard(((1, 1, 1), ()))
                self.inf_mask_mul.shard(((1, 1, 1), ()))
                self.merge_op.shard(((1, get_group_size()), (1, )))
                self.count_op.shard(((1, ), (1, )))
                self.inf_add.shard(((1, 1, get_group_size()), (1, 1, 1)))
        else:
            if is_auto_parallel:
                raise ValueError(
                    "slice_mode should be  ['table_row_slice', 'batch_slice' and \
                       'table_column_slice'], but get " + str(slice_mode))

        # Min value for fp32
        self.negative_inf_value = -3.402823466E+38
Example #21
    def __init__(self, reduction='mean'):
        super(CustomLoss, self).__init__(reduction)
        self.abs = P.Abs()
Example #22
    def __init__(self, reduction='mean', weights=1.0):
        super(WeightedLoss, self).__init__(reduction)
        self.abs = P.Abs()
        self.weights = weights
Example #23
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 has_bias=True,
                 activation=None):
        super(Dense_Thor, self).__init__()
        self.thor = True
        self.in_channels = Validator.check_positive_int(in_channels)
        self.out_channels = Validator.check_positive_int(out_channels)
        self.has_bias = Validator.check_bool(has_bias)
        if isinstance(weight_init, Tensor):
            if weight_init.dim() != 2 or weight_init.shape[0] != out_channels or \
                    weight_init.shape[1] != in_channels:
                raise ValueError("Weight init shape error.")
        self.weight = Parameter(initializer(weight_init,
                                            [out_channels, in_channels]),
                                name="weight")

        self.bias = None
        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.dim() != 1 or bias_init.shape[0] != out_channels:
                    raise ValueError("Bias init shape error.")
            self.bias = Parameter(initializer(bias_init, [out_channels]),
                                  name="bias")
            self.bias_add = P.BiasAdd()

        self.matmul = P.MatMul(transpose_b=True)
        self.activation = get_activation(activation)
        self.activation_flag = self.activation is not None

        self.matrix_A = Parameter(Tensor(
            np.zeros([in_channels, in_channels]).astype(np.float32)),
                                  name='matrix_A',
                                  requires_grad=False)
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.mul = P.Mul()
        self.is_Ascend = True
        if context.get_context("device_target") == "Ascend":
            if out_channels == 1001:
                self.matrix_G = Parameter(Tensor(
                    np.zeros([1024, 1024]).astype(np.float32)),
                                          name='matrix_G',
                                          requires_grad=False)
                self.pad = P.Pad(((0, 23), (0, 23)))
                self.pad1 = P.Pad(((0, 7), (0, 7)))
                self.slice = P.Slice()
                self.add = P.TensorAdd()
            else:
                self.matrix_G = Parameter(Tensor(
                    np.eye(out_channels).astype(np.float32)),
                                          name="matrix_G",
                                          requires_grad=False)
                self.abs = P.Abs()
                self.reduce_max = P.ReduceMax(keep_dims=False)
                self.neg = P.Neg()
                self.reduce_sum = P.ReduceSum()
            self.matmul = P.MatMul(transpose_b=True)
            self.cube_matmul = P.CusMatMulCube(transpose_a=True)
            self.cast = P.Cast()
            self.is_nsp_layer = (out_channels == 2)
        else:
            self.is_Ascend = False
            self.matrix_G = Parameter(Tensor(
                np.eye(out_channels).astype(np.float32)),
                                      name="matrix_G",
                                      requires_grad=False)
            self.cube_matmul = P.MatMul(transpose_a=True)
        self.getG = P.InsertGradientOf(self.save_gradient)
Example #24
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 damping=0.03,
                 loss_scale=1,
                 frequency=100,
                 has_bias=False,
                 activation=None,
                 batch_size=12):
        super(Dense_Thor, self).__init__()
        self.in_channels = Validator.check_positive_int(in_channels)
        self.out_channels = Validator.check_positive_int(out_channels)
        self.has_bias = Validator.check_bool(has_bias)
        self.thor = True
        if isinstance(weight_init, Tensor):
            if weight_init.dim() != 2 or weight_init.shape()[0] != out_channels or \
                    weight_init.shape()[1] != in_channels:
                raise ValueError("weight_init shape error")

        self.weight = Parameter(initializer(weight_init,
                                            [out_channels, in_channels]),
                                name="weight")

        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.dim() != 1 or bias_init.shape(
                )[0] != out_channels:
                    raise ValueError("bias_init shape error")

            self.bias = Parameter(initializer(bias_init, [out_channels]),
                                  name="bias")

        self.matmul = P.MatMul(transpose_b=True)
        self.bias_add = P.BiasAdd()

        self.activation = get_activation(activation)
        self.activation_flag = self.activation is not None
        self.matrix_A_inv = Parameter(Tensor(
            np.zeros([in_channels, in_channels]).astype(np.float16)),
                                      name='matrix_A_inv',
                                      requires_grad=False)
        self.matrix_G_inv = Parameter(Tensor(
            np.zeros([out_channels, out_channels]).astype(np.float16)),
                                      name="matrix_G_inv",
                                      requires_grad=False)
        self.fake_G = Tensor(
            np.zeros([out_channels, out_channels]).astype(np.float16))

        self.matmul = P.MatMul(transpose_b=True)
        self.cube_matmul = P.CusMatMulCube(transpose_a=True)
        self.matrix_combine = P.CusMatrixCombine()
        self.cholesky = P.CusCholeskyTrsm()
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.cov_step = Parameter(initializer(0, [1], mstype.int32),
                                  name="cov_step",
                                  requires_grad=False)
        self.mul = P.Mul()
        self.cast = P.Cast()
        self.damping = damping
        self.loss_scale = Tensor(1 / loss_scale, mstype.float16)
        self.vector_matmul = P.CusBatchMatMul()
        self.gather = P.GatherV2()
        self.assignadd = P.AssignAdd()
        self.freq = Tensor(frequency, mstype.int32)
        self.axis = 0
        self.abs = P.Abs()
        self.reduce_max = P.ReduceMax(keep_dims=False)
        self.log = P.Log()
        self.exp = P.Exp()
        self.dampingA = Tensor(np.identity(in_channels), mstype.float32)
        self.dampingG = Tensor(np.identity(out_channels), mstype.float32)
        self.sqrt = P.Sqrt()
        self.getG = P.InsertGradientOf(self.save_gradient)
        self.batch_size = batch_size
Example #25
        'block': P.NPUClearFloatStatus(),
        'desc_inputs': [Tensor(np.zeros([8]).astype(np.float32))],
        'desc_bprop': [Tensor(np.zeros([8]).astype(np.float32))],
        'skip': ['backward']}),
    ('CheckValid', {
        'block': P.CheckValid(),
        'desc_inputs': [[20000, 4], [3]],
        'desc_bprop': [[20000]],
        'skip': ['backward']}),
    ('NMSWithMask', {
        'block': P.NMSWithMask(0.5),
        'desc_inputs': [[128, 5]],
        'desc_bprop': [[128, 5], [128], [128]],
        'skip': ['backward']}),
    ('Abs', {
        'block': P.Abs(),
        'desc_inputs': [[4]],
        'desc_bprop': [[4]]}),
    ('CumSum', {
        'block': P.CumSum(),
        'desc_const': [0],
        'desc_inputs': [Tensor(np.array([[3, 4], [1, 6]]).astype(np.float16))],
        'desc_bprop': [Tensor(np.array([[3, 4], [4, 10]]).astype(np.float16))]}),
    ('ReduceSum_3', {
        'block': P.ReduceSum(),
        'desc_const': [0],
        'desc_inputs': [[3, 2]],
        'desc_bprop': [[2]]}),
    ('ReduceSum_4', {
        'block': P.ReduceSum(keep_dims=True),
        'desc_const': [0],
Example #26
    def __init__(self):
        super(Net, self).__init__()
        self.ops = P.Abs()
Example #27
def absolute(inputs: Tensor) -> Tensor:
    """Get the absolute value of a tensor value."""
    abs_op = op.Abs()
    outputs = abs_op(inputs)
    return outputs
Example #28
        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32))],
        'skip': ['backward']}),
    # dims is not 2
    ('NMSWithMask2', {
        'block': (P.NMSWithMask(), {'exception': ValueError, 'error_keywords': ['NMSWithMask']}),
        'desc_inputs': [Tensor(np.ones([3, 4, 2]).astype(np.float32))],
        'skip': ['backward']}),
    # shape[1] is not 5
    ('NMSWithMask3', {
        'block': (P.NMSWithMask(), {'exception': ValueError, 'error_keywords': ['NMSWithMask']}),
        'desc_inputs': [Tensor(np.ones([3, 2]).astype(np.float32))],
        'skip': ['backward']}),

    # input is not tensor
    ('Abs0', {
        'block': (P.Abs(), {'exception': TypeError, 'error_keywords': ['Abs']}),
        'desc_inputs': [5.0],
        'skip': ['backward']}),
    # input is Tensor(bool)
    ('Abs1', {
        'block': (P.Abs(), {'exception': TypeError, 'error_keywords': ['Abs']}),
        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.bool_))],
        'skip': ['backward']}),

    # input is not tensor
    ('Sign0', {
        'block': (P.Sign(), {'exception': TypeError, 'error_keywords': ['Sign']}),
        'desc_inputs': [5.0],
        'skip': ['backward']}),
    # input is Tensor(bool)
    ('Sign1', {
Example #29
    def setup_method(self):
        """Setup the test case."""
        self.net = SimpleLinear()
        self.relu = P.ReLU()
        self.abs_ = P.Abs()
        self.reshape = P.Reshape()
Example #30
    def __init__(self, strategy1, strategy2):
        super().__init__()
        self.matmul = P.MatMul().set_strategy(strategy1)
        self.abs = P.Abs().set_strategy(strategy2)
        self.matmul2 = P.MatMul().set_strategy(strategy1)