Example #1
    def __init__(self, sharpness=1.0, name='Softplus'):
        """
        Constructor of Softplus Bijector.
        """
        param = dict(locals())
        param['param_dict'] = {'sharpness': sharpness}
        super(Softplus, self).__init__(name=name, dtype=None, param=param)
        self._sharpness = self._add_parameter(sharpness, 'sharpness')

        self.exp = exp_generic
        self.log = log_generic
        self.expm1 = P.Expm1()
        self.abs = P.Abs()
        self.dtypeop = P.DType()
        self.cast = P.Cast()
        self.fill = P.Fill()
        self.greater = P.Greater()
        self.less = P.Less()
        self.log_sigmoid = LogSigmoid()
        self.logicalor = P.LogicalOr()
        self.select = P.Select()
        self.shape = P.Shape()
        self.sigmoid = P.Sigmoid()
        self.softplus = self._softplus
        self.inverse_softplus = self._inverse_softplus

        self.threshold = np.log(np.finfo(np.float32).eps) + 1
        self.tiny = np.exp(self.threshold)
Example #2
    def __init__(self, sharpness=1.0, name='Softplus'):
        """
        Constructor of Softplus Bijector.
        """
        param = dict(locals())
        validator.check_value_type('sharpness', sharpness, [int, float],
                                   type(self).__name__)
        super(Softplus, self).__init__(name=name, param=param)
        self._sharpness = cast_to_tensor(sharpness)

        self.exp = exp_generic
        self.log = log_generic
        self.expm1 = expm1_generic
        self.abs = P.Abs()
        self.dtypeop = P.DType()
        self.fill = P.Fill()
        self.greater = P.Greater()
        self.less = P.Less()
        self.log_sigmoid = LogSigmoid()
        self.logicalor = P.LogicalOr()
        self.select = P.Select()
        self.shape = P.Shape()
        self.sigmoid = P.Sigmoid()
        self.softplus = self._softplus
        self.inverse_softplus = self._inverse_softplus

        self.threshold = np.log(np.finfo(np.float32).eps) + 1
        self.tiny = np.exp(self.threshold)
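Both constructors bind self.softplus to a private _softplus helper that is not shown. The sketch below is my reconstruction of the numeric pattern these registered ops typically implement, not the MindSpore source: inputs below threshold are approximated by exp(x), inputs above -threshold by x itself, and LogicalOr/Select zero out the unsafe lanes so log(exp(x) + 1) stays finite in float32.

import numpy as np
import mindspore.ops.operations as P
from mindspore import Tensor, dtype as mstype

threshold = np.log(np.finfo(np.float32).eps) + 1  # ~ -15.9 for float32

def softplus_stable(x):
    less, greater = P.Less(), P.Greater()
    logicalor, select = P.LogicalOr(), P.Select()
    exp, log = P.Exp(), P.Log()
    fill, shape_op, dtype_op = P.Fill(), P.Shape(), P.DType()
    too_small = less(x, threshold)       # exp(x) underflows; softplus(x) ~ exp(x)
    too_large = greater(x, -threshold)   # exp(x) overflows; softplus(x) ~ x
    unsafe = logicalor(too_small, too_large)
    # zero the unsafe lanes so the naive formula never overflows
    safe_x = select(unsafe, fill(dtype_op(x), shape_op(x), 0.0), x)
    y = log(exp(safe_x) + 1.0)
    return select(too_small, exp(x), select(too_large, x, y))

print(softplus_stable(Tensor(np.array([-100.0, 0.0, 100.0]), mstype.float32)))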
Example #3
    def __init__(self):
        super(IGamma, self).__init__()
        # const numbers
        # If more data types are supported, this float max value needs to be selected accordingly.
        self.log_maxfloat32 = Tensor(np.log(np.finfo(np.float32).max),
                                     mstype.float32)

        # operations
        self.logicaland = P.LogicalAnd()
        self.logicalor = P.LogicalOr()
        self.logicalnot = P.LogicalNot()
        self.equal = P.Equal()
        self.greater = P.Greater()
        self.less = P.Less()
        self.neg = P.Neg()
        self.log = P.Log()
        self.exp = P.Exp()
        self.select = P.Select()
        self.zeroslike = P.ZerosLike()
        self.fill = P.Fill()
        self.shape = P.Shape()
        self.dtype = P.DType()
        self.lgamma = LGamma()
        self.const = P.ScalarToArray()
        self.cast = P.Cast()
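IGamma's construct is not shown; a common role for LogicalOr in such kernels is to merge the domain checks (the regularized lower incomplete gamma is undefined for a <= 0 or x < 0) into one mask and route those lanes to nan via Select. A standalone sketch of that pattern; mask_invalid is a hypothetical name, not MindSpore API:

import numpy as np
import mindspore.ops.operations as P
from mindspore import Tensor, dtype as mstype

less, less_equal = P.Less(), P.LessEqual()
logicalor, select = P.LogicalOr(), P.Select()
fill, shape_op, dtype_op = P.Fill(), P.Shape(), P.DType()

def mask_invalid(a, x, result):
    # merge both domain violations into a single boolean mask
    bad = logicalor(less_equal(a, 0.0), less(x, 0.0))
    nan = fill(dtype_op(result), shape_op(result), np.nan)
    return select(bad, nan, result)

a = Tensor(np.array([1.0, -1.0]), mstype.float32)
x = Tensor(np.array([2.0, 2.0]), mstype.float32)
res = Tensor(np.array([0.6, 0.3]), mstype.float32)
print(mask_invalid(a, x, res))  # second lane becomes nan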
Example #4
    def __init__(self, batch_size=4):
        super(DiceLoss, self).__init__()

        self.threshold0 = Tensor(0.5, mstype.float32)
        self.zero_float32 = Tensor(0.0, mstype.float32)
        self.k = int(640 * 640)
        self.negative_one_int32 = Tensor(-1, mstype.int32)
        self.batch_size = batch_size
        self.concat = P.Concat()
        self.less_equal = P.LessEqual()
        self.greater = P.Greater()
        self.reduce_sum = P.ReduceSum()
        self.reduce_sum_keep_dims = P.ReduceSum(keep_dims=True)
        self.reduce_mean = P.ReduceMean()
        self.reduce_min = P.ReduceMin()
        self.cast = P.Cast()
        self.minimum = P.Minimum()
        self.expand_dims = P.ExpandDims()
        self.select = P.Select()
        self.fill = P.Fill()
        self.topk = P.TopK(sorted=True)
        self.shape = P.Shape()
        self.sigmoid = P.Sigmoid()
        self.reshape = P.Reshape()
        self.slice = P.Slice()
        self.logical_and = P.LogicalAnd()
        self.logical_or = P.LogicalOr()
        self.equal = P.Equal()
        self.zeros_like = P.ZerosLike()
        self.add = P.TensorAdd()
        self.gather = P.Gather()
Example #5
    def __init__(self,
                 network,
                 optimizer,
                 scale_update_cell=None,
                 accumulation_steps=1,
                 enable_global_norm=False):
        super(BertTrainAccumulateStepsWithLossScaleCell,
              self).__init__(auto_prefix=False)
        self.network = network
        self.network.set_grad()
        self.weights = optimizer.parameters
        self.optimizer = optimizer
        self.accumulation_steps = accumulation_steps
        self.enable_global_norm = enable_global_norm
        self.one = Tensor(np.array([1]).astype(np.int32))
        self.zero = Tensor(np.array([0]).astype(np.int32))
        self.local_step = Parameter(initializer(0, [1], mstype.int32),
                                    name="local_step")
        self.accu_grads = self.weights.clone(prefix="accu_grads", init='zeros')
        self.accu_overflow = Parameter(initializer(0, [1], mstype.int32),
                                       name="accu_overflow")
        self.loss = Parameter(initializer(0, [1], mstype.float32),
                              name="accu_loss")

        self.grad = C.GradOperation(get_by_list=True, sens_param=True)
        self.reducer_flag = False
        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
        if self.parallel_mode in [
                ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL
        ]:
            self.reducer_flag = True
        self.grad_reducer = F.identity
        self.degree = 1
        if self.reducer_flag:
            self.degree = get_group_size()
            self.grad_reducer = DistributedGradReducer(optimizer.parameters,
                                                       False, self.degree)
        self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
        self.overflow_reducer = F.identity
        if self.is_distributed:
            self.overflow_reducer = P.AllReduce()
        self.cast = P.Cast()
        self.alloc_status = P.NPUAllocFloatStatus()
        self.get_status = P.NPUGetFloatStatus()
        self.clear_before_grad = P.NPUClearFloatStatus()
        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.base = Tensor(1, mstype.float32)
        self.less_equal = P.LessEqual()
        self.logical_or = P.LogicalOr()
        self.not_equal = P.NotEqual()
        self.select = P.Select()
        self.reshape = P.Reshape()
        self.hyper_map = C.HyperMap()
        self.loss_scale = None
        self.loss_scaling_manager = scale_update_cell
        if scale_update_cell:
            self.loss_scale = Parameter(Tensor(
                scale_update_cell.get_loss_scale(), dtype=mstype.float32),
                                        name="loss_scale")
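The construct of this cell is omitted; the usual reason it registers LogicalOr is to make the overflow flag sticky across gradient-accumulation steps, so that once any micro-step overflows, accu_overflow stays set until the optimizer step. A simplified standalone sketch of that folding (values are illustrative):

import numpy as np
import mindspore.ops.operations as P
from mindspore import Tensor, dtype as mstype

less_equal, logical_or, not_equal = P.LessEqual(), P.LogicalOr(), P.NotEqual()
base = Tensor(1, mstype.float32)
zero = Tensor(np.array([0]).astype(np.int32))

flag_sum = Tensor(np.array([2.0]).astype(np.float32))   # >0: NPU float status saw inf/nan
accu_overflow = Tensor(np.array([0]).astype(np.int32))  # carried between micro-steps

step_overflow = less_equal(base, flag_sum)              # overflow this step?
sticky = logical_or(not_equal(accu_overflow, zero), step_overflow)
print(sticky)  # [ True]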
Example #6
    def __init__(self):
        super(MathBinaryNet2, self).__init__()
        self.less_equal = P.LessEqual()
        self.greater = P.Greater()
        self.logic_or = P.LogicalOr()
        self.logic_and = P.LogicalAnd()
        self.number = 3
        self.flag = True
Example #7
    def __init__(self,
                 probs=None,
                 seed=None,
                 dtype=mstype.int32,
                 name="Categorical"):
        param = dict(locals())
        param['param_dict'] = {'probs': probs}
        valid_dtype = mstype.uint_type + mstype.int_type + mstype.float_type
        Validator.check_type_name("dtype", dtype, valid_dtype,
                                  type(self).__name__)
        super(Categorical, self).__init__(seed, dtype, name, param)

        self._probs = self._add_parameter(probs, 'probs')
        if self.probs is not None:
            check_rank(self.probs)
            check_prob(self.probs)
            check_sum_equal_one(probs)

            # update is_scalar_batch and broadcast_shape
            # drop one dimension
            if self.probs.shape[:-1] == ():
                self._is_scalar_batch = True
            self._broadcast_shape = self._broadcast_shape[:-1]

        self.argmax = P.ArgMaxWithValue(axis=-1)
        self.broadcast = broadcast_to
        self.cast = P.Cast()
        self.clip_by_value = C.clip_by_value
        self.concat = P.Concat(-1)
        self.cumsum = P.CumSum()
        self.dtypeop = P.DType()
        self.exp = exp_generic
        self.expand_dim = P.ExpandDims()
        self.fill = P.Fill()
        self.gather = P.GatherNd()
        self.greater = P.Greater()
        self.issubclass = P.IsSubClass()
        self.less = P.Less()
        self.log = log_generic
        self.log_softmax = P.LogSoftmax()
        self.logicor = P.LogicalOr()
        self.logicand = P.LogicalAnd()
        self.multinomial = P.Multinomial(seed=self.seed)
        self.reshape = P.Reshape()
        self.reduce_sum = P.ReduceSum(keep_dims=True)
        self.select = P.Select()
        self.shape = P.Shape()
        self.softmax = P.Softmax()
        self.squeeze = P.Squeeze()
        self.squeeze_first_axis = P.Squeeze(0)
        self.squeeze_last_axis = P.Squeeze(-1)
        self.square = P.Square()
        self.transpose = P.Transpose()
        self.is_nan = P.IsNan()

        self.index_type = mstype.int32
        self.nan = np.nan
Example #8
def test_logicalor():
    op = P.LogicalOr()
    op_wrapper = OpNetWrapper(op)

    input_x = Tensor(np.array([True, False, False]))
    input_y = Tensor(np.array([True, True, False]))
    outputs = op_wrapper(input_x, input_y)

    assert np.allclose(outputs.asnumpy(), (True, True, False))
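Beyond same-shape inputs, LogicalOr broadcasts its boolean operands elementwise, which the shape pairs in Examples #11 and #16 exercise; a quick standalone check:

import numpy as np
import mindspore.ops.operations as P
from mindspore import Tensor

x = Tensor(np.zeros((3, 4, 5), np.bool_))
y = Tensor(np.ones((3, 1, 1), np.bool_))
out = P.LogicalOr()(x, y)    # y broadcasts to (3, 4, 5)
assert out.shape == (3, 4, 5)
assert out.asnumpy().all()   # False | True -> True everywhere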
Example #9
    def __init__(self,
                 loc=None,
                 scale=None,
                 seed=None,
                 dtype=mstype.float32,
                 name="Logistic"):
        """
        Constructor of Logistic.
        """
        param = dict(locals())
        param['param_dict'] = {'loc': loc, 'scale': scale}
        valid_dtype = mstype.float_type
        Validator.check_type_name("dtype", dtype, valid_dtype,
                                  type(self).__name__)
        super(Logistic, self).__init__(seed, dtype, name, param)

        self._loc = self._add_parameter(loc, 'loc')
        self._scale = self._add_parameter(scale, 'scale')
        if self._scale is not None:
            check_greater_zero(self._scale, "scale")

        # ops needed for the class
        self.cast = P.Cast()
        self.const = P.ScalarToArray()
        self.consttensor = P.ScalarToTensor()
        self.dtypeop = P.DType()
        self.exp = exp_generic
        self.expm1 = P.Expm1()
        self.fill = P.Fill()
        self.less = P.Less()
        self.log = log_generic
        self.log1p = P.Log1p()
        self.logicalor = P.LogicalOr()
        self.erf = P.Erf()
        self.greater = P.Greater()
        self.sigmoid = P.Sigmoid()
        self.squeeze = P.Squeeze(0)
        self.select = P.Select()
        self.shape = P.Shape()
        self.softplus = self._softplus
        self.sqrt = P.Sqrt()
        self.uniform = C.uniform

        self.threshold = np.log(np.finfo(np.float32).eps) + 1.
        self.tiny = np.finfo(np.float32).tiny  # np.float was removed in NumPy 1.24; float32 matches the threshold above
        self.sd_const = np.pi / np.sqrt(3)
Example #10
    def __init__(self):
        super(NetOr, self).__init__()
        self.logicalor = P.LogicalOr()
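Example #10 shows only the constructor; a cell like this would pair it with a construct method that applies the op. A minimal hypothetical completion with usage:

import numpy as np
import mindspore.nn as nn
import mindspore.ops.operations as P
from mindspore import Tensor

class NetOr(nn.Cell):
    def __init__(self):
        super(NetOr, self).__init__()
        self.logicalor = P.LogicalOr()

    def construct(self, x, y):   # assumed body; not shown in the snippet
        return self.logicalor(x, y)

net = NetOr()
print(net(Tensor(np.array([True, False])), Tensor(np.array([False, False]))))
# [ True False]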
Example #11
     'desc_inputs': [[2, 3, 4, 1], [4, 5]],
     'desc_bprop': [Tensor(np.ones((2, 3, 4, 5), np.bool_))]}),
 ('GreaterEqual', {
     'block': P.GreaterEqual(),
     'desc_inputs': [[2, 3, 4, 1], [4, 5]],
     'desc_bprop': [Tensor(np.ones((2, 3, 4, 5), np.bool_))]}),
 ('LogicalNot', {
     'block': P.LogicalNot(),
     'desc_inputs': [Tensor(np.zeros((3, 4, 5), np.bool_))],
     'desc_bprop': [Tensor(np.ones((3, 4, 5), np.bool_))]}),
 ('LogicalAnd', {
         'block': P.LogicalAnd(),
         'desc_inputs': [Tensor(np.zeros((2, 3, 4), np.bool_)), Tensor(np.ones((1), np.bool_))],
         'desc_bprop': [Tensor(np.zeros((2, 3, 4), np.bool_))]}),
 ('LogicalOr', {
         'block': P.LogicalOr(),
         'desc_inputs': [Tensor(np.zeros((3, 4, 5), np.bool_)), Tensor(np.ones((3, 1, 1), np.bool_))],
         'desc_bprop': [Tensor(np.zeros((3, 4, 5), np.bool_))]}),
 ('NpuAllocFloatStatus', {
     'block': P.NPUAllocFloatStatus(),
     'desc_inputs': [],
     'add_fack_input': True,
     'fack_input_type': np.float32,
     'desc_bprop': [Tensor(np.zeros([8]).astype(np.float32))],
     'skip': ['backward']}),
 ('NpuGetFloatStatus', {
     'block': P.NPUGetFloatStatus(),
     'desc_inputs': [Tensor(np.zeros([8]).astype(np.float32))],
     'desc_bprop': [Tensor(np.zeros([8]).astype(np.float32))],
     'skip': ['backward']}),
 ('NpuClearFloatStatus', {
     'block': P.NPUClearFloatStatus(),
     'desc_inputs': [Tensor(np.zeros([8]).astype(np.float32))],
     'desc_bprop': [Tensor(np.zeros([8]).astype(np.float32))],
     'skip': ['backward']}),
Example #12
    def __init__(self):
        super(NetConditionLackBranch, self).__init__()
        self.logicaland = P.LogicalAnd()
        self.logicalor = P.LogicalOr()
Example #13
        'skip': ['backward']}),

    # type of x and y not match
    ('LogicalAnd1', {
        'block': (P.LogicalAnd(), {'exception': TypeError, 'error_keywords': ['LogicalAnd']}),
        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.bool_))],
        'skip': ['backward']}),
    # shape of x and y not match
    ('LogicalAnd2', {
        'block': (P.LogicalAnd(), {'exception': ValueError, 'error_keywords': ['LogicalAnd']}),
        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.bool_)), Tensor(np.ones([3, 2]).astype(np.bool_))],
        'skip': ['backward']}),

    # type of x and y not match
    ('LogicalOr1', {
        'block': (P.LogicalOr(), {'exception': TypeError, 'error_keywords': ['LogicalOr']}),
        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.bool_))],
        'skip': ['backward']}),
    # shape of x and y not match
    ('LogicalOr2', {
        'block': (P.LogicalOr(), {'exception': ValueError, 'error_keywords': ['LogicalOr']}),
        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.bool_)), Tensor(np.ones([3, 2]).astype(np.bool_))],
        'skip': ['backward']}),

    # input is not tensor
    ('NPUGetFloatStatus0', {
        'block': (P.NPUGetFloatStatus(), {'exception': TypeError, 'error_keywords': ['NPUGetFloatStatus']}),
        'desc_inputs': [5.0],
        'skip': ['backward']}),
    # input is Tensor(int32), not Tensor(float32)
    ('NPUGetFloatStatus1', {
Example #14
    # shape of x and y not match
    ('LogicalAnd2', {
        'block': (P.LogicalAnd(), {
            'exception': ValueError,
            'error_keywords': ['LogicalAnd']
        }),
        'desc_inputs': [
            Tensor(np.ones([3, 4]).astype(np.bool_)),
            Tensor(np.ones([3, 2]).astype(np.bool_))
        ],
        'skip': ['backward']
    }),

    # type of x and y not match
    ('LogicalOr1', {
        'block': (P.LogicalOr(), {
            'exception': TypeError,
            'error_keywords': ['LogicalOr']
        }),
        'desc_inputs': [
            Tensor(np.ones([3, 4]).astype(np.int32)),
            Tensor(np.ones([3, 4]).astype(np.bool_))
        ],
        'skip': ['backward']
    }),
    # shape of x and y not match
    ('LogicalOr2', {
        'block': (P.LogicalOr(), {
            'exception': ValueError,
            'error_keywords': ['LogicalOr']
        }),
Example #15
    def __init__(self, strategy1, strategy2):
        super().__init__()
        self.matmul = P.MatMul().shard(strategy1)
        self.equal = P.Equal().shard(strategy2)
        self.notequal = P.NotEqual().shard(strategy2)
        self.logical = P.LogicalOr().shard(strategy2)
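The strategy arguments here are tuples with one inner tuple per operator input, each giving how many slices to cut along that input's dimensions; the split products must match the configured device count. Hypothetical values for a 4-device run (the cell's class name is not shown, so Net below is a stand-in):

from mindspore import context

context.set_auto_parallel_context(device_num=4, parallel_mode="semi_auto_parallel")
strategy1 = ((2, 2), (2, 1))     # MatMul: lhs split 2x2, rhs splits the shared k-dim by 2
strategy2 = ((2, 2), (2, 2))     # Equal/NotEqual/LogicalOr: both inputs split 2x2
net = Net(strategy1, strategy2)  # Net: stand-in name for the cell defined above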
Example #16
     'block': P.LogicalNot(),
     'desc_inputs': [Tensor(np.zeros((3, 4, 5), np.bool_))],
     'desc_bprop': [Tensor(np.ones((3, 4, 5), np.bool_))]
 }),
 ('LogicalAnd', {
     'block':
     P.LogicalAnd(),
     'desc_inputs': [
         Tensor(np.zeros((2, 3, 4), np.bool_)),
         Tensor(np.ones((1), np.bool_))
     ],
     'desc_bprop': [Tensor(np.zeros((2, 3, 4), np.bool_))]
 }),
 ('LogicalOr', {
     'block':
     P.LogicalOr(),
     'desc_inputs': [
         Tensor(np.zeros((3, 4, 5), np.bool_)),
         Tensor(np.ones((3, 1, 1), np.bool_))
     ],
     'desc_bprop': [Tensor(np.zeros((3, 4, 5), np.bool_))]
 }),
 ('NpuAllocFloatStatus', {
     'block': P.NPUAllocFloatStatus(),
     'desc_inputs': [],
     'add_fack_input': True,
     'fack_input_type': np.float32,
     'desc_bprop': [Tensor(np.zeros([8]).astype(np.float32))],
     'skip': ['backward']
 }),
 ('NpuGetFloatStatus', {