def log_generic(input_x):
    """
    Log op on Ascend is calculated as log(abs(x)).
    Fix this by mapping negative values to nan.
    Log op on Ascend also doesn't support int types.
    Fix this by casting the input to float32.
    """
    log = P.Log()
    less = P.Less()
    lessequal = P.LessEqual()
    fill = P.Fill()
    cast = P.Cast()
    dtype = P.DType()
    shape = P.Shape()
    select = P.Select()
    checktype = P.IsSubClass()

    if not checktype(dtype(input_x), mstype.float_):
        input_x = cast(input_x, mstype.float32)
    nan = fill(dtype(input_x), shape(input_x), np.nan)
    inf = fill(dtype(input_x), shape(input_x), np.inf)
    neg_x = less(input_x, 0.0)
    nonpos_x = lessequal(input_x, 0.0)
    log_x = log(input_x)
    result = select(nonpos_x, -inf, log_x)
    return select(neg_x, nan, result)
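# A minimal usage sketch for log_generic (added here, not part of the original source).
# It assumes the module-level imports used throughout these snippets: numpy as np,
# mindspore.ops.operations as P, and mindspore.common.dtype as mstype.
# Negative inputs come back as nan, zeros as -inf, positive values as their natural log;
# integer inputs would be cast to float32 before the log is taken.
from mindspore import Tensor

x = Tensor(np.array([-1.0, 0.0, 1.0, np.e]).astype(np.float32))
print(log_generic(x))  # expected roughly: [nan, -inf, 0.0, 1.0]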
def __init__(self,
             probs=None,
             seed=0,
             dtype=mstype.int32,
             name="Geometric"):
    """
    Constructor of Geometric distribution.
    """
    param = dict(locals())
    super(Geometric, self).__init__(dtype, name, param)
    if probs is not None:
        self._probs = cast_to_tensor(probs, dtype=mstype.float32)
        check_prob(self._probs)
    else:
        self._probs = probs

    self.minval = np.finfo(np.float).tiny

    # ops needed for the class
    self.const = P.ScalarToArray()
    self.dtypeop = P.DType()
    self.fill = P.Fill()
    self.floor = P.Floor()
    self.issubclass = P.IsSubClass()
    self.less = P.Less()
    self.log = P.Log()
    self.pow = P.Pow()
    self.select = P.Select()
    self.shape = P.Shape()
    self.sq = P.Square()
    self.sqrt = P.Sqrt()
    self.uniform = P.UniformReal(seed=seed)
def __init__(self, probs=None, seed=None, dtype=mstype.int32, name="Categorical"): param = dict(locals()) param['param_dict'] = {'probs': probs} valid_dtype = mstype.uint_type + mstype.int_type + mstype.float_type Validator.check_type_name("dtype", dtype, valid_dtype, type(self).__name__) super(Categorical, self).__init__(seed, dtype, name, param) self._probs = self._add_parameter(probs, 'probs') if self.probs is not None: check_rank(self.probs) check_prob(self.probs) check_sum_equal_one(probs) # update is_scalar_batch and broadcast_shape # drop one dimension if self.probs.shape[:-1] == (): self._is_scalar_batch = True self._broadcast_shape = self._broadcast_shape[:-1] self.argmax = P.ArgMaxWithValue(axis=-1) self.broadcast = broadcast_to self.cast = P.Cast() self.clip_by_value = C.clip_by_value self.concat = P.Concat(-1) self.cumsum = P.CumSum() self.dtypeop = P.DType() self.exp = exp_generic self.expand_dim = P.ExpandDims() self.fill = P.Fill() self.gather = P.GatherNd() self.greater = P.Greater() self.issubclass = P.IsSubClass() self.less = P.Less() self.log = log_generic self.log_softmax = P.LogSoftmax() self.logicor = P.LogicalOr() self.logicand = P.LogicalAnd() self.multinomial = P.Multinomial(seed=self.seed) self.reshape = P.Reshape() self.reduce_sum = P.ReduceSum(keep_dims=True) self.select = P.Select() self.shape = P.Shape() self.softmax = P.Softmax() self.squeeze = P.Squeeze() self.squeeze_first_axis = P.Squeeze(0) self.squeeze_last_axis = P.Squeeze(-1) self.square = P.Square() self.transpose = P.Transpose() self.is_nan = P.IsNan() self.index_type = mstype.int32 self.nan = np.nan
def exp_generic(input_x):
    """
    Exp op on Ascend doesn't support int types.
    Fix this by casting the input to float32.
    """
    exp = P.Exp()
    cast = P.Cast()
    dtype = P.DType()
    checktype = P.IsSubClass()

    if not checktype(dtype(input_x), mstype.float_):
        input_x = cast(input_x, mstype.float32)
    return exp(input_x)
def exp_by_step(input_x):
    """
    Exp op on Ascend doesn't support int types.
    Fix this by casting the input to float32.
    """
    exp = P.Exp()
    cast = P.Cast()
    dtype = P.DType()
    checktype = P.IsSubClass()

    if checktype(dtype(input_x), mstype.int_):
        input_x = cast(input_x, mstype.float32)
    elif checktype(dtype(input_x), mstype.float_):
        pass
    else:
        return None
    return exp(input_x)
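# A small illustrative sketch (added, not original): both exp helpers above work around
# the Ascend Exp kernel rejecting integer inputs. exp_generic casts any non-float input
# to float32, while exp_by_step handles only int and float dtypes and returns None
# otherwise. Assumes the same module-level imports as the surrounding snippets.
from mindspore import Tensor

ix = Tensor(np.array([0, 1, 2]).astype(np.int32))
print(exp_generic(ix))   # int32 input is cast to float32 first: [1.0, 2.718..., 7.389...]
print(exp_by_step(ix))   # same result via the explicit int branch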
def __init__(self,
             is_constant_jacobian=False,
             is_injective=True,
             name=None,
             dtype=None,
             param=None):
    """
    Constructor of Bijector class.
    """
    super(Bijector, self).__init__()
    validator.check_value_type('name', name, [str], type(self).__name__)
    validator.check_value_type('is_constant_jacobian', is_constant_jacobian, [bool], name)
    validator.check_value_type('is_injective', is_injective, [bool], name)
    if dtype is not None:
        validator.check_type_name("dtype", dtype, mstype.float_type, type(self).__name__)
    self._name = name
    self._dtype = dtype
    self._parameters = {}

    # parsing parameters
    for k in param.keys():
        if k == 'param':
            continue
        if not (k == 'self' or k.startswith('_')):
            self._parameters[k] = param[k]

    # if no bijector is used as an argument during initialization
    if 'bijector' not in param.keys():
        self._batch_shape = self._calc_batch_shape()
        self._is_scalar_batch = self._check_is_scalar_batch()

    self._is_constant_jacobian = is_constant_jacobian
    self._is_injective = is_injective
    self.context_mode = context.get_context('mode')
    self.checktensor = CheckTensor()

    # ops needed for the base class
    self.cast_base = P.Cast()
    self.dtype_base = P.DType()
    self.shape_base = P.Shape()
    self.fill_base = P.Fill()
    self.sametypeshape_base = P.SameTypeShape()
    self.issubclass_base = P.IsSubClass()
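# An illustrative sketch of the parameter-parsing idiom above (added for clarity, not part
# of the source). dict(locals()) taken at the top of a constructor captures every argument
# by name, including 'self'; the loop in Bijector.__init__ then keeps only the user-facing
# entries. The hypothetical helper below mimics that filtering on a plain dict.
def _parse_parameters(param):
    # mirrors the loop in Bijector.__init__: drop 'param', 'self', and private '_*' keys
    return {k: v for k, v in param.items()
            if k not in ('param', 'self') and not k.startswith('_')}

example = {'self': object(), 'scale': 2.0, 'name': 'ScalarAffine', '_internal': 1, 'param': {}}
print(_parse_parameters(example))  # {'scale': 2.0, 'name': 'ScalarAffine'}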
def __init__(self, probs=None, seed=None, dtype=mstype.int32, name="Geometric"): """ Constructor of Geometric distribution. """ param = dict(locals()) valid_dtype = mstype.int_type + mstype.uint_type + mstype.float_type check_type(dtype, valid_dtype, type(self).__name__) super(Geometric, self).__init__(seed, dtype, name, param) self.parameter_type = set_param_type({'probs1': probs}, mstype.float32) if probs is not None: self._probs = cast_to_tensor(probs, self.parameter_type) check_prob(self._probs) else: self._probs = probs self.default_parameters = [self.probs] self.parameter_names = ['probs1'] self.minval = np.finfo(np.float).tiny # ops needed for the class self.exp = exp_generic self.log = log_generic self.squeeze = P.Squeeze(0) self.cast = P.Cast() self.const = P.ScalarToArray() self.dtypeop = P.DType() self.fill = P.Fill() self.floor = P.Floor() self.issubclass = P.IsSubClass() self.less = P.Less() self.pow = P.Pow() self.select = P.Select() self.shape = P.Shape() self.sq = P.Square() self.sqrt = P.Sqrt() self.uniform = C.uniform
def __init__(self, probs=None, seed=None, dtype=mstype.int32, name="Geometric"): """ Constructor of Geometric distribution. """ param = dict(locals()) param['param_dict'] = {'probs': probs} valid_dtype = mstype.int_type + mstype.uint_type + mstype.float_type Validator.check_type_name("dtype", dtype, valid_dtype, type(self).__name__) super(Geometric, self).__init__(seed, dtype, name, param) self._probs = self._add_parameter(probs, 'probs') if self._probs is not None: check_prob(self.probs) self.minval = np.finfo(np.float).tiny # ops needed for the class self.exp = exp_generic self.log = log_generic self.squeeze = P.Squeeze(0) self.cast = P.Cast() self.const = P.ScalarToArray() self.dtypeop = P.DType() self.fill = P.Fill() self.floor = P.Floor() self.issubclass = P.IsSubClass() self.less = P.Less() self.pow = P.Pow() self.select = P.Select() self.shape = P.Shape() self.sq = P.Square() self.uniform = C.uniform
    # shape of x and y not match
    ('SameTypeShape3', {
        'block': (P.SameTypeShape(), {
            'exception': ValueError,
            'error_keywords': ['SameTypeShape']
        }),
        'desc_inputs': [
            Tensor(np.ones([3, 4]).astype(np.float32)),
            Tensor(np.ones([3, 3]).astype(np.float32))
        ],
        'skip': ['backward']
    }),
    # sub_type is None
    ('IsSubClass0', {
        'block': (P.IsSubClass(), {
            'exception': TypeError,
            'error_keywords': ['IsSubClass']
        }),
        'desc_inputs': [None, mstype.number],
        'skip': ['backward']
    }),
    # type_ is None
    ('IsSubClass1', {
        'block': (P.IsSubClass(), {
            'exception': TypeError,
            'error_keywords': ['IsSubClass']
        }),
        'desc_inputs': [mstype.number, None],
        'skip': ['backward']
    }),
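# A small illustrative sketch (added, not part of the test cases above): the normal,
# non-error behaviour of P.IsSubClass, which the distribution helpers earlier in this
# section use as `checktype(dtype(input_x), mstype.float_)`. Assumes imports matching the
# test file: mindspore.ops.operations as P and mindspore.common.dtype as mstype.
issubclass_op = P.IsSubClass()
print(issubclass_op(mstype.int32, mstype.number))   # True: int32 is a number type
print(issubclass_op(mstype.int32, mstype.float_))   # False: int32 is not a float type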
        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32)), 5.0],
        'skip': ['backward']}),
    # type of x and y not match
    ('SameTypeShape2', {
        'block': (P.SameTypeShape(), {'exception': TypeError, 'error_keywords': ['SameTypeShape']}),
        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32)),
                        Tensor(np.ones([3, 4]).astype(np.int32))],
        'skip': ['backward']}),
    # shape of x and y not match
    ('SameTypeShape3', {
        'block': (P.SameTypeShape(), {'exception': ValueError, 'error_keywords': ['SameTypeShape']}),
        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32)),
                        Tensor(np.ones([3, 3]).astype(np.float32))],
        'skip': ['backward']}),
    # sub_type is None
    ('IsSubClass0', {
        'block': (P.IsSubClass(), {'exception': TypeError, 'error_keywords': ['IsSubClass']}),
        'desc_inputs': [None, mstype.number],
        'skip': ['backward']}),
    # type_ is None
    ('IsSubClass1', {
        'block': (P.IsSubClass(), {'exception': TypeError, 'error_keywords': ['IsSubClass']}),
        'desc_inputs': [mstype.number, None],
        'skip': ['backward']}),
    # inst is var
    ('IsInstance0', {
        'block': (P.IsInstance(), {'exception': ValueError, 'error_keywords': ['IsInstance']}),
        'desc_inputs': [5.0, mstype.number],
        'skip': ['backward']}),
    # t is not mstype.Type
    ('IsInstance1', {