def __init__(self, rate=None, seed=None, dtype=mstype.float32, name="Poisson"):
    """
    Constructor of Poisson.

    Args:
        rate: rate parameter of the distribution; when given it is checked
            to be strictly positive.
        seed: seed forwarded to the base distribution class.
        dtype: output dtype; integer, unsigned-integer and floating-point
            types are all accepted.
        name: name of the distribution instance.
    """
    # Snapshot the constructor arguments before any other local is created,
    # so the base class receives exactly the caller-supplied values.
    param = dict(locals())
    param['param_dict'] = {'rate': rate}

    # Integer, unsigned and floating dtypes are all legal for Poisson.
    Validator.check_type_name(
        "dtype", dtype,
        mstype.int_type + mstype.uint_type + mstype.float_type,
        type(self).__name__)
    super(Poisson, self).__init__(seed, dtype, name, param)

    # Register 'rate' as a distribution parameter; it may be None and then
    # must be supplied per-call instead.
    self._rate = self._add_parameter(rate, 'rate')
    if self.rate is not None:
        check_greater_zero(self.rate, 'rate')

    # Operators needed by the distribution's methods.
    self.exp = exp_generic
    self.log = log_generic
    self.squeeze = P.Squeeze(0)
    self.cast = P.Cast()
    self.floor = P.Floor()
    self.dtypeop = P.DType()
    self.shape = P.Shape()
    self.fill = P.Fill()
    self.less = P.Less()
    self.equal = P.Equal()
    self.select = P.Select()
    self.lgamma = nn.LGamma()
    self.igamma = nn.IGamma()
    self.poisson = C.poisson
def __init__(self, loc, scale, seed=0, dtype=mstype.float32, name="Gumbel"):
    """
    Constructor of Gumbel distribution.

    Args:
        loc: location parameter of the distribution.
        scale: scale parameter of the distribution.
        seed: seed forwarded to the transformed-distribution base class.
        dtype: output dtype; only floating-point types are accepted.
        name: name of the distribution instance.
    """
    # Only floating-point dtypes are legal for Gumbel.
    Validator.check_type_name("dtype", dtype, mstype.float_type, type(self).__name__)

    # Gumbel is realized as a transformed distribution: the inverted
    # Gumbel CDF bijector applied to a Uniform(0, 1) base distribution.
    cdf_bijector = msb.GumbelCDF(loc, scale)
    super(Gumbel, self).__init__(
        distribution=msd.Uniform(0.0, 1.0, dtype=dtype),
        bijector=msb.Invert(cdf_bijector),
        seed=seed, name=name)

    # Overwrite default_parameters and parameter_names inherited from the
    # base class, then register loc/scale as this distribution's parameters.
    self._reset_parameters()
    self._loc = self._add_parameter(loc, 'loc')
    self._scale = self._add_parameter(scale, 'scale')
    self._gumbel_bijector = cdf_bijector

    # Operators needed by the distribution's methods.
    self.cast = P.Cast()
    self.const = P.ScalarToArray()
    self.exp = exp_generic
    self.expm1 = expm1_generic
    self.fill = P.Fill()
    self.lgamma = nn.LGamma()
    self.log = log_generic
    self.shape = P.Shape()
    self.sqrt = P.Sqrt()
def __init__(self, concentration=None, rate=None, seed=None, dtype=mstype.float32, name="Gamma"):
    """
    Constructor of Gamma.

    Args:
        concentration: concentration (alpha) parameter; when given it is
            checked to be strictly positive. Must not be a Python scalar.
        rate: rate (beta) parameter; when given it is checked to be
            strictly positive. Must not be a Python scalar.
        seed: seed forwarded to the base distribution class.
        dtype: output dtype; only floating-point types are accepted.
        name: name of the distribution instance.

    Raises:
        TypeError: if `concentration` or `rate` is a Python int/float scalar.
    """
    # Snapshot the constructor arguments before any other local is created,
    # so the base class receives exactly the caller-supplied values.
    param = dict(locals())
    param['param_dict'] = {'concentration': concentration, 'rate': rate}

    # Only floating-point dtypes are legal for Gamma.
    Validator.check_type_name("dtype", dtype, mstype.float_type, type(self).__name__)

    # Some of the operators used below cannot accept scalar input,
    # so reject Python scalars up front.
    if isinstance(concentration, (int, float)):
        raise TypeError("Input concentration can't be scalar")
    if isinstance(rate, (int, float)):
        raise TypeError("Input rate can't be scalar")

    super(Gamma, self).__init__(seed, dtype, name, param)

    # Register the two distribution parameters; each may be None and then
    # must be supplied per-call instead.
    self._concentration = self._add_parameter(concentration, 'concentration')
    self._rate = self._add_parameter(rate, 'rate')
    for value, label in ((self._concentration, "concentration"),
                         (self._rate, "rate")):
        if value is not None:
            check_greater_zero(value, label)

    # Operators needed by the distribution's methods.
    self.log = log_generic
    self.square = P.Square()
    self.sqrt = P.Sqrt()
    self.squeeze = P.Squeeze(0)
    self.cast = P.Cast()
    self.dtypeop = P.DType()
    self.fill = P.Fill()
    self.shape = P.Shape()
    self.select = P.Select()
    self.greater = P.Greater()
    self.lgamma = nn.LGamma()
    self.digamma = nn.DiGamma()
    self.igamma = nn.IGamma()
('UnfoldGrad', { 'block': GradWrapUnfold(UnfoldNetValid()), 'desc_inputs': [Tensor(np.ones([1, 1, 3, 3], np.float32))], 'desc_bprop': [Tensor(np.ones([1, 4, 2, 2], np.float32))], 'skip': ['backward']}), ('LogSigmoid', { 'block': nn.LogSigmoid(), 'desc_inputs': [Tensor(np.array([1, 2, 3, 4]).astype(np.float32))], 'desc_bprop': [Tensor(np.array([1, 2, 3, 4]).astype(np.float32))], 'skip': ['backward']}), ('ReduceLogSumExp', { 'block': nn.ReduceLogSumExp((0,), False), 'desc_inputs': [Tensor(np.array([3, 4, 5, 6]).astype(np.float32))], 'skip': ['backward']}), ('LGamma', { 'block': nn.LGamma(), 'desc_inputs': [Tensor(np.array([3, 4, 5, 6]).astype(np.float32))], 'skip': ['backward']}), ('IGamma', { 'block': nn.IGamma(), 'desc_inputs': [Tensor(np.array([3, 4, 5, 6]).astype(np.float32)), Tensor(np.array([3, 4, 5, 6]).astype(np.float32))], 'skip': ['backward']}), ('DiGamma', { 'block': nn.DiGamma(), 'desc_inputs': [Tensor(np.array([3, 4, 5, 6]).astype(np.float32))], 'skip': ['backward']}), ('LBeta', { 'block': nn.LBeta(), 'desc_inputs': [Tensor(np.array([3, 4, 5, 6]).astype(np.float32)), Tensor(np.array([3, 4, 5, 6]).astype(np.float32))],