Example #1
File: gumbel.py  Project: lynex/mindspore
    def __init__(self,
                 loc,
                 scale,
                 seed=0,
                 dtype=mstype.float32,
                 name="Gumbel"):
        """
        Constructor of Gumbel distribution.
        """
        valid_dtype = mstype.float_type
        Validator.check_type_name("dtype", dtype, valid_dtype, type(self).__name__)
        gumbel_cdf = msb.GumbelCDF(loc, scale)
        super(Gumbel, self).__init__(
            distribution=msd.Uniform(0.0, 1.0, dtype=dtype),
            bijector=msb.Invert(gumbel_cdf),
            seed=seed, name=name)

        # overwrite default_parameters and parameter_names
        self._reset_parameters()
        self._loc = self._add_parameter(loc, 'loc')
        self._scale = self._add_parameter(scale, 'scale')
        self._gumbel_bijector = gumbel_cdf

        # ops needed for the class
        self.cast = P.Cast()
        self.const = P.ScalarToArray()
        self.exp = exp_generic
        self.expm1 = expm1_generic
        self.fill = P.Fill()
        self.lgamma = nn.LGamma()
        self.log = log_generic
        self.shape = P.Shape()
        self.sqrt = P.Sqrt()
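
Example #1 builds Gumbel as a transformed distribution: a Uniform(0, 1) base pushed through the inverse of the GumbelCDF bijector, so sampling reduces to inverting the Gumbel CDF F(x) = exp(-exp(-(x - loc)/scale)). A minimal NumPy-only sketch of that inverse-CDF idea (the helper name sample_gumbel is ours, not part of MindSpore):

    import numpy as np

    def sample_gumbel(loc, scale, size=None, rng=None):
        # Draw u ~ Uniform(0, 1) and push it through the inverse Gumbel CDF.
        rng = np.random.default_rng() if rng is None else rng
        u = rng.uniform(0.0, 1.0, size)
        return loc - scale * np.log(-np.log(u))

    samples = sample_gumbel(loc=1.0, scale=2.0, size=5)
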
Example #2
    def __init__(self,
                 mean=None,
                 sd=None,
                 seed=0,
                 dtype=mstype.float32,
                 name="Normal"):
        """
        Constructor of normal distribution.
        """
        param = dict(locals())
        super(Normal, self).__init__(dtype, name, param)
        if mean is not None and sd is not None:
            self._mean_value = convert_to_batch(mean, self._broadcast_shape, dtype)
            self._sd_value = convert_to_batch(sd, self._broadcast_shape, dtype)
            check_greater_equal_zero(self._sd_value, "Standard deviation")
        else:
            self._mean_value = mean
            self._sd_value = sd
        self.seed = seed

        # ops needed for the class
        self.const = P.ScalarToArray()
        self.erf = P.Erf()
        self.exp = P.Exp()
        self.expm1 = self._expm1_by_step
        self.fill = P.Fill()
        self.log = P.Log()
        self.shape = P.Shape()
        self.sq = P.Square()
        self.sqrt = P.Sqrt()
        self.zeroslike = P.ZerosLike()
Example #3
    def __init__(self,
                 probs=None,
                 seed=0,
                 dtype=mstype.int32,
                 name="Bernoulli"):
        """
        Constructor of Bernoulli distribution.
        """
        param = dict(locals())
        super(Bernoulli, self).__init__(dtype, name, param)
        if probs is not None:
            self._probs = cast_to_tensor(probs)
            check_prob(self._probs)
        else:
            self._probs = probs

        # ops needed for the class
        self.log = P.Log()
        self.add = P.TensorAdd()
        self.mul = P.Mul()
        self.sqrt = P.Sqrt()
        self.realdiv = P.RealDiv()
        self.shape = P.Shape()
        self.const = P.ScalarToArray()
        self.less = P.Less()
        self.cast = P.Cast()
        self.normal = P.Normal(seed=seed)
        self.erf = P.Erf()
        self.sqrt = P.Sqrt()
Example #4
    def __init__(self,
                 probs=None,
                 seed=None,
                 dtype=mstype.int32,
                 name="Bernoulli"):
        """
        Constructor of Bernoulli.
        """
        param = dict(locals())
        param['param_dict'] = {'probs': probs}
        valid_dtype = mstype.int_type + mstype.uint_type + mstype.float_type
        Validator.check_type_name("dtype", dtype, valid_dtype,
                                  type(self).__name__)
        super(Bernoulli, self).__init__(seed, dtype, name, param)

        self._probs = self._add_parameter(probs, 'probs')
        if self._probs is not None:
            check_prob(self.probs)

        # ops needed for the class
        self.exp = exp_generic
        self.log = log_generic
        self.squeeze = P.Squeeze(0)
        self.cast = P.Cast()
        self.const = P.ScalarToArray()
        self.floor = P.Floor()
        self.fill = P.Fill()
        self.less = P.Less()
        self.shape = P.Shape()
        self.select = P.Select()
        self.uniform = C.uniform
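
The uniform, less, and cast ops registered above are the usual ingredients for Bernoulli sampling: draw u ~ Uniform(0, 1) and emit 1 where u < probs. A hedged NumPy sketch of that pattern (sample_bernoulli is an illustrative name, not MindSpore API):

    import numpy as np

    def sample_bernoulli(probs, size=None, rng=None):
        rng = np.random.default_rng() if rng is None else rng
        u = rng.uniform(0.0, 1.0, size)
        # Indicator of u < probs, cast to the distribution's integer dtype.
        return (u < probs).astype(np.int32)
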
Example #5
    def __init__(self):
        super(IGamma, self).__init__()
        # const numbers
        # If more data types are supported, this float max value need to be selected.
        self.log_maxfloat32 = Tensor(np.log(np.finfo(np.float32).max),
                                     mstype.float32)

        # operations
        self.logicaland = P.LogicalAnd()
        self.logicalor = P.LogicalOr()
        self.logicalnot = P.LogicalNot()
        self.equal = P.Equal()
        self.greater = P.Greater()
        self.less = P.Less()
        self.neg = P.Neg()
        self.log = P.Log()
        self.exp = P.Exp()
        self.select = P.Select()
        self.zeroslike = P.ZerosLike()
        self.fill = P.Fill()
        self.shape = P.Shape()
        self.dtype = P.DType()
        self.lgamma = LGamma()
        self.const = P.ScalarToArray()
        self.cast = P.Cast()
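
For reference, the quantity an IGamma cell like this computes is the lower regularized incomplete gamma function P(a, x); SciPy exposes the same function as scipy.special.gammainc, which is handy as an independent check. The log_maxfloat32 constant above appears to serve as an internal overflow guard and has no SciPy counterpart.

    # Hedged cross-check using SciPy (not a MindSpore dependency):
    from scipy.special import gammainc

    # Lower regularized incomplete gamma P(a, x).
    print(gammainc(2.0, 3.0))  # ~= 0.8009
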
Example #6
    def __init__(self,
                 mean=None,
                 sd=None,
                 seed=None,
                 dtype=mstype.float32,
                 name="Normal"):
        """
        Constructor of Normal.
        """
        param = dict(locals())
        param['param_dict'] = {'mean': mean, 'sd': sd}
        valid_dtype = mstype.float_type
        Validator.check_type(type(self).__name__, dtype, valid_dtype)
        super(Normal, self).__init__(seed, dtype, name, param)

        self._mean_value = self._add_parameter(mean, 'mean')
        self._sd_value = self._add_parameter(sd, 'sd')
        if self._sd_value is not None:
            check_greater_zero(self._sd_value, "Standard deviation")

        # ops needed for the class
        self.exp = exp_generic
        self.expm1 = expm1_generic
        self.log = log_generic
        self.erf = P.Erf()
        self.squeeze = P.Squeeze(0)
        self.cast = P.Cast()
        self.const = P.ScalarToArray()
        self.shape = P.Shape()
        self.sq = P.Square()
        self.sqrt = P.Sqrt()
Example #7
    def __init__(self,
                 probs=None,
                 seed=0,
                 dtype=mstype.int32,
                 name="Bernoulli"):
        """
        Constructor of Bernoulli distribution.
        """
        param = dict(locals())
        super(Bernoulli, self).__init__(dtype, name, param)
        if probs is not None:
            self._probs = cast_to_tensor(probs, dtype=mstype.float32)
            check_prob(self.probs)
        else:
            self._probs = probs
        self.seed = seed

        # ops needed for the class
        self.cast = P.Cast()
        self.const = P.ScalarToArray()
        self.dtypeop = P.DType()
        self.erf = P.Erf()
        self.fill = P.Fill()
        self.log = P.Log()
        self.less = P.Less()
        self.shape = P.Shape()
        self.select = P.Select()
        self.sq = P.Square()
        self.sqrt = P.Sqrt()
        self.uniform = P.UniformReal(seed=seed)
Example #8
    def __init__(self,
                 loc=None,
                 scale=None,
                 seed=0,
                 dtype=mstype.float32,
                 name="LogNormal"):
        """
        Constructor of LogNormal distribution.
        """
        super(LogNormal, self).__init__(distribution=msd.Normal(loc, scale, dtype=dtype),
                                        bijector=msb.Exp(),
                                        seed=seed, name=name)

        self.log_2pi = np.log(2 * np.pi)

        # ops needed for the class
        self.exp = exp_generic
        self.expm1 = expm1_generic
        self.log = log_generic
        self.const = P.ScalarToArray()
        self.erf = P.Erf()
        self.fill = P.Fill()
        self.shape = P.Shape()
        self.sq = P.Square()
        self.sqrt = P.Sqrt()
        self.zeroslike = P.ZerosLike()
Example #9
    def __init__(self,
                 mean=None,
                 sd=None,
                 seed=0,
                 dtype=mstype.float32,
                 name="Normal"):
        """
        Constructor of normal distribution.
        """
        param = dict(locals())
        super(Normal, self).__init__(dtype, name, param)
        if mean is not None and sd is not None:
            self._mean_value = convert_to_batch(mean, self._broadcast_shape,
                                                dtype)
            self._sd_value = convert_to_batch(sd, self._broadcast_shape, dtype)
            check_greater_equal_zero(self._sd_value, "Standard deviation")
        else:
            self._mean_value = mean
            self._sd_value = sd

        # ops needed for the class
        self.exp = P.Exp()
        self.add = P.TensorAdd()
        self.mul = P.Mul()
        self.sq = P.Square()
        self.log = P.Log()
        self.sqrt = P.Sqrt()
        self.realdiv = P.RealDiv()
        self.expm1 = P.Expm1() if get_context(
            'device_target') == 'Ascend' else self._expm1_by_step
        self.normal = P.Normal(seed=seed)
        self.shape = P.Shape()
        self.zeroslike = P.ZerosLike()
        self.const = P.ScalarToArray()
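
The device-dependent expm1 above (fused P.Expm1 on Ascend, a step-by-step fallback elsewhere) matters numerically: for small x, computing exp(x) - 1 directly loses most of its significant digits, while expm1 keeps them. A quick NumPy illustration:

    import numpy as np

    x = 1e-12
    print(np.exp(x) - 1.0)  # catastrophic cancellation, only a few digits correct
    print(np.expm1(x))      # accurate to full precision
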
Example #10
    def __init__(self,
                 low=None,
                 high=None,
                 seed=0,
                 dtype=mstype.float32,
                 name="Uniform"):
        """
        Constructor of Uniform distribution.
        """
        param = dict(locals())
        super(Uniform, self).__init__(dtype, name, param)
        if low is not None and high is not None:
            self._low = convert_to_batch(low, self._broadcast_shape, dtype)
            self._high = convert_to_batch(high, self._broadcast_shape, dtype)
            check_greater(self.low, self.high, "low value", "high value")
        else:
            self._low = low
            self._high = high

        # ops needed for the class
        self.const = P.ScalarToArray()
        self.dtypeop = P.DType()
        self.exp = P.Exp()
        self.fill = P.Fill()
        self.less = P.Less()
        self.lessequal = P.LessEqual()
        self.log = P.Log()
        self.logicaland = P.LogicalAnd()
        self.select = P.Select()
        self.shape = P.Shape()
        self.sq = P.Square()
        self.sqrt = P.Sqrt()
        self.uniform = P.UniformReal(seed=seed)
        self.zeroslike = P.ZerosLike()
Example #11
    def __init__(self,
                 rate=None,
                 seed=None,
                 dtype=mstype.float32,
                 name="Exponential"):
        """
        Constructor of Exponential.
        """
        param = dict(locals())
        param['param_dict'] = {'rate': rate}
        valid_dtype = mstype.float_type
        Validator.check_type_name("dtype", dtype, valid_dtype,
                                  type(self).__name__)
        super(Exponential, self).__init__(seed, dtype, name, param)

        self._rate = self._add_parameter(rate, 'rate')
        if self.rate is not None:
            check_greater_zero(self.rate, 'rate')

        self.minval = np.finfo(np.float).tiny

        # ops needed for the class
        self.exp = exp_generic
        self.log = log_generic
        self.squeeze = P.Squeeze(0)
        self.cast = P.Cast()
        self.const = P.ScalarToArray()
        self.dtypeop = P.DType()
        self.fill = P.Fill()
        self.less = P.Less()
        self.select = P.Select()
        self.shape = P.Shape()
        self.uniform = C.uniform
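
The uniform draw, log, and the tiny minval above are the usual pieces of inverse-CDF sampling for an exponential: clamp u away from 0 so log(u) stays finite, then map it through the inverse CDF. A hedged NumPy sketch (sample_exponential is an illustrative name):

    import numpy as np

    def sample_exponential(rate, size=None, rng=None):
        rng = np.random.default_rng() if rng is None else rng
        minval = np.finfo(np.float64).tiny
        u = np.clip(rng.uniform(0.0, 1.0, size), minval, 1.0)
        # For U ~ Uniform(0, 1), -log(U) / rate is Exponential(rate).
        return -np.log(u) / rate
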
Example #12
    def __init__(self,
                 probs=None,
                 seed=0,
                 dtype=mstype.int32,
                 name="Geometric"):
        """
        Constructor of Geometric distribution.
        """
        param = dict(locals())
        super(Geometric, self).__init__(dtype, name, param)
        if probs is not None:
            self._probs = cast_to_tensor(probs, dtype=mstype.float32)
            check_prob(self._probs)
        else:
            self._probs = probs

        self.minval = np.finfo(np.float).tiny

        # ops needed for the class
        self.const = P.ScalarToArray()
        self.dtypeop = P.DType()
        self.fill = P.Fill()
        self.floor = P.Floor()
        self.issubclass = P.IsSubClass()
        self.less = P.Less()
        self.log = P.Log()
        self.pow = P.Pow()
        self.select = P.Select()
        self.shape = P.Shape()
        self.sq = P.Square()
        self.sqrt = P.Sqrt()
        self.uniform = P.UniformReal(seed=seed)
Example #13
    def __init__(self,
                 rate=None,
                 seed=0,
                 dtype=mstype.float32,
                 name="Exponential"):
        """
        Constructor of Exponential distribution.
        """
        param = dict(locals())
        super(Exponential, self).__init__(dtype, name, param)
        if rate is not None:
            self._rate = cast_to_tensor(rate, mstype.float32)
            check_greater_zero(self._rate, "rate")
        else:
            self._rate = rate

        self.minval = np.finfo(np.float).tiny

        # ops needed for the class
        self.const = P.ScalarToArray()
        self.dtypeop = P.DType()
        self.exp = P.Exp()
        self.fill = P.Fill()
        self.less = P.Less()
        self.log = P.Log()
        self.select = P.Select()
        self.shape = P.Shape()
        self.sqrt = P.Sqrt()
        self.sq = P.Square()
        self.uniform = P.UniformReal(seed=seed)
Example #14
    def __init__(self,
                 loc=None,
                 scale=None,
                 seed=0,
                 dtype=mstype.float32,
                 name="LogNormal"):
        """
        Constructor of LogNormal distribution.
        """
        super(LogNormal, self).__init__(distribution=msd.Normal(loc, scale, dtype=dtype),
                                        bijector=msb.Exp(),
                                        seed=seed, name=name)

        # overwrite default_parameters and parameter_names
        self._reset_parameters()
        self._loc = self._add_parameter(loc, 'loc')
        self._scale = self._add_parameter(scale, 'scale')

        self.log_2pi = np.log(2 * np.pi)

        # ops needed for the class
        self.dtypeop = P.DType()
        self.exp = exp_generic
        self.expm1 = P.Expm1()
        self.log = log_generic
        self.const = P.ScalarToArray()
        self.erf = P.Erf()
        self.fill = P.Fill()
        self.greater = P.Greater()
        self.select = P.Select()
        self.shape = P.Shape()
        self.sq = P.Square()
        self.sqrt = P.Sqrt()
        self.cast = P.Cast()
        self.squeeze = P.Squeeze(0)
Example #15
    def __init__(self,
                 probs=None,
                 seed=0,
                 dtype=mstype.int32,
                 name="Bernoulli"):
        """
        Constructor of Bernoulli distribution.
        """
        param = dict(locals())
        valid_dtype = mstype.int_type + mstype.uint_type + mstype.float_type
        check_type(dtype, valid_dtype, type(self).__name__)
        super(Bernoulli, self).__init__(seed, dtype, name, param)
        self.parameter_type = mstype.float32
        if probs is not None:
            self._probs = cast_to_tensor(probs, mstype.float32)
            check_prob(self.probs)
        else:
            self._probs = probs

        # ops needed for the class
        self.exp = exp_generic
        self.log = log_generic
        self.erf = erf_generic
        self.squeeze = P.Squeeze(0)
        self.cast = P.Cast()
        self.const = P.ScalarToArray()
        self.dtypeop = P.DType()
        self.floor = P.Floor()
        self.fill = P.Fill()
        self.less = P.Less()
        self.shape = P.Shape()
        self.select = P.Select()
        self.sq = P.Square()
        self.sqrt = P.Sqrt()
        self.uniform = C.uniform
Example #16
File: cvae.py  Project: tilaba/mindspore
 def __init__(self, encoder, decoder, hidden_size, latent_size,
              num_classes):
     super(ConditionalVAE, self).__init__()
     self.encoder = encoder
     self.decoder = decoder
     if (not isinstance(encoder, Cell)) or (not isinstance(decoder, Cell)):
         raise TypeError('The encoder and decoder should be Cell type.')
     self.hidden_size = check_int_positive(hidden_size)
     self.latent_size = check_int_positive(latent_size)
     if hidden_size < latent_size:
         raise ValueError(
             'The latent_size should be less than or equal to the hidden_size.'
         )
     self.num_classes = check_int_positive(num_classes)
     self.normal = C.normal
     self.exp = P.Exp()
     self.reshape = P.Reshape()
     self.shape = P.Shape()
     self.concat = P.Concat(axis=1)
     self.to_tensor = P.ScalarToArray()
     self.one_hot = OneHot(depth=num_classes)
     self.dense1 = Dense(self.hidden_size, self.latent_size)
     self.dense2 = Dense(self.hidden_size, self.latent_size)
     self.dense3 = Dense(self.latent_size + self.num_classes,
                         self.hidden_size)
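
In this conditional VAE, dense1 and dense2 map the encoder output to the latent mean and (by the usual convention, which we assume here) log-variance, and C.normal, P.Exp, and P.ScalarToArray supply the reparameterized sample. A hedged NumPy sketch of that reparameterization step:

    import numpy as np

    def reparameterize(mu, log_var, rng=None):
        rng = np.random.default_rng() if rng is None else rng
        std = np.exp(0.5 * log_var)          # log-variance -> standard deviation
        eps = rng.standard_normal(mu.shape)  # noise from a fixed N(0, 1)
        return mu + eps * std                # differentiable sample of N(mu, std^2)
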
Example #17
    def __init__(self,
                 rate=None,
                 seed=0,
                 dtype=mstype.float32,
                 name="Exponential"):
        """
        Constructor of Exponential distribution.
        """
        param = dict(locals())
        valid_dtype = mstype.float_type
        check_type(dtype, valid_dtype, type(self).__name__)
        super(Exponential, self).__init__(seed, dtype, name, param)
        self.parameter_type = dtype
        if rate is not None:
            self._rate = cast_to_tensor(rate, self.parameter_type)
            check_greater_zero(self._rate, "rate")
        else:
            self._rate = rate

        self.minval = np.finfo(np.float).tiny

        # ops needed for the class
        self.exp = exp_generic
        self.log = log_generic
        self.squeeze = P.Squeeze(0)
        self.cast = P.Cast()
        self.const = P.ScalarToArray()
        self.dtypeop = P.DType()
        self.fill = P.Fill()
        self.less = P.Less()
        self.select = P.Select()
        self.shape = P.Shape()
        self.sqrt = P.Sqrt()
        self.sq = P.Square()
        self.uniform = C.uniform
Example #18
    def __init__(self,
                 mean=None,
                 sd=None,
                 seed=0,
                 dtype=mstype.float32,
                 name="Normal"):
        """
        Constructor of normal distribution.
        """
        param = dict(locals())
        valid_dtype = mstype.float_type
        check_type(dtype, valid_dtype, type(self).__name__)
        super(Normal, self).__init__(seed, dtype, name, param)
        self.parameter_type = dtype
        if mean is not None and sd is not None:
            self._mean_value = cast_to_tensor(mean, self.parameter_type)
            self._sd_value = cast_to_tensor(sd, self.parameter_type)
            check_greater_zero(self._sd_value, "Standard deviation")
        else:
            self._mean_value = mean
            self._sd_value = sd

        # ops needed for the class
        self.exp = exp_generic
        self.expm1 = expm1_generic
        self.log = log_generic
        self.erf = erf_generic
        self.squeeze = P.Squeeze(0)
        self.cast = P.Cast()
        self.const = P.ScalarToArray()
        self.fill = P.Fill()
        self.shape = P.Shape()
        self.sq = P.Square()
        self.sqrt = P.Sqrt()
        self.zeroslike = P.ZerosLike()
Example #19
 def __init__(self):
     super(VaeGan, self).__init__()
     self.E = Encoder()
     self.G = Decoder()
     self.D = Discriminator()
     self.dense = nn.Dense(20, 400)
     self.vae = VAE(self.E, self.G, 400, 20)
     self.shape = P.Shape()
     self.to_tensor = P.ScalarToArray()
Example #20
    def construct(self, rois, feat1, feat2, feat3, feat4):
        feats = (feat1, feat2, feat3, feat4)
        res = self.res_
        target_lvls = self._c_map_roi_levels(rois)
        for i in range(self.num_levels):
            mask = self.equal(target_lvls, P.ScalarToArray()(i))
            mask = P.Reshape()(mask, (-1, 1, 1, 1))
            roi_feats_t = self.roi_layers[i](feats[i], rois)
            mask = self.cast(P.Tile()(self.cast(mask, mstype.int32), (1, 256, self.out_size, self.out_size)),
                             mstype.bool_)
            res = self.select(mask, roi_feats_t, res)

        return res
Example #21
 def __init__(self, task):
     super(AleatoricLoss, self).__init__()
     self.task = task
     if self.task == 'classification':
         self.sum = P.ReduceSum()
         self.exp = P.Exp()
         self.normal = C.normal
         self.to_tensor = P.ScalarToArray()
         self.entropy = SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     else:
         self.mean = P.ReduceMean()
         self.exp = P.Exp()
         self.pow = P.Pow()
Example #22
 def __init__(self, encoder, decoder, hidden_size, latent_size):
     super(VAE, self).__init__()
     self.encoder = encoder
     self.decoder = decoder
     self.hidden_size = check_int_positive(hidden_size)
     self.latent_size = check_int_positive(latent_size)
     self.normal = C.normal
     self.exp = P.Exp()
     self.reshape = P.Reshape()
     self.shape = P.Shape()
     self.to_tensor = P.ScalarToArray()
     self.dense1 = Dense(self.hidden_size, self.latent_size)
     self.dense2 = Dense(self.hidden_size, self.latent_size)
     self.dense3 = Dense(self.latent_size, self.hidden_size)
Example #23
    def __init__(self,
                 low=None,
                 high=None,
                 seed=None,
                 dtype=mstype.float32,
                 name="Uniform"):
        """
        Constructor of Uniform distribution.
        """
        param = dict(locals())
        valid_dtype = mstype.float_type
        check_type(dtype, valid_dtype, type(self).__name__)
        super(Uniform, self).__init__(seed, dtype, name, param)
        self.parameter_type = set_param_type({
            'low': low,
            'high': high
        }, self.dtype)
        if low is not None and high is not None:
            self._low = cast_to_tensor(low, self.parameter_type)
            self._high = cast_to_tensor(high, self.parameter_type)
            check_greater(self.low, self.high, "low value", "high value")
        else:
            self._low = low if low is None else cast_to_tensor(
                low, self.parameter_type)
            self._high = high if high is None else cast_to_tensor(
                high, self.parameter_type)

        self.default_parameters = [self.low, self.high]
        self.parameter_names = ['low', 'high']

        # ops needed for the class
        self.exp = exp_generic
        self.log = log_generic
        self.squeeze = P.Squeeze(0)
        self.cast = P.Cast()
        self.const = P.ScalarToArray()
        self.dtypeop = P.DType()
        self.fill = P.Fill()
        self.less = P.Less()
        self.lessequal = P.LessEqual()
        self.logicaland = P.LogicalAnd()
        self.select = P.Select()
        self.shape = P.Shape()
        self.sq = P.Square()
        self.sqrt = P.Sqrt()
        self.zeroslike = P.ZerosLike()
        self.uniform = C.uniform

        self.sametypeshape = P.SameTypeShape()
Example #24
    def __init__(self,
                 loc=None,
                 scale=None,
                 seed=None,
                 dtype=mstype.float32,
                 name="Logistic"):
        """
        Constructor of Logistic.
        """
        param = dict(locals())
        param['param_dict'] = {'loc': loc, 'scale': scale}
        valid_dtype = mstype.float_type
        Validator.check_type_name("dtype", dtype, valid_dtype,
                                  type(self).__name__)
        super(Logistic, self).__init__(seed, dtype, name, param)

        self._loc = self._add_parameter(loc, 'loc')
        self._scale = self._add_parameter(scale, 'scale')
        if self._scale is not None:
            check_greater_zero(self._scale, "scale")

        # ops needed for the class
        self.cast = P.Cast()
        self.const = P.ScalarToArray()
        self.consttensor = P.ScalarToTensor()
        self.dtypeop = P.DType()
        self.exp = exp_generic
        self.expm1 = P.Expm1()
        self.fill = P.Fill()
        self.less = P.Less()
        self.log = log_generic
        self.log1p = P.Log1p()
        self.logicalor = P.LogicalOr()
        self.erf = P.Erf()
        self.greater = P.Greater()
        self.sigmoid = P.Sigmoid()
        self.squeeze = P.Squeeze(0)
        self.select = P.Select()
        self.shape = P.Shape()
        self.softplus = self._softplus
        self.sqrt = P.Sqrt()
        self.uniform = C.uniform

        self.threshold = np.log(np.finfo(np.float32).eps) + 1.
        self.tiny = np.finfo(np.float).tiny
        self.sd_const = np.pi / np.sqrt(3)
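
The threshold constant above, log(float32 eps) + 1, is the standard cutoff for a numerically stable softplus: below it softplus(x) is effectively exp(x), above -threshold it is effectively x, and only the middle range needs log1p(exp(x)). A hedged NumPy sketch of that pattern (the exact branching inside self._softplus may differ):

    import numpy as np

    def softplus(x):
        x = np.asarray(x, dtype=np.float64)
        threshold = np.log(np.finfo(np.float32).eps) + 1.0   # ~= -15.9
        mid = np.log1p(np.exp(np.minimum(x, -threshold)))    # middle branch, exp() kept in range
        small = np.exp(np.minimum(x, 0.0))                   # very negative x: softplus(x) ~= exp(x)
        out = np.where(x < threshold, small, mid)
        out = np.where(x > -threshold, x, out)               # large x: softplus(x) ~= x
        return out
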
Example #25
 def __init__(self, encoder, decoder, hidden_size, latent_size):
     super(VAE, self).__init__()
     self.encoder = encoder
     self.decoder = decoder
     if (not isinstance(encoder, Cell)) or (not isinstance(decoder, Cell)):
         raise TypeError('The encoder and decoder should be Cell type.')
     self.hidden_size = Validator.check_positive_int(hidden_size)
     self.latent_size = Validator.check_positive_int(latent_size)
     if hidden_size < latent_size:
         raise ValueError('The latent_size should be less than or equal to the hidden_size.')
     self.normal = C.normal
     self.exp = P.Exp()
     self.reshape = P.Reshape()
     self.shape = P.Shape()
     self.to_tensor = P.ScalarToArray()
     self.dense1 = Dense(self.hidden_size, self.latent_size)
     self.dense2 = Dense(self.hidden_size, self.latent_size)
     self.dense3 = Dense(self.latent_size, self.hidden_size)
Example #26
 def __init__(self, encoder, decoder, hidden_size, latent_size,
              num_classes):
     super(ConditionalVAE, self).__init__()
     self.encoder = encoder
     self.decoder = decoder
     self.hidden_size = check_int_positive(hidden_size)
     self.latent_size = check_int_positive(latent_size)
     self.num_classes = check_int_positive(num_classes)
     self.normal = C.normal
     self.exp = P.Exp()
     self.reshape = P.Reshape()
     self.shape = P.Shape()
     self.concat = P.Concat(axis=1)
     self.to_tensor = P.ScalarToArray()
     self.one_hot = OneHot(depth=num_classes)
     self.dense1 = Dense(self.hidden_size, self.latent_size)
     self.dense2 = Dense(self.hidden_size, self.latent_size)
     self.dense3 = Dense(self.latent_size + self.num_classes,
                         self.hidden_size)
Example #27
    def __init__(self,
                 mean=None,
                 sd=None,
                 seed=None,
                 dtype=mstype.float32,
                 name="Normal"):
        """
        Constructor of Normal.
        """
        param = dict(locals())
        valid_dtype = mstype.float_type
        check_type(dtype, valid_dtype, type(self).__name__)
        super(Normal, self).__init__(seed, dtype, name, param)
        self.parameter_type = set_param_type(
            {'mean': mean, 'sd': sd}, self.dtype)
        if mean is not None and sd is not None:
            self._mean_value = cast_to_tensor(mean, self.parameter_type)
            self._sd_value = cast_to_tensor(sd, self.parameter_type)
            check_greater_zero(self._sd_value, "Standard deviation")
        else:
            self._mean_value = mean if mean is None else cast_to_tensor(
                mean, self.parameter_type)
            self._sd_value = sd if sd is None else cast_to_tensor(
                sd, self.parameter_type)

        self.default_parameters = [self._mean_value, self._sd_value]
        self.parameter_names = ['mean', 'sd']

        # ops needed for the class
        self.exp = exp_generic
        self.expm1 = expm1_generic
        self.log = log_generic
        self.erf = P.Erf()
        self.squeeze = P.Squeeze(0)
        self.cast = P.Cast()
        self.const = P.ScalarToArray()
        self.fill = P.Fill()
        self.shape = P.Shape()
        self.sq = P.Square()
        self.sqrt = P.Sqrt()
        self.zeroslike = P.ZerosLike()
        self.dtypeop = P.DType()
        self.sametypeshape = P.SameTypeShape()
Example #28
    def __init__(self,
                 probs=None,
                 seed=None,
                 dtype=mstype.int32,
                 name="Geometric"):
        """
        Constructor of Geometric distribution.
        """
        param = dict(locals())
        valid_dtype = mstype.int_type + mstype.uint_type + mstype.float_type
        check_type(dtype, valid_dtype, type(self).__name__)
        super(Geometric, self).__init__(seed, dtype, name, param)
        self.parameter_type = set_param_type({'probs1': probs}, mstype.float32)
        if probs is not None:
            self._probs = cast_to_tensor(probs, self.parameter_type)
            check_prob(self._probs)
        else:
            self._probs = probs

        self.default_parameters = [self.probs]
        self.parameter_names = ['probs1']

        self.minval = np.finfo(np.float).tiny

        # ops needed for the class
        self.exp = exp_generic
        self.log = log_generic
        self.squeeze = P.Squeeze(0)
        self.cast = P.Cast()
        self.const = P.ScalarToArray()
        self.dtypeop = P.DType()
        self.fill = P.Fill()
        self.floor = P.Floor()
        self.issubclass = P.IsSubClass()
        self.less = P.Less()
        self.pow = P.Pow()
        self.select = P.Select()
        self.shape = P.Shape()
        self.sq = P.Square()
        self.sqrt = P.Sqrt()
        self.uniform = C.uniform
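
The floor, log, and uniform ops above are what inverse-transform sampling of a Geometric(p) (counting failures before the first success) needs: X = floor(log(U) / log(1 - p)) for U ~ Uniform(0, 1). A hedged NumPy sketch (sample_geometric is an illustrative name):

    import numpy as np

    def sample_geometric(p, size=None, rng=None):
        rng = np.random.default_rng() if rng is None else rng
        minval = np.finfo(np.float64).tiny
        u = np.clip(rng.uniform(0.0, 1.0, size), minval, 1.0)
        # floor(log(U) / log(1 - p)) is Geometric(p) on {0, 1, 2, ...}.
        return np.floor(np.log(u) / np.log1p(-p))
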
Example #29
    def __init__(self,
                 loc=None,
                 scale=None,
                 seed=None,
                 dtype=mstype.float32,
                 name="Cauchy"):
        """
        Constructor of Cauchy.
        """
        param = dict(locals())
        param['param_dict'] = {'loc': loc, 'scale': scale}
        valid_dtype = mstype.float_type
        Validator.check_type_name("dtype", dtype, valid_dtype,
                                  type(self).__name__)
        super(Cauchy, self).__init__(seed, dtype, name, param)

        self._loc = self._add_parameter(loc, 'loc')
        self._scale = self._add_parameter(scale, 'scale')
        if self._scale is not None:
            check_greater_zero(self._scale, "scale")

        # ops needed for the class
        self.atan = P.Atan()
        self.cast = P.Cast()
        self.const = P.ScalarToArray()
        self.dtypeop = P.DType()
        self.exp = exp_generic
        self.fill = P.Fill()
        self.less = P.Less()
        self.log = log_generic
        self.log1p = log1p_generic
        self.squeeze = P.Squeeze(0)
        self.shape = P.Shape()
        self.sq = P.Square()
        self.sqrt = P.Sqrt()
        self.tan = P.Tan()
        self.uniform = C.uniform

        self.entropy_const = np.log(4 * np.pi)
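
The atan and tan ops registered above are what the Cauchy CDF and its inverse need, and entropy_const = log(4*pi) is the constant term of the Cauchy entropy log(4*pi*scale). A hedged NumPy sketch of those formulas:

    import numpy as np

    def cauchy_cdf(x, loc, scale):
        return np.arctan((x - loc) / scale) / np.pi + 0.5

    def cauchy_quantile(p, loc, scale):
        # Inverse CDF; with p ~ Uniform(0, 1) this also serves as a sampler.
        return loc + scale * np.tan(np.pi * (p - 0.5))

    def cauchy_entropy(scale):
        return np.log(4 * np.pi * scale)
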
Example #30
    def __init__(self,
                 probs=None,
                 seed=None,
                 dtype=mstype.int32,
                 name="Geometric"):
        """
        Constructor of Geometric distribution.
        """
        param = dict(locals())
        param['param_dict'] = {'probs': probs}
        valid_dtype = mstype.int_type + mstype.uint_type + mstype.float_type
        Validator.check_type_name("dtype", dtype, valid_dtype,
                                  type(self).__name__)
        super(Geometric, self).__init__(seed, dtype, name, param)

        self._probs = self._add_parameter(probs, 'probs')
        if self._probs is not None:
            check_prob(self.probs)

        self.minval = np.finfo(np.float).tiny

        # ops needed for the class
        self.exp = exp_generic
        self.log = log_generic
        self.squeeze = P.Squeeze(0)
        self.cast = P.Cast()
        self.const = P.ScalarToArray()
        self.dtypeop = P.DType()
        self.fill = P.Fill()
        self.floor = P.Floor()
        self.issubclass = P.IsSubClass()
        self.less = P.Less()
        self.pow = P.Pow()
        self.select = P.Select()
        self.shape = P.Shape()
        self.sq = P.Square()
        self.uniform = C.uniform