def test_arguments():
    """Constructing Normal with no args and with explicit loc/scale both yield a Distribution."""
    dist = msd.Normal()
    assert isinstance(dist, msd.Distribution)
    dist = msd.Normal([3.0], [4.0], dtype=dtype.float32)
    assert isinstance(dist, msd.Distribution)
def __init__(self, shape, seed=0):
    """Hold a Normal with broadcastable loc/scale and the sample shape to draw."""
    super(Sampling, self).__init__()
    loc = np.array([3.0])
    scale = np.array([[2.0], [4.0]])  # column vector -> broadcasts against loc
    self.n = msd.Normal(loc, scale, seed=seed, dtype=dtype.float32)
    self.shape = shape
def __init__(self, loc=None, scale=None, seed=0, dtype=mstype.float32, name="LogNormal"):
    """
    Constructor of LogNormal distribution.

    Built as a transformed distribution: an underlying Normal(loc, scale)
    pushed through an Exp bijector.
    """
    base = msd.Normal(loc, scale, dtype=dtype)
    super(LogNormal, self).__init__(distribution=base,
                                    bijector=msb.Exp(),
                                    seed=seed, name=name)
    # Overwrite default_parameters / parameter_names inherited from the
    # wrapper, then re-register loc and scale on this object.
    self._reset_parameters()
    self._loc = self._add_parameter(loc, 'loc')
    self._scale = self._add_parameter(scale, 'scale')

    # Constant used in log-density computations.
    self.log_2pi = np.log(2 * np.pi)

    # Ops needed by the class.
    self.dtypeop = P.DType()
    self.exp = exp_generic
    self.expm1 = P.Expm1()
    self.log = log_generic
    self.const = P.ScalarToArray()
    self.erf = P.Erf()
    self.fill = P.Fill()
    self.greater = P.Greater()
    self.select = P.Select()
    self.shape = P.Shape()
    self.sq = P.Square()
    self.sqrt = P.Sqrt()
    self.cast = P.Cast()
    self.squeeze = P.Squeeze(0)
def __init__(self, loc=None, scale=None, seed=0, dtype=mstype.float32, name="LogNormal"):
    """
    Constructor of LogNormal distribution.

    Wraps a Normal(loc, scale) base distribution with an Exp bijector.
    """
    base = msd.Normal(loc, scale, dtype=dtype)
    super(LogNormal, self).__init__(distribution=base,
                                    bijector=msb.Exp(),
                                    seed=seed, name=name)

    # Constant used in log-density computations.
    self.log_2pi = np.log(2 * np.pi)

    # Ops needed by the class.
    self.exp = exp_generic
    self.expm1 = expm1_generic
    self.log = log_generic
    self.const = P.ScalarToArray()
    self.erf = P.Erf()
    self.fill = P.Fill()
    self.shape = P.Shape()
    self.sq = P.Square()
    self.sqrt = P.Sqrt()
    self.zeroslike = P.ZerosLike()
def __init__(self, input_dim, hidden_num, hidden_dim, output_dim, mu, lamb,
             nonlinear="leaky-relu", norm_prod='paths', square_prod=False):
    """
    Store model hyper-parameters, build the adjacency mask, and allocate
    per-variable MLP weights/biases (one stack of layers per input variable).
    """
    super(BaseModel, self).__init__()
    self.input_dim = input_dim
    self.hidden_num = hidden_num
    self.hidden_dim = hidden_dim
    self.output_dim = output_dim
    self.mu = mu
    self.lamb = lamb
    self.nonlinear = nonlinear
    self.norm_prod = norm_prod
    self.square_prod = square_prod
    self.normal = msd.Normal(dtype=mstype.float32)
    self.extra_params = []

    # Current adjacency matrix: fully connected minus self-loops.
    self.adjacency = msnp.ones(
        (self.input_dim, self.input_dim), dtype=mstype.float32) - msnp.eye(
            self.input_dim, dtype=mstype.float32)

    # Layer sizes: input -> hidden (repeated hidden_num times) -> output.
    dims = [self.input_dim] + [self.hidden_dim] * self.hidden_num + [self.output_dim]

    # One (input_dim, out, in) weight tensor and (input_dim, out) bias
    # tensor per consecutive pair of layer sizes.
    weight_params = []
    bias_params = []
    for idx, (fan_in, fan_out) in enumerate(zip(dims[:-1], dims[1:])):
        weight_params.append(
            Parameter(msnp.zeros((self.input_dim, fan_out, fan_in),
                                 dtype=mstype.float32),
                      requires_grad=True,
                      name='w' + str(idx)))
        bias_params.append(
            Parameter(msnp.zeros((self.input_dim, fan_out),
                                 dtype=mstype.float32),
                      requires_grad=True,
                      name='b' + str(idx)))
    self.weights = ParameterTuple(weight_params)
    self.biases = ParameterTuple(bias_params)

    # Reset initialization parameters.
    self.reset_params()
def test_normal_shape_errpr():
    """Non-broadcastable loc/scale shapes must raise ValueError at construction."""
    # NOTE(review): 'errpr' is a pre-existing typo for 'error'; the name is
    # kept unchanged to preserve the public test identifier.
    with pytest.raises(ValueError):
        msd.Normal([[2.], [1.]], [[2.], [3.], [4.]], dtype=dtype.float32)
def __init__(self):
    """Hold one parameterized Normal and one parameter-free Normal."""
    super(NormalConstruct, self).__init__()
    mean, sd = 3.0, 4.0
    self.normal = msd.Normal(mean, sd)
    self.normal1 = msd.Normal()
def __init__(self):
    """Hold a Normal(3.0, 4.0) in float32 for basic-property tests."""
    super(NormalBasics, self).__init__()
    mean, sd = 3.0, 4.0
    self.n = msd.Normal(mean, sd, dtype=dtype.float32)
def test_seed():
    """A non-integer seed must raise TypeError."""
    with pytest.raises(TypeError):
        msd.Normal(0., 1., seed='seed')
def test_name():
    """A non-string name must raise TypeError."""
    with pytest.raises(TypeError):
        msd.Normal(0., 1., name=1.0)
def test_type():
    """An integer dtype must raise TypeError (Normal requires float types)."""
    with pytest.raises(TypeError):
        msd.Normal(0., 1., dtype=dtype.int32)
def __init__(self):
    """Set up Normal/Bernoulli distributions and a keep-dims sum reducer."""
    super().__init__()
    f32 = mstype.float32
    self.normal_dist = msd.Normal(dtype=f32)
    self.bernoulli_dist = msd.Bernoulli(dtype=f32)
    self.reduce_sum = P.ReduceSum(keep_dims=True)
def __init__(self):
    """Hold a Normal([3.0], [4.0]) for KL-divergence tests."""
    super(KL, self).__init__()
    loc = np.array([3.0])
    scale = np.array([4.0])
    self.n = msd.Normal(loc, scale, dtype=dtype.float32)
def __init__(self):
    """Hold a Normal whose column-vector scale broadcasts against its loc."""
    super(LogProb, self).__init__()
    loc = np.array([3.0])
    scale = np.array([[2.0], [4.0]])  # (2, 1) -> broadcasts with loc
    self.n = msd.Normal(loc, scale, dtype=dtype.float32)
def __init__(self):
    """Hold a standard Normal(0, 1) in float32."""
    super(Net, self).__init__()
    mean, sd = 0., 1.
    self.normal = msd.Normal(mean, sd, dtype=dtype.float32)
def __init__(self):
    """Hold a Normal(3.0, 4.0) for probability-evaluation tests."""
    super(NormalProb, self).__init__()
    mean, sd = 3.0, 4.0
    self.normal = msd.Normal(mean, sd, dtype=dtype.float32)
def __init__(self):
    """Hold a parameter-free Normal (loc/scale supplied at call time)."""
    super(NormalProb1, self).__init__()
    self.normal = msd.Normal()
def test_sd():
    """Scale must be strictly positive: zero and negative values raise ValueError."""
    for bad_sd in (0., -1.):
        with pytest.raises(ValueError):
            msd.Normal(0., bad_sd)
def __init__(self, shape, seed=0):
    """Hold a seeded standard Normal and the sample shape to draw."""
    super(Sampling, self).__init__()
    self.shape = shape
    self.n1 = msd.Normal(0, 1, seed=seed, dtype=dtype.float32)
def __init__(self):
    """Hold three Normals: parameter-free, standard, and Normal(3, 4)."""
    super(Net1, self).__init__()
    f32 = dtype.float32
    self.normal = msd.Normal(dtype=f32)
    self.normal1 = msd.Normal(0.0, 1.0, dtype=f32)
    self.normal2 = msd.Normal(3.0, 4.0, dtype=f32)
def __init__(self):
    """Hold a parameterized and a parameter-free Normal for cross-entropy tests."""
    super(NormalCrossEntropy, self).__init__()
    loc = np.array([3.0])
    scale = np.array([4.0])
    self.n1 = msd.Normal(loc, scale, dtype=dtype.float32)
    self.n2 = msd.Normal(dtype=dtype.float32)
def __init__(self):
    """Hold a Normal whose scalar loc broadcasts against a 2-element scale."""
    super(Basics, self).__init__()
    loc = np.array([3.0])
    scale = np.array([2.0, 4.0])  # broadcasts against the single loc
    self.n = msd.Normal(loc, scale, dtype=dtype.float32)