Example #1
 def ppf(
     self,
     pits: Union[Sequence[float], ArrayLike1D],
     parameters: Optional[Union[Sequence[float], ArrayLike1D]] = None,
 ) -> NDArray:
     parameters = self._check_constraints(parameters)
     pits = asarray(pits)
     nu = parameters[0]
     var = stats.gennorm(nu).var()
     return stats.gennorm(nu, scale=1.0 / sqrt(var)).ppf(pits)
Example #2
 def cdf(
     self,
     resids: ArrayLike,
     parameters: Optional[Union[Sequence[float], ArrayLike1D]] = None,
 ) -> NDArray:
     parameters = self._check_constraints(parameters)
     nu = parameters[0]
     var = stats.gennorm(nu).var()
     resids = asarray(resids)
     return stats.gennorm(nu, scale=1.0 / sqrt(var)).cdf(resids)
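Examples #1 and #2 rely on the same trick: a unit-scale gennorm is rescaled by 1/sqrt(var) so the standardized distribution has unit variance for every shape nu. A minimal standalone sketch (mine, not from the arch source; the shape values are illustrative) confirming this:

import numpy as np
from scipy import stats

for nu in (1.0, 1.3, 2.0, 5.0):
    std = stats.gennorm(nu, scale=1.0 / np.sqrt(stats.gennorm(nu).var()))
    assert abs(std.var() - 1.0) < 1e-8  # unit variance by construction
    pits = np.array([0.05, 0.5, 0.95])
    assert np.allclose(std.cdf(std.ppf(pits)), pits)  # ppf inverts cdf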
Example #3
import numpy as np
from scipy.stats import norm, gennorm, kstest

def dist_test(log, conf_level=0.95, dist='normal'):
    """Kolmogorov-Smirnov goodness-of-fit test; returns True if not rejected."""
    # test for normal
    if dist == 'normal':
        rv = norm(loc=np.mean(log), scale=np.std(log, ddof=1))
        test = kstest(log, rv.cdf)
        D_crit = 1.3581 / np.sqrt(len(log))  # 5% KS critical value
        # test result
        return not (test[0] > D_crit or test[1] < 1 - conf_level)
    # test for ged
    elif dist == 'ged':
        beta = 1.3
        log2 = [abs(x)**beta for x in log]
        scale = (np.mean(log2) * beta)**(1 / beta)  # MLE of scale for fixed beta
        rv = gennorm(beta=beta, scale=scale)
        test = kstest(log, rv.cdf)  # KS statistic and p-value
        D_crit = 1.3581 / np.sqrt(len(log))
        # test result
        return not (test[0] > D_crit or test[1] < 1 - conf_level)
    else:
        return None
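A hedged usage sketch for dist_test (data simulated here, not from the source). With returns drawn from a GED with beta = 1.3, the GED fit should usually pass the KS test while the normal fit is more likely to fail:

import numpy as np
from scipy.stats import gennorm

rng = np.random.default_rng(0)
returns = gennorm.rvs(beta=1.3, scale=0.01, size=500, random_state=rng)
print(dist_test(returns, conf_level=0.95, dist='normal'))  # may be False
print(dist_test(returns, conf_level=0.95, dist='ged'))     # typically True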
Example #4
    def ind_risk(self, method = 'norm', start = year_1, end = today, conf_level = 0.95, beta = 1.3, expo = 0.94):
        """
        Common methods include: 
            - "norm": Normal (Guassian) with 1 year period, 60 days periods, 30 days periods
            - "student": T of Student with 60 days periods, 30 days periods
            - Generalized Error with 1 year period, 60 days periods, 30 days periods
                Inform BETA
            - EWMA with lambda = 0.94 or lambda = 0.97
                Inform factor
            - Level of Confidence = 0.95 or 0.99
        """
        if method == 'norm':
            df = self.data
            df = df[df.index >= start]
            df = df[df.index <= end]
            R =  df['return'].mean()
            S = np.std(df['return'], ddof = 1)
            rv = norm(loc = R, scale = S)
            var = rv.ppf(1 - conf_level)
            return var

        elif method == 'student':
            df = self.data
            df = df[df.index >= start]
            df = df[df.index <= end]
            R = df['return'].mean()
            S = np.std(df['return'], ddof = 1)
            rv = t(loc = R, scale = S, df = df.shape[0] - 1)
            var = rv.ppf(1 - conf_level)
            return var

        elif method == 'ged':
            df = self.data
            df = df[df.index >= start]
            df = df[df.index <= end]
            log = df['return']
            log2 = [abs(x)**beta for x in log]
            scale = (np.mean(log2)*beta)**(1/beta)
            rv = gennorm(beta = beta, scale = scale)
            var = rv.ppf(1 - conf_level)
            return var

        elif method == 'ewma':
            df = self.data
            df = df[df.index <= end]
            log = df['return']
            log = pd.DataFrame(log)
            index = list(log.index)
            log['factor'] = [len(index) - index.index(d) - 1 for d in log.index]
            log['weight'] = (1-expo)*(expo**log['factor'])
            log['ewma_return'] = log['return']*log['weight']
            log['ewma_std'] = (log['return']**2)*log['weight']
            R = log['ewma_return'].sum()
            S = np.sqrt(log['ewma_std'].sum())
            rv = norm(loc = R, scale = S)
            var = rv.ppf(1 - conf_level)
            return var
        else:
            print('Undefined Method')
            return None
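The 'ged' branch above can be reproduced outside the class. A self-contained sketch (names and numbers are mine): fit the GED scale by maximum likelihood for a fixed beta, then read the VaR off the left-tail quantile.

import numpy as np
from scipy.stats import gennorm

def ged_var(returns, beta=1.3, conf_level=0.95):
    # MLE of the gennorm scale when beta is held fixed
    scale = (np.mean(np.abs(returns)**beta) * beta)**(1.0 / beta)
    return gennorm(beta=beta, scale=scale).ppf(1 - conf_level)

rets = gennorm.rvs(beta=1.3, scale=0.012, size=250, random_state=42)
print(ged_var(rets))  # negative return quantile, i.e. the loss threshold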
Example #5
    def testGeneralizedNormalLogCDF(self):
        if self.dtype is np.float32:
            self.skipTest('32-bit precision not sufficient for LogCDF')
        batch_size = 50
        mu = self._rng.randn(batch_size)
        sigma = self._rng.rand(batch_size) + 1.
        power = self._rng.rand(batch_size) + 1.
        x = np.linspace(-100., 10., batch_size).astype(np.float64)

        gnormal = tfd.GeneralizedNormal(loc=self.make_input(mu),
                                        scale=self.make_input(sigma),
                                        power=self.make_input(power),
                                        validate_args=True)

        cdf = gnormal.log_cdf(x)
        self.assertAllEqual(self.evaluate(gnormal.batch_shape_tensor()),
                            cdf.shape)
        self.assertAllEqual(self.evaluate(gnormal.batch_shape_tensor()),
                            self.evaluate(cdf).shape)
        self.assertAllEqual(gnormal.batch_shape, cdf.shape)
        self.assertAllEqual(gnormal.batch_shape, self.evaluate(cdf).shape)
        expected_cdf = sp_stats.gennorm(power, loc=mu, scale=sigma).logcdf(x)
        self.assertAllClose(expected_cdf,
                            self.evaluate(cdf),
                            atol=0,
                            rtol=1e-3)
Example #6
def test_rbf():
    key1, key2 = random.split(random.PRNGKey(0), 2)
    x = np.arange(8).reshape(-1, 1)
    tests = []
    for ls in np.linspace(0.9, 2., 5):
        gauss_ref = norm(loc=0., scale=ls)
        lapl_ref = laplace(loc=0., scale=ls)

        tests.append(
            (f"Gauss_sc_{ls}", GenGaussKernel.make_gauss(length_scale=ls),
             gauss_ref.var(), gauss_ref.pdf(x)))
        tests.append(
            (f"Lapl_sc_{ls}", GenGaussKernel.make_laplace(length_scale=ls),
             lapl_ref.var(), lapl_ref.pdf(x)))
        for shape in np.linspace(0.9, 2., 5):
            gg_ref = gennorm(beta=shape, scale=ls)
            tests.append((f"GG__sh_{shape}__sc_{ls}",
                          GenGaussKernel.make(shape=shape, length_scale=ls),
                          gg_ref.var(), gg_ref.pdf(x)))

    for (n, k, v, pdf) in tests:
        #print(n)
        assert np.abs(v - k.var()) < 1e-4, f"{n}: {v} != {k.var()}"
        g = k(x)
        assert np.allclose(pdf.squeeze(), g[0, :], atol=1e-1)
Example #7
File: MNIST.py  Project: srxzr/DPtorch
def adjust_noise(optimizer, epoch, args, noise_dict):
    keys = list(noise_dict.keys())
    keys.sort()
    select = keys[0]
    for k in keys:
        select = k
        if k > epoch:
            break

    if args.noisemodel == 'johnson':
        noise_model = st.johnsonsu(*noise_dict[select])
    elif args.noisemodel == 'gaussian':
        print("GAUSSIAN NOISE set")
        noise_model = st.norm(*noise_dict[select])
    elif args.noisemodel == 'laplace':
        noise_model = st.laplace(*noise_dict[select])
    elif args.noisemodel == 'cauchy':
        noise_model = st.cauchy(*noise_dict[select])
    elif args.noisemodel == 'gennorm':
        noise_model = st.gennorm(*noise_dict[select])
    elif args.noisemodel == 'studentt':
        noise_model = st.t(*noise_dict[select])
    else:
        raise ValueError('unknown noise model: %s' % args.noisemodel)

    print('noise model is', args.noisemodel, noise_dict[select])
    optimizer.noise_generator = noise_model
Example #8
 def partial_moment(self, n, z=0, parameters=None):
     parameters = self._check_constraints(parameters)
     nu = parameters[0]
     scale = 1. / sqrt(stats.gennorm(nu).var())
     moment = (scale**n) * self._ord_gennorm_partial_moment(
         n, z / scale, nu)
     return moment
Example #9
    def testGeneralizedNormalLogPDF(self):
        batch_size = 6
        mu = tf.constant([3.] * batch_size, dtype=self.dtype)
        sigma = tf.constant([math.sqrt(10.)] * batch_size, dtype=self.dtype)
        power = tf.constant([4.] * batch_size, dtype=self.dtype)
        x = np.array([-2.5, 2.5, 4., 0., -1., 2.], dtype=np.float32)
        gnormal = tfd.GeneralizedNormal(loc=mu,
                                        scale=sigma,
                                        power=power,
                                        validate_args=True)
        log_pdf = gnormal.log_prob(x)
        self.assertAllEqual(self.evaluate(gnormal.batch_shape_tensor()),
                            log_pdf.shape)
        self.assertAllEqual(self.evaluate(gnormal.batch_shape_tensor()),
                            self.evaluate(log_pdf).shape)
        self.assertAllEqual(gnormal.batch_shape, log_pdf.shape)
        self.assertAllEqual(gnormal.batch_shape, self.evaluate(log_pdf).shape)

        pdf = gnormal.prob(x)
        self.assertAllEqual(self.evaluate(gnormal.batch_shape_tensor()),
                            pdf.shape)
        self.assertAllEqual(self.evaluate(gnormal.batch_shape_tensor()),
                            self.evaluate(pdf).shape)
        self.assertAllEqual(gnormal.batch_shape, pdf.shape)
        self.assertAllEqual(gnormal.batch_shape, self.evaluate(pdf).shape)

        expected_log_pdf = sp_stats.gennorm(
            self.evaluate(power),
            loc=self.evaluate(mu),
            scale=self.evaluate(sigma)).logpdf(x)
        self.assertAllClose(expected_log_pdf, self.evaluate(log_pdf))
        self.assertAllClose(np.exp(expected_log_pdf), self.evaluate(pdf))
Example #10
    def moment(self, n, parameters=None):
        if n < 0:
            return nan

        parameters = self._check_constraints(parameters)
        nu = parameters[0]
        var = stats.gennorm(nu).var()
        return stats.gennorm.moment(n, nu, scale=1. / sqrt(var))
Example #11
        def obj(s, args=None):
            """args = [min, max, beta, ppf]"""

            loc = (args[1]+args[0])/2.
            beta = args[2]
            ppf = args[3]
            d = gennorm(loc=loc, scale=s, beta=beta)
            r = sum((d.ppf([1.-ppf, .5,  ppf]) - np.array([args[0], loc, args[1]]))**2)
            return r
Example #12
    def moment(
        self, n: int, parameters: Optional[Union[Sequence[float], ArrayLike1D]] = None
    ) -> float:
        if n < 0:
            return nan

        parameters = self._check_constraints(parameters)
        nu = parameters[0]
        var = stats.gennorm(nu).var()
        return stats.gennorm.moment(n, nu, scale=1.0 / sqrt(var))
Example #13
 def partial_moment(
     self,
     n: int,
     z: float = 0.0,
     parameters: Optional[Union[Sequence[float], ArrayLike1D]] = None,
 ) -> float:
     parameters = self._check_constraints(parameters)
     nu = parameters[0]
     scale = 1.0 / sqrt(stats.gennorm(nu).var())
     moment = (scale ** n) * self._ord_gennorm_partial_moment(n, z / scale, nu)
     return moment
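The partial-moment code in examples #8 and #13 uses the scaling identity E[Y^n 1{Y<=z}] = s^n E[X^n 1{X<=z/s}] for Y = s*X. A quick numerical check by quadrature (my sketch; it does not use the class's _ord_gennorm_partial_moment helper, and the parameter values are illustrative):

import numpy as np
from scipy import stats
from scipy.integrate import quad

nu, n, z = 1.5, 2, -0.3
s = 1.0 / np.sqrt(stats.gennorm(nu).var())  # scale used in the examples

lhs = quad(lambda y: y**n * stats.gennorm.pdf(y, nu, scale=s), -np.inf, z)[0]
rhs = s**n * quad(lambda x: x**n * stats.gennorm.pdf(x, nu), -np.inf, z / s)[0]
assert np.isclose(lhs, rhs)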
Example #14
 def __init__(self, loc, scale, beta, validate_args=None):
     self.loc, self.scale = broadcast_all(loc, scale)
     (self.beta, ) = broadcast_all(beta)
     self.scipy_dist = stats.gennorm(
         loc=self.loc.cpu().detach().numpy(),
         scale=self.scale.cpu().detach().numpy(),
         beta=self.beta.cpu().detach().numpy())
     if isinstance(loc, Number) and isinstance(scale, Number):
         batch_shape = torch.Size()
     else:
         batch_shape = self.loc.size()
     super(GeneralizedNormal, self).__init__(batch_shape,
                                             validate_args=validate_args)
Example #15
    def distr(self):
        def obj(s, args=None):
            """args = [min, max, beta, ppf]"""

            loc = (args[1]+args[0])/2.
            beta = args[2]
            ppf = args[3]
            d = gennorm(loc=loc, scale=s, beta=beta)
            r = sum((d.ppf([1.-ppf, .5,  ppf]) - np.array([args[0], loc, args[1]]))**2)
            return r
        if self._distr is None:
            from scipy.optimize import minimize
            res = minimize(obj, [.1], method='Nelder-Mead', tol=1e-6, args=[self.clip[0],self.clip[1], self.beta, .999])
            # res = scipy.optimize.minimize_scalar(obj, bounds=[1e-10, 1], args=[1.,4., beta, .999], tol=1e-10)
            # print("res", res.x)
            self._distr = gennorm(loc=self.mean, scale=res.x, beta=self.beta)
        return self._distr
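The same quantile-matching fit can be run outside the class. A standalone sketch (the clip bounds and beta are my example values); abs() guards against negative trial scales during the Nelder-Mead search:

import numpy as np
from scipy.optimize import minimize
from scipy.stats import gennorm

clip, shape = (-1.0, 1.0), 4.0

def obj(s, args):
    lo, hi, beta, q = args
    loc = 0.5 * (lo + hi)
    d = gennorm(loc=loc, scale=abs(s[0]), beta=beta)
    return np.sum((d.ppf([1 - q, 0.5, q]) - np.array([lo, loc, hi]))**2)

res = minimize(obj, [0.1], method='Nelder-Mead', tol=1e-6,
               args=([clip[0], clip[1], shape, 0.999],))
print(res.x[0])  # fitted scale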
Example #16
def eval_generalized_normal_logpdf(eval_at,
                                   mu=0.0,
                                   alpha=1.0,
                                   p=10.0,
                                   lower=-np.inf,
                                   upper=np.inf):
    """Evaluate the generalized normal pdf, scaled/shifted

    See `sample_generalized_normal` for parameter definitions.

    """
    generalized_normal = stats.gennorm(beta=p, loc=mu, scale=alpha)
    unnormed_eval_logpdf = generalized_normal.logpdf(eval_at)
    unnormed_eval_logpdf[eval_at < lower] = -np.inf
    unnormed_eval_logpdf[eval_at > upper] = -np.inf
    accept_norm = generalized_normal.cdf(upper) - generalized_normal.cdf(lower)
    normed_eval_logpdf = unnormed_eval_logpdf - np.log(accept_norm)
    return normed_eval_logpdf
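A hedged check of the function above, assuming it is in scope (the bounds are my example values): after renormalizing by the accepted mass, the truncated density should integrate to one over [lower, upper].

import numpy as np
from scipy.integrate import quad

lo, hi = -0.5, 0.8
f = lambda t: float(np.exp(eval_generalized_normal_logpdf(
    np.atleast_1d(t), mu=0.0, alpha=1.0, p=10.0, lower=lo, upper=hi)[0]))
mass, _ = quad(f, lo, hi)
print(mass)  # ~= 1.0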
Example #17
    def testGeneralizedNormalEntropyWithScalarInputs(self):
        loc_v = 2.34
        scale_v = 4.56
        power_v = 7.89

        gnormal = tfd.GeneralizedNormal(loc=self.make_input(loc_v),
                                        scale=self.make_input(scale_v),
                                        power=self.make_input(power_v),
                                        validate_args=True)
        entropy = gnormal.entropy()
        self.assertAllEqual(self.evaluate(gnormal.batch_shape_tensor()),
                            entropy.shape)
        self.assertAllEqual(self.evaluate(gnormal.batch_shape_tensor()),
                            self.evaluate(entropy).shape)
        self.assertAllEqual(gnormal.batch_shape, entropy.shape)
        self.assertAllEqual(gnormal.batch_shape, self.evaluate(entropy).shape)
        expected_entropy = sp_stats.gennorm(power_v, loc=loc_v,
                                            scale=scale_v).entropy()
        self.assertAllClose(expected_entropy, self.evaluate(entropy))
Example #18
    def sample_generalized_normal(self,
                                  mu=0.0,
                                  alpha=1.0,
                                  p=10.0,
                                  lower=-np.inf,
                                  upper=np.inf):
        """Samples from a generalized normal distribution, optionally truncated

        Note
        ----
        Also called the exponential power distribution, this distribution converges
        pointwise to uniform as p --> infinity. To approximate a uniform between ``a`` and ``b``,
        define ``mu = 0.5*(a + b)`` and ``alpha=0.5*(b - a)``.
        For ``p=1``, it's identical to Laplace.
        For ``p=2``, it's identical to normal.
        See [1]_.

        Parameters
        ----------
        mu : float
            location (default: 0.0)
        alpha : float
            scale (default: 1.0)
        p : float
            shape (default: 10.0)
        lower : float
            min value (default: -np.inf)
        upper : float
            max value (default: np.inf)

        References
        ----------
        .. [1] `"Generalized normal distribution, Version 1" <https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1>`_

        """
        generalized_normal = stats.gennorm(beta=p, loc=mu, scale=alpha)
        sample = generalized_normal.rvs()
        # Reject samples outside of bounds, repeat sampling until accepted
        while not np.all(
            [np.greater(sample, lower),
             np.greater(upper, sample)]):
            sample = generalized_normal.rvs()
        return sample
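A small sketch checking the note in the docstring (values are mine): for large p, gennorm(beta=p, loc=mu, scale=alpha) is close to a uniform on [mu - alpha, mu + alpha].

import numpy as np
from scipy import stats

a, b = -1.0, 3.0
mu, alpha = 0.5 * (a + b), 0.5 * (b - a)
g = stats.gennorm(beta=50.0, loc=mu, scale=alpha)
u = stats.uniform(loc=a, scale=b - a)
x = np.linspace(a + 0.1, b - 0.1, 7)
print(np.max(np.abs(g.cdf(x) - u.cdf(x))))  # small; shrinks as beta grows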
Example #19
    def testGeneralizedNormalSF(self):
        batch_size = 50
        mu = self._rng.randn(batch_size)
        sigma = self._rng.rand(batch_size) + 1.
        power = self._rng.rand(batch_size) + 1.
        x = np.linspace(-8., 8., batch_size).astype(np.float64)

        gnormal = tfd.GeneralizedNormal(loc=self.make_input(mu),
                                        scale=self.make_input(sigma),
                                        power=self.make_input(power),
                                        validate_args=True)
        sf = gnormal.survival_function(x)
        self.assertAllEqual(self.evaluate(gnormal.batch_shape_tensor()),
                            sf.shape)
        self.assertAllEqual(self.evaluate(gnormal.batch_shape_tensor()),
                            self.evaluate(sf).shape)
        self.assertAllEqual(gnormal.batch_shape, sf.shape)
        self.assertAllEqual(gnormal.batch_shape, self.evaluate(sf).shape)
        expected_sf = sp_stats.gennorm(power, loc=mu, scale=sigma).sf(x)
        self.assertAllClose(expected_sf, self.evaluate(sf), atol=0, rtol=1e-5)
Example #20
    def testGeneralizedNormalQuantile(self):
        batch_size = 50
        mu = self._rng.randn(batch_size)
        sigma = self._rng.rand(batch_size) + 1.
        power = self._rng.rand(batch_size) + 1.
        p = np.linspace(0., 1., batch_size).astype(np.float64)
        gnormal = tfd.GeneralizedNormal(loc=self.make_input(mu),
                                        scale=self.make_input(sigma),
                                        power=self.make_input(power),
                                        validate_args=True)
        quantile = gnormal.quantile(p)
        self.assertAllEqual(
            self.evaluate(gnormal.batch_shape_tensor()), quantile.shape)
        self.assertAllEqual(
            self.evaluate(gnormal.batch_shape_tensor()),
            self.evaluate(quantile).shape)
        self.assertAllEqual(gnormal.batch_shape, quantile.shape)
        self.assertAllEqual(gnormal.batch_shape, self.evaluate(quantile).shape)
        expected_quantile = sp_stats.gennorm(power, loc=mu, scale=sigma).ppf(p)
        self.assertAllClose(
            expected_quantile, self.evaluate(quantile), atol=0, rtol=1e-4)
Example #21
if args.noisemodel == 'johnson':
    noise_model = st.johnsonsu(*args.noiseparams)
elif args.noisemodel == 'gaussian':
    noise_model = st.norm(*args.noiseparams)
elif args.noisemodel == 'laplace':
    noise_model = st.laplace(*args.noiseparams)
elif args.noisemodel == 'cauchy':
    noise_model = st.cauchy(*args.noiseparams)
elif args.noisemodel == 'gennorm':
    noise_model = st.gennorm(*args.noiseparams)
elif args.noisemodel == 'studentt':
    noise_model = st.t(*args.noiseparams)

def adjust_learning_rate(optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    lr = 0.16
    if epoch > 10:
        lr = 0.07
    if epoch > 20:
        lr = 0.05
    if epoch > 30:
        lr = 0.03
Example #22
 def cdf(self, resids, parameters=None):
     parameters = self._check_constraints(parameters)
     nu = parameters[0]
     var = stats.gennorm(nu).var()
     return stats.gennorm(nu, scale=1.0 / sqrt(var)).cdf(resids)

 def ppf(self, pits, parameters=None):
     parameters = self._check_constraints(parameters)
     nu = parameters[0]
     var = stats.gennorm(nu).var()
     return stats.gennorm(nu, scale=1.0 / sqrt(var)).ppf(pits)
Example #25
 def ppf(self, pits, parameters=None):
     parameters = self._check_constraints(parameters)
     pits = asarray(pits)
     nu = parameters[0]
     var = stats.gennorm(nu).var()
     return stats.gennorm(nu, scale=1.0 / sqrt(var)).ppf(pits)
Example #26
    ax.margins(x=0, y=0)

    if line: ax.plot(np.linspace(0, 1), np.linspace(0, 1), 'r', lw=2)
    
    return ax


## PP plot of the full distribution

fig, ((ax1,ax2),(ax3,ax4)) = plt.subplots(nrows = 2, ncols = 2, 
                                          dpi=1300)


pp_plot(logeados, stats.gennorm(beta = parametros_normal[0], 
                                loc = parametros_normal[1],
                                scale=parametros_normal[2]), 
        line = True, ax=ax1)

ax1.set_title('Normal generalizada', fontsize=11)


pp_plot(logeados, stats.genpareto(c = parametros_pareto[0], 
                                loc = parametros_pareto[1],
                                scale=parametros_pareto[2]), 
        line = True,ax=ax2)
ax2.set_title('Pareto generalizada', fontsize=11)

pp_plot(logeados, stats.dweibull(c = parametros_weibull[0], 
                                loc = parametros_weibull[1],
                                scale=parametros_weibull[2]), 
Example #27
def all_dists():
    # distribution parameters were taken from the scipy.stats official
    # documentation examples
    # Total - 89
    return {
        "alpha":
        stats.alpha(a=3.57, loc=0.0, scale=1.0),
        "anglit":
        stats.anglit(loc=0.0, scale=1.0),
        "arcsine":
        stats.arcsine(loc=0.0, scale=1.0),
        "beta":
        stats.beta(a=2.31, b=0.627, loc=0.0, scale=1.0),
        "betaprime":
        stats.betaprime(a=5, b=6, loc=0.0, scale=1.0),
        "bradford":
        stats.bradford(c=0.299, loc=0.0, scale=1.0),
        "burr":
        stats.burr(c=10.5, d=4.3, loc=0.0, scale=1.0),
        "cauchy":
        stats.cauchy(loc=0.0, scale=1.0),
        "chi":
        stats.chi(df=78, loc=0.0, scale=1.0),
        "chi2":
        stats.chi2(df=55, loc=0.0, scale=1.0),
        "cosine":
        stats.cosine(loc=0.0, scale=1.0),
        "dgamma":
        stats.dgamma(a=1.1, loc=0.0, scale=1.0),
        "dweibull":
        stats.dweibull(c=2.07, loc=0.0, scale=1.0),
        "erlang":
        stats.erlang(a=2, loc=0.0, scale=1.0),
        "expon":
        stats.expon(loc=0.0, scale=1.0),
        "exponnorm":
        stats.exponnorm(K=1.5, loc=0.0, scale=1.0),
        "exponweib":
        stats.exponweib(a=2.89, c=1.95, loc=0.0, scale=1.0),
        "exponpow":
        stats.exponpow(b=2.7, loc=0.0, scale=1.0),
        "f":
        stats.f(dfn=29, dfd=18, loc=0.0, scale=1.0),
        "fatiguelife":
        stats.fatiguelife(c=29, loc=0.0, scale=1.0),
        "fisk":
        stats.fisk(c=3.09, loc=0.0, scale=1.0),
        "foldcauchy":
        stats.foldcauchy(c=4.72, loc=0.0, scale=1.0),
        "foldnorm":
        stats.foldnorm(c=1.95, loc=0.0, scale=1.0),
        # "frechet_r": stats.frechet_r(c=1.89, loc=0.0, scale=1.0),
        # "frechet_l": stats.frechet_l(c=3.63, loc=0.0, scale=1.0),
        "genlogistic":
        stats.genlogistic(c=0.412, loc=0.0, scale=1.0),
        "genpareto":
        stats.genpareto(c=0.1, loc=0.0, scale=1.0),
        "gennorm":
        stats.gennorm(beta=1.3, loc=0.0, scale=1.0),
        "genexpon":
        stats.genexpon(a=9.13, b=16.2, c=3.28, loc=0.0, scale=1.0),
        "genextreme":
        stats.genextreme(c=-0.1, loc=0.0, scale=1.0),
        "gausshyper":
        stats.gausshyper(a=13.8, b=3.12, c=2.51, z=5.18, loc=0.0, scale=1.0),
        "gamma":
        stats.gamma(a=1.99, loc=0.0, scale=1.0),
        "gengamma":
        stats.gengamma(a=4.42, c=-3.12, loc=0.0, scale=1.0),
        "genhalflogistic":
        stats.genhalflogistic(c=0.773, loc=0.0, scale=1.0),
        "gilbrat":
        stats.gilbrat(loc=0.0, scale=1.0),
        "gompertz":
        stats.gompertz(c=0.947, loc=0.0, scale=1.0),
        "gumbel_r":
        stats.gumbel_r(loc=0.0, scale=1.0),
        "gumbel_l":
        stats.gumbel_l(loc=0.0, scale=1.0),
        "halfcauchy":
        stats.halfcauchy(loc=0.0, scale=1.0),
        "halflogistic":
        stats.halflogistic(loc=0.0, scale=1.0),
        "halfnorm":
        stats.halfnorm(loc=0.0, scale=1.0),
        "halfgennorm":
        stats.halfgennorm(beta=0.675, loc=0.0, scale=1.0),
        "hypsecant":
        stats.hypsecant(loc=0.0, scale=1.0),
        "invgamma":
        stats.invgamma(a=4.07, loc=0.0, scale=1.0),
        "invgauss":
        stats.invgauss(mu=0.145, loc=0.0, scale=1.0),
        "invweibull":
        stats.invweibull(c=10.6, loc=0.0, scale=1.0),
        "johnsonsb":
        stats.johnsonsb(a=4.32, b=3.18, loc=0.0, scale=1.0),
        "johnsonsu":
        stats.johnsonsu(a=2.55, b=2.25, loc=0.0, scale=1.0),
        "ksone":
        stats.ksone(n=1e03, loc=0.0, scale=1.0),
        "kstwobign":
        stats.kstwobign(loc=0.0, scale=1.0),
        "laplace":
        stats.laplace(loc=0.0, scale=1.0),
        "levy":
        stats.levy(loc=0.0, scale=1.0),
        "levy_l":
        stats.levy_l(loc=0.0, scale=1.0),
        "levy_stable":
        stats.levy_stable(alpha=0.357, beta=-0.675, loc=0.0, scale=1.0),
        "logistic":
        stats.logistic(loc=0.0, scale=1.0),
        "loggamma":
        stats.loggamma(c=0.414, loc=0.0, scale=1.0),
        "loglaplace":
        stats.loglaplace(c=3.25, loc=0.0, scale=1.0),
        "lognorm":
        stats.lognorm(s=0.954, loc=0.0, scale=1.0),
        "lomax":
        stats.lomax(c=1.88, loc=0.0, scale=1.0),
        "maxwell":
        stats.maxwell(loc=0.0, scale=1.0),
        "mielke":
        stats.mielke(k=10.4, s=3.6, loc=0.0, scale=1.0),
        "nakagami":
        stats.nakagami(nu=4.97, loc=0.0, scale=1.0),
        "ncx2":
        stats.ncx2(df=21, nc=1.06, loc=0.0, scale=1.0),
        "ncf":
        stats.ncf(dfn=27, dfd=27, nc=0.416, loc=0.0, scale=1.0),
        "nct":
        stats.nct(df=14, nc=0.24, loc=0.0, scale=1.0),
        "norm":
        stats.norm(loc=0.0, scale=1.0),
        "pareto":
        stats.pareto(b=2.62, loc=0.0, scale=1.0),
        "pearson3":
        stats.pearson3(skew=0.1, loc=0.0, scale=1.0),
        "powerlaw":
        stats.powerlaw(a=1.66, loc=0.0, scale=1.0),
        "powerlognorm":
        stats.powerlognorm(c=2.14, s=0.446, loc=0.0, scale=1.0),
        "powernorm":
        stats.powernorm(c=4.45, loc=0.0, scale=1.0),
        "rdist":
        stats.rdist(c=0.9, loc=0.0, scale=1.0),
        "reciprocal":
        stats.reciprocal(a=0.00623, b=1.01, loc=0.0, scale=1.0),
        "rayleigh":
        stats.rayleigh(loc=0.0, scale=1.0),
        "rice":
        stats.rice(b=0.775, loc=0.0, scale=1.0),
        "recipinvgauss":
        stats.recipinvgauss(mu=0.63, loc=0.0, scale=1.0),
        "semicircular":
        stats.semicircular(loc=0.0, scale=1.0),
        "t":
        stats.t(df=2.74, loc=0.0, scale=1.0),
        "triang":
        stats.triang(c=0.158, loc=0.0, scale=1.0),
        "truncexpon":
        stats.truncexpon(b=4.69, loc=0.0, scale=1.0),
        "truncnorm":
        stats.truncnorm(a=0.1, b=2, loc=0.0, scale=1.0),
        "tukeylambda":
        stats.tukeylambda(lam=3.13, loc=0.0, scale=1.0),
        "uniform":
        stats.uniform(loc=0.0, scale=1.0),
        "vonmises":
        stats.vonmises(kappa=3.99, loc=0.0, scale=1.0),
        "vonmises_line":
        stats.vonmises_line(kappa=3.99, loc=0.0, scale=1.0),
        "wald":
        stats.wald(loc=0.0, scale=1.0),
        "weibull_min":
        stats.weibull_min(c=1.79, loc=0.0, scale=1.0),
        "weibull_max":
        stats.weibull_max(c=2.87, loc=0.0, scale=1.0),
        "wrapcauchy":
        stats.wrapcauchy(c=0.0311, loc=0.0, scale=1.0),
    }
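A hedged usage sketch for all_dists (the loop body is mine): iterate the frozen distributions and draw a few variates from each as a smoke test.

import numpy as np

for name, dist in all_dists().items():
    sample = dist.rvs(size=3, random_state=0)
    print(name, np.round(np.asarray(sample, dtype=float), 3))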
Example #28
    pp = np.sort(dist.cdf(x))

    sns.scatterplot(x=p, y=pp, color='blue', edgecolor='blue', ax=ax, s=8)

    ax.margins(x=0, y=0)

    if line: ax.plot(np.linspace(0, 1), np.linspace(0, 1), 'r', lw=2)

    return ax


fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2, dpi=1300)

pp_plot(logeados,
        stats.gennorm(beta=parametros_gennormal[0],
                      loc=parametros_gennormal[1],
                      scale=parametros_gennormal[2]),
        line=True,
        ax=ax1)

ax1.set_title('Normal generalizada', fontsize=11)

pp_plot(logeados,
        stats.genpareto(c=parametros_pareto[0],
                        loc=parametros_pareto[1],
                        scale=parametros_pareto[2]),
        line=True,
        ax=ax2)
ax2.set_title('Pareto generalizada', fontsize=11)

pp_plot(logeados,
Example #29
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import gennorm

fig, ax = plt.subplots(1, 1)
beta = 1.3

mean, var, skew, kurt = gennorm.stats(beta, moments='mvsk')

# Display the probability density function (``pdf``):

x = np.linspace(gennorm.ppf(0.01, beta),
                gennorm.ppf(0.99, beta), 100)
ax.plot(x, gennorm.pdf(x, beta),
       'r-', lw=5, alpha=0.6, label='gennorm pdf')

# Alternatively, the distribution object can be called (as a function)
# to fix the shape, location and scale parameters. This returns a "frozen"
# RV object holding the given parameters fixed.

# Freeze the distribution and display the frozen ``pdf``:

rv = gennorm(beta)
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')

# Check accuracy of ``cdf`` and ``ppf``:

vals = gennorm.ppf([0.001, 0.5, 0.999], beta)
np.allclose([0.001, 0.5, 0.999], gennorm.cdf(vals, beta))
# True

# Generate random numbers:

r = gennorm.rvs(beta, size=1000)

# And compare the histogram:

ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
ax.legend(loc='best', frameon=False)
plt.show()