Code example #1
File: DNN.py  Project: hupipi96/GatedNet-Decoder
    def test(self,alpha,GSNR_low,GSNR_up,interval,test_batch,num_words):
        SNR_dB_start_Eb = GSNR_low
        SNR_dB_stop_Eb = GSNR_up
        SNR_points = interval
        SNR_dB_start_Es = SNR_dB_start_Eb + 10 * np.log10(self.k / self.N)
        SNR_dB_stop_Es = SNR_dB_stop_Eb + 10 * np.log10(self.k / self.N)
        SNRs = np.linspace(SNR_dB_start_Eb, SNR_dB_stop_Eb, SNR_points)

        sigma_start = np.sqrt(1 / (2 * 10 ** (SNR_dB_start_Es / 10)))
        sigma_stop = np.sqrt(1 / (2 * 10 ** (SNR_dB_stop_Es / 10)))
        sigmas = np.linspace(sigma_start, sigma_stop, SNR_points)

        nb_errors = np.zeros(len(sigmas), dtype=int)
        nb_bits = np.zeros(len(sigmas), dtype=int)
        ber = np.zeros(len(sigmas), dtype=float)
        seedrand = np.zeros(100, dtype=int)

        for sr in range(1, 100):
            seedrand[sr] = np.random.randint(0, 2 ** 14, size=(1))  # seedrand[sr-1]+1
        for i in range(0, len(sigmas)):  # different  SNR
            scale = CommFunc.CalScale(SNRs[i], alpha, self.R)
            # print("GSNR={},scale={}".format(SNRs[i], scale))
            for ii in range(0, np.round(num_words / test_batch).astype(int)):
                # Source
                x_test, d_test = Data.genRanData(self.k, self.N, test_batch, seedrand[ii])
                # Modulator (BPSK)
                s_test = -2 * x_test + 1
                # Channel (alpha-stable)
                y_test = s_test + levy_stable.rvs(alpha, 0, 0, scale, (test_batch, self.N))
                # Decoder
                nb_errors[i] += self.decoder.evaluate(y_test, d_test, batch_size=test_batch, verbose=2)[2]
                nb_bits[i] += d_test.size
        ber = np.float32(nb_errors / nb_bits)
        return ber
Code example #2
def generate_series(N, return1, return10, alpha, beta, delta, gamma):
    #generate random variables sequentially according to Levy alpha-stable distribution
    randomvariable = np.zeros(N)
    for i in range(N):
        randomvariable[i] = levy_stable.rvs(alpha, beta, delta, gamma, random_state=None)
    
    ##initialize prices
    price = np.zeros(N+1)
    price[0] = 100.0 #arbitrary initial price
        
    ##Standard practice, as prescribed by Mandelbrot (1963)
    ##presumes that the daily change in natural logarithms
    ##of prices is represented by said random variable:
    ##  X = log_e[P_(i+1)] - log_e[P_i] , or equivalently
    ##  P_(i+1) = P_i * exp(X)
    exprv = np.exp(randomvariable)
    for i in range(N):
        price[i+1] = price[i] * exprv[i]
    
    #Use the equations given for fractional daily returns:
    #  r^1_i = ( P_(i+1) - P_i ) / P_i
    #  r^10_i = ( P_(i+10) - P_i ) / P_i
    for i in range(N):
        return1[i] = (price[i+1] / price[i]) - 1
        #shortcut return1[i] = exprv[i] - 1
        if (i < N - 9):
            return10[i] = (price[i+10] / price[i]) - 1 
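A minimal usage sketch for generate_series (the array sizes and parameter values below are illustrative assumptions; the function fills return1 and return10 in place, as defined above):

import numpy as np

N = 2500                  # number of simulated daily steps (assumed)
return1 = np.zeros(N)     # 1-day fractional returns, filled in place
return10 = np.zeros(N)    # 10-day fractional returns (last 9 entries stay 0)
generate_series(N, return1, return10, alpha=1.7, beta=0.0, delta=0.0, gamma=1.0)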
Code example #3
File: monte_carlo.py  Project: AAAAA-tech/monte_carlo
def monte_carlo_test(mon_car_tr, overlap_per):

    # input params
    alpha, beta = 1.7, .0
    # generate prices
    price = levy_stable.rvs(
        alpha,
        beta,
        size=mon_car_tr
    )

    # Generate 1 day returns
    one_day_returns = [
        (price[i+1]-price[i])/price[i]
        for i in range(len(price)-1)
    ]

    # Generate overlapping data
    overlap_data = []
    for i in range(mon_car_tr-overlap_per):
        val = 0
        for j in range(overlap_per):
            val = val + one_day_returns[i+j]
        overlap_data.append(val)

    # Quantile (ppf) of the stable distribution at each value in the overlapping data
    probs = [
        levy_stable.ppf(i, alpha, beta)
        for i in overlap_data
    ]

    return probs, overlap_data
Code example #4
    def sample_new_coordinates(self, x_old, y_old, gazeDir, dV_x, dV_y):
        #Distances drawn from the stable random number generator
        r = levy_stable.rvs(alpha=self.alpha_stable,
                            beta=self.beta_stable,
                            loc=self.delta_stable,
                            scale=self.gamma_stable,
                            size=self.NUM_SAMPLE_LEVY)

        #Draw a direction theta uniformly in [-pi, pi), offset by the
        #previous gaze direction gazeDir
        theta = 2 * np.pi * np.random.rand(
            self.NUM_SAMPLE_LEVY, ) - np.pi + gazeDir

        dV_x = np.reshape(dV_x, -1, order='F')
        dV_y = np.reshape(dV_y, -1, order='F')

        #Compute  new gaze position of the FOA via Langevin equation
        x_new = np.round(x_old + self.h *
                         (-self.k_P * dV_x[x_old] +
                          self.k_R * np.multiply(r, np.cos(theta))))
        y_new = np.round(y_old + self.h *
                         (-self.k_P * dV_y[y_old] +
                          self.k_R * np.multiply(r, np.sin(theta))))

        return x_new, y_new
Code example #5
File: flm.py  Project: bencoscia/flm
    def _realization(self, truncated_distribution):

        if self.alpha == 2:

            z = np.random.normal(0, scale=self.scale, size=self.Na)

        else:

            if self.truncate is not None:

                if truncated_distribution is not None:

                    z = truncated_distribution.sample(self.Na)

                else:

                    z = sampling.truncated_levy_distribution(
                        self.truncate, self.alpha, self.scale, self.Na)

            else:

                z = levy_stable.rvs(self.alpha,
                                    0,
                                    loc=0,
                                    scale=self.scale,
                                    size=self.Na)

        # Filter the noise in the frequency domain with the precomputed spectrum self.A
        z = np.fft.fft(z, self.Na)
        w = np.fft.ifft(z * self.A, self.Na).real

        # Keep every m-th of the first N*m samples
        return w[:self.N * self.m:self.m]
Code example #6
def draw(a, b, fig):
    x = []
    for i in range(1000):
        w = np.random.exponential(1)
        x.append(F_inv(w, a, b))
    plt.figure(fig)
    plt.subplot(1, 2, 1)
    plt.hist(x,
             bins=100,
             color='r',
             label='a=' + str(a) + ' b=' + str(b),
             rwidth=0.8,
             density=1)
    r = levy_stable.rvs(a, b, size=1000, scale=1)
    plt.hist(r,
             bins=100,
             label="Theoretical",
             color='b',
             rwidth=0.8,
             density=1)
    plt.title('Sample histogram')
    plt.legend()
    plt.subplot(1, 2, 2)
    plt.plot(np.arange(0, 1000), x)
    plt.title('time series plot')
    return
Code example #7
File: db.py  Project: skypitcher/manfe
def gen_batch(hps):
    y_batch = np.zeros([TIMES_SLOTS_PER_BATCH, 2 * NUM_ANT, 1])
    h_batch = np.zeros([TIMES_SLOTS_PER_BATCH, 2 * NUM_ANT, 2 * NUM_ANT])
    s_batch = np.random.rand(TIMES_SLOTS_PER_BATCH, 2 * NUM_ANT, 1)
    s_batch = np.where(s_batch < 0.5, -1 / np.sqrt(2), 1 / np.sqrt(2))
    w_batch = np.zeros([TIMES_SLOTS_PER_BATCH, 2 * NUM_ANT, 1])

    p = 10**(hps.snr / 10)

    for i in range(PACKETS_PER_BATCH):
        h = np.sqrt(p / NUM_ANT) * complex_channel()
        for j in range(TIME_SLOTS_PER_PACKET):
            t = i * TIME_SLOTS_PER_PACKET + j

            if NOISE_TYPE == "MIXGAUSS":
                w = exampl_mixture_gauss(size=[2 * NUM_ANT, 1])
            elif NOISE_TYPE == "NAKA":
                w = nakagami_m(m=hps.m, size=[2 * NUM_ANT, 1])
            else:
                w = levy_stable.rvs(alpha=hps.alpha,
                                    beta=hps.beta,
                                    loc=hps.mu,
                                    scale=hps.sigma,
                                    size=[2 * NUM_ANT, 1])

            s = s_batch[t, :, :]
            s = s.reshape([2 * NUM_ANT, 1])

            y = h @ s + w

            y_batch[t:t + 1, :, :] = y
            h_batch[t:t + 1, :, :] = h
            w_batch[t:t + 1, :, :] = w

    return y_batch, h_batch, s_batch, w_batch
Code example #8
File: rolling.py  Project: Maplenormandy/psfc-misc
def getSlope(alpha):
    x = levy_stable.rvs(alpha=alpha, beta=0.0, size=100000)
    cutoffs = np.logspace(-1,4)
    means = np.array([np.std(np.clip(x,-c,c)) for c in cutoffs])
    slope, intercept, r_value, p_value, std_err = linregress(np.log(cutoffs), np.log(means))
    plt.loglog(cutoffs, means)
    return slope
Code example #9
def simu_gumbel(num, theta):
    """
    # Gumbel copula
    """
    # https://cran.r-project.org/web/packages/gumbel/gumbel.pdf
    # https://cran.r-project.org/web/packages/gumbel/vignettes/gumbel.pdf

    d = theta
    alpha = 1 / theta
    beta = 1
    gamma = 1
    delta = 0
    X = levy_stable.rvs(alpha=1 / theta,
                        beta=1,
                        scale=(np.cos(np.pi / (2 * theta)))**theta,
                        loc=0,
                        size=num)

    v1 = np.array([np.random.exponential(scale=1.0) for i in range(0, num)])
    v2 = np.array([np.random.exponential(scale=1.0) for i in range(0, num)])

    def phi_1(t):
        return np.exp(-t**(1 / theta))

    u1 = phi_1(v1 / X)
    u2 = phi_1(v2 / X)

    return u1, u2
Code example #10
    def get_transmission_delay(self):

        transmission_delay = levy_stable.rvs(alpha=self.levy_stable_alpha,
                                             beta=self.levy_stable_beta,
                                             loc=self.levy_stable_loc,
                                             scale=self.levy_stable_scale)

        return transmission_delay
Code example #11
 def Gumbel(self, alpha, d=(1000, )):
     self.alpha = alpha
     x = np.random.uniform(size=d)
     y = np.random.uniform(size=d)
     beta = (math.cos(math.pi / (2 * alpha)))**alpha
     v = levy_stable.rvs(1 / alpha, 1, loc=0, scale=beta, size=d)
     u_x = self.gumbel_generator(-np.log10(x) / v)
     u_y = self.gumbel_generator(-np.log10(y) / v)
     return u_x, u_y
Code example #12
    def get_transmission_delay(self):
        if self.type == 0:
            transmission_delay = levy_stable.rvs(alpha=self.levy_stable_alpha,
                                                 beta=self.levy_stable_beta,
                                                 loc=self.levy_stable_loc,
                                                 scale=self.levy_stable_scale)

            return transmission_delay
        elif self.type == 1:
            return self.mean_transmission_delay
        else:
            return None
Code example #13
File: update_w.py  Project: tomMoral/alphacsc
def estimate_phi_mh(X,
                    Xhat,
                    alpha,
                    Phi,
                    n_iter_mcmc,
                    n_burnin_mcmc,
                    random_state,
                    return_loglk=False,
                    verbose=10):
    """Estimate the expectation of 1/phi by Metropolis-Hastings"""

    if n_iter_mcmc <= n_burnin_mcmc:
        raise ValueError('n_iter_mcmc must be greater than n_burnin_mcmc')

    n_trials, n_times = X.shape
    residual = (X - Xhat)**2

    tau = np.zeros((n_trials, n_times))
    rng = check_random_state(random_state)

    if return_loglk:
        loglk_all = np.zeros((n_iter_mcmc, 1))

    for i in range(n_iter_mcmc):
        scale = 2 * np.cos(np.pi * alpha / 4)**(2 / alpha)
        Phi_p = levy_stable.rvs(alpha / 2,
                                1,
                                loc=0,
                                scale=scale,
                                size=(n_trials, n_times),
                                random_state=rng)
        log_acc = 0.5 * np.log(Phi / Phi_p) + residual * (1 / Phi - 1 / Phi_p)
        log_u = np.log(rng.uniform(size=(n_trials, n_times)))
        ix = (log_acc > log_u)

        Phi[ix] = Phi_p[ix]

        if return_loglk:
            loglk = np.sum(-0.5 * np.log(Phi) - 0.5 * residual / Phi)
            loglk_all[i] = loglk
            if verbose > 5:
                print("Iter: %d\t loglk:%E\t NumAcc:%d" %
                      (i, loglk, np.sum(ix)))

        if (i >= n_burnin_mcmc):
            tau += 1 / Phi

    tau = tau / (n_iter_mcmc - n_burnin_mcmc)

    if return_loglk:
        return Phi, tau, loglk_all
    return Phi, tau
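A minimal usage sketch of estimate_phi_mh under assumed shapes; X, Xhat and Phi below are illustrative placeholders (observed data, current reconstruction, and a positive initialization of the latent scales):

import numpy as np

n_trials, n_times = 5, 200
rng = np.random.default_rng(0)
X = rng.standard_normal((n_trials, n_times))   # observed data (assumed)
Xhat = np.zeros((n_trials, n_times))           # current reconstruction (assumed)
Phi = np.ones((n_trials, n_times))             # positive initial latent scales

Phi, tau, loglk_all = estimate_phi_mh(X, Xhat, alpha=1.5, Phi=Phi,
                                      n_iter_mcmc=200, n_burnin_mcmc=50,
                                      random_state=0, return_loglk=True,
                                      verbose=0)
# tau approximates the expectation of 1/phi over the post-burn-in samples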
Code example #14
    def MultiDimensionalGumbel(self, d=(2, 1000)):
        arr = []
        beta = (math.cos(math.pi / (2 * self.alpha)))**self.alpha
        v = levy_stable.rvs(1 / self.alpha,
                            1,
                            loc=0,
                            scale=beta,
                            size=(d[1], ))
        for i in range(d[0]):
            x = np.random.uniform(size=(d[1], ))
            u_x = self.gumbel_generator(-np.log10(x) / v)
            arr.append(u_x)

        return np.asarray(arr)
Code example #15
def levyflight(x0, n, dt, alpha, beta, out=None):
    """
    Generate an instance of a Levy flight:

    Arguments
    ---------
    x0 : float or numpy array (or something that can be converted to a numpy array
         using numpy.asarray(x0)).
        The initial condition(s) (i.e. position(s)) of the Levy flight.
    n : int
        The number of steps to take.
    dt : float
        The time step.

    alpha, beta: Levy parameters

    out : numpy array or None
        If `out` is not None, it specifies the array in which to put the
        result.  If `out` is None, a new numpy array is created and returned.

    Returns
    -------
    A numpy array of floats with shape `x0.shape + (n,)`.

    Note that the initial value `x0` is not included in the returned array.
    """

    #can we really calculate the stats?
    #mean, var, skew, kurt = levy_stable.stats(alpha, beta, moments='mvsk')
    #print("Levy mean: ",mean," variance: ",var)

    x0 = np.asarray(x0)

    # For each element of x0, generate a sample of n numbers from a
    # Levy stable distribution.
    r = levy_stable.rvs(alpha, beta, size=x0.shape + (n, ), scale=np.sqrt(dt))

    # If `out` was not given, create an output array.
    if out is None:
        out = np.empty(r.shape)

    # This computes the Levy flight by forming the cumulative sum of
    # the random samples.
    np.cumsum(r, axis=-1, out=out)

    # Add the initial condition.
    out += np.expand_dims(x0, axis=-1)

    return out
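A minimal usage sketch (the parameter values below are illustrative):

import numpy as np
import matplotlib.pyplot as plt

# Five independent flights of 1000 steps each, all started at 0
paths = levyflight(np.zeros(5), n=1000, dt=0.01, alpha=1.7, beta=0.0)
plt.plot(paths.T)
plt.show()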
Code example #16
File: test_stable.py  Project: youisbaby/pyro
def test_sample_2(alpha, beta):
    num_samples = 10000

    d = dist.Stable(alpha, beta, coords="S")
    # Temporarily increase radius to test hole-patching logic.
    # Scipy doesn't handle values of alpha very close to 1.
    try:
        old = pyro.distributions.stable.RADIUS
        pyro.distributions.stable.RADIUS = 0.02
        actual = d.sample([num_samples])
    finally:
        pyro.distributions.stable.RADIUS = old
    actual = d.sample([num_samples])

    expected = levy_stable.rvs(alpha, beta, size=num_samples)

    assert ks_2samp(expected, actual).pvalue > 0.05
Code example #17
File: simulation.py  Project: huning2009/pycop
def SimuGumbel(n, m, theta):
    """
    # Gumbel copula
    Requires:
        n = number of variables to generate
        m = sample size
        theta = Gumbel copula parameter
    """
    # https://cran.r-project.org/web/packages/gumbel/gumbel.pdf
    # https://cran.r-project.org/web/packages/gumbel/vignettes/gumbel.pdf

    v = [np.random.uniform(0,1,m) for i in range(0,n)]

    X = levy_stable.rvs(alpha=1/theta, beta=1, scale=(np.cos(np.pi/(2*theta)))**theta, loc=0, size=m)

    phi_t = lambda t:  np.exp(-t**(1/theta))

    u = [phi_t(-np.log(v[i])/X) for i in range(0,n)]
    return u
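A minimal usage sketch (parameter values are illustrative):

import numpy as np

# Two dependent uniform marginals, 5000 samples, Gumbel parameter theta = 2
u1, u2 = SimuGumbel(n=2, m=5000, theta=2.0)
# For a Gumbel copula, Kendall's tau equals 1 - 1/theta, i.e. 0.5 here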
Code example #18
def generator2(dataset_name,
               fraction,
               mu,
               sigma,
               SNR=None,
               distribution="Gaussian"):
    #print(os.getcwd())
    a = np.load(
        os.path.join(os.path.join('../../data', dataset_name),
                     r'normlized_tensor.npy'))
    b = np.zeros_like(a, dtype=bool)
    DIM = a.shape
    sigmas = [0.01, 0.05, 0.1, 0.5, 1]
    #Add noise
    if SNR != None:
        sigma2 = np.var(tensor_to_vec(a)) * (1 / (10**(SNR / 10)))
        GN = np.sqrt(sigma2) * np.random.randn(DIM[0], DIM[1], DIM[2])
        a = a + GN

    for it in range(5):
        #Add outliers
        if distribution == "Gaussian":
            outliers = np.random.randn(DIM[0], DIM[1], DIM[2]) * sqrt(
                sigmas[it]) + mu
        elif distribution == "levy_stable":
            outliers = levy_stable.rvs(sigma,
                                       mu,
                                       size=(DIM[0], DIM[1], DIM[2]))

        locations = list(range(np.prod(DIM)))
        if fraction != 0:
            sampled_locations = np.random.choice(
                locations, int(len(locations) * fraction), replace=False)
            # print(len(sampled_locations))
            for x in sampled_locations:
                k = x // (DIM[0] * DIM[1])
                x %= (DIM[0] * DIM[1])
                i = x // DIM[1]
                j = x % DIM[1]
                b[i, j, k] = 1
                a[i, j, k] += outliers[i, j, k]
    return a, b
Code example #19
def test_spectrum():
    for N in [1000, 10000]:
        for q_list in [6, 12, 21]:

            alpha = 1.5
            X = levy_stable.rvs(alpha, beta=0, size=N)

            q = np.linspace(-10, 10, q_list)
            q = q[q!=0.0]

            print(q)
            lag = np.unique(
                  np.logspace(
                  0, np.log10(X.size // 4), 55
                  ).astype(int) + 1
                )

            lag, dfa = MFDFA(X, lag = lag, q = q, order = 1)

            alpha, f  = singspect.singularity_spectrum(lag, dfa, q = q)
            _ = singspect.singularity_spectrum_plot(alpha, f);
            assert alpha.shape[0] == f.shape[0], "Output shape mismatch"
            assert alpha.shape[0] == q.shape[0], "Output shape mismatch"

            q, tau = singspect.scaling_exponents(lag, dfa, q = q)
            _ = singspect.scaling_exponents_plot(q, tau);
            assert tau.shape[0] == q.shape[0], "Output shape mismatch"

            q, hq = singspect.hurst_exponents(lag, dfa, q = q)
            _ = singspect.hurst_exponents_plot(q, hq);
            assert hq.shape[0] == q.shape[0], "Output shape mismatch"

            try:
                singspect._slopes(lag, dfa, q[0:3])
            except Exception:
                pass
Code example #20
def fractional_diffusion(alpha_,
                         beta_,
                         theta_,
                         D_,
                         L,
                         M,
                         use_parallel=False,
                         do_save=False):
    L = int(L)
    M = int(M)
    xnt = np.zeros((L, M))
    L_temp = 20 * L
    tau = 1e-5
    c_alpha_ = np.power(D_ * tau, 1 / alpha_)
    c_beta_ = np.power(tau, 1 / beta_)

    x_alpha_ = alpha_
    x_beta_ = -tan(theta_ * pi / 2) / tan(alpha_ * pi / 2)
    x_gamma_ = c_alpha_ * np.power(cos(theta_ * pi / 2), 1 / alpha_)
    x_delta_ = -x_gamma_ * tan(theta_ * pi / 2)

    t_alpha_ = beta_
    t_beta_ = 1
    t_gamma_ = c_beta_ * np.power(cos(beta_ * pi / 2), 1 / beta_)
    t_delta_ = t_gamma_ * tan(beta_ * pi / 2)
    M_thres = 200
    num_processes = cpu_count()
    ti = time()
    if M > M_thres:
        num_M = int(np.floor(M / M_thres))
        if use_parallel:
            pool = Pool(processes=num_processes)
            results_dt = []
            results_dx = []
            for i in range(num_M):
                results_dt.append(
                    apply_async(pool, get_levy_stable,
                                (t_alpha_, t_beta_, t_delta_, t_gamma_,
                                 (L_temp, M_thres))))
            results_dt = [p.get() for p in results_dt]
            dt = np.concatenate(results_dt, axis=1)

            for i in range(num_M):
                results_dx.append(
                    apply_async(pool, get_levy_stable,
                                (x_alpha_, x_beta_, x_delta_, x_gamma_,
                                 (L_temp, M_thres))))
            results_dx = [p.get() for p in results_dx]
            dx = np.concatenate(results_dx, axis=1)
        else:
            dt = np.zeros((L_temp, M))
            dx = np.zeros((L_temp, M))
            for i in range(num_M):
                dt_temp = levy_stable.rvs(t_alpha_,
                                          t_beta_,
                                          t_delta_,
                                          t_gamma_,
                                          size=(L_temp, M_thres))
                dt[:, i * M_thres:(i + 1) * M_thres] = dt_temp

                dx_temp = levy_stable.rvs(x_alpha_,
                                          x_beta_,
                                          x_delta_,
                                          x_gamma_,
                                          size=(L_temp, M_thres))
                dx[:, i * M_thres:(i + 1) * M_thres] = dx_temp
    else:
        dt = levy_stable.rvs(t_alpha_,
                             t_beta_,
                             t_delta_,
                             t_gamma_,
                             size=(L_temp, M))
        dx = levy_stable.rvs(x_alpha_,
                             x_beta_,
                             x_delta_,
                             x_gamma_,
                             size=(L_temp, M))

    dt_sum = np.concatenate((np.zeros((1, M)), np.cumsum(dt, axis=0)))
    dx_sum = np.concatenate((np.zeros((1, M)), np.cumsum(dx, axis=0)))
    T = np.linspace(0, np.min(dt_sum[-1, :]), L)

    nt = np.zeros((np.size(T), M), dtype=int)
    for i in range(M):
        jj = 0
        looping_complete = False
        for k in range(1 + L_temp):
            while (True):
                if dt_sum[k, i] >= T[jj]:
                    nt[jj, i] = k
                    jj += 1
                    if jj >= np.size(T):
                        looping_complete = True
                        break
                else:
                    break
            if looping_complete:
                break
    for i in range(M):
        xnt[:, i] = dx_sum[nt[:, i], i]

    if do_save:
        data_dir_name = 'data'
        make_dir(data_dir_name)
        file_name = 'sim_frac_diff_a_%1.2f_b_%1.2f_t_%1.2f_D_%1.2f_L_%d_M_%d.p' % (
            alpha_, beta_, theta_, D_, L, M)
        pk.dump({
            'x': xnt,
            'T': T
        }, open(os.path.join(data_dir_name, file_name), 'wb'))
        savemat(
            open(os.path.join(data_dir_name, file_name[:-1] + 'mat'), 'wb'), {
                'xnt': xnt,
                'T': T
            })

    return {'x': xnt, 'T': T}
Code example #21
###############################################################################
# We can also visualize the learned activations

plot_data([z[:10] for z in z_hat], ['stem'] * n_atoms)

###############################################################################
# Note that if the data is corrupted with impulsive noise, this method may not be
# the best.  Check out our :ref:`example using alphacsc
# <sphx_glr_auto_examples_plot_simulate_alphacsc.py>` to learn how to deal with
# such data.

alpha = 1.2
noise_level = 0.005
X[idx_corrupted] += levy_stable.rvs(alpha,
                                    0,
                                    loc=0,
                                    scale=noise_level,
                                    size=(n_corrupted_trials, n_times),
                                    random_state=random_state_simulate)
pobj, times, d_hat, z_hat, reg = learn_d_z(X,
                                           n_atoms,
                                           n_times_atom,
                                           reg=reg,
                                           n_iter=n_iter,
                                           solver_d_kwargs=dict(factr=100),
                                           random_state=random_state,
                                           n_jobs=1,
                                           verbose=1)
plt.figure()
plt.plot(d_hat.T)
plt.plot(ds_true.T, 'k--')
plt.show()
Code example #22
def Inoise(alpha_train, x1, x2, x3):
    # Positional arguments map to levy_stable.rvs(alpha, beta, loc, scale, size)
    y = np.float32(levy_stable.rvs(alpha_train, 0, x1, x2, x3))
    return y
Code example #23
import numpy as np
from scipy.stats import levy_stable
import matplotlib.pyplot as plt

fig, ax = plt.subplots(1, 1)

# Shape parameters (assumed example values; not defined in the excerpt)
alpha, beta = 1.8, -0.5

x = np.linspace(levy_stable.ppf(0.01, alpha, beta),
                levy_stable.ppf(0.99, alpha, beta), 100)
ax.plot(x, levy_stable.pdf(x, alpha, beta),
       'r-', lw=5, alpha=0.6, label='levy_stable pdf')

# Alternatively, the distribution object can be called (as a function)
# to fix the shape, location and scale parameters. This returns a "frozen"
# RV object holding the given parameters fixed.

# Freeze the distribution and display the frozen ``pdf``:

rv = levy_stable(alpha, beta)
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')

# Check accuracy of ``cdf`` and ``ppf``:

vals = levy_stable.ppf([0.001, 0.5, 0.999], alpha, beta)
np.allclose([0.001, 0.5, 0.999], levy_stable.cdf(vals, alpha, beta))
# True

# Generate random numbers:

r = levy_stable.rvs(alpha, beta, size=1000)

# And compare the histogram:

ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
ax.legend(loc='best', frameon=False)
plt.show()
Code example #24

from scipy.stats import levy_stable
import numpy as np

points = 1000000
#jennys_constant = 8675309

alpha = 1.7
beta = 0.0
delta = 1.0
gamma = 1.0

draw1 = levy_stable.rvs(alpha,
                        beta,
                        gamma,
                        delta,
                        size=points,
                        random_state=None)
draw2 = levy_stable.rvs(alpha,
                        beta,
                        gamma,
                        delta,
                        size=points,
                        random_state=None)

draw = np.zeros(points)
for i in range(points):
    draw[i] = draw1[i] + draw2[i]

#use scipy's quantile estimator to estimate the parameters and convert to S parameterization
#pconv = lambda alpha, beta, mu, sigma: (alpha, beta, mu - sigma * beta * np.tan(np.pi * alpha / 2.0), sigma)
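A minimal sketch of that estimation step, assuming scipy's quantile-based initializer levy_stable._fitstart (a private helper, so the API may differ between scipy versions):

# McCulloch-style quantile estimates for the summed draw, converted to the
# S parameterization with the pconv mapping from the comment above
pconv = lambda alpha, beta, mu, sigma: (
    alpha, beta, mu - sigma * beta * np.tan(np.pi * alpha / 2.0), sigma)

est_alpha, est_beta, est_loc, est_scale = pconv(*levy_stable._fitstart(draw))
print(est_alpha, est_beta, est_loc, est_scale)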
Code example #25
for a in alpha:
    for b in beta:
        x = []
        for i in range(N):
            W = np.random.exponential(1)
            x.append(f(W, a, b))

        plt.figure(figsize=(10, 6))

        # alpha-stable simulations
        plt.subplot(1, 2, 1)
        plt.hist(x,
                 bins=100,
                 color='black',
                 density=1,
                 alpha=0.75,
                 label='Simulation: ' + chr(945) + '=' + str(a) + ',' +
                 chr(946) + '=' + str(b))
        theo = levy_stable.rvs(a, b, size=N, scale=1)
        plt.hist(theo, bins=100, color='r', density=1, label='Theoretical')
        plt.title('Simulation Histogram')
        plt.legend()

        # plots of time series
        plt.subplot(1, 2, 2)
        plt.plot(np.arange(0, N), x)
        plt.title('Time Series Plot')

        plt.savefig('Q3_a{}_b{}.png'.format(a, b))
        plt.show()
Code example #26
 def jumps(self, N):
     return levy_stable.rvs(self.params[0], beta=0, scale=self.params[-1], size=N)
Code example #27
 def stable(self, alpha, beta, mu=0, c=1):
     # Pass beta as the skewness and map mu, c to loc and scale
     return levy_stable.rvs(alpha, beta, loc=mu, scale=c)
Code example #28
def SimuMx(n, m, combination):
    v = [np.random.uniform(0, 1, m) for i in range(0, n)]

    weights = [comb["weight"] for comb in combination]

    #Generate m random samples of category labels
    y = np.array([
        np.where(ls == 1)[0][0]
        for ls in np.random.multinomial(n=1, pvals=weights, size=m)
    ])

    for i in range(0, len(combination)):

        combinationsize = len(v[0][y == i])
        if combination[i]["type"] == "clayton":
            theta = combination[i]["theta"]
            X = np.array([
                np.random.gamma(theta**(-1), scale=1.0)
                for i in range(0, combinationsize)
            ])
            phi_t = lambda t: (1 + t)**(-1 / theta)
            for j in range(0, len(v)):
                v[j][y == i] = phi_t(-np.log(v[j][y == i]) / X)

        elif combination[i]["type"] == "gumbel":
            theta = combination[i]["theta"]
            X = levy_stable.rvs(alpha=1 / theta,
                                beta=1,
                                scale=(np.cos(np.pi / (2 * theta)))**theta,
                                loc=0,
                                size=combinationsize)
            phi_t = lambda t: np.exp(-t**(1 / theta))
            for j in range(0, len(v)):
                v[j][y == i] = phi_t(-np.log(v[j][y == i]) / X)

        elif combination[i]["type"] == "gaussian":
            corrMatrix = combination[i]["corrMatrix"]

            v2 = np.random.uniform(0, 1, combinationsize)

            L = linalg.cholesky(corrMatrix, lower=True)

            for j in range(0, len(v)):
                v[j][y == i] = np.sqrt(-2 * np.log(v[j][y == i])) * np.cos(
                    2 * np.pi * v2)

            z = np.dot(L, [vk[y == i] for vk in v])
            Y = norm.cdf(z, loc=0, scale=1)

            for j in range(0, len(v)):
                v[j][y == i] = np.array(Y[j])

        elif combination[i]["type"] == "student":
            corrMatrix = combination[i]["corrMatrix"]
            k = combination[i]["k"]

            v2 = np.random.uniform(0, 1, combinationsize)
            L = linalg.cholesky(corrMatrix, lower=True)
            r = np.random.chisquare(df=k, size=combinationsize)

            for j in range(0, len(v)):
                v[j][y == i] = np.sqrt(-2 * np.log(v[j][y == i])) * np.cos(
                    2 * np.pi * v2)

            z = np.dot(L, [vk[y == i] for vk in v])
            Y = t.cdf(np.sqrt(k / r) * z, df=k, loc=0, scale=1)

            for j in range(0, len(v)):
                v[j][y == i] = np.array(Y[j])

    return v
Code example #29
File: alpha3.py  Project: paa27/stable_dis
    beta = np.max([beta, -1])
    gam = np.max([gam, 0])

    return [alpha, alphaold], [beta, betaold], [gam, gamold], [delta, deltaold]


if __name__ == "__main__":

    alphas = np.linspace(0.1, 2, 5)
    betas = np.linspace(-1, 1, 2)
    errorsa = np.zeros((len(alphas), len(betas)))
    errorsb = np.zeros((len(alphas), len(betas)))

    for i in range(len(alphas)):
        alpha = alphas[i]
        for j in range(len(betas)):
            beta = betas[j]
            r = levy_stable.rvs(alpha, beta, loc=0, scale=1, size=10000)

            k = estimate(r)

            errorsa[i][j] = np.absolute((k[0][0]) - alpha) / alpha
            errorsb[i][j] = np.absolute(k[1][0] - beta) / beta
    '''
	pd.DataFrame(errorsa,index = alphas,columns = betas).to_csv('/alpha_err.csv')
	pd.DataFrame(errorsb,index = alphas,columns = betas).to_csv('/beta_err.csv')
	'''

    np.savetxt('errorsa.csv', errorsa, fmt='%.4e', delimiter=',')
    np.savetxt('errorsb.csv', errorsb, fmt='%.4e', delimiter=',')
Code example #30
def noise(input_shape, alpha=1.5, beta=0, scale=1):
    im_noise = levy_stable.rvs(alpha, beta, 0, scale, input_shape)
    return im_noise
Code example #31
import numpy as np
from scipy.stats import levy_stable
import matplotlib.pyplot as plt
N = 1000
##alpha, beta = 1.8, 0.75
##r = levy_stable.rvs(alpha, beta, size=N, scale = 0.3)
##plt.hist(r,bins = 30,label = "α=2 and β=0.75")

##alpha, beta = 2, 0.75
##r = levy_stable.rvs(alpha, beta, size=N, scale = 1)
##plt.hist(r,alpha= 0.5,bins = 20,label = "Theoretical and β=0.75")
##plt.legend()
##plt.show()

alpha, beta = 0.5, 0
r = levy_stable.rvs(alpha, beta, size=N, scale=1)
x = np.linspace(0, 999, 1000)
plt.plot(x, r, label="α=0.5, β=0")
plt.legend()
plt.show()