Example #1
import numpy.random as r

def genPoint():
    # Draw from a three-component mixture: a negative half-Cauchy tail with
    # probability w1, the project's genNormInInt helper (not shown here) with
    # probability w2, and a shifted square-root half-Cauchy tail otherwise.
    unif = r.uniform(0, 1)
    w1 = 0.21
    w2 = 0.68
    if unif < w1:
        return -abs(r.standard_cauchy())
    elif unif < w1 + w2:
        return genNormInInt(0, 4, 2, 4)
    else:
        return (abs(r.standard_cauchy()))**0.5 + 4
Example #2
from numpy import sqrt, exp
from numpy.random import randn, standard_cauchy, normal

def AMMOchild(parent, strats):
    chromosomelen = len(parent)
    # log-normal self-adaptation of the two strategy parameters
    T = 1 / sqrt(2 * chromosomelen)
    Tp = 1 / sqrt(2 * sqrt(chromosomelen))
    nstrats = strats * exp(T * randn(2) + Tp * randn(2))
    # mutate with a Cauchy step and a Gaussian step, each scaled by its own
    # strategy parameter
    nchild = parent + nstrats[0] * standard_cauchy((chromosomelen, 1)) + nstrats[1] * normal(size=(chromosomelen, 1))
    return [nchild, nstrats]
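A hypothetical call under the imports added above:

import numpy as np

child, new_strats = AMMOchild(np.zeros((5, 1)), np.array([0.1, 0.1]))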
Example #3
import numpy.random as npr

def noise_iid(size, noise_scale, noise_tail="normal", **kwargs):
    if noise_tail == "normal":
        return npr.normal(0, 1, size=size) * noise_scale
    elif noise_tail == "laplace":
        return npr.laplace(0, 1, size=size) * noise_scale
    elif noise_tail == "cauchy":
        return npr.standard_cauchy(size=size) * noise_scale
    else:
        raise ValueError("unknown noise_tail: %s" % noise_tail)
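A minimal usage sketch:

heavy_tailed = noise_iid(1000, noise_scale=0.1, noise_tail="cauchy")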
Example #4
from numpy import zeros, append, cumsum
from numpy.random import poisson, rand, exponential, standard_cauchy


def compound_poisson_from_T(intensity,
                            T,
                            jumps_distribution="exponential",
                            intensity_jumps=4.):
    """
    Simulate the trajectory of a compound Poisson process on the interval
    [0, T], for a given jump-size distribution (by default, the exponential
    distribution with mean 1/4).
    """
    scale_jumps = 1. / intensity_jumps
    N = poisson(T * intensity)

    events = zeros(N + 1)
    events[1:] = T * rand(N)
    events.sort()
    events = append(events, T)

    jumps = zeros(N + 1)
    if jumps_distribution == "exponential":
        jumps[1:] = cumsum(exponential(scale_jumps, size=N))
    if jumps_distribution == "cauchy":
        jumps[1:] = cumsum(abs(standard_cauchy(size=N)))
    jumps = append(jumps, jumps[-1])

    return events, jumps
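A minimal usage sketch under the imports above, plotting the piecewise-constant sample path (matplotlib assumed):

import matplotlib.pyplot as plt

events, jumps = compound_poisson_from_T(intensity=2., T=10.)
plt.step(events, jumps, where="post")  # the process is constant between jumps
plt.show()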
Example #5
import numpy as np
import numpy.random as r

def build_dist(type, n):
    # x_len (the number of sample means) is presumably defined at module level
    if type == 'l':
        # ARP: I like the list comprehensions
        d = [np.mean(r.standard_cauchy(n)) for x in range(x_len)]
    elif type == 't':
        d = [np.mean(r.triangular(-15, 0, 15, size=n)) for x in range(x_len)]
    elif type == 'u':
        d = [np.mean(r.uniform(-15, 15, n)) for x in range(x_len)]
    else:
        raise ValueError("unknown distribution type: %s" % type)
    return d
Example #6
 def copyAndModify(self, maxMutations, scale, source, maxIndexes):
     """
     The search operator:
     - copy and mutate this member.
     - copy values from the source at random indexes.
     """
     x = self.rep.copy()
     mutableIndexes = sample(range(len(x)), randrange(maxMutations + 1))
     x[mutableIndexes] += standard_cauchy() * scale
     copyIndexes = sample(range(len(x)), randrange(maxIndexes + 1))
     x[copyIndexes] = source.rep[copyIndexes]
     return x
Example #7
def varRange(n):
    for i in range(1000):
        fillTable(normal(0, 1, n))
    addRow("normal", n)
    for i in range(1000):
        fillTable(standard_cauchy(n))
    addRow("cauchy", n)
    for i in range(1000):
        fillTable(laplace(0, 2**(-0.5), n))
    addRow("laplace", n)
    for i in range(1000):
        fillTable(poisson(10, n))
    addRow("poisson", n)
    for i in range(1000):
        fillTable(uniform(-1 * (3**0.5), 3**0.5, n))
    addRow("uniform", n)
Example #8
    def train(self):
        pop = [self.create_solution() for _ in range(self.pop_size)]
        sorted_pop = sorted(pop, key=lambda temp: temp[self.ID_FIT])
        leaders = deepcopy(sorted_pop[:3])
        g_best = deepcopy(sorted_pop[0])

        for epoch in range(self.epoch):
            b = 2 - 2 * epoch / (self.epoch - 1)  # linearly decreased from 2 to 0, Eq. 5
            a = 2 - 2 * epoch / (self.epoch - 1)  # linearly decreased from 2 to 0
            ## Random walk here
            for i in range(0, len(leaders)):
                pos_new = leaders[i][self.ID_POS] + a * standard_cauchy(self.problem_size)
                fit_new = self.get_fitness_position(pos_new)
                if fit_new < leaders[i][self.ID_FIT]:
                    leaders[i] = [pos_new, fit_new]
            ## Update other wolves
            for i in range(self.pop_size):
                miu1, miu2, miu3 = b * (2 * uniform() - 1), b * (2 * uniform() - 1), b * (2 * uniform() - 1)  # Eq. 3
                c1, c2, c3 = 2 * uniform(), 2 * uniform(), 2 * uniform()  # Eq. 4
                X1 = leaders[0][self.ID_POS] - miu1 * abs(c1 * g_best[self.ID_POS] - pop[i][self.ID_POS])
                X2 = leaders[1][self.ID_POS] - miu2 * abs(c2 * g_best[self.ID_POS] - pop[i][self.ID_POS])
                X3 = leaders[2][self.ID_POS] - miu3 * abs(c3 * g_best[self.ID_POS] - pop[i][self.ID_POS])
                pos_new = (X1 + X2 + X3) / 3.0
                fit_new = self.get_fitness_position(pos_new)
                if fit_new < pop[i][self.ID_FIT]:
                    pop[i] = [pos_new, fit_new]
            sorted_pop = sorted(pop + leaders, key=lambda temp: temp[self.ID_FIT])
            pop = deepcopy(sorted_pop[:self.pop_size])
            leaders = deepcopy(sorted_pop[:3])
            if sorted_pop[self.ID_MIN_PROB][self.ID_FIT] < g_best[self.ID_FIT]:
                g_best = deepcopy(sorted_pop[self.ID_MIN_PROB])

            self.loss_train.append(g_best[self.ID_FIT])
            if self.verbose:
                print(">Epoch: {}, Best fit: {}".format(
                    epoch + 1, g_best[self.ID_FIT]))
        self.solution = g_best
        return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
Example #9
File: filter.py Project: chyser/bin
def arma_process(length,
                 ar_params=[1.],
                 ma_params=[1.],
                 mu=0.,
                 dist='normal',
                 scale=1):
    #-------------------------------------------------------------------------------
    """ Generate ARMA(p,q) process of given length, where p=len(ar_params) and q=len(ma_params).
    """

    # Initialize series with mean value
    series = resize(float(mu), length)

    # Enforce array type for parameters
    ar_params = atleast_1d(ar_params)
    ma_params = concatenate(([1], -1 * atleast_1d(ma_params))).tolist()

    # Reverse order of parameters for calculations below
    ma_params.reverse()

    # Degree of process
    p, q = len(ar_params), len(ma_params) - 1

    # Specify error distribution
    if dist == 'normal':
        a = random.normal(0, scale, length)
    elif dist == 'cauchy':
        a = random.standard_cauchy(length) * scale
    elif dist == 't':
        a = random.standard_t(scale, length)
    else:
        print('Invalid error distribution')
        return

    # Generate autoregressive series
    for t in range(1, length):

        # Autoregressive piece
        series[t] += dot(ar_params[max(p - t, 0):],
                         series[t - min(t, p):t] - mu)

        # Moving average piece
        series[t] += dot(ma_params[max(q - t + 1, 0):], a[t - min(t, q + 1):t])

    return series
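A hypothetical call, assuming the flat numpy imports the snippet relies on (resize, atleast_1d, concatenate, dot, random):

import matplotlib.pyplot as plt

y = arma_process(500, ar_params=[0.8], ma_params=[0.], dist='cauchy', scale=0.5)
plt.plot(y)  # an AR(1) series driven by heavy-tailed Cauchy innovations
plt.show()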
Example #10
def Simple(f, x0, maxEvals=1e6, verbose=False, targetFitness=-1e-10, sigma = .001, batchSize = None):
    dim = len(x0)
    if not batchSize:
        batchSize = 4 + int(floor(3 * log(dim)))
    numEvals = 0
    bestFound = None
    bestFitness = -Inf
    mutation = sigma

    population = [x0 + randn(dim) * mutation for _ in range(batchSize)]
    sigmas = ones(batchSize) * sigma

    while numEvals + batchSize <= maxEvals and bestFitness < targetFitness:
        # produce and evaluate samples
        fitnesses = [f(s) for s in population]
        # print fitnesses

        if max(fitnesses) > bestFitness:
            bestFitness = max(fitnesses)
            bestFound = population[argmax(fitnesses)]

        numEvals += batchSize
        if verbose:
            print("Step", numEvals / batchSize, ":", max(fitnesses), "best:", bestFitness)
        
        # update center and variances
        utilities = computeUtilities(fitnesses)

        new_population = []
        new_sigmas = []
        for n in range(batchSize):
            chosen = select_proportional(utilities)

            new_sigmas.append(sigmas[chosen] * normal(1,.001))
            new_population.append(population[chosen].copy() + standard_cauchy(dim) * new_sigmas[n])

            # new_sigmas.append(sigmas[chosen] + normal(0,.01))
            # new_population.append(population[chosen].copy() + randn(dim) * new_sigmas[n])

        population = new_population
        sigmas = new_sigmas
        print(mean(sigmas))
        # population = mix(new_population)

    return bestFound, bestFitness
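computeUtilities and select_proportional are not shown in this snippet; a hypothetical usage sketch with rank-based stand-ins for both (an assumption, not the project's definitions), again assuming the flat numpy imports the function relies on:

import numpy as np

def computeUtilities(fitnesses):
    # stand-in: rank-based utilities, normalized to sum to 1
    ranks = np.argsort(np.argsort(fitnesses)) + 1.0
    return ranks / ranks.sum()

def select_proportional(utilities):
    # stand-in: roulette-wheel selection proportional to utility
    return np.random.choice(len(utilities), p=utilities)

best_x, best_f = Simple(lambda x: -np.sum(x ** 2), np.ones(5), maxEvals=2000)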
Example #11
File: mcmc.py Project: NASCENCE/alg
def MCMC(f, x0, maxEvals=1e5, verbose=False, targetFitness=-1e-10, sigma=0.1, batchSize=None):
    dim = len(x0)

    bestFound = None
    bestFitness = -Inf

    last_fitness = f(x0)
    T = 1.0

    numEvals = 0

    accept_estimate = 0.5
    avg_fitness = last_fitness
    SMOOTH = 0.0001
    while numEvals <= maxEvals and bestFitness < targetFitness:
        x_new = x0 + standard_cauchy(dim) * sigma
        fitness = f(x_new)
        if fitness > bestFitness:
            bestFitness = fitness
            bestFound = x_new.copy()
            print(bestFitness, bestFound)

        print(fitness / T, last_fitness / T)
        avg_fitness = avg_fitness * (1.0 - SMOOTH) + fitness * SMOOTH
        accept = min(1.0, exp((fitness - avg_fitness) / T) / exp((last_fitness - avg_fitness) / T))
        # print accept, fitness, last_fitness
        if accept > rand():
            x0 = x_new
            last_fitness = fitness
            accept_estimate = accept_estimate * (1.0 - SMOOTH) + SMOOTH
        else:
            accept_estimate = accept_estimate * (1.0 - SMOOTH)
        if accept_estimate > 0.4:
            sigma *= 1.1
            # T /= 1.001
        else:
            sigma /= 1.1
            # T *= 1.001
        if numEvals % 1000 == 0:
            print(sigma)
        numEvals += 1
    return bestFound, bestFitness
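A hypothetical call, assuming flat numpy imports (zeros, exp, rand, standard_cauchy) as in the snippet:

from numpy import zeros

best_x, best_f = MCMC(lambda x: -float(sum(x ** 2)), zeros(3), maxEvals=5000)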
Example #12
def compound_poisson_from_n(intensity,
                            n,
                            jumps_distribution="exponential",
                            intensity_jumps=4.):
    """
    Simulation de la trajectoire d'un processus de Poisson composé jusqu'a
    l'instant T_n, pour une loi d'amplitude des sauts donnee (par défaut, 
    c'est la loi exponentielle d'esperance 1/4)
    """
    scale = 1. / intensity
    scale_jumps = 1. / intensity_jumps

    events = zeros(n + 1)
    events[1:] = cumsum(exponential(scale, size=n))

    jumps = zeros(n + 1)
    if jumps_distribution == "exponential":
        jumps[1:] = cumsum(exponential(scale_jumps, size=n))
    if jumps_distribution == "cauchy":
        jumps[1:] = cumsum(abs(standard_cauchy(size=n)))
    return events, jumps
Example #14
    def sample(self, n_samples):
        # Sample from standard half-cauchy distribution
        lamda = np.abs(npr.standard_cauchy(size=n_samples))

        # I think scale is the thing called Tau^2 in the paper.
        return npr.randn(n_samples) * lamda * self.scale
Example #15
#coding: utf-8

import numpy as np
import numpy.random as rd
import matplotlib.pyplot as plt
from math import pi
from scipy.special import i0

# number of data points
NUM = 10000000

"""Cauchy distribution"""
x = rd.standard_cauchy(NUM)
plt.hist(x, histtype='step', bins=1000, range=(-10, 10), density=True)
plt.title("Standard_Cauchy dist : p(x)")
filename="cauchy.png"
plt.savefig(filename)
plt.show()
Example #16
    def __call__(self):
        return standard_cauchy(size=size(self.s)) * self.s
Example #18
    def __init__(
            self,
            num_n,
            num_m,
            mos_method='eft',
            *args,
            algorithm=omp.recover,
            **kwargs
    ):
        """
            Construct an SOE Scenario

        Parameters
        ----------
        num_n : int
            number of atoms in the dictionary
        num_m : int
            number to compress down to
        mos_method : string, optional
            method to use for model order selection
        algorithm : function
            recovery method
        **kwargs : dict
            additional arguments for each MOS method
        """

        # save the MOS algorithm
        self._mos_method = mos_method

        if self._mos_method == 'lopes':

            # evenly divide the measurement
            self.num_m1 = int(num_m/2)
            self.num_m2 = num_m - self.num_m1
            self._num_gamma = kwargs['num_gamma']

            # init measurement matrix
            matMeasurement = np.zeros((num_m, num_n))

            # generate cauchy measurements
            matMeasurement[:self.num_m1, :] = npr.standard_cauchy(
                (self.num_m1, num_n)
            ) * self._num_gamma

            # generate gaussian measurements
            matMeasurement[self.num_m1:, :] = npr.randn(
                self.num_m2,
                num_n
            ) * self._num_gamma

            matMeasurement[:] = proj_sphere(matMeasurement)

            # set appropriate estimation function
            self._estimate_function = self._est_lopes

        elif mos_method == 'ravazzi':

            # density parameter and variance of normal distribution
            self._num_gamma = kwargs['num_gamma']

            # generate the uniform distribution to decide where there are zeros
            # in the measurement matrix
            matUniform = npr.uniform(0, 1, (num_m, num_n))

            # generate the normal distributed samples
            matNormal = (1 / np.sqrt(self._num_gamma)) * \
                npr.randn(num_m, num_n)

            # decide where there are zeros
            matSubSel = 1 * (matUniform < self._num_gamma)

            # put the normal distributed elements, where we rolled the
            # dice correctly
            matMeasurement = matSubSel * matNormal

            # set the correct estimation function
            self._estimate_function = self._est_ravazzi

        else:

            # path to store training data to
            self._buffer_path = kwargs['str_path']

            # overlap parameter
            self._num_p = kwargs['num_p']

            # error probability during training
            self._num_err_prob = kwargs['num_err_prob']

            # make scenario complex if we do overlap
            self._do_complex = ((self._num_p != 0) or (kwargs['do_complex']))

            # if we have no overlap, both matrices should be gaussian
            # if we have overlap one has to be vandermonde and the scenario
            # itself has to be complex
            if self._num_p == 0:

                # find dimensions most suitable
                self._num_l, self._num_k = self._largest_div(num_m)

                # create the matrices
                if self._do_complex:
                    mat_psi = npr.randn(self._num_l, num_n) + \
                        1j * npr.randn(self._num_l, num_n)

                    mat_phi = npr.randn(self._num_k, num_n) + \
                        1j * npr.randn(self._num_k, num_n)

                else:
                    mat_psi = npr.randn(self._num_l, num_n)
                    mat_phi = npr.randn(self._num_k, num_n)

                self._estimate_function = self._nope_overlap

            else:
                mat_psi = vand.draw(
                    int(np.ceil(float(num_m)/self._num_p)),
                    num_n,
                    self._buffer_path + "vander_c"
                )
                mat_phi = npr.randn(self._num_p, num_n) + \
                    1j * npr.randn(self._num_p, num_n)

                self._num_l = self._find_block_length(num_m, self._num_p)
                self._num_k = int((num_m - self._num_l) / self._num_p + 1)

                # set appropriate estimation function
                self._estimate_function = self._true_overlap

            # measurement is the KRP of scaled KRP of vandermonde
            # where we only take the first num_m rows
            matMeasurement = proj_sphere(
                khatri_rao(mat_psi, mat_phi)[:num_m, :]
            )

            if mos_method == 'eft':
                # generate the array with the helper values
                self._num_q = min(self._num_l, self._num_k)
                self._arr_r = self._eft_fetch_arr_r(
                    self._num_q,
                    self._num_l,
                    self._buffer_path+"eft_arr_r_"+str(self._num_l)+"_"+str(self._num_k)
                )

                # fetch the thresholding coefficients
                self._arr_eta = self._eft_fetch_arr_eta(
                    self._num_l,
                    self._num_k,
                    self._num_err_prob,
                    self._do_complex,
                    self._buffer_path + "eft_arr_eta_" +
                    str(self._num_err_prob) + "_" +
                    int(self._do_complex) * "c_" +
                    str(self._num_l) + "_" + str(self._num_k)
                ) + 0.4

            elif mos_method == 'eet':
                self._arr_eta = self._eet_fetch_arr_eta(
                    self._num_l,
                    self._num_k,
                    self._num_err_prob,
                    self._do_complex,
                    self._buffer_path + "eet_arr_eta_" +
                    str(self._num_err_prob) + "_" +
                    int(self._do_complex) * "c_" +
                    str(self._num_l) + "_" + str(self._num_k)
                )
            elif mos_method == 'new':
                self._arr_eta = self._new_fetch_arr_eta(
                    self._num_l,
                    self._num_k,
                    self._num_err_prob,
                    self._do_complex,
                    self._buffer_path + "new_arr_eta_" +
                    str(self._num_err_prob) + "_" +
                    int(self._do_complex) * "c_" +
                    str(self._num_l) + "_" + str(self._num_k)
                )

        # now create the cs scenario
        Scenario.__init__(
            self,               # yay!
            np.eye(num_n),      # during SOE the dictionary is the identity
                                # matrix, i.e. the vector itself is sparse
            matMeasurement,
            algorithm           # recovery is done with OMP
        )
Example #19
def standard_cauchy(size, params):
    try:
        return random.standard_cauchy(size)
    except ValueError as e:
        exit(e)
Example #20
def cauchy_distr(size):
    return random.standard_cauchy(size)
Example #21
def cauchy(x=0, gamma=1, size=None):
    return random.standard_cauchy(size=size) * gamma + x
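For example, ten draws from a Cauchy with location 2 and scale 0.5 (a hypothetical call, assuming numpy's random module is imported as in the snippet):

samples = cauchy(x=2, gamma=0.5, size=10)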
Example #22
    def __call__(self):
        return nr.standard_cauchy(size=np.size(self.s)) * self.s
Example #23
file.write('The error distribution follows ei ~ N(0, 1) + Cauchy(0, 10)\n')
file.write(
    'To create outliers in 10, 30 and 50% of the cases, the following approach is used:\n'
)
file.write('    1) three rows of length 10 are generated\n')
file.write(
    '    2) the rows contain 1, 3 and 5 numbers drawn from the Cauchy distribution; the rest are zeros\n'
)
file.write(
    '    3) an array of errors with the standard normal distribution is created\n'
)
file.write(
    '    4) each element of the error array is randomly incremented by one of the numbers from the Cauchy rows\n'
)

Cauchy_row1 = [0, standard_cauchy(), 0, 0, 0, 0, 0, 0, 0, 0]
Cauchy_row2 = [
    0,
    standard_cauchy(), 0,
    standard_cauchy(), 0, 0,
    standard_cauchy(), 0, 0, 0
]
Cauchy_row3 = [
    0,
    standard_cauchy(), 0,
    standard_cauchy(), 0,
    standard_cauchy(), 0,
    standard_cauchy(), 0,
    standard_cauchy()
]
# Create the lists that will hold the Cauchy-distributed outliers
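A hypothetical sketch of steps 3) and 4) described above (names assumed, not from the original):

import numpy.random as npr

errors = npr.standard_normal(100)                     # step 3: standard normal errors
errors += npr.choice(Cauchy_row2, size=errors.shape)  # step 4: add a random Cauchy-row element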
Example #24
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import uniform
import matplotlib.mlab as mlab
from scipy import stats
from numpy.random import standard_cauchy

#sample_size
N = 1000

#uniform[0,1] variable
u = uniform(size=N)

#Inverse probability transform
x_s = np.tan(np.multiply((u - 1 / 2), np.pi))

std_cauchy = standard_cauchy(N)
x = np.linspace(-10, 10, N, endpoint=True)

plt.hist(x_s, bins=50, range=(-10, 10), density=True, alpha=0.9, label='Cauchy')
plt.ylabel('Inverse Probability Transform')
plt.plot(x, stats.cauchy.pdf(x), color='orange', label='Cauchy pdf')
plt.legend()
plt.grid()
plt.show()
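tan(pi * (u - 1/2)) is the standard Cauchy quantile function, so x_s and std_cauchy target the same law; a quick check one might add:

print(np.percentile(x_s, [25, 75]))         # should be near (-1, 1), the Cauchy quartiles
print(np.percentile(std_cauchy, [25, 75]))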
Example #25
#!/local/anaconda/bin/python
# IMPORTANT: leave the above line as is.

import sys
import numpy as np
import numpy.random as npr
from sklearn import linear_model
DIMENSION = 400  # Dimension of the original data.
CLASSES = (-1, +1)   # The classes that we are trying to predict.
M = 10000  # number of samples to take

npr.seed(0)
ww = npr.standard_cauchy(size=(M,400))
b = npr.uniform(0, 2*np.pi, size = M) 
def transform(x_original):
    gamma = 2
    return np.cos(ww.dot(x_original)*gamma+b)*np.sqrt(2.0/M)

if __name__ == "__main__":
    x = None
    clf = linear_model.SGDClassifier(fit_intercept=False, alpha=0.00001, loss='hinge')
    for line in sys.stdin:
        line = line.strip()
        (label, x_string) = line.split(" ", 1)
        y = np.array([int(label)])
        x_original = np.fromstring(x_string, sep=' ')
        x = transform(x_original)  # Use our features.
        clf.partial_fit(x,y, classes=CLASSES)
    w = clf.coef_.flatten()
    print "%s\t%s" % ('1', str(list(w)))
Example #26
    def __call__(self, num_draws=None):
        size = self.s.shape
        if num_draws:
            size += (num_draws, )
        return standard_cauchy(size=size).T * self.s
Example #27
    def __call__(self):
        return standard_cauchy(size=np.size(self.s)) * self.s
Example #28
def C(x, m, s):
    # Cauchy likelihood of the sample x under location m and scale s
    A = ((x - m) / s)**2
    B = pi * s
    return prod(1 / (B * (1 + A)))


N = 10
n = 10**4
pr = range(0, n, 1)
m1 = 0.
s1 = 1.
g, g2 = [], []  # collectors for the log-likelihood ratios
"""
for p in pr:
    x = normal(m1, s1, N)
    g.append(log(f(x, N, m1, s1)/f(x, N, m2, s2)))
    x = normal(m2, s2, N)
    g2.append(log(f(x, N, m1, s1)/f(x, N, m2, s2)))
"""

for p in pr:
    x = normal(m1, s1, N)
    if f(x, N, m1, s1) != 0:
        g.append(log(f(x, N, m1, s1) / C(x, m1, s1)))
    x = standard_cauchy(N)
    if f(x, N, m1, s1) != 0:
        g2.append(log((f(x, N, m1, s1) / C(x, m1, s1))))

y, bins, _ = plt.hist(g, 200, histtype=u'step', density=True)
y2, bins2, _ = plt.hist(g2, 200, histtype=u'step', density=True)
bin_w = bins[1] - bins[0]
#bin_w2 = bins2[1] - bins2[0]
plt.show()
Example #29
def quartiles(s):
    # halve the sample at its median q2; the quartiles are the medians of the halves
    q2 = np.median(s)
    return np.median(s[s <= q2]), np.median(s[s >= q2])


def out(x):
    # returns a mask selecting the sample values that are not outliers
    q1, q3 = quartiles(x)
    iqr = q3 - q1
    return (q1 - 1.5 * iqr <= x) & (x <= q3 + 1.5 * iqr)


mu, sigma = 0, 1
N = 2000

if __name__ == '__main__':
    X = nr.normal(mu, sigma, N)
    Z = nr.standard_cauchy(N)

    S = pd.DataFrame({'X': X, 'Z': Z})

    M1, M2 = abs(S) > 1, abs(S) > 3

    C1, C2 = np.sum(M1), np.sum(M2)
    print(C1, C2, sep='\n')

    X_F = X[out(X)]
    Z_F = Z[out(Z)]

    N_BINS = 42

    plt.hist(X_F, bins=N_BINS)
    plt.title('N(%.2f, %.2f) - filtered' % (mu, sigma))
Example #30
from numpy import quantile, around
from numpy.random import normal, standard_cauchy, laplace, poisson, uniform
import math
import matplotlib.pyplot as plt
from tabulate import tabulate

distributions = {
    "normal": lambda n: normal(0, 1, n),
    "cauchy": lambda n: standard_cauchy(n),
    "laplace": lambda n: laplace(0, 2**(-0.5), n),
    "pois": lambda n: poisson(10, n),
    "uniform": lambda n: uniform(-math.sqrt(3), math.sqrt(3), n)
}


def getDistribution(distrName, n):
    return distributions.get(distrName)(n)
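A hypothetical call through the dispatch table:

sample = getDistribution("cauchy", 100)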


def theoreticalProb(sample):
    q1, q3 = quantile(sample, 0.25), quantile(sample, 0.75)
    min = q1 - 1.5 * (q3 - q1)  # Tukey lower whisker
    max = q3 + 1.5 * (q3 - q1)  # Tukey upper whisker
    return min, max


def ejectionNum(rv, min, max):
    ejection = 0
    for elem in rv:
        if elem < min or elem > max: