Example #1
import numpy as np
from numpy import inf
from numpy.random import gamma


def mutational_effects(n, s, beta):
    """
    Generate n mutations from a gamma distribution with mean effect s and shape parameter beta.
    Negative (positive) values of s indicate deleterious (beneficial) mutations.

    Arguments:
    n -- number of mutations
    s -- mean effect of a mutation
    beta -- shape parameter of the gamma distribution (inf indicates equal effects)
    """
    if np.sign(s) == 1:
        beneficial = True
    elif np.sign(s) == -1:
        beneficial = False
    else:
        raise ValueError("Invalid s: must be nonzero.")
    if beta > 0:
        if beta == inf:
            # Equal effects: every mutation has exactly the mean effect s.
            mutations = np.repeat(s, n)
        else:
            # Rate chosen so that the gamma distribution has mean abs(s).
            alpha = beta / abs(s)
            if beneficial:
                mutations = gamma(shape=beta, scale=1 / alpha, size=n)
            else:
                mutations = -gamma(shape=beta, scale=1 / alpha, size=n)
        return mutations
    else:
        raise ValueError("Invalid beta: must satisfy 0 < beta <= inf.")
Example #2
from numpy import zeros, random


def firstStateGenerator(parameters, size):
    first_state = zeros((size, 5))
    first_state[:, 2] = random.gamma(size=size,
                                     shape=parameters[6] * (parameters[2]**2) / parameters[3],
                                     scale=parameters[3] / parameters[2])
    first_state[:, 4] = random.gamma(size=size,
                                     shape=(1 - parameters[6]) * (parameters[2]**2) / parameters[3],
                                     scale=parameters[3] / parameters[2])
    return first_state
Example #3
 def initializationAttributes(self):
     # The rejection loops below implement the truncation.
     self.Attribute['IOP'] = random.normal(28, 3)
     while self.Attribute['IOP'] < 22:          # truncate IOP to >= 22
         self.Attribute['IOP'] = random.normal(28, 3)
     self.Attribute['MD'] = -random.gamma(2, 2.5)
     while self.Attribute['MD'] > -3:           # truncate MD to <= -3
         self.Attribute['MD'] = -random.gamma(2, 2.5)
     self.Attribute['MDR'] = random.gamma(2, 0.014)
     self.Attribute['Age'] = random.normal(68, 5)
Example #4
File: main.py Project: 0-T-0/bokeh
from numpy import asarray, cumprod, exp
from numpy.random import lognormal, gamma, uniform


def _create_prices(t):
    # `source` is a Bokeh data source; `mean` and `stddev` are widget globals.
    last_average = 100 if t == 0 else source.data['average'][-1]
    returns = asarray(lognormal(mean.value, stddev.value, 1))
    average = last_average * cumprod(returns)
    high = average * exp(abs(gamma(1, 0.03, size=1)))
    low = average / exp(abs(gamma(1, 0.03, size=1)))
    delta = high - low
    open = low + delta * uniform(0.05, 0.95, size=1)
    close = low + delta * uniform(0.05, 0.95, size=1)
    return open[0], high[0], low[0], close[0], average[0]
Example #5
import numpy as np
import numpy.random as rn


def sample_eta_west(eta, nact, n0, a=1, b=0):
    """Samples the concentration parameter eta"""

    ## compute the auxiliary variable x and the mixture weight p
    x = rn.beta(eta + 1, n0)
    lx = np.log(x)
    r = (a + nact - 1) / (n0 * (b - lx))
    p = r / (r + 1)

    ## draw eta from the appropriate mixture component
    return rn.gamma(a + nact, 1 / (b - lx)) if rn.rand() < p else rn.gamma(a + nact - 1, 1 / (b - lx))
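This is the auxiliary-variable update of Escobar and West (1995) for the concentration parameter of a Dirichlet-process mixture. A usage sketch with assumed values: resample eta for 12 active components among 200 observations under a Gamma(1, 1) prior.

eta = 1.0
for _ in range(100):
    eta = sample_eta_west(eta, nact=12, n0=200, a=1, b=1)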
Example #6
 def _init_component(self, m, dim):
     assert self.mode_dims[m] == dim
     K = self.n_components
     s = self.smoothness
     gamma_DK = s * rn.gamma(s, 1. / s, size=(dim, K))
     delta_DK = s * rn.gamma(s, 1. / s, size=(dim, K))
     self.gamma_DK_M[m] = gamma_DK
     self.delta_DK_M[m] = delta_DK
     self.E_DK_M[m] = gamma_DK / delta_DK
     self.sumE_MK[m, :] = self.E_DK_M[m].sum(axis=0)
     self.G_DK_M[m] = np.exp(sp.psi(gamma_DK) - np.log(delta_DK))
     self.beta_M[m] = 1. / self.E_DK_M[m].mean()
Example #7
 def sample(self, n=None):
     """Return a multivariate t sample."""
     if n is None:
         snsamps = random.standard_normal(self.ndim)
         gamsamp = random.gamma(self.hnu)
         return _mvnt.mvtsamp(self.mu, self.L, self.nu, snsamps, gamsamp)
     samps = zeros((n, self.ndim), float)
     for i in range(n):
         snsamps = random.standard_normal(self.ndim)
         gamsamp = random.gamma(self.hnu)
         samps[i] = _mvnt.mvtsamp(self.mu, self.L, self.nu, snsamps, gamsamp)
     return samps
Example #8
 def sample_noise(self):
     if self.noise_type == "truncnorm":
         # NOTE: despite the name, this draws uniformly on [0, self.upper].
         return nr.uniform(0, self.upper)
     elif self.noise_type == "noiseless":
         return 0
     elif self.noise_type == "gamma":
         return nr.gamma(3, scale=3)  # equivalent to gamma(3,3) in wiki
     elif self.noise_type == "beta":
         # Assumed fix: the source repeated the gamma draw here;
         # a Beta(3, 3) draw appears to be what was intended.
         return nr.beta(3, 3)
     else:
         sys.exit("sample_noise: Unknown noise type for generating noise")
Example #9
 def __calculate_alpha(self, graph, alpha_prev):
     '''Picks alpha from a mixture of 2 gamma distributions'''
     num_communities = graph.number_of_communities
     num_players = graph.number_of_nodes
     beta_z = npr.beta(alpha_prev + 1, num_players)
     #generate a uniform random number to pick which gamma from the mixture
     rnd_unif = npr.uniform()
     inv_mixture_scale = self.__gamma_b - math.log(beta_z)
     mixture_scale = 1.0 / inv_mixture_scale
     if rnd_unif / (1 - rnd_unif) <  ((self.__gamma_a + num_communities)/
                                       (num_players * inv_mixture_scale)):
         return npr.gamma(self.__gamma_a + num_communities, mixture_scale)
     return npr.gamma(self.__gamma_a + num_communities - 1, mixture_scale)
Example #10
    def levy(self):
        """Draw Levy-stable step lengths via Mantegna's algorithm."""
        # Here gamma is the gamma function (e.g., math.gamma), not a random
        # draw; sin and pi come from math.
        s = []
        for i in range(self.lchrom):
            up = gamma(1 + self.beta) * sin(pi * self.beta / 2.)
            down = gamma((1 + self.beta) / 2.) * self.beta * 2**((self.beta - 1) / 2.)
            sigma = (up / down) ** (1 / self.beta)

            U = normal(0, sigma)
            V = normal(0, 1)

            s.append(U / abs(V)**(1 / self.beta))
        return np.array(s)
Example #11
 def sample_q(self, n=None):
     """Return a multivariate t sample and the value of its
     associated quadratic form."""
     if n is None:
         snsamps = random.standard_normal(self.ndim)
         gamsamp = random.gamma(self.hnu)
         return _mvnt.mvtsampq(self.mu, self.L, self.nu, snsamps, gamsamp)
     samps = zeros((n, self.ndim), float)
     qvals = zeros(n, float)
     for i in range(n):
         snsamps = random.standard_normal(self.ndim)
         gamsamp = random.gamma(self.hnu)
         samps[i], qvals[i] = _mvnt.mvtsampq(self.mu, self.L, self.nu, snsamps, gamsamp)
     return samps, qvals
Example #13
def new_cluster(number_of_stars = 1000):
    masses = new_salpeter_mass_distribution(
        number_of_stars, 
        mass_min = 0.1 | units.MSun,
        mass_max = 125.0 | units.MSun, 
        alpha = -2.35
    )
    
    particles = Particles(number_of_stars)
    particles.mass = masses
    particles.x = units.parsec(random.gamma(2.0, 1.0, number_of_stars))
    particles.y = units.parsec(random.gamma(1.0, 1.0, number_of_stars))
    particles.z = units.parsec(random.random(number_of_stars))
    
    return particles
Example #14
import numpy as np
import numpy.random as rn


def sample_eta_ishwaran(lw, eta, a=0, b=0):
    """Samples the concentration parameter eta given a vector of mixture log-weights"""
    eta = rn.gamma(lw.size + a - 1, 1 / (b - lw[-1])) if np.isfinite(lw[-1]) else eta
    return eta
Example #15
 def __init__(self, bias_stdev=0.2, eval_stdev=0.2, mode='pareto', frac=0.1, 
              pareto_shape=1.4, gamma_shape=3):
     """Initializes the precision and bias of a user.  Useful only for simulation."""
     # Chooses the bias of the user
     self.true_bias = 0
     if bias_stdev > 0:
         self.true_bias = npr.normal(scale=bias_stdev)
     # Chooses the variance of the user.
     if mode == 'bimodal':
         # 10% of the students are responsible for 90% of the trouble,
         # where 10% is the fraction.
         # This code keeps the standard deviation as specified, but explains
         # it via a bimodal distribution, with values s and s / frac.
         s = eval_stdev * eval_stdev * frac / (1.0 + frac - frac * frac)
         if npr.uniform() < frac:
             self.prec = (s / (frac * frac)) ** 0.5
         else:
             self.prec = s ** 0.5
     elif mode == 'pareto':
         # The mean of a pareto distribution of shape a is 1 / (a - 1)
         # Here, we use the pareto distribution to sample the variance.
         
         prec_sq = npr.pareto(pareto_shape) * eval_stdev * eval_stdev * (pareto_shape - 1.0)
         self.prec = prec_sq ** 0.5
     else:
         # Gamma.
         prec_sq = npr.gamma(gamma_shape, scale=eval_stdev)
         self.prec = prec_sq ** 0.5
         
     # List of items it judged.
     self.items = []
     # Dictionary mapping each item, to the grade assigned by the user.
     self.grade = {}
Example #16
    def generate(self):
        '''Generate some random formants.'''
        bw = 10.
        f1 = rng.uniform(300, 700)
        f2 = rng.uniform(800, 2000)
        f3 = rng.uniform(2000, 2500)
        f4 = rng.uniform(2500, 3500)

        def near(x, t, s):
            return 1. / (1. + numpy.exp((t - x) / s))

        while True:
            if rng.random() < 0.1:
                bw = rng.gamma(5, 5)
                r = near(f1, 500, 100)
                f1 += rng.uniform(-30 * r, 30 * (1 - r))
                r = near(f2, 1500, 300)
                f2 += rng.uniform(-50 * r, 50 * (1 - r))
                r = near(f3, 2200, 500)
                f3 += rng.uniform(-70 * r, 70 * (1 - r))
                r = near(f4, 3000, 700)
                f4 += rng.uniform(-90 * r, 90 * (1 - r))
            yield (bw, ((1., f1, 200),
                        (0.9, f2, 100),
                        (0.8, f3, 100),
                        (0.7, f4, 100)))
Example #17
from numpy import random


def genData():
    data = [[], []]
    for i in range(150):
        data[0].append(random.gamma(2, 0.5))
    for i in range(100):
        data[1].append(random.normal(1, 0.25))
    return data
Example #18
    def rand(self):

        m, n = self.__m, self.__n

        s = linalg.cholesky(self.__prod).transpose()
        w = self.__weight

        # Compute the parameters of the posterior distribution.
        mu = linalg.solve(s[:m, :m], s[:m, m:])
        omega = np.dot(s[:m, :m].transpose(), s[:m, :m])
        sigma = np.dot(s[m:, m:].transpose(), s[m:, m:]) / w
        eta = w

        # Simulate the marginal Wishart distribution.
        f = linalg.solve(np.diag(np.sqrt(2.0*random.gamma(
            (eta - np.arange(n))/2.0))) + np.tril(random.randn(n, n), -1),
                         np.sqrt(eta)*linalg.cholesky(sigma).transpose())
        b = np.dot(f.transpose(), f)

        # Simulate the conditional Gauss distribution.
        a = mu + linalg.solve(linalg.cholesky(omega).transpose(),
                              np.dot(random.randn(m, n),
                                     linalg.cholesky(b).transpose()))

        return a, b
Example #19
    def rand(self):

        dim = self.__dim__

        mu = self.__param__.mu
        omega = self.__param__.omega
        sigma = self.__param__.sigma
        eta = self.__param__.eta

        if numpy.isfinite(eta):

            # Simulate the marginal Gamma distribution.
            disp = sigma / (random.gamma(eta / 2.0, size=dim) / (eta / 2.0))

        else:

            # Account for the special case where the
            # marginal distribution is singular.
            disp = numpy.copy(sigma)

        if numpy.isfinite(omega):

            # Simulate the conditional Gauss distribution.
            loc = mu + (numpy.sqrt(disp) * random.randn(dim)) / math.sqrt(omega)

        else:

            # Account for the special case where the
            # conditional distribution is singular.
            loc = numpy.copy(mu)

        return loc, disp
Example #20
    def rand(self):

        dim = self.__dim__

        mu = self.__param__.mu
        omega = self.__param__.omega
        sigma = self.__param__.sigma
        eta = self.__param__.eta

        if numpy.isfinite(eta):

            # Simulate the marginal Wishart distribution.
            diag = 2.0 * random.gamma((eta - numpy.arange(dim)) / 2.0)
            fact = numpy.diag(numpy.sqrt(diag)) + numpy.tril(random.randn(dim, dim), -1)
            fact = linalg.solve(fact, math.sqrt(eta) * linalg.cholesky(sigma).transpose())
            disp = numpy.dot(fact.transpose(), fact)

        else:

            # Account for the special case where the
            # marginal distribution is singular.
            disp = numpy.copy(sigma)

        if numpy.isfinite(omega):

            # Simulate the conditional Gauss distribution.
            loc = mu + numpy.dot(linalg.cholesky(disp), random.randn(dim)) / math.sqrt(omega)

        else:

            # Account for the special case where the
            # conditional distribution is singular.
            loc = numpy.copy(mu)

        return loc, disp
Example #21
def gen_inv_gaussian(a, b, p, burnin=10):
    """
    Sampler based on Gibbs sampling.
    Assumes scalar p.
    """
    from numpy.random import gamma
    from numpy import sqrt

    s = a * 0. + 1.

    if p < 0:
        a, b = b, a

    for i in range(burnin):

        l = b + 2 * s
        m = sqrt(l / a)

        x = inv_gaussian(m, l, shape=m.shape)
        s = gamma(abs(p) + 0.5, x)

    if p >= 0:
        return x
    else:
        return 1 / x
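gen_inv_gaussian calls an inv_gaussian helper that is not shown here. A sketch of one standard implementation, the Michael-Schucany-Haas transform for the Inverse-Gaussian(mean m, shape l) distribution; this is an assumed stand-in, not necessarily the source's actual helper:

import numpy as np
from numpy.random import standard_normal, uniform

def inv_gaussian(m, l, shape=None):
    if shape is None:
        shape = np.shape(m)
    y = standard_normal(shape) ** 2
    x = m + m**2 * y / (2 * l) - m / (2 * l) * np.sqrt(4 * m * l * y + (m * y)**2)
    u = uniform(size=shape)
    # Accept x with probability m / (m + x), otherwise take m**2 / x.
    return np.where(u <= m / (m + x), x, m**2 / x)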
Example #22
def truncated_gamma(shape=None, alpha=1., beta=1., x_min=None, x_max=None):
    """
    Generate random variates from a lower- and upper-bounded gamma distribution.

    @param shape: shape of the random sample
    @param alpha: shape parameter (alpha > 0.)
    @param beta:  rate parameter (beta > 0.)
    @param x_min: lower bound of variate
    @param x_max: upper bound of variate
    @return: random variates of the bounded gamma distribution
    """
    from scipy.special import gammainc, gammaincinv
    from numpy.random import gamma
    from numpy import inf

    if x_min is None and x_max is None:
        return gamma(alpha, 1 / beta, shape)
    elif x_min is None:
        x_min = 0.
    elif x_max is None:
        x_max = inf
        
    x_min = max(0., x_min)
    x_max = min(1e300, x_max)

    a = gammainc(alpha, beta * x_min)
    b = gammainc(alpha, beta * x_max)

    return probability_transform(shape,
                                 lambda x, alpha=alpha: gammaincinv(alpha, x),
                                 a, b) / beta
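truncated_gamma also depends on a probability_transform helper that is not defined in this snippet. A minimal sketch of such an inverse-CDF transform, assuming this interface: draw uniforms between the CDF values a and b of the two bounds, then map them back through the inverse CDF.

from numpy.random import uniform

def probability_transform(shape, inv_cdf, a, b):
    u = uniform(low=a, high=b, size=shape)
    return inv_cdf(u)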
Example #23
import numpy as np
import numpy.random as rn


def sample_dirichlet(a):
    """Sample from multiple Dirichlet distributions given the matrix of concentration parameter columns a"""
    x = rn.gamma(a, 1)
    w = x / np.sum(x, 0)
    return w
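The construction works because independent Gamma(a_i, 1) draws normalized by their sum are exactly a Dirichlet(a) sample. A quick check with illustrative values:

a = np.array([[1.0, 2.0],
              [2.0, 5.0],
              [3.0, 1.0]])    # two Dirichlet distributions, one per column
w = sample_dirichlet(a)
print(w.sum(axis=0))          # each column sums to 1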
Example #24
from numpy.random import gamma


def distLambda(m, Nmud, Vmud, Vwell):
    """Choose m values from the distribution of lambda given Nmud, Vmud, Vwell.
    m is the number of values to pull
    Nmud is the number of cysts counted in estimating the concentration of cysts
    Vmud is the volume of mud counted over in estimating the concentration of cysts
    Vwell is the volume of mud placed in each well in the wellplate
    """
    return Vwell / Vmud * gamma(Nmud, 1.0, m)
Example #25
 def EvaluateConversion(self):
     """Evaluate the probability of conversion from OHT to POAG by the current time"""
     # HRother is a module-level hazard-ratio constant.
     h = (1.26**((self.Attribute['Age'] - 55) / 10)) * (1.09**(self.Attribute['IOP'] - 24)) * 0.02 * HRother
     prob = 1 - math.exp(-h * self.params['CurrentTime'])
     if random.uniform(0, 1) < prob:
         self.params['Conversion'] = True
         self.Attribute['MD'] = -random.gamma(6, 0.5)
         self.params['TimetoConversion'] = self.params['CurrentTime']
Example #26
def test_gamma_parameters2(verbose=0):
    import numpy.random as nr
    n = 1000
    X = nr.gamma(11., 3., n)
    G = Gamma()
    G.estimate(X)
    if verbose:
        G.parameters()
    assert(np.absolute(G.scale-3)<0.5)
Example #27
import numpy.random as rn


def sample_normal_prec_jeffreys(s1, s2, ndata):
    """Samples the precision of a normal distribution under Jeffreys prior,
    given sufficient statistics s1 = sum(x) and s2 = sum(x**2)."""
    avg = s1 / ndata
    dot = s2 - 2 * avg * s1 + ndata * avg**2   # sum of squared deviations from the mean
    return rn.gamma(ndata * 0.5, 2 / dot)
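A usage sketch with simulated data, taking s1 and s2 to be the sum and the sum of squares, as the formulas above assume:

import numpy as np
import numpy.random as rn

data = rn.normal(0.0, 2.0, size=500)
prec = sample_normal_prec_jeffreys(data.sum(), (data**2).sum(), data.size)
print(1 / np.sqrt(prec))      # one posterior draw of sigma, near 2.0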
Example #28
def test_Gamma_parameters1(verbose=0):
    import numpy.random as nr
    n = 1000
    X = nr.gamma(11., 3., n)
    G = Gamma()
    G.estimate(X)
    if verbose:
        G.parameters()
    assert(np.absolute(G.shape-11)<2.)
Example #29
def sample_post(hp, ss):
    z = _intermediates(hp, ss)
    l_star = gamma(z.alpha, 1. / z.beta)
    while True:
        m_star = t.rvs(2 * z.alpha, z.mu,
                       z.beta / (z.alpha * z.tau)) ** -1
        if m_star > 0:
            break
    return (m_star, l_star)
Example #30
 def sample(self, eta, size=1):
     """
     @param eta: the natural parameters
     @param size: the size of the sample
     @return: a sample of sufficient statistics
     """
     from numpy.random import gamma
     (a, b) = self.theta(eta)
     return self.T(gamma(a, scale=1. / b, size=size))
Example #31
 def sample_pi(self, delta):
     shapes = cu.counter(
         delta, self.nMix) * self.inv_temper_temp + self.priors.pi.a
     unnormalized = gamma(shape=shapes)
     return unnormalized / unnormalized.sum()
Example #32
 def generate(self):
     return gamma(self.k, self.theta)
Example #33
    def gen_rnas_proteins(self):
        """ Creates RNA and protein objects corresponding to genes on chromosome

        """
        cell = self.knowledge_base.cell

        options = self.options
        mean_copy_number = options.get('mean_copy_number')
        mean_half_life = options.get('mean_half_life')
        mean_volume = cell.properties.get_one(
            id='mean_volume').value

        cytosol = cell.compartments.get_one(id='c')
        for chromosome in cell.species_types.get(__type=wc_kb.core.DnaSpeciesType):
            for i in range(len(chromosome.loci)):

                locus = chromosome.loci[i]

                if type(locus) == wc_kb.prokaryote.TranscriptionUnitLocus:
                    tu = locus

                    # creates RnaSpeciesType for RNA sequence corresponding to gene
                    rna = cell.species_types.get_or_create(
                        id='rna_{}'.format(tu.id), __type=wc_kb.prokaryote.RnaSpeciesType)
                    rna.name = 'rna {}'.format(tu.id)
                    # GeneLocus object for gene sequence, attribute of ProteinSpeciesType object
                    if tu.genes[0].type == wc_kb.core.GeneType.mRna:
                        rna.type = wc_kb.core.RnaType.mRna
                    elif tu.genes[0].type == wc_kb.core.GeneType.rRna:
                        rna.type = wc_kb.core.RnaType.rRna
                    elif tu.genes[0].type == wc_kb.core.GeneType.tRna:
                        rna.type = wc_kb.core.RnaType.tRna
                    elif tu.genes[0].type == wc_kb.core.GeneType.sRna:
                        rna.type = wc_kb.core.RnaType.sRna

                    rna_conc = random.gamma(1, mean_copy_number) / scipy.constants.Avogadro / mean_volume
                    rna_species = rna.species.get_or_create(compartment=cytosol)
                    rna_species.concentration = wc_kb.core.Concentration(cell=cell, value=rna_conc)                    
                    rna.half_life = random.normal(
                        mean_half_life, numpy.sqrt(mean_half_life))

                    rna.transcription_units.append(tu)

                    if rna.type == wc_kb.core.RnaType.mRna:
                        for gene in tu.genes:
                            # creates ProteinSpeciesType object for corresponding protein sequence(s)
                            prot = cell.species_types.get_or_create(
                                id='prot_{}'.format(gene.id), __type=wc_kb.prokaryote.ProteinSpeciesType)
                            prot.name = 'prot_{}'.format(gene.id)

                            prot.cell = cell
                            prot.cell.knowledge_base = self.knowledge_base

                            prot.gene = gene  # associates protein with GeneLocus object for corresponding gene
                            prot.rna = rna
                            prot.half_life = 1
                            prot_species = prot.species.get_or_create(compartment=cytosol)
                            prot_species.concentration = wc_kb.core.Concentration(cell=cell, value=rna_conc)
Example #34
def simulateUtilityScore(N, VehicleShare, NumericalAttributes, CategoricalAttributes):

    mcost_k_M = 100 * beta(a=4.5, b=2, size=N[0])
    mcost_t_M = 0.1 * beta(a=3.5, b=2, size=N[0])
    mcost_utility_M = ((-2e-3 * gamma(mcost_k_M, mcost_t_M, [len(NumericalAttributes['monthly_cost']), N[0]]))
                       * np.array(NumericalAttributes['monthly_cost'])[:, np.newaxis])
    mcost_utility_M -= mcost_utility_M.mean(axis=0)

    mcost_k_X = 100 * beta(a=4., b=2, size=N[1])
    mcost_t_X = 0.1 * beta(a=4.5, b=2, size=N[1])
    mcost_utility_X = ((-1.6e-3 * gamma(mcost_k_X, mcost_t_X, [len(NumericalAttributes['monthly_cost']), N[1]]))
                       * np.array(NumericalAttributes['monthly_cost'])[:, np.newaxis])
    mcost_utility_X -= mcost_utility_X.mean(axis=0)

    mcost_k_B = 100 * beta(a=3.5, b=2, size=N[2])
    mcost_t_B = 0.1 * beta(a=5, b=2, size=N[2])
    mcost_utility_B = ((-1.8e-3 * gamma(mcost_k_B, mcost_t_B, [len(NumericalAttributes['monthly_cost']), N[2]]))
                       * np.array(NumericalAttributes['monthly_cost'])[:, np.newaxis])
    mcost_utility_B -= mcost_utility_B.mean(axis=0)

    mcost_utility = np.hstack([mcost_utility_M, mcost_utility_X, mcost_utility_B]).T

    upcost_k_M = 100 * beta(a=4.5, b=2, size=N[0])
    upcost_t_M = 0.1 * beta(a=3.5, b=2, size=N[0])
    upcost_utility_M = ((-2e-5 * gamma(upcost_k_M, upcost_t_M, [len(NumericalAttributes['upfront_cost']), N[0]]))
                       * np.array(NumericalAttributes['upfront_cost'])[:, np.newaxis])
    upcost_utility_M -= upcost_utility_M.mean(axis=0)

    upcost_k_X = 100 * beta(a=4., b=2, size=N[1])
    upcost_t_X = 0.1 * beta(a=4.5, b=2, size=N[1])
    upcost_utility_X = ((-1e-5 * gamma(upcost_k_X, upcost_t_X, [len(NumericalAttributes['upfront_cost']), N[1]]))
                       * np.array(NumericalAttributes['upfront_cost'])[:, np.newaxis])
    upcost_utility_X -= upcost_utility_X.mean(axis=0)

    upcost_k_B = 100 * beta(a=3.5, b=2, size=N[2])
    upcost_t_B = 0.1 * beta(a=5, b=2, size=N[2])
    upcost_utility_B = ((-1.5e-5 * gamma(upcost_k_B, upcost_t_B, [len(NumericalAttributes['upfront_cost']), N[2]]))
                       * np.array(NumericalAttributes['upfront_cost'])[:, np.newaxis])
    upcost_utility_B -= upcost_utility_B.mean(axis=0)

    upcost_utility = np.hstack([upcost_utility_M, upcost_utility_X, upcost_utility_B]).T

    term_k_M = 100 * beta(a=4.5, b=2, size=N[0])
    term_t_M = 0.1 * beta(a=3.5, b=2, size=N[0])
    term_utility_M = ((-1e-2 * gamma(term_k_M, term_t_M, [len(NumericalAttributes['term']), N[0]]))
                       * np.array(NumericalAttributes['term'])[:, np.newaxis])
    term_utility_M -= term_utility_M.mean(axis=0)

    term_k_X = 100 * beta(a=4., b=2, size=N[1])
    term_t_X = 0.1 * beta(a=4.5, b=2, size=N[1])
    term_utility_X = ((-1.2e-2 * gamma(term_k_X, term_t_X, [len(NumericalAttributes['term']), N[1]]))
                       * np.array(NumericalAttributes['term'])[:, np.newaxis])
    term_utility_X -= term_utility_X.mean(axis=0)

    term_k_B = 100 * beta(a=3.5, b=2, size=N[2])
    term_t_B = 0.1 * beta(a=5, b=2, size=N[2])
    term_utility_B = ((-1.2e-2 * gamma(term_k_B, term_t_B, [len(NumericalAttributes['term']), N[2]]))
                       * np.array(NumericalAttributes['term'])[:, np.newaxis])
    term_utility_B -= term_utility_B.mean(axis=0)

    term_utility = np.hstack([term_utility_M, term_utility_X, term_utility_B]).T


    worth_k_M = 1000 * beta(a=4.5, b=2, size=N[0])
    worth_t_M = 0.01 * beta(a=3.5, b=2, size=N[0])
    worth_utility_M = ((1.5e-5 * gamma(worth_k_M, worth_t_M, [len(NumericalAttributes['vehicle_worth']), N[0]]))
                     * np.array(NumericalAttributes['vehicle_worth'])[:, np.newaxis])
    worth_utility_M -= worth_utility_M.mean(axis=0)

    worth_k_X = 1000 * beta(a=4, b=2, size=N[1])
    worth_t_X = 0.01 * beta(a=4.5, b=2, size=N[1])
    worth_utility_X = ((1.5e-5 * gamma(worth_k_X, worth_t_X, [len(NumericalAttributes['vehicle_worth']), N[1]]))
                       * np.array(NumericalAttributes['vehicle_worth'])[:, np.newaxis])
    worth_utility_X -= worth_utility_X.mean(axis=0)

    worth_k_B = 1000 * beta(a=3.5, b=2, size=N[2])
    worth_t_B = 0.01 * beta(a=4, b=2, size=N[2])
    worth_utility_B = ((1.5e-5 * gamma(worth_k_B, worth_t_B, [len(NumericalAttributes['vehicle_worth']), N[2]]))
                       * np.array(NumericalAttributes['vehicle_worth'])[:, np.newaxis])
    worth_utility_B -= worth_utility_B.mean(axis=0)

    worth_utility = np.hstack([worth_utility_M, worth_utility_X, worth_utility_B]).T

    range_k_M = 1000 * beta(a=4.5, b=2, size=N[0])
    range_t_M = 0.007 * beta(a=4, b=2, size=N[0])
    range_utility_M = ((2e-3 * gamma(range_k_M, range_t_M, [len(NumericalAttributes['range']), N[0]]))
                       * np.array(NumericalAttributes['range'])[:, np.newaxis])
    range_utility_M -= range_utility_M.mean(axis=0)

    range_k_X = 1000 * beta(a=4, b=2, size=N[1])
    range_t_X = 0.008 * beta(a=4, b=2, size=N[1])
    range_utility_X = ((2e-3 * gamma(range_k_X, range_t_X, [len(NumericalAttributes['range']), N[1]]))
                       * np.array(NumericalAttributes['range'])[:, np.newaxis])
    range_utility_X -= range_utility_X.mean(axis=0)

    range_k_B = 1000 * beta(a=3.5, b=2, size=N[2])
    range_t_B = 0.008 * beta(a=4, b=2, size=N[2])
    range_utility_B = ((2e-3 * gamma(range_k_B, range_t_B, [len(NumericalAttributes['range']), N[2]]))
                       * np.array(NumericalAttributes['range'])[:, np.newaxis])
    range_utility_B -= range_utility_B.mean(axis=0)

    range_utility = np.hstack([range_utility_M, range_utility_X, range_utility_B]).T

    charge_k_M = 1000 * beta(a=4.5, b=2, size=N[0])
    charge_t_M = 0.007 * beta(a=4, b=2, size=N[0])
    charge_utility_M = ((2e-3 * gamma(charge_k_M, charge_t_M, [len(NumericalAttributes['charge']), N[0]]))
                       * np.array(NumericalAttributes['charge'])[:, np.newaxis])
    charge_utility_M -= charge_utility_M.mean(axis=0)

    charge_k_X = 1000 * beta(a=4, b=2, size=N[1])
    charge_t_X = 0.008 * beta(a=4, b=2, size=N[1])
    charge_utility_X = ((2e-3 * gamma(charge_k_X, charge_t_X, [len(NumericalAttributes['charge']), N[1]]))
                       * np.array(NumericalAttributes['charge'])[:, np.newaxis])
    charge_utility_X -= charge_utility_X.mean(axis=0)

    charge_k_B = 1000 * beta(a=3.5, b=2, size=N[2])
    charge_t_B = 0.008 * beta(a=4, b=2, size=N[2])
    charge_utility_B = ((2e-3 * gamma(charge_k_B, charge_t_B, [len(NumericalAttributes['charge']), N[2]]))
                       * np.array(NumericalAttributes['charge'])[:, np.newaxis])
    charge_utility_B -= charge_utility_B.mean(axis=0)

    charge_utility = np.hstack([charge_utility_M, charge_utility_X, charge_utility_B]).T

    energy_sig_M = 0.4 * beta(a=10, b=2, size=N[0])
    energy_mu_M = normal(loc=0.9, scale=0.1, size=N[0])
    energy_inter = (1 * normal(energy_mu_M, energy_sig_M, [1, N[0]]))
    energy_utility_M = np.vstack([-1 * energy_inter, energy_inter]).T

    energy_sig_X = 0.4 * beta(a=10, b=2, size=N[1])
    energy_mu_X = normal(loc=0.9, scale=0.1, size=N[1])
    energy_inter = (1 * normal(energy_mu_X, energy_sig_X, [1, N[1]]))
    energy_utility_X = np.vstack([-1 * energy_inter, energy_inter]).T

    energy_sig_B = 0.4 * beta(a=10, b=2, size=N[2])
    energy_mu_B = normal(loc=1.1, scale=0.1, size=N[2])
    energy_inter = (1.2 * normal(energy_mu_B, energy_sig_B, [1, N[2]]))
    energy_utility_B = np.vstack([-1 * energy_inter, energy_inter]).T

    energy_utility = np.vstack([energy_utility_M, energy_utility_X, energy_utility_B])

    type_mix_M = binomial(n=1, p=0.4, size=N[0])
    type_sig_M = 2 * beta(a=10, b=2, size=N[0])
    type_mu_M = normal(loc=(3 * (type_mix_M - 0.5)), scale=0.1, size=N[0])
    type_inter = (0.5 * normal(type_mu_M, type_sig_M, [1, N[0]]))
    type_utility_M = np.vstack([type_inter, -1 * type_inter]).T

    type_mix_X = binomial(n=1, p=0.2, size=N[1])
    type_sig_X = 2 * beta(a=10, b=2, size=N[1])
    type_mu_X = normal(loc=(3 * (type_mix_X - 0.5)), scale=0.1, size=N[1])
    type_inter = (0.5 * normal(type_mu_X, type_sig_X, [1, N[1]]))
    type_utility_X = np.vstack([type_inter, -1 * type_inter]).T

    type_mix_B = binomial(n=1, p=0.4, size=N[2])
    type_sig_B = 2 * beta(a=10, b=2, size=N[2])
    type_mu_B = normal(loc=(3 * (type_mix_B - 0.5)), scale=0.1, size=N[2])
    type_inter = (0.5 * normal(type_mu_B, type_sig_B, [1, N[2]]))
    type_utility_B = np.vstack([type_inter, -1 * type_inter]).T

    type_utility = np.vstack([type_utility_M, type_utility_X, type_utility_B])
    type_utility = type_utility.clip(-5, 5)

    brand_scale = 10 * beta(a=10, b=2, size=sum(N))
    brand_alloc = dirichlet(10*np.array([0.06, 0.07, 0.07, 0.01, 0.09, 0.44, 0.16, 0.1]), size=sum(N))
    brand_utility = brand_alloc * brand_scale[:, np.newaxis]
    brand_utility -= brand_utility.mean(axis=1)[:, np.newaxis]

    utility = np.hstack([brand_utility, mcost_utility, upcost_utility, term_utility, worth_utility, range_utility,
                         charge_utility, energy_utility, type_utility])

    # simulate the current market share
    simCust = multinomial(1, list(VehicleShare.values()), sum(N))
    simProduct = [list(VehicleShare.keys())[x] for x in np.argmax(simCust, axis=1)]

    aug = pd.DataFrame(np.array([list(range(1, sum(N) + 1, 1)), ['Millenial'] * N[0] + ['Gen X'] * N[1]
                                 + ['Baby Boomer'] * N[2], simProduct]).T, columns = ['id', 'segment', 'current brand'])

    k = 3  # we start at three as first three columns reserved for id, segment and current brand
    topLevel = [0, 1, 2]
    topLevelLab = ['id', 'segment', 'current brand']
    bottomLevelLab = ['id', 'segment', 'current brand']
    dictAttributes = {**CategoricalAttributes, **NumericalAttributes}
    for col in ['id', 'brand', 'model', 'monthly_cost', 'upfront_cost', 'term',
                'vehicle_worth', 'range', 'charge', 'energy', 'vehicle_type']:
        if col not in ['id', 'model']:
            topLevel = topLevel + [k] * len(list(dictAttributes[col]))
            topLevelLab = topLevelLab + [col]
            bottomLevelLab = bottomLevelLab + [lvl for lvl in list(dictAttributes[col])]
            k += 1

    midx = pd.MultiIndex(levels=[list(topLevelLab), list(bottomLevelLab)],
                         codes=[topLevel, list(range(len(bottomLevelLab)))])

    utilityDf = pd.DataFrame(pd.concat([aug, pd.DataFrame(utility)], axis=1).values, columns=midx)

    return utilityDf
Example #35
 def _update_Theta_DK(self):
     post_shape_DK = self.shape + self.Y_DK
     post_rate_DK = self.rate + np.dot(~self.data.mask, self.Phi_KV.T)
     self.Theta_DK[:] = rn.gamma(post_shape_DK, 1. / post_rate_DK)
Example #36
def sample_lam_b_pri(lam_b_hyper_pri_shape, lam_b_hyper_pri_rate, lam_a_pri,
                     lam_mat, K):
    lam_b_pri = gamma(lam_b_hyper_pri_shape + K * lam_a_pri,
                      1 / (lam_b_hyper_pri_rate + lam_mat.sum(axis=0)))

    return lam_b_pri
Example #37
 def initialize_sampler(self, ns):
     self.samples = Samples(ns, self.nCol)
     self.samples.zeta[0] = gamma(shape=2., scale=2., size=self.nCol)
     # self.samples.rho[0] = self.sample_rho(self.samples.zeta[0])
     self.curr_iter = 0
     return
Example #38
 def sample_r(self, alpha, delta):
     shapes = alpha[delta].sum(axis=1)
     rates = self.data.V.sum(axis=1)
     return gamma(shape=shapes, scale=1. / rates)
Example #39
 def sample_re(self):
     enew = self.e0 + 0.5 * self.F * self.N
     fnew = self.f0 + 0.5 * np.sum(self.X**2)
     self.r_e = nr.gamma(enew, scale=1. / fnew)
Example #40
from typing import Union

import numpy as np
import numpy.random as npr


def sample(shape: Union[float, np.ndarray],
           scale: Union[float, np.ndarray],
           size: int = None) -> Union[float, np.ndarray]:
    # Inverse-gamma draw: reciprocal of a gamma draw with inverted scale.
    return 1 / npr.gamma(shape=shape, scale=1 / scale, size=size)
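This is the standard inverse-gamma construction: if Y ~ Gamma(alpha, scale=1/beta), then 1/Y ~ InvGamma(alpha, scale=beta). A quick sanity check with illustrative values:

draws = 1 / npr.gamma(shape=5.0, scale=1 / 2.0, size=100000)
print(draws.mean())           # approaches beta / (alpha - 1) = 2.0 / 4.0 = 0.5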
Example #41
import numpy as np
import numpy.random as npr
import scipy.stats as sps
import matplotlib.pyplot as plt

plt.close("all")

t = 10.
shape = 1.   # shape 1 makes the gamma an exponential, which is memoryless
scale = 4.
n = 50000

X = npr.gamma(shape, scale, size=n)
X_c = X[X > t] - t
x = np.linspace(0., max(X_c), 1000)
f_x = sps.gamma.pdf(x, a=shape, scale=scale)

plt.hist(X_c, density=True, bins=2 * round(n**(1. / 3.)))  # empirical
plt.plot(x, f_x, "r")  # theoretical
plt.title("Empirical distribution of (X | X > t) - t for X exponential with rate " \
    + str(1. / scale))
plt.show()
Example #42
    def sampler(self):
        """
        Run blocked-Gibbs sampling
        """

        for key, value in self.data.items():
            T = value.shape[0]
            # Step 1: backwards message passing
            messages = np.zeros((T, self.L))
            messages[-1, :] = 1
            for t in range(T - 1, 0, -1):
                messages[t - 1, :] = self.PI.dot(
                    messages[t, :] *
                    np.exp(self._logphi(value[t, :], self.mu, self.sigma)))
                messages[t - 1, :] /= np.max(messages[t - 1, :])

            # Step 2: states by MH algorithm
            for t in range(1, T):
                j = choice(self.L)  # proposal
                k = self.state[key][t]

                logprob_accept = (
                    np.log(messages[t, k]) - np.log(messages[t, j]) +
                    np.log(self.PI[self.state[key][t - 1], k]) -
                    np.log(self.PI[self.state[key][t - 1], j]) +
                    self._logphi(value[t - 1, :], self.mu[k], self.sigma[k]) -
                    self._logphi(value[t - 1, :], self.mu[j], self.sigma[j]))
                if exponential(1) > logprob_accept:
                    print("state update!")
                    self.state[key][t] = j
                    self.N[self.state[key][t - 1], j] += 1
                    self.N[self.state[key][t - 1], k] -= 1

        # Step 3: auxiliary variables
        P = np.tile(self.beta, (self.L, 1)) + self.n
        np.fill_diagonal(P, np.diag(P) + self.kappa)
        P = 1 - self.n / P
        for i in range(self.L):
            for j in range(self.L):
                self.M[i, j] = binomial(self.M[i, j], P[i, j])

        w = np.array([
            binomial(self.M[i, i], 1 / (1 + self.beta[i]))
            for i in range(self.L)
        ])
        m_bar = np.sum(self.M, axis=0) - w

        # Step 4: beta and parameters of clusters
        #self.beta = _gem(self.gma)
        self.beta = dirichlet(np.ones(self.L) * (self.gma / self.L))  #+ m_bar

        # Step 5: transition matrix
        self.PI = np.tile(self.alpha * self.beta, (self.L, 1)) + self.N
        np.fill_diagonal(self.PI, np.diag(self.PI) + self.kappa)
        for i in range(self.L):
            self.PI[i, :] = dirichlet(self.PI[i, :])
            cluster = []
            for key, value in self.data.items():
                idx = np.where(self.state[key] == i)
                if cluster == []:
                    cluster = value[idx]
                else:
                    cluster = np.vstack([cluster, value[idx, :]])
            nc = cluster.shape[0]
            if nc:
                xmean = np.mean(cluster)
                self.mu[i] = xmean / (self.nu / nc + 1)
                self.sigma[i] = (2 * self.b +
                                 (nc - 1) * np.var(cluster) + nc * xmean**2 /
                                 (self.nu + nc)) / (2 * self.a + nc - 1)
            else:
                self.mu[i] = normal(0, np.sqrt(self.nu))
                self.sigma[i] = 1 / gamma(self.a, self.b)

        # check log likelihood
        total_loglikelihood = 0
        for key, value in self.data.items():
            T = value.shape[0]
            emis = 0
            trans = 0
            for t in range(T):
                emis += self._logphi(value[t, :], self.mu[self.state[key][t]],
                                     self.sigma[self.state[key][t]])
                if t > 0:
                    trans += np.log(self.PI[self.state[key][t - 1],
                                            self.state[key][t]])
            total_loglikelihood += emis + trans
        print("total log likelihood of all sequences: ", total_loglikelihood)
Example #43
    def _next_disconnect(self):
        time_until = gamma(self.time_until_shape, self.time_until_scale)
        duration = gamma(self.duration_shape, self.duration_scale)

        return time_until, duration
Example #44
 def _update_Phi_KV(self):
     post_shape_KV = self.shape + self.Y_KV
     post_rate_KV = self.rate + np.dot(self.Theta_DK.T, ~self.data.mask)
     self.Phi_KV[:] = rn.gamma(post_shape_KV, 1. / post_rate_KV)
Example #45
 def sample_r(self, alphas, betas, delta):
     alpha = alphas[delta]
     beta = betas[delta]
     As = alpha.sum(axis=1)
     Bs = (self.data.Yl * beta).sum(axis=1)
     return gamma(shape=As, scale=1 / Bs)
Example #46
def sample_lam_mat(lam_a_pri, lam_b_pri, ysum, ycnt):
    # lam_mat is a (K x number-of-neurons) matrix
    lam_mat = gamma(lam_a_pri + ysum, 1 / (lam_b_pri + ycnt))
    return lam_mat
Example #47
 def _update_alpha(self, V):
     a = self.ncomp + self.e - 1
     b = self.f - np.log(1 - V).sum()
     return npr.gamma(a, scale=1 / b)
Example #48
def unsupervised_wiener(image,
                        psf,
                        reg=None,
                        user_params=None,
                        is_real=True,
                        clip=True):
    """Unsupervised Wiener-Hunt deconvolution.

    Return the deconvolution with a Wiener-Hunt approach, where the
    hyperparameters are automatically estimated. The algorithm is a
    stochastic iterative process (Gibbs sampler) described in the
    reference below. See also ``wiener`` function.

    Parameters
    ----------
    image : (M, N) ndarray
       The input degraded image.
    psf : ndarray
       The impulse response (input image's space) or the transfer
       function (Fourier space). Both are accepted. The transfer
       function is automatically recognized as being complex
       (``np.iscomplexobj(psf)``).
    reg : ndarray, optional
       The regularisation operator. The Laplacian by default. It can
       be an impulse response or a transfer function, as for the psf.
    user_params : dict, optional
       Dictionary of parameters for the Gibbs sampler. See below.
    clip : boolean, optional
       True by default. If true, pixel values of the result above 1 or
       under -1 are thresholded for skimage pipeline compatibility.

    Returns
    -------
    x_postmean : (M, N) ndarray
       The deconvolved image (the posterior mean).
    chains : dict
       The keys ``noise`` and ``prior`` contain the chain list of
       noise and prior precision respectively.

    Other parameters
    ----------------
    The keys of ``user_params`` are:

    threshold : float
       The stopping criterion: the norm of the difference between two
       successive approximated solutions (empirical mean of object
       samples, see Notes section). 1e-4 by default.
    burnin : int
       The number of samples to ignore before starting computation of
       the mean. 15 by default.
    min_iter : int
       The minimum number of iterations. 30 by default.
    max_iter : int
       The maximum number of iterations if ``threshold`` is not
       satisfied. 200 by default.
    callback : callable (None by default)
       A user provided callable to which is passed, if the function
       exists, the current image sample for whatever purpose. The user
       can store the sample, or compute other moments than the
       mean. It has no influence on the algorithm execution and is
       only for inspection.

    Examples
    --------
    >>> from skimage import color, data, restoration
    >>> img = color.rgb2gray(data.astronaut())
    >>> from scipy.signal import convolve2d
    >>> psf = np.ones((5, 5)) / 25
    >>> img = convolve2d(img, psf, 'same')
    >>> img += 0.1 * img.std() * np.random.standard_normal(img.shape)
    >>> deconvolved_img = restoration.unsupervised_wiener(img, psf)

    Notes
    -----
    The estimated image is designed as the posterior mean of a
    probability law (from a Bayesian analysis). The mean is defined as
    a sum over all the possible images weighted by their respective
    probability. Given the size of the problem, the exact sum is not
    tractable. This algorithm uses MCMC to draw images under the
    posterior law. The practical idea is to only draw highly probable
    images, since they have the biggest contribution to the mean. By
    contrast, the less probable images are drawn less often, since
    their contribution is low. Finally, the empirical mean of these
    samples gives an estimate of the mean; with an infinite sample set
    the computation would be exact.

    References
    ----------
    .. [1] François Orieux, Jean-François Giovannelli, and Thomas
           Rodet, "Bayesian estimation of regularization and point
           spread function parameters for Wiener-Hunt deconvolution",
           J. Opt. Soc. Am. A 27, 1593-1607 (2010)

           https://www.osapublishing.org/josaa/abstract.cfm?URI=josaa-27-7-1593

           http://research.orieux.fr/files/papers/OGR-JOSA10.pdf
    """
    params = {
        'threshold': 1e-4,
        'max_iter': 200,
        'min_iter': 30,
        'burnin': 15,
        'callback': None
    }
    params.update(user_params or {})

    if reg is None:
        reg, _ = uft.laplacian(image.ndim, image.shape, is_real=is_real)
    if not np.iscomplexobj(reg):
        reg = uft.ir2tf(reg, image.shape, is_real=is_real)

    if psf.shape != reg.shape:
        trans_fct = uft.ir2tf(psf, image.shape, is_real=is_real)
    else:
        trans_fct = psf

    # The mean of the object
    x_postmean = np.zeros(trans_fct.shape)
    # The previous computed mean in the iterative loop
    prev_x_postmean = np.zeros(trans_fct.shape)

    # Difference between two successive mean
    delta = np.nan

    # Initial state of the chain
    gn_chain, gx_chain = [1], [1]

    # The correlation of the object in Fourier space (if size is big,
    # this can reduce computation time in the loop)
    areg2 = np.abs(reg)**2
    atf2 = np.abs(trans_fct)**2

    # The Fourier transform may change the image.size attribute, so we
    # store it.
    if is_real:
        data_spectrum = uft.urfft2(image.astype(float))
    else:
        data_spectrum = uft.ufft2(image.astype(float))

    # Gibbs sampling
    for iteration in range(params['max_iter']):
        # Sample of Eq. 27 p(circX^k | gn^k-1, gx^k-1, y).

        # weighting (correlation in direct space)
        precision = gn_chain[-1] * atf2 + gx_chain[-1] * areg2  # Eq. 29
        excursion = np.sqrt(0.5) / np.sqrt(precision) * (
            np.random.standard_normal(data_spectrum.shape) +
            1j * np.random.standard_normal(data_spectrum.shape))

        # mean Eq. 30 (RLS for fixed gn, gamma0 and gamma1 ...)
        wiener_filter = gn_chain[-1] * np.conj(trans_fct) / precision

        # sample of X in Fourier space
        x_sample = wiener_filter * data_spectrum + excursion
        if params['callback']:
            params['callback'](x_sample)

        # sample of Eq. 31 p(gn | x^k, gx^k, y)
        gn_chain.append(
            npr.gamma(
                image.size / 2,
                2 / uft.image_quad_norm(data_spectrum - x_sample * trans_fct)))

        # sample of Eq. 31 p(gx | x^k, gn^k-1, y)
        gx_chain.append(
            npr.gamma((image.size - 1) / 2,
                      2 / uft.image_quad_norm(x_sample * reg)))

        # current empirical average
        if iteration > params['burnin']:
            x_postmean = prev_x_postmean + x_sample

        if iteration > (params['burnin'] + 1):
            current = x_postmean / (iteration - params['burnin'])
            previous = prev_x_postmean / (iteration - params['burnin'] - 1)

            delta = np.sum(np.abs(current - previous)) / \
                np.sum(np.abs(x_postmean)) / (iteration - params['burnin'])

        prev_x_postmean = x_postmean

        # stop of the algorithm
        if (iteration > params['min_iter']) and (delta < params['threshold']):
            break

    # Empirical average \approx POSTMEAN Eq. 44
    x_postmean = x_postmean / (iteration - params['burnin'])
    if is_real:
        x_postmean = uft.uirfft2(x_postmean, shape=image.shape)
    else:
        x_postmean = uft.uifft2(x_postmean)

    if clip:
        x_postmean[x_postmean > 1] = 1
        x_postmean[x_postmean < -1] = -1

    return (x_postmean, {'noise': gn_chain, 'prior': gx_chain})
Example #49
from numpy.random import uniform, normal, gamma
from numpy import sqrt
from sys import argv, stdout, stderr

# Process arguments
if len(argv) != 3:
    stderr.write("{}: incorrect number of arguments\n".format(argv[0]))
    exit(1)

a_diff = float(argv[1])
a_udiff = float(argv[2])
diff_sigma2 = 1.0 / gamma(3, 78)   # inverse-gamma draws for the variances
udiff_sigma2 = 1.0 / gamma(3, 78)

t1 = uniform(low=0, high=1)
t2 = uniform(low=0, high=1)

diff_sampled = normal(loc=0.23, scale=sqrt(diff_sigma2))
udiff_sampled = normal(loc=0.64, scale=sqrt(udiff_sigma2))

if t1 > a_diff:  #draw from just diffusion coefficient
    diff_sampled = 0

elif t2 > a_udiff:  #draw from just u*diffusion coefficient
    udiff_sampled = 0

# Print sampled parameters
stdout.write("{:0.6f} {:0.6f} {:0.6f} {:0.6f}\n".format(
    diff_sampled, udiff_sampled, diff_sigma2, udiff_sigma2))
Example #50
# valArray and variance are defined earlier in the source this was taken from.
stdDev = sqrt(variance)
stdDev = std(valArray)        # Biased estimate   - 1.0833
stdDev = valArray.std(ddof=1) # "Unbiased" estimate - 1.1362

print('Std:', stdDev)

stdErrMean = valArray.std(ddof=1)/sqrt(len(valArray))

from scipy.stats import sem

stdErrMean = sem(valArray, ddof=1) # Result is 0.3426

from scipy.stats import skew
from numpy import random

samples = random.gamma(3.0, 2.0, 100) # Example data

skewness = skew(samples)
print( 'Skew', skewness )      # Result is 1.0537

from scipy.stats import binom_test

count, nTrials, pEvent = 530, 1000, 0.5
result = binom_test(count, nTrials, pEvent)
print( 'Binomial two tail', result)

from scipy.stats import binom
from numpy import array, zeros

def binomialTailTest(counts, nTrials, pEvent, oneSided=True):
    # (function body truncated in the source)
Example #51
    def sample(self, data, parents, T, dt_max):

        start = time.time()
        times, nodes = data

        # sufficient statistics
        M_0n = np.zeros(self.N)
        M_n = np.zeros(self.N)
        M_mn = np.zeros((self.N, self.N))
        xbar_mn = np.zeros((self.N, self.N))
        nu_mn = np.zeros((self.N, self.N))

        # Direct Method
        X = [[[] for n in range(self.N)] for n in range(self.N)]
        for t in range(len(times)):
            n = nodes[t]
            # M_n
            M_n[n] += 1
            sn = times[t]
            w = parents[t]  # (0 = background, 1, ..., M - 1 = last possible parent)
            if w > 0:  # if not a background event...
                m = nodes[w - 1]  # parent node
                sm = times[w - 1]  # parent time
                # M_mn
                M_mn[m, n] += 1
                x = np.log((sn - sm) / (dt_max - (sn - sm)))
                X[m][n].append(x)
            else:
                M_0n[n] += 1

        # xbar_mn, nu_mn
        for n in range(self.N):  # event node
            for m in range(self.N):  # parent node
                if len(X[m][n]) > 0:
                    xbar_mn[m, n] = np.mean(X[m][n])
                    nu_mn[m, n] = np.var(X[m][n]) * M_mn[m, n]
                else:
                    # print("No {} -> {} events found.".format(m,n))
                    xbar_mn[m, n] = 0.1
                    nu_mn[m, n] = 0.1

        assert M_mn.sum() + M_0n.sum() == len(times), "Event count error 1"
        assert M_n.sum() == len(times), "Event count error 2"

        # check for zeros
        if FLAGS_VERBOSE:
            print('M_mn={}'.format(M_mn))
            print('xbar_mn={}'.format(xbar_mn))
            print('nu_mn={}'.format(nu_mn))
        xbar_mn[xbar_mn == 0] = 0.01
        nu_mn[nu_mn == 0] = 0.01

        # calculate posterior parameters
        alpha_0 = self.alpha_0 + M_0n
        beta_0 = self.beta_0 + T
        kappa = self.kappa + M_mn
        nu = self.nu + M_n.reshape((self.N, 1))
        mu_mu = (self.kappa_mu * self.mu_mu +
                 M_mn * xbar_mn) / (self.kappa_mu + M_mn)
        kappa_mu = self.kappa_mu + M_mn
        alpha_tau = self.alpha_tau + (M_mn / 2)
        beta_tau = (nu_mn /
                    2) + (M_mn * self.kappa_mu *
                          (xbar_mn - self.mu_mu)**2) / (2 *
                                                        (M_mn + self.kappa_mu))

        # sample posteriors
        lamb = gamma(alpha_0, (1 / beta_0))  # N x 1 parameters
        W = self.A * gamma(kappa, (1 / nu))  # N x N parameters
        mu, tau = normal_gamma(mu_mu, kappa_mu, alpha_tau,
                               beta_tau)  # N x N parameters
        stop = time.time()
        if FLAGS_VERBOSE:
            print('Sampled parameters in {} seconds.'.format(stop - start))
        return lamb, W, mu, tau
Example #52
 def _numpy(self, loc=0.0, scale=1.0, size=(1, )):
     return lambda: nr.gamma(
         shape=self.alpha, scale=self.theta * scale, size=size) + loc
Example #53
def sim_gamma(shape, scale, size):
    return npr.gamma(shape, scale, size)
Example #54
    def run_chain(self, steps=1):
        """Run a Gibbs sampler
        """
        z_chain = np.zeros((steps, self.n), dtype=int)
        self.assignments = z_chain
        M = float(self.params['M'])

        for i in range(1, steps):
            if i % 50 == 0: print("MCMC Chain: {}".format(i))
            # find the number of points in each clusters
            old_unique, old_counts = np.unique(self.assignments[i - 1, :],
                                               return_counts=True)
            num_pts_clusters = dict(zip(old_unique, old_counts))
            # old_unique = list(old_unique)

            # initialise the arrays of the chain as the array lengths differ
            # as we increase the number of clusters
            mu_new = np.zeros(old_unique.shape)
            sigma_new = np.zeros(old_unique.shape)
            weights_new = np.zeros(old_unique.shape)

            mu_old = self.chain["mu"][i - 1]
            sigma_old = self.chain["sigma"][i - 1]
            weights_old = self.chain["weights"][i - 1]

            for k in old_unique:
                num_pts_cluster = num_pts_clusters[k]
                data_cluster = self.data[np.where(
                    self.assignments[i - 1, :] == k)[0]]
                if num_pts_cluster > 0:
                    # now sample mu[k] given mu[-k], sigma and the partition
                    # see lecture 15 last slide calculations N(a, b) trick
                    sigma_tmp = sigma_old[k]
                    b = np.sqrt(1 / (num_pts_cluster / sigma_tmp**2 +
                                     1 / self.hyperparam["sigma_0"]**2))
                    a = b**2 * (sum(data_cluster) / sigma_tmp**2 +
                                self.hyperparam["mu_0"] /
                                self.hyperparam["sigma_0"]**2)
                    # update mu
                    mu_new[k] = normal(loc=a, scale=b)

                    # now sample sigma[k] given sigma[-k], mu and the partition
                    c = self.hyperparam["alpha_0"] + num_pts_cluster / 2
                    d = self.hyperparam["beta_0"] + 0.5 * sum(
                        (data_cluster - mu_new[k])**2)

                    # update sigma
                    sigma_new[k] = 1.0 / np.sqrt(gamma(shape=c, scale=1 / d))

            self.assignments[i, :] = self.assignments[i - 1, :].copy()
            # now, loop through all the datapoints to compute the new cluster probabilities
            for j in range(self.n):

                # TODO: this bit could definitely be taken out in the future, but I
                # will just leave it for now
                old_unique, old_counts = np.unique(self.assignments[i, :],
                                                   return_counts=True)

                old_cluster_assigned = int(self.assignments[i, j])
                old_counts[old_cluster_assigned] -= 1

                K = len(old_unique)

                # probability for each existing k cluster -> gives a vector of probabilities
                p_old_cluster = (old_counts + 1. / M) * norm(
                    mu_new, sigma_new).pdf(self.data[j])

                mu_update = normal(loc=self.hyperparam["mu_0"],
                                   scale=self.hyperparam["sigma_0"])
                sigma_update = 1 / np.sqrt(
                    gamma(
                        shape=self.hyperparam["alpha_0"],
                        scale=1 / self.hyperparam["beta_0"],
                    ))
                p_new_cluster = (M - K) / M * self.params["alpha"] * norm(
                    mu_update, sigma_update).pdf(self.data[j])
                p_new_cluster = np.atleast_1d(p_new_cluster)
                # normalise the probabilities
                prob_clusters = np.concatenate((p_old_cluster, p_new_cluster))
                prob_clusters = prob_clusters / sum(prob_clusters)
                # select a new cluster!
                # if we get K then new cluster!
                cluster_pick = np.random.choice(K + 1, p=prob_clusters)

                if cluster_pick == K:
                    self.assignments[i, j] = cluster_pick
                    # update the indices and shift the parameters up the list
                    mu_new = np.append(mu_new, mu_update)
                    sigma_new = np.append(sigma_new, sigma_update)

                else:
                    self.assignments[i, j] = cluster_pick

                # obtain the number of members in the cluster belonging to the ith element, with
                # it removed!
                # find the number of points in each clusters as it will change with each iteration
                # unique, counts = np.unique(
                #     self.assignments[i, :], return_counts=True)
                # num_pts_clusters = dict(zip(unique, counts))

                # remove empty clusters and their parameters
                # now, sample the cluster weights!
                if (old_counts[old_cluster_assigned]
                        == 0) & (cluster_pick != old_cluster_assigned):
                    mu_new = np.delete(mu_new, old_cluster_assigned)
                    sigma_new = np.delete(sigma_new, old_cluster_assigned)

                    ind = self.assignments[i, :] > (old_cluster_assigned)
                    self.assignments[i, ind] = self.assignments[i, ind] - 1

            # update the weights
            _, counts = np.unique(self.assignments[i, :], return_counts=True)
            weights_update = dirichlet(alpha=self.params["alpha"] + counts)
            weights_new = weights_update.copy()

            self.chain["mu"].append(mu_new)
            self.chain["sigma"].append(sigma_new)
            self.chain["weights"].append(weights_new)

        print("Complete sampling")

        return self.chain, self.assignments
Example #55
from numpy import zeros, random


def firstStateGenerator(parameters, size):
    first_state = zeros((size, 2))
    first_state[:, 1] = random.gamma(size=size,
                                     shape=(parameters[2]**2) / parameters[3],
                                     scale=(parameters[3] / parameters[2]))
    return first_state
Example #56
 def draw_nparray(self, shape=(1, )):
     """ Draw a numpy array of random samples, of a certain shape."""
     return np.minimum(
         np.maximum(gamma(self.shape, self.scale, size=shape), self.min),
         self.max)
Example #57
from numpy import random


def ichi2(degFreedom, scale):
    shape = degFreedom / 2
    return (shape * scale) / random.gamma(shape)
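ichi2 draws from a scaled inverse chi-squared distribution: with shape = nu/2, the expression (shape * scale) / gamma(shape) equals nu * scale / chi2_nu, since chi2_nu is twice a unit-scale Gamma(nu/2) draw. A quick check of the mean with illustrative values:

import numpy as np

nu, tau2 = 10.0, 3.0
draws = np.array([ichi2(nu, tau2) for _ in range(100000)])
print(draws.mean())           # approaches nu * tau2 / (nu - 2) = 3.75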
Example #58
# numpy, math, random and the constants nb_simul, x0, dt, tau, n and path
# are set earlier in the source script.
traj_BD = []
t_label = []

while len(traj_BD) < nb_simul:

    #Initialization
    x = x0
    tabx = []
    t = 0

    while x > 0 and t < tau:
        tabx.append(x)
        x += numpy.random.normal(2 * dt, math.sqrt(2 * x * dt))
        t += dt

    gamma = random.gamma(n, 1.0 / n)

    exp = -math.log(random.random()) / n

    if x > 0 and gamma <= x <= (gamma + exp):
        traj_BD.append(tabx)

#
#Mean trajectory over time
#

avg_BD = [float(sum(col)) / len(col) for col in zip(*traj_BD)]

with open(path, "w") as file_BD:
    for i in range(len(avg_BD)):
        file_BD.write(str(i * dt) + '\t' + str(avg_BD[i]) + '\n')
Example #59
 def rvs(self, size=None):
     return random.gamma(self.a, scale=self.scale, size=size)
Example #60
 def box_rng(self) -> int:
     return int(random.gamma(2, 400))