Example no. 1
def gamma_difference_hrf(
    tr,
    oversampling=16,
    time_length=32.0,
    onset=0.0,
    delay=6,
    undershoot=16.0,
    dispersion=1.0,
    u_dispersion=1.0,
    ratio=0.167,
):
    """ Compute an hrf as the difference of two gamma functions

    Parameters
    ----------
    tr: float, scan repeat time, in seconds
    oversampling: int, temporal oversampling factor, optional
    time_length: float, hrf kernel length, in seconds
    onset: float, onset of the hrf

    Returns
    -------
    hrf: array of shape(length / tr * oversampling, float),
         hrf sampling on the oversampled time grid
    """
    dt = tr / oversampling
    # num for linspace must be an int; onset is already in seconds
    time_stamps = np.linspace(0, time_length, int(time_length / dt))
    time_stamps -= onset
    hrf = gamma.pdf(time_stamps, delay / dispersion, dt / dispersion) - ratio * gamma.pdf(
        time_stamps, undershoot / u_dispersion, dt / u_dispersion
    )
    hrf /= hrf.sum()
    return hrf
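A minimal usage sketch of the kernel above (assuming numpy and scipy.stats.gamma are imported as np and gamma, as the body implies):

import numpy as np
from scipy.stats import gamma

hrf_kernel = gamma_difference_hrf(tr=2.0)   # 2 s TR, 16x oversampling -> 0.125 s grid
print(hrf_kernel.shape)                     # (256,): 32 s of kernel at 0.125 s steps
print(hrf_kernel.sum())                     # ~1.0, since the kernel is normalized to unit sum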
Example no. 2
def mcmc(N=1000, k={"t1":100, "t2":100, "t3":5}, x=[], v=[]):
    chute = {"t1":[10],"t2":[10],"t3":[0.01]}
    M = chute
    hiper = {"t1":[0,100],"t2":[0,100],"t3":[0.1,0.1]} # hyperparameter values

    for i in range(N-1):
        for j in M.keys():
            if j == "t1" or j == "t2": 
                M[j].append( np.random.normal(loc = M[j][-1]-k[j]/100, scale = k[j], size = 1) )

                lista = [ [ M[l][-1] for l in M.keys()] , [ M[l][-1] if l!=j else M[l][-2] for l in M.keys() ] ]
                    
                t1 = norm.pdf(M[j][-1], loc = hiper[j][0], scale = hiper[j][1]) * L(x, v, lista[0]) * norm.pdf(M[j][-2], loc = M[j][-1]-k[j]/100, scale = k[j])
                t2 = norm.pdf(M[j][-2], loc = hiper[j][0], scale = hiper[j][1]) * L(x, v, lista[1]) * norm.pdf(M[j][-1], loc = M[j][-2]-k[j]/100, scale = k[j])
                
                teste = (t1/t2)
            else:
                M[j].append( np.random.gamma(shape = M[j][-1]*k[j], scale = k[j], size = 1) )
                lista = [ [ M[l][-1] for l in M.keys()] , [ M[l][-1] if l!=j else M[l][-2] for l in M.keys() ] ]
                t1 =  gamma.pdf(M[j][-1], a = hiper[j][0], scale = hiper[j][1]) * L(x, v, lista[0]) * gamma.pdf(M[j][-2], a = M[j][-1]*k[j], scale = k[j])
                t2 =  gamma.pdf(M[j][-2], a = hiper[j][0], scale = hiper[j][1]) * L(x, v, lista[1]) * gamma.pdf(M[j][-1], a = M[j][-2]*k[j], scale = k[j])          

                teste = (t1/t2)
                
            if (min(1 , teste) < np.random.uniform(low = 0, high = 1, size = 1) ) or (np.isinf(teste)) or (np.isnan(teste)) :  
                M[j][-1] = M[j][-2]
            
    return(M)
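A hypothetical driver for this sampler; L(x, v, params) is the user-supplied likelihood the snippet assumes, stubbed out here purely for illustration:

import numpy as np
from scipy.stats import norm, gamma

def L(x, v, params):
    # stub likelihood so the sampler runs; a real model would use the data x, v
    t1, t2, t3 = params
    return norm.pdf(t1, loc=0, scale=10) * norm.pdf(t2, loc=0, scale=10)

chains = mcmc(N=500, x=[], v=[])
print(len(chains["t1"]))   # 500 states per parameter chain (initial value + 499 steps)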
Example no. 3
def hrf_single(value):
	""" Return values for HRF at single value

	Parameters:
	-----------
	value: a single float or integer value

	Returns:
	--------
	hrf_value: the hrf(value) evaluated 


	Note:
	-----
	You must change the max_value (use np.argmax) if you change the function
	"""

	if value < 0 or value > 30:  # if outside the range of the function
		return 0

	# Gamma pdf for the peak
	peak_values = gamma.pdf(value, 6)
	# Gamma pdf for the undershoot
	undershoot_values = gamma.pdf(value, 12)
	# Combine them
	values = peak_values - 0.35 * undershoot_values
	# Scale max to 0.6 (max_value is a module-level constant in the original project)
	return values / max_value * 0.6
Example no. 4
def fast_hrf(values):
	""" Return values for HRF at multiple values

	Parameters:
	-----------
	values: an array-like structure of integers or floats.

	Returns:
	--------
	comb_values: the hrf(values) evaluated 


	Note:
	-----
	You must change the max_value (use np.argmax) if you change the function
	"""

	# Gamma pdf for the peak
	peak_values = gamma.pdf(values, 6)
	# Gamma pdf for the undershoot
	undershoot_values = gamma.pdf(values, 12)
	# Combine them
	comb_values = peak_values - 0.35 * undershoot_values
	# Zero out points outside the range of the function
	comb_values[np.logical_or(values < 0, values > 30)] = 0
	# Scale max to 0.6 (max_value is a module-level constant in the original project)
	return comb_values / max_value * 0.6
Example no. 5
def spm_hrf(times):
    """ Return values for standard SPM HRF at given `times`

    This is the same as SPM's ``spm_hrf.m`` function using the default input
    values.

    Parameters
    ----------
    times : array
        Times at which to sample hemodynamic response function

    Returns
    -------
    values : array
        Array of same length as `times` giving HRF samples at corresponding
        time post onset (where onset is T==0).
    """
    # Gamma only defined for x values > 0
    time_gt_0 = times > 0
    ok_times = times[time_gt_0]
    # Output vector
    values = np.zeros(len(times))
    # Gamma pdf for the peak
    peak_values = gamma.pdf(ok_times, 6)
    # Gamma pdf for the undershoot
    undershoot_values = gamma.pdf(ok_times, 16)
    # Combine them
    values[time_gt_0] = peak_values - undershoot_values / 6.
    # Divide by sum
    return values / np.sum(values)
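For example (with numpy and scipy.stats.gamma in scope), sampling the HRF at a 2 s TR:

import numpy as np
from scipy.stats import gamma

tr_times = np.arange(0, 30, 2.0)          # one sample per 2 s TR
samples = spm_hrf(tr_times)
print(tr_times[np.argmax(samples)])       # 6.0 on this coarse grid; the true peak is near 5 s
print(samples.sum())                      # 1.0, by the final normalization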
Example no. 6
def test_hrf():
    time = np.arange(0,24, 1.0/100)
    peak_values = gamma.pdf(time,6)
    undershoot_values = gamma.pdf(time, 12)
    values = peak_values - 0.35 * undershoot_values
    hrf_totest = values/np.max(values) * 0.6 
    npt.assert_array_equal(hrf_totest,stimuli.hrf(np.arange(0,24,1.0/100)))
Example no. 7
def hrf(times):
  """Produce hemodynamic response function for 'times'

  Parameters
  ----------
  times : times in seconds for the events 

  Returns
  -------

  nparray of length == len(times) with hemodynamic response values for each time course

  Example
  -------
  >>> tr_times = np.arange(240) * 2
  >>> hrf(tr_times)[:5]
  array([ 0.        ,  0.13913511,  0.6       ,  0.58888885,  0.25576589])
  """
  # Gamma pdf for the peak
  peak_values = gamma.pdf(times, 6)
  # Gamma pdf for the undershoot
  undershoot_values = gamma.pdf(times, 12)
  # Combine them
  values = peak_values - 0.35 * undershoot_values
  # Scale max to 0.6
  return values / np.max(values) * 0.6
Example no. 8
def hrf(times):
    """
    Computes values for the canonical hemodynamic response function at the
    specified times 
    
    Parameters
    ----------
    times : np.ndarray
        1-D array of time points

    Return:
    ------
    hrf : np.ndarray
        Array of shape (len(times),) that represents the hemodynamic response
        function for the specified time points
    """
    # Gamma pdf for the peak
    peak_values = gamma.pdf(times, 6)
    # Gamma pdf for the undershoot
    undershoot_values = gamma.pdf(times, 12)
    # Combine them
    values = peak_values - 0.35 * undershoot_values
    # Scale max to 0.6
    return values / np.max(values) * 0.6
Example no. 9
    
def hrf(times):
    """
    Return values for HRF at given times.
    Used to get the convolved parameters.
    """
    peak_values = gamma.pdf(times, 6)
    undershoot_values = gamma.pdf(times, 12)
    values = peak_values - 0.35 * undershoot_values
    return values / np.max(values) * 0.6
Example no. 10
def hrf(times):
    # Gamma pdf for the peak
    peak_values = gamma.pdf(times, 6)
    # Gamma pdf for the undershoot
    undershoot_values = gamma.pdf(times, 12)
    # Combine them
    values = peak_values - 0.35 * undershoot_values
    # Scale max to 0.06
    return values / np.max(values) * 0.06
Example no. 11
def hrf(times):
    """ Return values for HRF at given times """
    # Gamma pdf for the peak
    peak_values = gamma.pdf(times, 6)
    # Gamma pdf for the undershoot
    undershoot_values = gamma.pdf(times, 12)
    # Combine them
    values = peak_values - 0.35 * undershoot_values
    # Scale max to 0.6
    return values / np.max(values) * 0.6
Example no. 12
File: hrf.py Project: INCF/pybids
def _gamma_difference_hrf(tr, oversampling=50, time_length=32., onset=0.,
                          delay=6, undershoot=16., dispersion=1.,
                          u_dispersion=1., ratio=0.167):
    """ Compute an hrf as the difference of two gamma functions

    Parameters
    ----------

    tr : float
        scan repeat time, in seconds

    oversampling : int, optional (default=50)
        temporal oversampling factor

    time_length : float, optional (default=32)
        hrf kernel length, in seconds

    onset: float
        onset time of the hrf

    delay: float, optional
        delay parameter of the hrf (in s.)

    undershoot: float, optional
        undershoot parameter of the hrf (in s.)

    dispersion : float, optional
        dispersion parameter for the first gamma function

    u_dispersion : float, optional
        dispersion parameter for the second gamma function

    ratio : float, optional
        ratio of the two gamma components

    Returns
    -------
    hrf : array of shape(length / tr * oversampling, dtype=float)
         hrf sampling on the oversampled time grid
    """
    from scipy.stats import gamma
    dt = tr / oversampling
    time_stamps = np.linspace(0, time_length, np.rint(float(time_length) / dt).astype(int))
    time_stamps -= onset
    hrf = gamma.pdf(time_stamps, delay / dispersion, dt / dispersion) -\
        ratio * gamma.pdf(
        time_stamps, undershoot / u_dispersion, dt / u_dispersion)
    hrf /= hrf.sum()
    return hrf
Example no. 13
def test_hrf():
    # create test array of times
    hrf_times = np.arange(0,20,0.2)
    # Gamma pdf for the peak
    peak_values = gamma.pdf(hrf_times, 6)
    # Gamma pdf for the undershoot
    undershoot_values = gamma.pdf(hrf_times, 12)
    # Combine them
    test_values = peak_values - 0.35 * undershoot_values
    # Scale max to 0.6
    test_values = test_values/np.max(test_values)*0.6
    # my values from function
    my_values = hrf(hrf_times)
    # assert
    assert_almost_equal(test_values, my_values)
Example no. 14
def glover_hrf(timepoints):
	""" Canonical (Glover) HRF.
		
		Based on the nipy implementation (github.com/nipy/nipy/blob/master/nipy/
		modalities/fmri/hrf.py). Peaks at 5.4 with 5.2 FWHM, undershoot peak at
		10.8 with FWHM of 7.35 and amplitude of -0.35. 
		
		Args:
		timepoints: Array of floats. Time points (in seconds, onset is 0).
	"""	
	# Compute HRF as a difference of gammas and normalize
	hrf = gamma.pdf(timepoints, 7, scale=0.9) - 0.35 * gamma.pdf(timepoints, 13,
																 scale=0.9)
	hrf /= hrf.sum()
	return hrf
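A quick check of the peak location quoted in the docstring (numpy and scipy.stats.gamma imports assumed as above):

import numpy as np
from scipy.stats import gamma

t = np.arange(0, 32, 0.1)
h = glover_hrf(t)
print(t[np.argmax(h)])   # ~5.3: just below the 5.4 mode of the first gamma, pulled left by the undershoot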
Example no. 15
 def get_probability(self, shared_length, query_node, labeled_node):
     """
     Returns the probability that query_node and labeled_node have total
     shared segment length shared_length
     """
     shape, scale =  self._distributions[query_node, labeled_node]
     return gamma.pdf(shared_length, a = shape, scale = scale)
Example no. 16
    def fit_gamma_distribution(self, desorption_thresh=5, plot=False, bins=15, normed=True):

        #  first get the detachment time
        dt = self.get_desorption_distribution(thresh=desorption_thresh)

        # you need to invert the data - to be able to fit gamma
        # dt = dt.max() - dt

        fit_alpha, fit_loc, fit_beta = gamma.fit(dt)

        x = np.linspace(0, dt.max(), 100)

        pdf_fitted = gamma.pdf(x, fit_alpha, fit_loc, fit_beta)

        # normalize
        # pdf_fitted = pdf_fitted / pdf_fitted.max()

        # this is the maximum of the distribution
        mode = x[pdf_fitted.argmax()]

        if plot:
            x = np.linspace(0, dt.max(), 100)

            plt.hist(dt, bins=bins, density=normed)  # 'normed' was removed from matplotlib; 'density' replaces it
            plt.plot(x, pdf_fitted)
            plt.show()

        return fit_alpha, fit_loc, fit_beta
Example no. 17
def result_N_sub(path):
    fig, ax = plt.subplots()
    for result_data_path in path:
        data = np.load(result_data_path)
        beta = data['beta']
        num_of_strings = data['num_of_strings']
        L = data['L']
        frames = data['frames']
        Ls = data['Ls'].astype(float)
        N_sub = data['N_sub']

        # M = N_sub / 3 * Ls * (Ls + 1) + 1
        M = N_sub
        M_ave = M / np.sum(M)
        popt = curve_fit(gamma.pdf, xdata=Ls, ydata=M_ave, p0=[2.5, -5., 30])[0]
        print(beta, popt)
        ax.plot(Ls, M_ave, '.-', label=r'$\beta = %2.2f$' % beta)
        x = np.linspace(1., max(Ls), num=5*max(Ls))
        ax.plot(x, gamma.pdf(x, a=popt[0], loc=popt[1], scale=popt[2]),
                '-', label=r'fitted $\beta = %2.2f$' % beta)
    ax.legend(loc='best')
    ax.set_ylim((0., 0.1))
    ax.set_title('Strings in hexagonal region' +
                ' (sample: {})'.format(num_of_strings))
    ax.set_xlabel(r'Cutting size $L$')
    ax.set_ylabel('Average number of the sub-clusters in the hexagonal region of size L')

    plt.show()
Example no. 18
 def lowerbound(self, lnjoint):
     #probability of these targets is 1 as they are training labels
     lnpCT = self.post_lnjoint_ct(lnjoint)                    
     lnpPi = np.sum(norm.pdf(self.mu, loc=self.m0, scale=1 / (self.lamb0 * self.prec)) \
                                                 *  gamma.pdf(self.prec, a=self.gam_alpha0, scale=1 / self.gam_beta0))
     lnpKappa = self.post_lnkappa()
     EEnergy = lnpCT + lnpPi + lnpKappa
     
     lnqT = self.q_ln_t()
     lnqPi = np.sum(norm.pdf(self.mu, loc=self.m, scale=1 / (self.lamb * self.prec)) \
                                                 *  gamma.pdf(self.prec, a=self.gam_alpha, scale=1 / self.gam_beta))
     lnqKappa = self.q_lnkappa()
     H = - lnqT - lnqPi - lnqKappa
     L = EEnergy + H
     #logging.debug('EEnergy ' + str(EEnergy) + ', H ' + str(H))
     return L
Example no. 19
def _fit_gamma(sampleses, filename):
    """Fits a gamma distribution to the first 16 samples and plots the results

    Assuming that filename ends with ".pdf"
    """
    for i, samples in enumerate(sampleses[:16]):
        sample_mean = np.mean(samples)
        sample_var = np.var(samples)
        sample_median = np.median(samples)
        shape, loc, scale = gamma.fit(samples)
        stat, pval = kstest(
            samples,
            'gamma',
            args=(shape, loc, scale))
        fig, axis = plt.subplots(1, 1)
        axis.hist(samples, density=True)
        if i == 15:
            fig.savefig('last.pdf')
        plotx = np.linspace(np.min(samples), np.max(samples))
        axis.plot(
            plotx,
            gamma.pdf(plotx, shape, loc=loc, scale=scale),
            linewidth=3)
        axis.set_title(
            'shape='+str(shape)+'; loc='+str(loc) +
            '; scale='+str(scale)+'\n' +
            'stat='+str(stat)+'; pval='+str(pval)+'\n' +
            'mean='+str(shape*scale)+'; var='+str(shape*scale*scale)+'\n' +
            's_mean='+str(sample_mean)+'; s_var='+str(sample_var)+'\n' +
            's_median='+str(sample_median))
        fig.savefig(
            filename[:-4]+'_fit_'+_pad_num(i+1)+'.pdf',
            bbox_inches='tight')
        plt.close()
Example no. 20
 def params_of(strings):
     strings_logprobs = np.empty(len(strings))
     for i, string in enumerate(strings):
         strings_logprobs[i] = sum(old_logprobs[state, symbol] for state, symbol in of(string))
     strings_params = gamma.fit(strings_logprobs[strings_logprobs != np.inf])
     _, bins, _ = plt.hist(strings_logprobs[strings_logprobs != np.inf], 500, histtype = 'step', density = True)
     plt.plot(bins, gamma.pdf(bins, *strings_params))
     return strings_params
Example no. 21
def plot_gamma(ax, shape, scale):
    """plot a gamma distribution on the supplied axis"""
    tmin = max(0, ax.get_xlim()[0])
    tmax = ax.get_xlim()[1]
    t = np.linspace(tmin, tmax)
    ax.plot(t, gamma.pdf(t, a=shape, scale=scale), 'r',
        label="gamma pdf, $shape=%.1f, scale=%f$"%(shape, scale))
    xlims = ax.set_xlim(tmin, tmax)
    return ax
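A short driver for the helper above (assuming matplotlib.pyplot as plt, numpy as np, and scipy.stats.gamma imported, as the body implies):

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import gamma

fig, ax = plt.subplots()
ax.set_xlim(0, 20)                    # the helper reads its t range from the axis limits
plot_gamma(ax, shape=2.0, scale=3.0)
ax.legend()
plt.show()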
Example no. 22
def _recall(m, l, gamma_params, max_x):
    k = len(gamma_params)
    s = 0.0
    for i in range(1, k):
        shape, loc, scale = gamma_params[i]
        join_prob_func = lambda x : _hash_probability(m, l, np.sqrt(x)) * gamma.pdf(x, shape, loc, scale)
        prob, _ = quad(join_prob_func, 0.0, max_x) 
        s += prob
    return s / float(k - 1)
Example no. 23
 def gamma_pdf(sample, alpha, beta):
     """
     Shape/rate parameterization, renamed from ``gamma`` so the body's call
     to scipy.stats.gamma is not shadowed by the function itself.

     https://en.wikipedia.org/wiki/Gamma_distribution#Parameterizations
     https://stackoverflow.com/a/16964743/3052112
     """
     x = sample
     k = alpha
     theta = 1.0 / beta
     return gamma.pdf(x, a=k, scale=theta)
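The wrapper maps the shape/rate convention onto scipy's shape/scale one; a numeric sanity check with hypothetical values:

import math
from scipy.stats import gamma

x, alpha, beta = 2.0, 3.0, 1.5
# shape/rate density: beta**alpha * x**(alpha-1) * exp(-beta*x) / Gamma(alpha)
manual = beta**alpha * x**(alpha - 1) * math.exp(-beta * x) / math.gamma(alpha)
assert abs(manual - gamma.pdf(x, a=alpha, scale=1.0 / beta)) < 1e-12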
Example no. 24
def calc_weight(weight, period, similarity):
    """
    :descption
    Before answer we add this five content.
    1. more is different.
    2. rare is beautiful.
    3. new is good.
    4. refractory period
    5. key word
    """
    return gamma.pdf(period, GAMMA) + similarity + more_or_rare(weight)
Example no. 25
def hrf(times):
	""" Return values for canonical HRF at given times 
	
	Parameter:
	---------
	times: array
		an array of time points

	Return:
	------
	an array (len(times),)
		hemodynamic response
	"""
	# Gamma pdf for the peak
	peak_values = gamma.pdf(times, 6)
	# Gamma pdf for the undershoot
	undershoot_values = gamma.pdf(times, 12)
	# Combine them
	values = peak_values - 0.35 * undershoot_values
	# Scale max to 0.6
	return values / np.max(values) * 0.6
Example no. 26
def simulate_gamma(psth, trials, duration, num_trials=20):

    #rescale the ISIs
    dt = 0.001
    rs_isis = []
    for trial in trials:
        if len(trial) < 1:
            continue
        csum = np.cumsum(psth)*dt
        for k,ti in enumerate(trial[1:]):
            tj = trial[k]
            if ti > duration or tj > duration or ti < 0.0 or tj < 0.0:
                continue
            ti_index = int((ti / duration) * len(psth))
            tj_index = int((tj / duration) * len(psth))
            #print 'k=%d, ti=%0.6f, tj=%0.6f, duration=%0.3f' % (k, ti, tj, duration)
            #print '  ti_index=%d, tj_index=%d, len(psth)=%d, len(csum)=%d' % (ti_index, tj_index, len(psth), len(csum))
            #get rescaled time as difference in cumulative intensity
            ui = csum[ti_index] - csum[tj_index]
            if ui < 0.0:
                print('ui < 0! ui=%0.6f, csum[ti]=%0.6f, csum[tj]=%0.6f' % (ui, csum[ti_index], csum[tj_index]))
            else:
                rs_isis.append(ui)
    rs_isis = np.array(rs_isis)
    rs_isi_x = np.arange(rs_isis.min(), rs_isis.max(), 1e-5)

    #fit a gamma distribution to the rescaled ISIs
    gamma_alpha,gamma_loc,gamma_beta = gamma.fit(rs_isis)
    gamma_pdf = gamma.pdf(rs_isi_x, gamma_alpha, loc=gamma_loc, scale=gamma_beta)
    print('Rescaled ISI Gamma Fit Params: alpha=%0.3f, beta=%0.3f, loc=%0.3f' % (gamma_alpha, gamma_beta, gamma_loc))

    #simulate new trials using rescaled ISIs
    new_trials = []
    for nt in range(num_trials):
        ntrial = []
        next_rs_time = gamma.rvs(gamma_alpha, loc=gamma_loc,scale=gamma_beta)
        csum = 0.0
        for t_index,pval in enumerate(psth):
            csum += pval*dt
            if csum >= next_rs_time:
                #spike!
                t = t_index*dt
                ntrial.append(t)
                #reset integral and generate new rescaled ISI
                csum = 0.0
                next_rs_time = gamma.rvs(gamma_alpha, loc=gamma_loc,scale=gamma_beta)
        new_trials.append(ntrial)
    #plt.figure()
    #plt.hist(rs_isis, bins=20, normed=True)
    #plt.plot(rs_isi_x, gamma_pdf, 'r-')
    #plt.title('Rescaled ISIs')

    return new_trials
Example no. 27
def posterior(m , t):
    
    xbar , s = np.mean(data) , np.var(data)
    hyper_t = 1./hyper_sigma**2.
   
    sigma_pos = (n*t + hyper_t)**-.5
    mean_pos = (n*xbar*t + hyper_mu*hyper_t)/(n*t + hyper_t)

    a_pos = n/2. + hyper_a
    scale_pos  = 1./(1 + n*s/2.)
    #print mean_pos
    #print sigma_pos
    return gamma.pdf(t, a_pos, loc=0, scale=scale_pos) * norm.pdf(m, loc=mean_pos, scale=sigma_pos)
Example no. 28
 def _pdf(self, value: float):
     """
     Defines the gamma distribution
     :param value: x-value
     :return: Function value at point x
     """
     if self._research_mode:
         return gamma.pdf(value, a=self._alpha, scale=self._beta)
     else:
         if value > 0:
             return (math.pow(self.beta, -self.alpha) * math.pow(value, self.alpha-1) * math.exp(-value/self.beta)) / \
                    math.gamma(self.alpha)
         else:
             return 0
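Both branches of _pdf compute the same density when _beta plays the role of the scale parameter; a standalone check of that equivalence (values are hypothetical):

import math
from scipy.stats import gamma

x, alpha, beta = 3.0, 2.5, 1.2
closed_form = (math.pow(beta, -alpha) * math.pow(x, alpha - 1) * math.exp(-x / beta)) / math.gamma(alpha)
assert abs(closed_form - gamma.pdf(x, a=alpha, scale=beta)) < 1e-12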
Example no. 29
def main():


    model=pymc.MCMC(gamma_model)
    model.sample(iter=1000, burn=500, thin=2)

    alpha = mean(model.trace('alpha')[:])
    beta = mean(model.trace('beta')[:])
    print('alpha: %s' % alpha)
    print('beta: %s' % beta )

    x = np.arange(0, 100, 0.001)
    y = gamma.pdf(x, alpha, scale= 1.0 / beta)

    plt.plot(x,y)
    plt.xlim([0,100])
    plt.ylim([0,0.001])
    plt.show()
Example no. 30
def fit_a_x0_scale(path):
    betas = []
    a = []
    loc = []
    scale = []

    fig, ax = plt.subplots()
    for i, result_data_path in enumerate(path):
        globals().update(load_data(result_data_path))

        ax.plot(Ls, M_ave, '.', label=r'$\beta = %2.2f$' % beta,
                color=cm.viridis(float(i) / len(path)))

        popt = curve_fit(gamma.pdf, xdata=Ls, ydata=M_ave, p0=[2.5, -5., 10.])[0]
        print(beta, popt)
        betas.append(beta)

        a.append(popt[0])
        loc.append(popt[1])
        scale.append(popt[2])

        x = np.linspace(0, max(Ls), num=5*max(Ls))
        ax.plot(x, gamma.pdf(x, a=popt[0], loc=popt[1], scale=popt[2]),
                    '-', label=r'fitted $\beta = %2.2f$' % beta,
                    color=cm.viridis(float(i) / len(path)))
    show_plot1(ax, num_of_strings)
    plt.show()

    betas = np.array(betas)
    a = np.array(a)
    loc = np.array(loc)
    scale = np.array(scale)

    fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
    ax1.plot(betas, a, 'o')
    [ax.set_xlabel(r'$\beta$') for ax in [ax1, ax2, ax3]]
    [ax.set_xlim((0, max(betas))) for ax in [ax1, ax2, ax3]]
    ax1.set_ylabel(r'Shape parameter: $a$')
    ax2.plot(betas, loc, 'o')
    ax2.set_ylabel(r'Translation parameter: $x_{0}$')
    # ax3.plot(-betas, -scale)  # experimental
    ax3.plot(betas, scale, 'o')
    ax3.set_ylabel(r'Scale parameter: $\theta$')
    plt.show()
Example no. 31
def compute_gamma_prob(tau, a, b):
    dist = gamma.pdf(tau, a, loc=0, scale=(1/b))
    return dist
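Note that b here is a rate, converted to scipy's scale via 1/b; for instance:

from scipy.stats import gamma

# density of Gamma(shape=2, rate=4) at tau=0.5
print(compute_gamma_prob(0.5, 2, 4))
print(gamma.pdf(0.5, 2, scale=0.25))   # identical, since scale = 1/rate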
Example no. 32
def g(x):
    return gamma.pdf(x, C) * abs(np.cos(R * x))
Example no. 33
def prob_gamma(x, shape, scale):
    p = gamma.pdf(x,shape, loc=0, scale=scale)
    return p
Example no. 34
def dgamma(x, a):
    """ Probability of a gamma continuous random variable. """
    g2 = gamma.pdf(x, a)
    return g2
Example no. 35
            areas = np.array([float(x) for x in line.rsplit(",")[0:-1]])
        i+=1
    nonzero_areas = areas[areas.nonzero()]
    curved_core_cell_areas_sep[name]=nonzero_areas/nonzero_areas.mean()

pop_areas = list()
for n in names:
    pop_areas.extend(curved_core_cell_areas_sep[n])
pop_areas=remove_zeros(pop_areas)


# =============================================================================
# FITS
# =============================================================================
x = np.linspace(0,xmax,1000)
fit_pop = gamma.pdf(x,*pdr_params)

if floc:
    neo_params = gamma.fit(curved_neovascularized_areas,floc=0)
fit_neo = gamma.pdf(x,*neo_params)
# =============================================================================
# PLOT
# =============================================================================
fig = plt.figure()
ax = plt.subplot(111)
#plot_linear_distribution(pop_areas,"DR population", density=True,n="auto",fig=fig,ax=ax)

plt.plot(x,fit_pop,label="PDR whole image",c="red")
plt.plot(x,fit_neo,label="Neovascularized region only",c="#411900")

plot_linear_distribution(pop_areas,"PDR", density=True,n="auto",fig=fig,ax=ax,formatting=formatting)
Example no. 36
}

# Proposed values are a Gaussian peturbation away from the previous values.
# This is controlled by the sigma of the gaussian, which is defined for each variable
proposal_sigma = {
    'missing': 0.025,
    'shape': 0.05,
    'scale': 2,
    'mixture': 0.025,
}

# PRIORS
priors = (lambda x: {
    'missing': beta.pdf(x['missing'], a=3, b=15),
    'mixture': beta.pdf(x['mixture'], a=1.1, b=1.1),
    'shape': gma.pdf(x['shape'], a=10, scale=1 / 5),
    'scale': gma.pdf(x['scale'], a=6, scale=50)
})


def test_mcmc():
    folder = os.path.dirname(os.path.abspath(__file__))
    file = "/mcmc_test_chain"

    chain = mcmc.run_MCMC(data=am_data,
                          initial_parameters=initial_parameters,
                          proposal_sigma=proposal_sigma,
                          priors=priors,
                          thin=1,
                          nreps=3,
                          output_dir=folder,
Example no. 37
 def gps_function(treatment_val):
     return gamma.pdf(treatment_val, a=shape, loc=0, scale=scale)
Example no. 38
def main():
    assert exists(SDCIT_RESULT_DIR + '/kcipt_chaotic_5000.csv'), 'run_SDCIT first'
    assert exists(SDCIT_RESULT_DIR + '/kcipt_chaotic_20000.csv'), 'run_SDCIT first'

    from experiments.draw_figures import color_palettes, method_color_codes

    obj_filename = SDCIT_RESULT_DIR + '/right_power.pickle'
    experiment(obj_filename)

    time.sleep(3)

    with open(obj_filename, 'rb') as f:  # Python 3: open(..., 'rb')
        sdcit_mmd, sdcit_null, mmds100, outer_null100, desired_B, mmds_B, outer_null_B, distr_boot = pickle.load(f)

    print(desired_B)
    print('SKEW SDCIT NULL: {}'.format(scipy.stats.skew(sdcit_null)))
    print('SKEW KCIPT NULL: {}'.format(scipy.stats.skew(outer_null_B)))

    names_kcipt_chaotic = ['independent', 'gamma', 'trial', 'N', 'statistic', 'pvalue', 'B']
    names_sdcit_chaotic = ['independent', 'gamma', 'trial', 'N', 'statistic', 'pvalue']

    df_kcipt_desired_B = pd.read_csv(SDCIT_RESULT_DIR + '/kcipt_chaotic_{}.csv'.format(desired_B), names=names_kcipt_chaotic, )
    df_kcipt_5000 = pd.read_csv(SDCIT_RESULT_DIR + '/kcipt_chaotic_5000.csv', names=names_kcipt_chaotic, )
    df_kcipt_20000 = pd.read_csv(SDCIT_RESULT_DIR + '/kcipt_chaotic_20000.csv', names=names_kcipt_chaotic, )
    df_sdcit = pd.read_csv(SDCIT_RESULT_DIR + '/sdcit_chaotic.csv', names=names_sdcit_chaotic, )
    df_sdcit = df_sdcit[df_sdcit['N'] == 400]
    df_sdcit = df_sdcit[df_sdcit['independent'] == 1]
    df_sdcit = df_sdcit[df_sdcit['gamma'] == 0.0]
    assert len(df_sdcit) == 300
    xs_sdcit = np.linspace(1.3 * sdcit_null.min(), 1.3 * sdcit_null.max(), 1000)
    ys_sdcit_pearson3 = pearson3.pdf(xs_sdcit, *pearson3.fit(sdcit_null))

    xs_kcipt = np.linspace(1.3 * outer_null_B.min(), 1.3 * outer_null_B.max(), 1000)
    ys_kcipt_pearson3 = pearson3.pdf(xs_kcipt, *pearson3.fit(outer_null_B))

    # 20000's null is inferred from known one...
    factor_20000 = np.sqrt(20000 / desired_B)
    ys_kcipt_20000_gamma = gamma.pdf(xs_kcipt, *gamma.fit(outer_null_B / factor_20000))

    sns.set(style='white', font_scale=1.2)
    paper_rc = {'lines.linewidth': 0.8, 'lines.markersize': 2, 'patch.linewidth': 1}
    sns.set_context("paper", rc=paper_rc)
    plt.rc('text', usetex=True)
    plt.rc('text.latex', preamble=r'\usepackage{cmbright}')

    if True:
        fig = plt.figure(figsize=[5, 3.5])
        ##################################
        fig.add_subplot(2, 2, 1, adjustable='box')

        plt.plot(xs_sdcit, ys_sdcit_pearson3, label='SDCIT null', lw=1.5, color=color_palettes[method_color_codes['SDCIT']])
        plt.plot([sdcit_mmd, sdcit_mmd], [0, 1000], label='SDCIT TS', color=color_palettes[method_color_codes['SDCIT']])
        plt.plot(xs_kcipt, ys_kcipt_pearson3, label='KCIPT null', lw=1.5, color=color_palettes[method_color_codes['KCIPT']])
        sns.distplot(distr_boot, hist=True, kde=False, hist_kws={'histtype': 'stepfilled'}, norm_hist=True, label='KCIPT TS', color=color_palettes[method_color_codes['KCIPT']])
        plt.gca().set_xlim([-0.0003, 0.0005])
        plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
        plt.gca().set_ylabel('density')
        plt.setp(plt.gca(), 'yticklabels', [])
        plt.legend(loc=1)
        ##################################
        fig.add_subplot(2, 2, 2, adjustable='box')

        pvals_B = [p_value_of(t, outer_null_B) for t in distr_boot]
        pval_sdcit = p_value_of(sdcit_mmd, sdcit_null)

        sns.distplot(pvals_B, bins=20, hist=True, kde=False, hist_kws={'histtype': 'stepfilled'}, norm_hist=True, color=color_palettes[method_color_codes['KCIPT']], label='KCIPT p-values')
        plt.plot([pval_sdcit, pval_sdcit], [0, 1], label='SDCIT p-value', color=color_palettes[method_color_codes['SDCIT']])
        plt.gca().set_ylim([0, 2.2])
        plt.gcf().subplots_adjust(wspace=0.3)
        plt.legend(loc=2)
        sns.despine()

        ##################################
        fig.add_subplot(2, 2, 3, adjustable='box')
        sns.distplot(df_sdcit['statistic'], hist=True, bins=20, kde=False, color=color_palettes[method_color_codes['SDCIT']], label='SDCIT TS')
        sns.distplot(df_kcipt_desired_B['statistic'], hist=True, bins=20, kde=False, color=color_palettes[method_color_codes['KCIPT']], label='KCIPT TS')
        plt.legend()
        plt.gca().set_xlim([-0.0003, 0.0005])
        plt.gca().set_xlabel('MMD')
        plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
        plt.gca().set_ylabel('density')
        plt.setp(plt.gca(), 'yticklabels', [])

        ##################################
        fig.add_subplot(2, 2, 4, adjustable='box')

        sns.distplot(df_sdcit['pvalue'], hist=True, bins=20, kde=False, color=color_palettes[method_color_codes['SDCIT']], norm_hist=True, label='SDCIT p-values')
        sns.distplot(df_kcipt_desired_B['pvalue'], hist=True, bins=20, kde=False, color=color_palettes[method_color_codes['KCIPT']], norm_hist=True, label='KCIPT p-values')
        plt.gca().set_xlabel('p-value')
        plt.gcf().subplots_adjust(wspace=0.3, hspace=0.3)
        plt.gca().set_ylim([0, 2.2])
        plt.legend(loc=0)
        sns.despine()
        plt.savefig(SDCIT_FIGURE_DIR + '/kcipt_{}_ps.pdf'.format(desired_B), transparent=True, bbox_inches='tight', pad_inches=0.02)
        plt.close()

    ###############################################
    ###############################################
    ###############################################
    ###############################################
    ###############################################
    ###############################################
    if True:
        sns.set(style='white', font_scale=1.2)
        paper_rc = {'lines.linewidth': 0.8, 'lines.markersize': 2, 'patch.linewidth': 1}
        sns.set_context("paper", rc=paper_rc)
        plt.rc('text', usetex=True)
        plt.rc('text.latex', preamble=r'\usepackage{cmbright}')
        fig = plt.figure(figsize=[5, 1.6])
        ##################################
        fig.add_subplot(1, 2, 1, adjustable='box')
        sns.distplot(df_kcipt_5000['statistic'], hist=True, bins=20, kde=False, color=color_palettes[method_color_codes['KCIPT']], label='TS')
        plt.legend()
        plt.gca().set_xlabel('MMD')
        plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
        plt.gca().set_ylabel('density')
        plt.gca().set_xlim([-0.0002, 0.0003])
        plt.setp(plt.gca(), 'yticklabels', [])
        ##
        fig.add_subplot(1, 2, 2, adjustable='box')
        sns.distplot(df_kcipt_5000['pvalue'], hist=True, bins=20, kde=False, color=color_palettes[method_color_codes['KCIPT']], norm_hist=True, label='p-value')
        plt.gca().set_xlabel('p-value')
        plt.gcf().subplots_adjust(wspace=0.3, hspace=0.3)
        plt.legend(loc=0)
        sns.despine()
        plt.savefig(SDCIT_FIGURE_DIR + '/kcipt_5000_ps.pdf', transparent=True, bbox_inches='tight', pad_inches=0.02)
        plt.close()

    if True:
        sns.set(style='white', font_scale=1.2)
        paper_rc = {'lines.linewidth': 0.8, 'lines.markersize': 2, 'patch.linewidth': 1}
        sns.set_context("paper", rc=paper_rc)
        plt.rc('text', usetex=True)
        plt.rc('text.latex', preamble=r'\usepackage{cmbright}')
        fig = plt.figure(figsize=[5, 1.6])

        # left subplot
        fig.add_subplot(1, 2, 1, adjustable='box')
        plt.plot(xs_sdcit, ys_sdcit_pearson3, label='SDCIT null', lw=1.5, color=color_palettes[method_color_codes['SDCIT']])
        plt.plot(xs_kcipt, ys_kcipt_20000_gamma, label='KCIPT null', lw=1.5, color=color_palettes[method_color_codes['KCIPT']])
        sns.distplot(df_kcipt_20000['statistic'], hist=True, bins=20, kde=False, norm_hist=True, color=color_palettes[method_color_codes['KCIPT']], label='KCIPT TS')
        plt.legend(loc=1)
        plt.gca().set_xlabel('MMD')
        plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
        plt.gca().set_ylabel('density')
        plt.gca().set_xlim([-0.0002, 0.0003])
        plt.setp(plt.gca(), 'yticklabels', [])

        # right subplot
        fig.add_subplot(1, 2, 2, adjustable='box')
        sns.distplot(df_kcipt_20000['pvalue'], hist=True, bins=20, kde=False, color=color_palettes[method_color_codes['KCIPT']], norm_hist=True, label='KCIPT p')
        sns.distplot([p_value_of(ss, sdcit_null) for ss in df_kcipt_20000['statistic']], hist=True, bins=20, kde=False, color='k', norm_hist=True, label='KCIPT p on SDCIT null')
        plt.gca().set_xlabel('p-value')
        plt.gcf().subplots_adjust(wspace=0.3, hspace=0.3)
        plt.legend(loc=0)

        sns.despine()
        plt.savefig(SDCIT_FIGURE_DIR + '/kcipt_20000_ps.pdf', transparent=True, bbox_inches='tight', pad_inches=0.02)
        plt.close()
Example no. 39
def iet_plot(obj1,
             Norm=None,
             model=None,
             t_lims=None,
             lon_lims=None,
             lat_lims=None,
             z_lims=None,
             Mc=None):
    """
    Plot a histogram of interevent times and a pdf of normalized IETs
    
    Args:
        obj1: a varpy object containing event catalogue data
        Norm: if True, normalize the IET histogram
        model: option to fit and bootstrap CoIs for model. Existing options: Poisson, Gamma
        t_lims: [t_min, t_max] defining time axis limits
        lon_lims: [lon_min, lon_max] defining x-axis limits        
        lat_lims: [lat_min, lat_max] defining y-axis limits
        z_lims: [z_min, z_max] defining depth range
        Mc: magnitude cut-off

    Returns:
        fig1: a png image of the resulting plot
    """

    if obj1.type == 'volcanic':
        data = obj1.ecvd.dataset
        header = obj1.ecvd.header
    else:
        data = obj1.ecld.dataset
        header = obj1.ecld.header

    if t_lims is not None:
        try:
            t_min = conversion.date2int(t_lims[0])
            t_max = conversion.date2int(t_lims[1])
        except:
            t_min = float(t_lims[0])
            t_max = float(t_lims[1])
            pass
        data = data[logical_and(data[:, header.index('datetime')] >= t_min,
                                data[:, header.index('datetime')] < t_max), :]

    if lon_lims is not None:
        data = data[
            logical_and(data[:, header.index('longitude')] >= lon_lims[0],
                        data[:, header.index('longitude')] < lon_lims[1]), :]

    if lat_lims is not None:
        data = data[
            logical_and(data[:, header.index('latitude')] >= lat_lims[0],
                        data[:, header.index('latitude')] < lat_lims[1]), :]

    if z_lims is not None:
        data = data[logical_and(data[:, header.index('depth')] >= z_lims[0],
                                data[:, header.index('depth')] < z_lims[1]), :]

    if Mc is not None:
        data = data[data[:, header.index('magnitude')] >= Mc, :]

    dt_data = data[:, header.index('datetime')]

    iets = diff(dt_data, n=1)
    iet_mean = mean(iets)

    iet_bins = logspace(-5.0, 2.0, num=50)
    mid_iet_bins = iet_bins[:-1] + diff(iet_bins) / 2

    iet_counts, iet_bes = histogram(iets, iet_bins)

    ##########
    fig1 = plt.figure(1, figsize=(12, 6))
    ax1 = fig1.add_subplot(121, facecolor='lightgrey')

    ax1.semilogx(mid_iet_bins, iet_counts, '-s', color='blue')

    ax2 = fig1.add_subplot(122, facecolor='lightgrey')
    ax2.loglog(mid_iet_bins / iet_mean,
               (iet_mean * iet_counts) / (diff(iet_bins) * len(iets)),
               '-s',
               color='blue')

    if Norm is True:
        norm = iet_mean
    else:
        norm = 1.

    if model is not None:
        #Bootstrap 95% COIs
        iet_bstps = 1000
        rates_bstps = zeros((len(iet_bins) - 1, iet_bstps))

        if model == 'Gamma':
            #fit gamma model
            fit_alpha, fit_loc, fit_beta = gamma.fit(iets, loc=0.0)
            for j in range(iet_bstps):
                model_sim = gamma.rvs(fit_alpha,
                                      loc=fit_loc,
                                      scale=fit_beta,
                                      size=len(iets))
                rates_bstps[:, j], model_bes = histogram(model_sim, iet_bins)

            coi_95 = scoreatpercentile(rates_bstps.transpose(), 95, axis=0)
            coi_5 = scoreatpercentile(rates_bstps.transpose(), 5, axis=0)

            ax1.semilogx(
                mid_iet_bins / norm,
                gamma.pdf(mid_iet_bins, fit_alpha, fit_loc, fit_beta) *
                diff(iet_bins) * len(iets), 'r')
            ax1.semilogx(mid_iet_bins / norm, coi_95, 'r:')
            ax1.semilogx(mid_iet_bins / norm, coi_5, 'r:')

            ax2.loglog(mid_iet_bins / iet_mean,
                       (iet_mean *
                        gamma.pdf(mid_iet_bins, fit_alpha, fit_loc, fit_beta)),
                       'r')
            ax2.loglog(mid_iet_bins / iet_mean,
                       (iet_mean * coi_95) / (diff(iet_bins) * len(iets)),
                       'r:')
            ax2.loglog(mid_iet_bins / iet_mean,
                       (iet_mean * coi_5) / (diff(iet_bins) * len(iets)), 'r:')

        elif model == 'Poisson':
            #fit exponential model
            for j in range(iet_bstps):
                model_sim = expon.rvs(scale=iet_mean, size=len(iets))
                rates_bstps[:, j], model_bes = histogram(model_sim, iet_bins)

            coi_95 = scoreatpercentile(rates_bstps.transpose(), 95, axis=0)
            coi_5 = scoreatpercentile(rates_bstps.transpose(), 5, axis=0)

            ax1.semilogx(
                mid_iet_bins / norm,
                expon.pdf(mid_iet_bins, loc=0, scale=iet_mean) *
                diff(iet_bins) * len(iets), 'r')
            ax1.semilogx(mid_iet_bins / norm, coi_95, 'r:')
            ax1.semilogx(mid_iet_bins / norm, coi_5, 'r:')

            ax2.loglog(
                mid_iet_bins / iet_mean,
                (iet_mean * expon.pdf(mid_iet_bins, loc=0, scale=iet_mean)),
                'r')
            ax2.loglog(mid_iet_bins / iet_mean,
                       (iet_mean * coi_95) / (diff(iet_bins) * len(iets)),
                       'r:')
            ax2.loglog(mid_iet_bins / iet_mean,
                       (iet_mean * coi_5) / (diff(iet_bins) * len(iets)), 'r:')

    if Norm is True:
        ax1.set_xlabel(r'$\tau \backslash \bar\tau$ (days)')
    else:
        ax1.set_xlabel(r'$\tau$ (days)')

    ax1.set_ylabel('Frequency')
    ax1.xaxis.set_ticks_position('bottom')

    ax2.set_xlim(0.00008, 200)
    ax2.set_ylim(0.00001, 1000)
    ax2.set_xlabel(r'$\tau \backslash \bar\tau$ (days)')
    ax2.set_ylabel('pdf')

    ax2.xaxis.set_ticks_position('bottom')

    png_name = obj1.figure_path + '/iet_plots.png'
    eps_name = obj1.figure_path + '/iet_plots.eps'
    plt.savefig(png_name)
    plt.savefig(eps_name)
Example no. 40
from scipy.stats import expon
from scipy.stats import gamma
from scipy.stats import logistic

import matplotlib.pyplot as plt
import numpy as np

fig, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3, 2)

# gamma
a = 1.99
mean, var, skew, kurt = gamma.stats(a, moments = 'mvsk')
x = np.linspace(gamma.ppf(0.01, a),
                 gamma.ppf(0.99, a), 100)
ax1.plot(x, gamma.pdf(x, a),
       'r-', lw=5, alpha=0.6, label='gamma pdf')
ax1.set_title('gamma pdf')
ax2.plot(x, gamma.cdf(x, a),
       'r-', lw=5, alpha=0.6, label='gamma cdf')
ax2.set_title('gamma cdf')

# logistic
b = 0.5
mean, var, skew, kurt = logistic.stats(b, moments = 'mvsk')
x = np.linspace(logistic.ppf(0.01, b),
                 logistic.ppf(0.99, b), 100)
ax3.plot(x, logistic.pdf(x, b),
       'g-', lw=5, alpha=0.6, label='logistic pdf')
ax3.set_title('logistic pdf')
ax4.plot(x, logistic.cdf(x, b),
       'g-', lw=5, alpha=0.6, label='logistic cdf')
Example no. 41
m2list = []
m3list = []
m4list = []
m5list = []

for i in range(len(state["rews"])):
    m1list.append(m1(state))
    m2list.append(m2(state))
    m3list.append(m3(state))
    m4list.append(m4(state))
    m5list.append(m5(state))

    state["t"] += 1

x = np.linspace(0, 1, 100)
dist = gamma.pdf(1, 4, x)
plt.plot(x, dist)
plt.title("Gamma prior")
plt.xlabel('N0')
plt.ylabel('Probability Mass')
plt.show()

plt.figure()
# plt.plot(np.array(m1list) / sum(m1list))
# plt.title('Standardized Reward Integration for Sequence: [1,0,0,0,1,0,0,1,0,1,0,1,0]')
# plt.plot(np.array(m1list) / np.std(m1list),label = "Time")
# plt.plot(-np.array(m2list) / np.std(m2list),label = "Memoryless Integrator")
# plt.plot(np.array(m3list) / np.std(m3list),label = "Basic Integrator (a=3, b=1)")
# plt.plot(np.array(m4list) / np.std(m4list),label = "Bayesian Estimation of N0 (a0=1, b0=4)")
# plt.plot(np.array(m5list) / np.std(m5list),label = "Recency-Biased Integrator (a=1, b=2, x=3)")
plt.title(
Example no. 42
def NG(mu, precision, mu0, kappa0, alpha0, beta0):
    '''
    PDF of a Normal-Gamma distribution.
    '''
    return norm.pdf(mu, mu0, 1. / (precision * kappa0)) * gamma.pdf(
        precision, alpha0, scale=1. / beta0)
Example no. 43
from scipy.stats import gamma
import matplotlib.pyplot as plt
import numpy as np

fig, ax = plt.subplots(1, 1)

a = 0.01
mean, var, skew, kurt = gamma.stats(a, moments='mvsk')
x = np.linspace(gamma.ppf(0.01, a), gamma.ppf(0.99, a), 100)
ax.plot(x,
        gamma.pdf(x, a, loc=0.0, scale=2.0),
        'r-',
        lw=5,
        alpha=0.6,
        label='gamma pdf')
# ax.plot(x, gamma.pdf(x, a),
#        'r-', lw=5, alpha=0.6, label='gamma pdf')

rv = gamma(a, loc=0.0, scale=2.0)
# rv = gamma(a)
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')

vals = gamma.ppf([0.001, 0.5, 0.999], a)
np.allclose([0.001, 0.5, 0.999], gamma.cdf(vals, a))

r = gamma.rvs(a, size=1000, loc=0.0, scale=2.0)
# r = gamma.rvs(a, size=1000)

# ax.hist(r, density='True', histtype='stepfilled', alpha=0.2)
# ax.legend(loc='best', frameon=False)
plt.show()
Example no. 44
def analysis_1_generax_withgamma(lang_fr=True, dark=False):
    # First analyse output from GeneRax (1by1)
    with HtmlReport(str(workdir / 'familyrates.html'),
                    style=(css_dark_style if dark else None)) as hr:
        # Invalid values:
        df, nodup = filter_invalid_generax(load_generax_familyrates(), out=hr)

        fig, ((ax0top, ax1top), (ax0bottom, ax1bottom)) = plt.subplots(2, ncols=2)
        # Y axis with a broken scale : ---//---
        fig.subplots_adjust(hspace=0.1)
        axes_dup = brokenAxes(ax0bottom, ax0top)
        (heights, _, _), _ = axes_dup.hist(df.duprate, bins=100, density=True)
        axes_dup.dobreak(max(heights[1:]))

        logger.info('Plotted dup rates')

        axes_loss = brokenAxes(ax1bottom, ax1top)
        ((_,heights), _, _), _ = axes_loss.hist((df.lossrate[nodup], df.lossrate[~nodup]),
                                                #label=['No dup', 'dup > 0'],
                                                label=[r'$\delta=0$', r'$\delta>0$'],
                                                bins=100, density=True, stacked=True)
        ymax = heights.max()
        axes_loss.dobreak(max(heights[heights<ymax]))
        logger.info('Plotted loss rates')

        dup_gamma = gamma.fit(df.duprate.values)
        dup_gamma_nonzero = gamma.fit(df.duprate[~nodup].values)
        loss_gamma = gamma.fit(df.lossrate.values)
        loss_gamma_nodup = gamma.fit(df.lossrate[nodup].values)
        loss_gamma_dup = gamma.fit(df.lossrate[~nodup].values)

        xdup = np.linspace(*ax0bottom.get_xlim(), num=100)
        xloss = np.linspace(*ax1bottom.get_xlim(), num=100)
        axes_dup.plot(xdup, gamma.pdf(xdup, *dup_gamma),
                      label=r'$\Gamma(%g, %g, %g)$' % inverse_scale(dup_gamma))
        #ax0.annotate('Gamma(%g, %g, %g)' % dup_gamma, (1,1), (-2,-2),
        #             xycoords='axes fraction', textcoords='offset points',
        #             va='top', ha='right')
        axes_dup.plot(xdup, gamma.pdf(xdup, *dup_gamma_nonzero),
                      #label='dup>0 Gamma(%g, %g, %g)' % dup_gamma_nonzero)
                      label=r'$\delta>0$ $\Gamma(%g, %g, %g)$' % inverse_scale(dup_gamma_nonzero))
        ax0top.legend()
        ax0bottom.set_ylabel("% d'arbres de gènes" if lang_fr else '% of gene trees')
        ax0top.set_title(('Taux de duplication' if lang_fr else 'Duplication rates') + r' $\delta$')

        axes_loss.plot(xloss, gamma.pdf(xloss, *loss_gamma),
                       label=r'$\Gamma(%g, %g, %g)$' % loss_gamma)
        axes_loss.plot(xloss, gamma.pdf(xloss, *loss_gamma_nodup),
                       label=r'$\delta=0$ $\Gamma(%g, %g, %g)$' % inverse_scale(loss_gamma_nodup))
        axes_loss.plot(xloss, gamma.pdf(xloss, *loss_gamma_dup),
                       label=r'$\delta>0$ $\Gamma(%g, %g, %g)$' % inverse_scale(loss_gamma_dup))
        #ax1.annotate('Gamma(%g, %g, %g)' % loss_gamma, (1,1), (-2,-2),
        #             xycoords='axes fraction', textcoords='offset points',
        #             va='top', ha='right')
        ax1top.legend()
        ax1top.set_title(('Taux de perte' if lang_fr else 'Loss rates') + r' $\lambda$')
        logger.info('Fitted gamma distribs')

        fig.savefig(str(workdir / ('fit_duploss_gamma%s.pdf' % ('_darkbg' if dark else ''))),
                    bbox_inches='tight')
        hr.show()

        for dataset, data_params, data in zip(
            #('dup', 'dup>0', 'loss', 'nodup loss', 'dup>0 loss'),
            (r'$\delta$', r'$\delta>0$', r'$\lambda$', r'$\lambda_{\delta=0}$', r'$\lambda_{\delta>0}$'),
            (dup_gamma, dup_gamma_nonzero, loss_gamma, loss_gamma_nodup, loss_gamma_dup),
            (df.duprate, df.duprate[~nodup], df.lossrate, df.lossrate[nodup], df.lossrate[~nodup])):
            hr.mkd(r'\\[ - lL(%s\\ params) = %g \\]' % (
                      dataset,
                      gamma.nnlf(inverse_scale(data_params), data.values)))

        nonzerodup_df = df.loc[~nodup]
        suggested_transforms = test_transforms(df, ['duprate', 'lossrate'],
                                               widget=False, out=hr)
        suggested_transforms_nonzerodup = test_transforms(nonzerodup_df, ['duprate', 'lossrate'],
                                               widget=False, out=hr)

        fig, axes = plt.subplots(ncols=2, squeeze=False)
        scatter_density('duprate', 'lossrate', data=df, alpha=0.4, ax=axes[0,0])
        axes[0,0].set_xlabel(r'$\delta$')
        axes[0,0].set_ylabel(r'$\lambda$')
        axes[0,0].set_title('Données complètes' if lang_fr else 'Complete dataset')
        scatter_density('duprate', 'lossrate', data=nonzerodup_df, alpha=0.4, ax=axes[0,1])
        axes[0,1].set_xlabel(r'$\delta$')
        axes[0,1].set_ylabel(r'$\lambda$')
        axes[0,1].set_title(r'Données avec $\delta>0$' if lang_fr else r'Dataset with $\delta > 0$')
        hr.show()
        fig.savefig(str(workdir/'scatter_duploss.pdf'), bbox_inches='tight')
        logger.info('Plotted scatter of dup/loss (dup>0, untransformed)')

        hr.print('\n# Regress untransformed data (dup>0)')
        fit = sm.OLS(nonzerodup_df[['lossrate']],
                     sm.add_constant(nonzerodup_df[['duprate']])).fit()
        hr.output(fit.summary())

        hr.print('\n# Regress transformed data (dup>0):')
        for ft, transform in suggested_transforms_nonzerodup.items():
            hr.print('- %s : %s' %(ft, transform.__name__))

        transformed_df = nonzerodup_df.transform(suggested_transforms_nonzerodup)
        fig, ax = plt.subplots()
        scatter_density('duprate', 'lossrate', data=transformed_df, alpha=0.4, ax=ax)
        ax.set_ylabel(r'$\mathrm{%s}(\lambda)$' % suggested_transforms_nonzerodup['lossrate'].__name__)
        ax.set_xlabel(r'$\mathrm{%s}(\delta)$' % suggested_transforms_nonzerodup['duprate'].__name__)
        hr.show()
        fig.savefig(str(workdir/'scatter_transformed-duploss.pdf'), bbox_inches='tight')
        logger.info('Plotted scatter of dup/loss (dup>0, transformed:%s,%s)',
                    suggested_transforms_nonzerodup['duprate'].__name__,
                    suggested_transforms_nonzerodup['lossrate'].__name__
                    )

        fit = sm.OLS(transformed_df[['lossrate']],
                     sm.add_constant(transformed_df[['duprate']])).fit()
        hr.output(fit.summary())
Example no. 45
variance_noise = sigma_noise**2
X_awgn = np.random.normal(mu_noise, sigma_noise, N)     #noise

X = X_normal + X_awgn           #a vector of observations as an example/for plotting

##########################################
#the conjugate prior of the Gaussian with known mean is Gamma (hyperparameters estimate precision)
#define  parameters of initial prior
a = 10
b = .01

#plot initial prior and likelihood
fig1 = plt.figure()
ax12 = fig1.add_subplot(1, 1, 1)
x = np.linspace(precision - 1, precision + 1, N)
y2 = gamma.pdf(x, a=a, scale=1/b)       #plot initial prior, scale is theta = 1/beta
prior = ax12.plot(x, y2, 'g--', label='Original Prior')
handles1, labels1 = ax12.get_legend_handles_labels()
ax12.legend(handles1, labels1)
ax12.set_title('Conjugate Prior Update - Gaussian with known mean, unknown precision = ' + str(precision), fontweight='bold')

##########################################
#update equations for Gaussian of known variance, unknown mean
#initial hyperparameters
update_a = a
update_b = b
plot_list = [0, 1, 2, 3, 4, 9, 19, 49, N-1]

for j in range(0, N):       # N is the observation in question, one index off
    n_update = j + 1    
    sum_xn_mu_squared = sum((X[0:n_update] - mu)**2)
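The loop above is building the standard conjugate update for the precision of a Gaussian with known mean; a self-contained sketch of that update with hypothetical data (not the snippet's variables):

import numpy as np

mu, a, b = 0.0, 10.0, 0.01            # known mean; Gamma(a, b) prior on the precision
x = np.random.normal(mu, 2.0, 500)    # observations whose true precision is 1/2.0**2 = 0.25

# conjugate update: a_n = a + n/2,  b_n = b + sum((x - mu)**2) / 2
a_n = a + len(x) / 2
b_n = b + np.sum((x - mu) ** 2) / 2
print(a_n / b_n)                      # posterior mean of the precision, close to 0.25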
Example no. 46
def tau_prior(tau, tau_rate):
    return (gamma.pdf(tau / 1000, a=1, scale=1 / tau_rate))
Example no. 47
beta = 4.67873  # Mathematica solution!

######## hospitalization times
hosp_time_dist = np.zeros(len(t))

#################### computation
for k in range(200):  ### sum over geometric distribution

    # determine deterministic hitting time of k infected individuals
    tdet = np.log((k + 1) * alpha * beta * p_surv) / alpha

    if (tdet <= 0): tdet = 0

    # add hospitalisation time distribution to tdet
    index = int(tdet / dt)
    hosp_time_dist[index:] += geom.pmf(k + 1, p_hosp) * gamma.pdf(
        t[0:(len(t) - index)], shape_hosp, scale=scale_hosp)

####### epidemic size distribution
dsize = 50
I = np.arange(dsize, 15001, dsize)
dens_I = np.zeros(len(I))

ind_old = 0
for i in range(len(I)):
    tdet_index = int(
        np.round(np.log(I[i] * alpha * beta * p_surv) / alpha / dt)) + 1

    dens_I[i] = np.sum(hosp_time_dist[ind_old:tdet_index] * dt) / dsize

    ind_old = tdet_index
Example no. 48
 def function(intensity, int_intensity_diff, shape):
     return shape*intensity * gamma.pdf(x=shape*int_intensity_diff, a=shape)
Example no. 49
# Based on https://github.com/probml/pmtk3/blob/master/demos/gammaPlotDemo.m

import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml

from scipy.stats import gamma

x = np.linspace(0, 7, 100)
aa = [1.0, 1.5, 2.0, 1.0, 1.5, 2.0]
bb = [1.0, 1.0, 1.0, 2.0, 2.0, 2.0]
#props = ['b-', 'r:', 'k-.', 'g--', 'c-', 'o-']
props = ['b-', 'r-', 'k-', 'b:', 'r:', 'k:']

for a, b, p in zip(aa, bb, props):
    y = gamma.pdf(x, a, scale=1 / b, loc=0)
    plt.plot(x, y, p, lw=3, label='a=%.1f,b=%.1f' % (a, b))
plt.title('Gamma distributions')
plt.legend(fontsize=14)
pml.savefig('gammadist.pdf')
plt.show()

x = np.linspace(0, 7, 100)
b = 1
plt.figure()
for a in [1, 1.5, 2]:
    y = gamma.pdf(x, a, scale=1 / b, loc=0)
    plt.plot(x, y)
plt.legend(['a=%.1f, b=1' % a for a in [1, 1.5, 2]])
plt.title('Gamma(a,b) distributions')
pml.savefig('gammaDistb1.pdf')
Example no. 50
xlabel = "time (h)"

yticks1 = [0,0.02, 0.04, 0.06]
yticks2 = [0,0.05,0.1]

yticks = [yticks1, yticks2]


alphas = [fit_gamma[0][0][0], fit_gamma[1][0][0]]
betas = [fit_gamma[0][0][1], fit_gamma[1][0][1]]

# load peine facs data to plot figure together


x_arr = [x1, x2]
gamma_arr = [gamma.pdf(x, alpha, 0, 1/beta) for x, alpha, beta in zip(x_arr, alphas, betas)]

fig, (ax1, ax2, ax3) = plt.subplots(1,3, figsize = (15,4))
ylabel = "density"
for df, ax, x, y, barwidth in zip(df_list, (ax1, ax2), x_arr, gamma_arr, width):
    ax.bar(df["time"], df.val_norm, width=barwidth, color="grey", edgecolor="k")
    ax.plot(x, y, color="k")
    ax.set_ylabel(ylabel)
    ax.set_xlabel(xlabel)
    ax.set_xlim([x[0], x[-1]])

ax1.set_xticks(np.arange(0,73,24))
ax1.set_yticks(yticks1)
ax2.set_yticks(yticks2)
#plt.show()
loadfile = "../figures/fig1/peine_numpy.npz"
Example no. 51
def A_p_prior(A_p, A_p_rate):
    return (gamma.pdf(A_p, a=1, scale=1 / A_p_rate))
Example no. 52
### effective re-production
tau     = 5#13    #tau equals number of fitted days
a       = 1
b       = 5

n       = timeSpan-tau + 1  #samples for fit
repVec  = np.zeros((n,))

for d in np.arange(1,n):   
    ii  = np.arange(d,d+tau)
    y   = newCases[ii]

    num         = a+ np.sum(y)
 
    s           = np.arange(0,d+tau)
    ws          = gamma.pdf(s,a=3,loc=0,scale=1)  # gamma parameters to play around
    ws          = ws/np.sum(ws)

    
    sum_di      = 0 
    for i in ii:
        w       = np.reshape(ws[:i:],(-1,1))
        w       = w[::-1]
        ys      = np.reshape(newCases[:i:],(1,-1))
        sum_di  += ys@w
        
        
        # uniform dist.
        #sum_di  += np.mean(ys)
                        
    den         = 1/b +  sum_di
Example no. 53
            #A = we(part[I])
            #A = we(median[I])
            C = we(cases[I])
        else:
            I = (state == State[i])
            Y = np.concatenate([Y, we(deaths[I])])
            A = np.concatenate([A, we(home[I])])  # using HOME
            #A = np.concatenate([A, we(work[I])])
            #A = np.concatenate([A, we(part[I])])
            #A = np.concatenate([A, we(median[I])])
            C = np.concatenate([C, we(cases[I])])
    #######################################################

    l = 18
    t = np.linspace(start=0, stop=l, num=l + 1)
    ft = gamma.pdf(t * 7, scale=3.64, a=6.28)  # a - shape parameter
    ft = (ft / sum(ft)) * 0.03
    x = range(1, l + 1)

    pdf = PdfPages("plots/fit_optim_forecast.pdf")
    _, axs = pyplot.subplots(3, 3, figsize=(8, 8))
    theta = np.zeros((51, 5))
    pool = mp.Pool(mp.cpu_count())

    # training loop
    for i in range(51):
        y_true = Y[i, :l]
        m = A[i, :l]
        pool.apply_async(
            worker,
            [i, m, ft[:l], y_true],
Example no. 54
def gamma_bnd(t = 1,
              node = 1,
              shape = 1.01,
              scale = 1,
              theta = 0):
    return gamma.pdf(t - node, a = shape, scale = scale)
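Shifting the argument by node is equivalent to using scipy's loc parameter, which the following check confirms:

import numpy as np
from scipy.stats import gamma

t = np.linspace(0, 10, 101)
assert np.allclose(gamma_bnd(t, node=1, shape=2.0, scale=1.5),
                   gamma.pdf(t, a=2.0, loc=1, scale=1.5))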
Example no. 55
def plot_parameter_assumptions(df_parameters, xlimits=[0, 30], lw=3):
    """
    Plot distributions of mean transition times between compartments in the parameters of the 
    OpenABM-Covid19 model
    
    Arguments
    ---------
    df_parameters : pandas.DataFrame
        DataFrame of parameter values as input first input argument to the OpenABM-Covid19 model
        This plotting scripts expects the following columns within this dataframe: 
            mean_time_to_hospital
            mean_time_to_critical, sd_time_to_critical
            mean_time_to_symptoms, sd_time_to_symptoms
            mean_infectious_period, sd_infectious_period
            mean_time_to_recover, sd_time_to_recover
            mean_asymptomatic_to_recovery, sd_asymptomatic_to_recovery
            mean_time_hospitalised_recovery, sd_time_hospitalised_recovery
            mean_time_to_death, sd_time_to_death
            mean_time_critical_survive, sd_time_critical_survive
    
    xlimits : list of ints
        Limits of x axis of gamma distributions showing mean transition times
    lw : float
        Line width used in plotting lines of the PDFs
    
    Returns
    -------
    fig, ax : figure and axis handles to the generated figure using matplotlib.pyplot
    """
    df = df_parameters  # for brevity
    x = np.linspace(xlimits[0], xlimits[1], num=50)

    fig, ax = plt.subplots(nrows=3, ncols=3)

    ####################################
    # Bernoulli of mean time to hospital
    ####################################

    height1 = np.ceil(df.mean_time_to_hospital.values[0]
                      ) - df.mean_time_to_hospital.values[0]
    height2 = df.mean_time_to_hospital.values[0] - np.floor(
        df.mean_time_to_hospital.values[0])

    x1 = np.floor(df.mean_time_to_hospital.values[0])
    x2 = np.ceil(df.mean_time_to_hospital.values[0])
    ax[0, 0].bar([x1, x2], [height1, height2], color="#0072B2")

    ax[0, 0].set_ylim([0, 1.0])
    ax[0, 0].set_xticks([x1, x2])
    ax[0, 0].set_xlabel("Time to hospital\n(from symptoms; days)")
    ax[0, 0].set_ylabel("Density")
    ax[0, 0].set_title("")
    ax[0, 0].spines["top"].set_visible(False)
    ax[0, 0].spines["right"].set_visible(False)

    ####################################
    # Gamma of mean time to critical
    ####################################

    a, b = gamma_params(df.mean_time_to_critical.values,
                        df.sd_time_to_critical.values)
    ax[1, 0].plot(x,
                  gamma.pdf(x, a=a, loc=0, scale=b),
                  linewidth=lw,
                  color="#0072B2")
    ax[1, 0].axvline(df.mean_time_to_critical.values,
                     color="#D55E00",
                     linestyle="dashed",
                     alpha=0.7)
    ax[1, 0].set_xlabel("Time to critical\n(from hospitalised; days)")
    ax[1, 0].set_title("")
    ax[1, 0].spines["top"].set_visible(False)
    ax[1, 0].spines["right"].set_visible(False)
    ax[1, 0].text(0.9,
                  0.7,
                  'mean: {}\nsd: {}'.format(df.mean_time_to_critical.values[0],
                                            df.sd_time_to_critical.values[0]),
                  ha='right',
                  va='center',
                  transform=ax[1, 0].transAxes)

    ################################
    # Gamma of mean time to symptoms
    ################################

    a, b = gamma_params(df.mean_time_to_symptoms.values,
                        df.sd_time_to_symptoms.values)
    ax[0, 1].plot(x,
                  gamma.pdf(x, a=a, loc=0, scale=b),
                  linewidth=lw,
                  color="#0072B2")
    ax[0, 1].axvline(df.mean_time_to_symptoms.values,
                     color="#D55E00",
                     linestyle="dashed",
                     alpha=0.7)
    ax[0, 1].set_xlabel("Time to symptoms\n(from presymptomatic; days)")
    ax[0, 1].set_title("")
    ax[0, 1].spines["top"].set_visible(False)
    ax[0, 1].spines["right"].set_visible(False)
    ax[0, 1].text(0.9,
                  0.7,
                  'mean: {}\nsd: {}'.format(df.mean_time_to_symptoms.values[0],
                                            df.sd_time_to_symptoms.values[0]),
                  ha='right',
                  va='center',
                  transform=ax[0, 1].transAxes)

    ################################
    # Gamma of mean infectious period
    ################################

    a, b = gamma_params(df.mean_infectious_period, df.sd_infectious_period)
    ax[0, 2].plot(x,
                  gamma.pdf(x, a=a, loc=0, scale=b),
                  linewidth=lw,
                  color="#0072B2")
    ax[0, 2].axvline(df.mean_infectious_period.values,
                     color="#D55E00",
                     linestyle="dashed",
                     alpha=0.7)
    ax[0, 2].set_xlabel("Infectious period (days)")
    ax[0, 2].set_title("")
    ax[0, 2].spines["top"].set_visible(False)
    ax[0, 2].spines["right"].set_visible(False)
    ax[0, 2].text(0.9,
                  0.7,
                  'mean: {}\nsd: {}'.format(df.mean_infectious_period.values[0],
                                            df.sd_infectious_period.values[0]),
                  ha='right',
                  va='center',
                  transform=ax[0, 2].transAxes)

    ################################
    # Gamma of mean time to recover
    ################################

    a, b = gamma_params(df.mean_time_to_recover, df.sd_time_to_recover)
    ax[1, 1].plot(x,
                  gamma.pdf(x, a=a, loc=0, scale=b),
                  linewidth=lw,
                  color="#0072B2")
    ax[1, 1].axvline(df.mean_time_to_recover.values,
                     color="#D55E00",
                     linestyle="dashed",
                     alpha=0.7)
    ax[1, 1].set_xlabel("Time to recover\n(from hospitalised or critical; days)")
    ax[1, 1].set_title("")
    ax[1, 1].spines["top"].set_visible(False)
    ax[1, 1].spines["right"].set_visible(False)
    ax[1, 1].text(0.9,
                  0.7,
                  'mean: {}\nsd: {}'.format(df.mean_time_to_recover.values[0],
                                            df.sd_time_to_recover.values[0]),
                  ha='right',
                  va='center',
                  transform=ax[1, 1].transAxes)

    ########################################
    # Gamma of mean asymptomatic to recovery
    ########################################

    a, b = gamma_params(df.mean_asymptomatic_to_recovery,
                        df.sd_asymptomatic_to_recovery)
    ax[2, 0].plot(x,
                  gamma.pdf(x, a=a, loc=0, scale=b),
                  linewidth=lw,
                  color="#0072B2")
    ax[2, 0].axvline(df.mean_asymptomatic_to_recovery.values,
                     color="#D55E00",
                     linestyle="dashed",
                     alpha=0.7)
    ax[2, 0].set_xlabel("Time to recover\n(from asymptomatic; days)")
    ax[2, 0].set_title("")
    ax[2, 0].spines["top"].set_visible(False)
    ax[2, 0].spines["right"].set_visible(False)
    ax[2, 0].text(0.9,
                  0.7,
                  'mean: {}\nsd: {}'.format(
                      df.mean_asymptomatic_to_recovery.values[0],
                      df.sd_asymptomatic_to_recovery.values[0]),
                  ha='right',
                  va='center',
                  transform=ax[2, 0].transAxes)

    ########################################
    # Gamma of mean hospitalised to recovery
    ########################################

    a, b = gamma_params(df.mean_time_hospitalised_recovery,
                        df.sd_time_hospitalised_recovery)
    ax[2, 1].plot(x,
                  gamma.pdf(x, a=a, loc=0, scale=b),
                  linewidth=lw,
                  color="#0072B2")
    ax[2, 1].axvline(df.mean_time_hospitalised_recovery.values,
                     color="#D55E00",
                     linestyle="dashed",
                     alpha=0.7)
    ax[2, 1].set_xlabel(
        "Time to recover\n(from hospitalisation to hospital discharge if not ICU\nor from ICU discharge to hospital discharge if ICU; days)"
    )
    ax[2, 1].set_title("")
    ax[2, 1].spines["top"].set_visible(False)
    ax[2, 1].spines["right"].set_visible(False)
    ax[2, 1].text(0.9,
                  0.7,
                  'mean: {}\nsd: {}'.format(
                      df.mean_time_hospitalised_recovery.values[0],
                      df.sd_time_hospitalised_recovery.values[0]),
                  ha='right',
                  va='center',
                  transform=ax[2, 1].transAxes)

    #############################
    # Gamma of mean time to death
    #############################

    a, b = gamma_params(df.mean_time_to_death.values,
                        df.sd_time_to_death.values)
    ax[1, 2].plot(x,
                  gamma.pdf(x, a=a, loc=0, scale=b),
                  linewidth=lw,
                  c="#0072B2")
    ax[1, 2].axvline(df.mean_time_to_death.values,
                     color="#D55E00",
                     linestyle="dashed",
                     alpha=0.7)
    ax[1, 2].set_xlabel("Time to death\n(from critical; days)")
    ax[1, 2].set_title("")
    ax[1, 2].spines["top"].set_visible(False)
    ax[1, 2].spines["right"].set_visible(False)
    ax[1, 2].text(0.9,
                  0.7,
                  'mean: {}\nsd: {}'.format(df.mean_time_to_death.values[0],
                                            df.sd_time_to_death.values[0]),
                  ha='right',
                  va='center',
                  transform=ax[1, 2].transAxes)

    ########################################
    # Gamma of mean time to survive if critical: FIXME - definitions
    ########################################

    a, b = gamma_params(df.mean_time_critical_survive,
                        df.sd_time_critical_survive)
    ax[2, 2].plot(x,
                  gamma.pdf(x, a=a, loc=0, scale=b),
                  linewidth=lw,
                  color="#0072B2")
    ax[2, 2].axvline(df.mean_time_critical_survive.values,
                     color="#D55E00",
                     linestyle="dashed",
                     alpha=0.7)
    ax[2, 2].set_xlabel("Time to survive\n(if ICU; days)")
    ax[2, 2].set_title("")
    ax[2, 2].spines["top"].set_visible(False)
    ax[2, 2].spines["right"].set_visible(False)
    ax[2, 2].text(0.9,
                  0.7,
                  'mean: {}\nsd: {}'.format(
                      df.mean_time_critical_survive.values[0],
                      df.sd_time_critical_survive.values[0]),
                  ha='right',
                  va='center',
                  transform=ax[2, 2].transAxes)

    plt.subplots_adjust(hspace=0.5)

    return (fig, ax)
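
# A hedged sketch of calling plot_parameter_assumptions: a one-row DataFrame
# with the columns listed in the docstring (all values below are placeholders,
# not OpenABM-Covid19 defaults); assumes pandas is available and the
# gamma_params helper used above is in scope.
import pandas as pd

df_params = pd.DataFrame([{
    "mean_time_to_hospital": 5.1,
    "mean_time_to_critical": 2.3, "sd_time_to_critical": 2.3,
    "mean_time_to_symptoms": 5.4, "sd_time_to_symptoms": 2.7,
    "mean_infectious_period": 5.5, "sd_infectious_period": 2.1,
    "mean_time_to_recover": 12.0, "sd_time_to_recover": 5.0,
    "mean_asymptomatic_to_recovery": 15.0, "sd_asymptomatic_to_recovery": 5.0,
    "mean_time_hospitalised_recovery": 8.8, "sd_time_hospitalised_recovery": 8.8,
    "mean_time_to_death": 11.7, "sd_time_to_death": 8.8,
    "mean_time_critical_survive": 18.8, "sd_time_critical_survive": 12.2,
}])
fig, ax = plot_parameter_assumptions(df_params)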
Esempio n. 56
0
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as st
from diafunc import linearfunc
from scipy.integrate import quad
from scipy.stats import gamma


def myf(maxmmol, maxgoing, endgoing, typebranch1, typebranch2):
    pass


norm = st.norm(loc=1, scale=0.25)
x = np.linspace(norm.ppf(0.0001), norm.ppf(0.9999), 50)

a = 2
#erl = st.erlang.stats(a)
mean, var, skew, kurt = gamma.stats(a, moments='mvsk')
print(mean, var, skew, kurt)
x2 = np.linspace(0, 6, 100)
#x2 = np.linspace(erl.ppf(0.0001, a), erl.ppf(0.9999, a), 50)
res, err = quad(gamma.pdf, 0, 6, args=(a,))  # pass the shape parameter as a one-element tuple
print(res)
print(x2)

fig, ax = plt.subplots()

ax.plot(x, norm.pdf(x))
ax.plot(x2, gamma.pdf(x2, a))

plt.show()
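
# Sanity check for the quad call above: the numerical integral of the pdf
# over [0, 6] should match the closed-form cdf at 6.
assert abs(res - gamma.cdf(6, a)) < 1e-6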
Esempio n. 57
0
##### PDE solution
fx = np.zeros((int(tfin / dt) + 1, int(tfin / dt) + 1))

tot = [1]

# initialize with 1 individual at time 0 with age 0
fx[0, 0] = 1
k = 2
mu_adj = (1 + pext[0])
inf_times = [0]

# infectiousness vector
mu = []
x = 0
for i in range(int(tfin / dt) + 1):
    mu.append(R0 * (gamma.pdf(x + dt / 2, shape_inf, scale=scale_inf)))
    x = x + dt

mu = np.asarray(mu)
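
# The loop above can be vectorised; an equivalent sketch, assuming tfin, dt,
# R0, shape_inf and scale_inf are defined as in this snippet:
midpoints = np.arange(int(tfin / dt) + 1) * dt + dt / 2  # midpoint of each age bin
mu_vec = R0 * gamma.pdf(midpoints, shape_inf, scale=scale_inf)
assert np.allclose(mu, mu_vec)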

for i in range(int(tfin / dt)):
    # move up age
    fx[i + 1, 1:] = fx[i, 0:-1]

    # add infections
    # adjusted mu
    if (tot[-1] >= k):
        prod = 1
        for j in range(len(inf_times)):
            prod = pext[i - inf_times[j]] * prod
Esempio n. 58
0
def gamma_curve(scale, prior_shape):
    # named gamma_curve so it does not shadow scipy.stats.gamma: with the
    # original name `gamma`, the ppf/pdf calls below would have failed
    x = np.linspace(gamma.ppf(0.001, prior_shape, loc=0, scale=scale),
                    gamma.ppf(0.999, prior_shape, loc=0, scale=scale))
    y = gamma.pdf(x, prior_shape, loc=0, scale=scale)
    return x, y
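
# Illustrative use of the helper above (parameter values are arbitrary);
# assumes matplotlib.pyplot is imported as plt, as elsewhere in these examples.
x, y = gamma_curve(scale=2.0, prior_shape=3.0)
plt.plot(x, y)
plt.show()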
Esempio n. 59
0
        P = [1 - i for i in Q]
        lambdat = [F[i] / P[i] for i in range(len(F))]
        print("normal")

    if min(chi_exp, chi_normal, chi_uniform, chi_gamma) == chi_uniform:
        # uniform.pdf/cdf take loc and scale (the width), so scale = max - min
        F = uniform.pdf(range(int(min(array)), int(max(array))), min(array),
                        max(array) - min(array))
        Q = uniform.cdf(range(int(min(array)), int(max(array))), min(array),
                        max(array) - min(array))
        P = [1 - i for i in Q]
        lambdat = [F[i] / P[i] for i in range(len(F))]
        print("uni")

    if min(chi_exp, chi_normal, chi_uniform, chi_gamma) == chi_gamma:
        F = gamma.pdf(range(int(min(array)), int(max(array))),
                      a=alpha,
                      scale=1 / beta)
        Q = gamma.cdf(range(int(min(array)), int(max(array))),
                      a=alpha,
                      scale=1 / beta)
        P = [1 - i for i in Q]
        lambdat = [F[i] / P[i] for i in range(len(F))]
        print("gamma")

    h = (max(array) - min(array)) / n_cells
    borders = [min(array) + h * i for i in range(n_cells)]
    qt = [0 for i in range(n_cells)]
    for i in range(len(array)):
        for j in range(len(borders)):
            if array[i] <= borders[j] + h:
                qt[j] += 1
Esempio n. 60
0
y[np.logical_or(np.kron(d_star, np.ones(T,)) == 0,y < 0)] = 0.
y = y.reshape(N,T)
X = X.reshape(N,T,2)

i=0

H=10000
resample_type = 'multinomial'
K1 = 2
K2 = 2
K3 = 1
K4 = 1

### initialization: draw particles from the priors
theta = np.concatenate([
    np.random.normal(0, 5, [H, K1]),
    np.random.normal(0, 5, [H, K2]),
    np.random.gamma(1, 1, [H, K3]),
    np.random.gamma(1, 1, [H, K4]),
], axis=1)
w = np.concatenate([
    norm.pdf(theta[:, :K1], 0, 5),
    norm.pdf(theta[:, K1:K1 + K2], 0, 5),
    gamma.pdf(theta[:, K1 + K2 + K3 - 1], 1).reshape(-1, 1),
    gamma.pdf(theta[:, K1 + K2 + K3 + K4 - 1], 1).reshape(-1, 1),
], axis=1)
w = w / w.sum(axis=0)  # initial weights, normalised across particles

### run sequential monte carlo
all_mu_theta = []
all_std_theta = []

for i in range(N):
    print(i)
    ### reweight (parallel computing)
    w = reweight(theta, w, y, i, X, Z, K1, K2, K3, K4)
    ### resampling via np.random.choice
    all_theta_ = []
    all_w_ = []
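
    # The snippet cuts off before the resampling step it configures
    # (resample_type = 'multinomial'); a hedged sketch of multinomial
    # resampling for one parameter column, where wi is a hypothetical vector
    # of that column's normalised weights:
    # idx = np.random.choice(H, size=H, replace=True, p=wi)
    # theta_resampled = theta[idx, :]
    # w_reset = np.full(H, 1.0 / H)  # weights reset to uniform after resampling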