Example #1
import math as m

def p_y(n, y, M):
    t_1 = m.exp(-0.5*n*y**2)
    t_2 = m.erf((0.5*n)**0.5 * (y - 0.5*M))
    t_3 = m.erf((0.5*n)**0.5 * (y + 0.5*M))
    m_f1 = (n/(8*m.pi))**2
    m_f2 = (n/(2*m.pi))**2/M
    return m_f1*(t_1 - m_f2*(t_2 - t_3))
Example #2
 def output_branches(self):
     outfile = open(self.fout + ".line.txt", "w")
     for node in self.tree.traverse(strategy="postorder"):
         if not node.is_leaf():
             cn = self.name_coords[node.name]
             childs = node.get_children()
             cl = self.name_coords[childs[0].name]
             cr = self.name_coords[childs[1].name]
             l_mds_dis = self.innernode_dis_mds_matrix[node.name + ":" + childs[0].name]
             l_tree_dis = self.innernode_dis_tree_matrix[node.name + ":" + childs[0].name] 
             r_mds_dis = self.innernode_dis_mds_matrix[node.name + ":" + childs[1].name]
             r_tree_dis = self.innernode_dis_tree_matrix[node.name + ":" + childs[1].name] 
             l_ratio = (l_tree_dis - l_mds_dis)/l_mds_dis
             r_ratio = (r_tree_dis - r_mds_dis)/r_mds_dis
             lstroke = math.erf(l_ratio)
             rstroke = math.erf(r_ratio)
             if lstroke < 0:
                 lstroke = 2.0
             else:
                 lstroke = 4.0*lstroke + 2.0
             
             if rstroke < 0:
                 rstroke = 2.0
             else:
                 rstroke = 4.0*rstroke + 2.0 
             
             outfile.write(repr(cn[0])+","+repr(cn[1])+","+repr(cl[0])+","+repr(cl[1])+","+repr(lstroke)+"\n")
             outfile.write(repr(cn[0])+","+repr(cn[1])+","+repr(cr[0])+","+repr(cr[1])+","+repr(rstroke)+"\n")
     outfile.close()
Example #3
def dynamical_friction_sis(x, y, z, vx, vy, vz, M_sat):
    x = x * units.kpc
    y = y * units.kpc
    z = z * units.kpc
    r = np.sqrt(x**2 + y**2 + z**2)
    vx = vx * units.km / units.s
    vy = vy * units.km / units.s
    vz = vz * units.km / units.s
    v = np.sqrt(vx**2 + vy**2 + vz**2)
    a = 10  # concentration parameter
    # Density at distance r and velocity v
    rho = dens_sis(a, r.value, v.value)
    v = v.to(units.kpc / units.s)
    M_sat = M_sat * units.Msun
    factor = - 4 * np.pi * G**2  
    Coulomb = coulomb_log(r)
    sigma = v / np.sqrt(2)
    X = v / ( np.sqrt(2) * sigma ) #Check this
    # Chandrasekhar's formula: the bracket term is erf(X) - 2X/sqrt(pi) * exp(-X**2)
    bracket = erf(X) - 2*X/np.sqrt(np.pi) * np.exp(-X**2)
    F_dfx = factor * M_sat * rho * Coulomb / v**3 * bracket * vx
    F_dfy = factor * M_sat * rho * Coulomb / v**3 * bracket * vy
    F_dfz = factor * M_sat * rho * Coulomb / v**3 * bracket * vz
    F_dfx = F_dfx.to(units.kpc / units.Gyr**2)
    F_dfy = F_dfy.to(units.kpc / units.Gyr**2)
    F_dfz = F_dfz.to(units.kpc / units.Gyr**2)
    return F_dfx.value, F_dfy.value, F_dfz.value
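Note that with sigma defined as v/sqrt(2), X is identically 1, which is presumably what the "#Check this" comment flags. A minimal standalone check of the bracket term's value there (the function name is mine):

import math

def chandrasekhar_bracket(X):
    # erf(X) - 2X/sqrt(pi) * exp(-X**2), the isotropic-Maxwellian factor
    return math.erf(X) - 2.0*X/math.sqrt(math.pi) * math.exp(-X**2)

print(chandrasekhar_bracket(1.0))  # ~0.4276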
Example #4
def response( t, sig, maxamp, maxt, fastconst, slowconst ):
    """
    t=0 is assumed to be max of distribution
    """
    # simplistic
    #farg = t/sig
    #f = 0.5*maxamp*np.exp( -0.5*farg*farg )#/sig/np.sqrt(2*3.14159)
    #if t<0:
    #    s = 0.0
    #else:
    #    s = 0.5*maxamp*np.exp( -t/config.slowconst )

    # slow component shape: expo convolved with gaus
    t_smax = 95.0 # peak of only slow component. numerically solved for det. smearing=3.5*15.625 ns, decay time const= 1500 ns
    t_fmax = 105.0 # numerically solved for det. smearing=3.5*15.625 ns, decay time const= 6 ns
    #dt_smax = -10.0 # expect slow comp peak to be 10 ns earlier than fast component peak
    smax = np.exp( sig*sig/(2*slowconst*slowconst) - t_fmax/slowconst )*(1 - math.erf( (sig*sig - slowconst*t_fmax )/(np.sqrt(2)*sig*slowconst ) ) )
    # normalize max at fast component peak
    As = 0.3*maxamp/smax
    s = As*np.exp( sig*sig/(2*slowconst*slowconst) - t/slowconst )*(1 - math.erf( (sig*sig - slowconst*t )/(np.sqrt(2)*sig*slowconst ) ) )
    #s = np.exp( sig*sig/(2*slowconst*slowconst))*(1-math.erf( (sig*sig)/(np.sqrt(2)*sig*slowconst ) ) )
    #s = maxamp*np.exp( sig*sig/(2*slowconst*slowconst) - t/slowconst )*(1 - math.erf( (sig*sig - slowconst*t )/(np.sqrt(2)*sig*slowconst ) ) )

    # fast component: since time const is smaller than spe response, we model as simple gaussian
    #
    farg = t/sig
    fmax = np.exp( -0.5*farg*farg )
    Af = 0.8*maxamp
    f = Af*fmax

    #return fastfraction*f + slowfraction*s
    #print t, f, s
    return f+s
Example #5
  def __call__(self, val):
    import numpy
    from math import sqrt, log, pi, erf
    sigma_m = val
    #sigma_m = abs(val[0])

    dphi2 = self.dphi / 2
    den = sqrt(2) * sigma_m / abs(self.zeta)  # / 0.670

    a = (self.tau + dphi2) / den
    b = (self.tau - dphi2) / den

    a = numpy.array([erf(a[i]) for i in range(len(a))])
    b = numpy.array([erf(b[i]) for i in range(len(b))])

    r = (a - b) / 2.0  # (2 * self.dphi)

#        ind = numpy.where(r > 0)[0]
#        ret = r
#        ret[ind] = numpy.log(r[ind])
    ind = numpy.where(r <= 0)
    if len(ind[0]) > 0:
      r[ind] = 1e-7
    #ret[ind] = 0
    #print ind, r[ind], self.zeta[ind], self.tau[ind]
    return numpy.log(r)
Example #6
def black_calc():
    stage4 = fmv * math.exp(-div * exp_term)
    stage5 = (1.0 + math.erf(d1 / math.sqrt(2.0))) / 2.0
    stage6 = exp_price * math.exp(-rate * exp_term)
    stage7 = (1.0 + math.erf(d2 / math.sqrt(2.0))) / 2.0
    c = (stage4*stage5)-(stage6*stage7)
    return c
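black_calc reads fmv, d1, d2, exp_price, rate, div and exp_term from enclosing scope. A self-contained sketch of the same call-price formula with d1 and d2 spelled out (the vol parameter is my addition; the other names mirror the snippet):

import math

def norm_cdf(x):
    return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0

def black_scholes_call(fmv, exp_price, exp_term, rate, div, vol):
    # d1/d2 as conventionally defined; the snippet assumes they are precomputed globals
    d1 = (math.log(fmv / exp_price) + (rate - div + 0.5 * vol**2) * exp_term) / (vol * math.sqrt(exp_term))
    d2 = d1 - vol * math.sqrt(exp_term)
    return fmv * math.exp(-div * exp_term) * norm_cdf(d1) - exp_price * math.exp(-rate * exp_term) * norm_cdf(d2)

print(black_scholes_call(fmv=100.0, exp_price=100.0, exp_term=1.0, rate=0.05, div=0.0, vol=0.2))  # ~10.45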
Example #7
def calc_charge_loss_fraction_in_line(accelerator, **kwargs):
    """Calculate charge loss in a line

    Keyword arguments:
    twiss_at_entrance -- Twiss parameters at the start of first element
    global_coupling   -- Global coupling
    energy_spread     -- Relative energy spread
    emittance         -- [m·rad]
    delta_rx          -- [m]
    delta_angle       -- [rad]
    hmax              -- [m]
    hmin              -- [m]
    vmax              -- [m]
    vmin              -- [m]
    """
    init_twiss, energy_spread, emittance, hmax, hmin, vmax, vmin = _process_loss_fraction_args(accelerator, **kwargs)
    coupling = kwargs['global_coupling']

    try:
        twiss, m66 = pyaccel.optics.calc_twiss(accelerator, init_twiss = init_twiss, indices ='open')
        betax, etax, betay, etay = twiss.betax, twiss.etax, twiss.betay, twiss.etay
        if math.isnan(betax[-1]):
            loss_fraction = 1.0
            return (loss_fraction, None, None)
    except (numpy.linalg.linalg.LinAlgError, pyaccel.optics.OpticsException, pyaccel.tracking.TrackingException):
        loss_fraction = 1.0
        return (loss_fraction, None, None)

    emitx = emittance * 1 / (1 + coupling)
    emity = emittance * coupling / (1 + coupling)
    sigmax = numpy.sqrt(betax * emitx + (etax * energy_spread)**2)
    sigmay = numpy.sqrt(betay * emity + (etay * energy_spread)**2)  # etay, not etax
    h_vc = hmax - hmin
    v_vc = vmax - vmin
    co = twiss.co
    rx, ry = co[0,:], co[2,:]
    xlim_inf, xlim_sup = rx - hmin, hmax - rx
    ylim_inf, ylim_sup = ry - vmin, vmax - ry
    xlim_inf[xlim_inf < 0] = 0
    xlim_sup[xlim_sup < 0] = 0
    ylim_inf[ylim_inf < 0] = 0
    ylim_sup[ylim_sup < 0] = 0
    xlim_inf[xlim_inf > h_vc] = 0
    xlim_sup[xlim_sup > h_vc] = 0
    ylim_inf[ylim_inf > v_vc] = 0
    ylim_sup[ylim_sup > v_vc] = 0
    min_xfrac_inf = numpy.amin(xlim_inf/sigmax)
    min_xfrac_sup = numpy.amin(xlim_sup/sigmax)
    min_yfrac_inf = numpy.amin(ylim_inf/sigmay)
    min_yfrac_sup = numpy.amin(ylim_sup/sigmay)
    sqrt2 = math.sqrt(2)

    x_surviving_fraction = 0.5*math.erf(min_xfrac_inf/sqrt2) + \
                           0.5*math.erf(min_xfrac_sup/sqrt2)
    y_surviving_fraction = 0.5*math.erf(min_yfrac_inf/sqrt2) + \
                           0.5*math.erf(min_yfrac_sup/sqrt2)
    surviving_fraction = x_surviving_fraction * y_surviving_fraction
    loss_fraction = 1.0 - surviving_fraction
    return loss_fraction, twiss, m66
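The surviving fraction uses the identity 0.5*erf(a/sqrt(2)) + 0.5*erf(b/sqrt(2)) = P(-a < X < b) for a standard normal X. A quick standalone illustration:

import math

def window_fraction(a, b):
    # probability mass of a standard normal between -a and +b sigma
    return 0.5*math.erf(a/math.sqrt(2)) + 0.5*math.erf(b/math.sqrt(2))

print(window_fraction(3.0, 3.0))  # ~0.9973: a symmetric 3-sigma aperture loses ~0.27%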
Example #8
from math import sqrt, pi, exp, log, log2, erf

def alpha2(a, N, Nmax, Nmin=1):
    y = sqrt(pi*Nmin*Nmax)/(2.0*a) * exp((a * log2(sqrt(Nmax/Nmin)))**2.0)
    y = y * exp((log(2.0)/(2.0*a))**2.0)
    y = y * erf(a * log2(sqrt(Nmax/Nmin)) - log(2.0)/(2.0*a))
    y += erf(a * log2(sqrt(Nmax/Nmin)) + log(2.0)/(2.0*a))
    y -= N

    return y # find alpha
Example #9
 def marg_rating(self):
     if self._marg_rating is None:
         s = self.sigmac2
         sp4 = s+4
         self._marg_rating = 0.5*log(2*pi*s/sp4)*exp(-9/(8*sp4))
         self._marg_rating += log(erf(0.75*sqrt(0.5*s/sp4))
                                  - erf(-0.25*(17*s+80)/sqrt(2*s*sp4)))
     return self._marg_rating
Example #10
def test_genz_gaussian_exact():
    u = np.array([1, 21, 2], dtype=float)
    a = np.array([1/10, 1/100, 1/500], dtype=float)
    val = ti.genz_gaussian_exact(u, a)
    exact = 62500*pow(np.pi, 3/2)*math.erf(1/10)*(math.erf(1/250) -
            math.erf(1/500))*(math.erf(21/100) - math.erf(1/5))

    assert np.allclose([val], [exact])
Example #11
 def transitionProbability(self, T_total, rho0, rhoBar, sigmaRhoSquared, tauEff):
     
     # argument for the Error Function
     x1 =  -(self.rhoStar - rhoBar + (rhoBar - rho0)*np.exp(-T_total/tauEff))/(np.sqrt(sigmaRhoSquared*(1.-np.exp(-2.*T_total/tauEff))))
     # transition probability
     if rho0 == 0.:
         return (1. + math.erf(x1))/2.
     else:
         return (1. - math.erf(x1))/2.
Example #12
    def alpha(self, a, Nmax, Nmin=1):

        """Numerically solve for Preston's a. Needed to estimate S using the lognormal"""

        y = sqrt(pi*Nmin*Nmax)/(2.0*a) * exp((a * log2(sqrt(Nmax/Nmin)))**2.0)
        y = y * exp((log(2.0)/(2.0*a))**2.0)
        y = y * erf(a * log2(sqrt(Nmax/Nmin)) - log(2.0)/(2.0*a))
        y += erf(a * log2(sqrt(Nmax/Nmin)) + log(2.0)/(2.0*a))
        y -= self.N

        return y # find alpha
Example #13
 def _cdf(self, x):
     """
     Calculate cumulative distribution function in a certain point
     """
     if isinstance(x, float):
         return 1.0 / 2.0 * (1 + math.erf(x / np.sqrt(2)))
     else:
         return (
             1.0 / 2.0 *
             (1 + np.array([math.erf(n / np.sqrt(2)) for n in x]))
         )
Example #14
def test_erf():
    table = [
        (0.0,  0.0000000),
        (0.05, 0.0563720),
        (0.1,  0.1124629),
        (0.15, 0.1679960),
        (0.2,  0.2227026),
        (0.25, 0.2763264),
        (0.3,  0.3286268),
        (0.35, 0.3793821),
        (0.4,  0.4283924),
        (0.45, 0.4754817),
        (0.5,  0.5204999),
        (0.55, 0.5633234),
        (0.6,  0.6038561),
        (0.65, 0.6420293),
        (0.7,  0.6778012),
        (0.75, 0.7111556),
        (0.8,  0.7421010),
        (0.85, 0.7706681),
        (0.9,  0.7969082),
        (0.95, 0.8208908),
        (1.0,  0.8427008),
        (1.1,  0.8802051),
        (1.2,  0.9103140),
        (1.3,  0.9340079),
        (1.4,  0.9522851),
        (1.5,  0.9661051),
        (1.6,  0.9763484),
        (1.7,  0.9837905),
        (1.8,  0.9890905),
        (1.9,  0.9927904),
        (2.0,  0.9953223),
        (2.1,  0.9970205),
        (2.2,  0.9981372),
        (2.3,  0.9988568),
        (2.4,  0.9993115),
        (2.5,  0.9995930),
        (2.6,  0.9997640),
        (2.7,  0.9998657),
        (2.8,  0.9999250),
        (2.9,  0.9999589),
        (3.0,  0.9999779),
        (3.1,  0.9999884),
        (3.2,  0.9999940),
        (3.3,  0.9999969),
        (3.4,  0.9999985),
        (3.5,  0.9999993),
        (4.0,  1.0000000),
    ]
    
    for x, y in table:
        AlmostEqual(y, math.erf(x), tolerance=7)
        AlmostEqual(-y, math.erf(-x), tolerance=7)
Example #15
from functools import reduce
from operator import mul
import math

def erfContribution(dx, sigma, N):
    """
    Finds the distance to the closest periodic replica along each axis and
    integrates a Gaussian of stdev sigma over it.
    Quantities in dx are in reduced units and must lie in [-N, N], where N is
    the number of reduced units in the cubic system.
    Uses the same distance computation as GaussianContribution.
    """
    half = float(N)/2.0
    dist_vec = [abs(abs(abs(x) - half) - half) for x in dx]
    half_box = .5/sigma
    my_erf = lambda x: -math.erf(-half_box - 2*half_box*x) + math.erf(half_box - 2*half_box*x)
    contribution = reduce(mul, [my_erf(x) for x in dist_vec]) / 8
    return contribution
Example #16
 def _phit(self, z, zstar):
     """
     Model component of hitting target
     :param z: Reading, in cm
     :param zstar: Expected reading, from ray casting
     :return p: Probability of 'hit'
     """
     if z < self._max_z:
         N = 1.0/math.sqrt(2*math.pi*self._sigmahit**2)*math.e**(-0.5*(z-zstar)**2/self._sigmahit**2)
         # eta is the Gaussian's integral over [0, max_z]; normalize by dividing
         eta = 0.5*(math.erf((self._max_z-zstar)/(self._sigmahit*math.sqrt(2))) + math.erf(zstar/(math.sqrt(2)*self._sigmahit)))
         return N/eta
     else:
         return 0
Example #17
def potentialNucEnergy(potential_onNuc_basis, basis_aParam2, intNucDist, nucCharge, swap):
	#Formula from Modern Quantum Chemistry by Szabo, eqn A.33, pg. 415
	#Always assume potential_onNuc_basis is zeroed on the nucleus whose charge is specified
	
	#TODO:Check answers with John
	
	a = potential_onNuc_basis
	b = a.T
	coeff = ((-2 * math.pi)/(a + b)) * nucCharge
	exponent = 0
	f0_function_input = 0
	f0_Function = 1
	potential_onNuc_basis_matrix = coeff
	
	a = basis_aParam2
	b = a.T
	coeff = ((-2 * math.pi)/(a + b)) * nucCharge
	exponent = 0
	f0_function_input = (a + b) * (intNucDist ** 2)
	f0_erf = np.power(f0_function_input, .5)
	for x in np.nditer(f0_erf, op_flags=['readwrite']):
		x[...] = math.erf(x)
	f0_Function = 0.5 * np.multiply(np.power((math.pi/f0_function_input), .5), f0_erf)
	basis_aParam2_matrix = np.multiply(coeff, f0_Function)
	
	a = potential_onNuc_basis
	b = basis_aParam2.T
	coeff = ((-2 * math.pi)/(a + b)) * nucCharge
	exponent = (-1 * (np.multiply(a,b)/(a + b)) * (intNucDist ** 2))
	f0_function_input = ((a * 0) + np.power((b * intNucDist), 2.))/(a + b)
	f0_erf = np.power(f0_function_input, .5)
	for x in np.nditer(f0_erf, op_flags=['readwrite']):
		x[...] = math.erf(x)
	f0_Function = 0.5 * np.multiply(np.power((math.pi/f0_function_input), .5), f0_erf)
	
	basis_set1toset2_matrix = np.multiply(np.multiply(coeff, f0_Function), np.exp(exponent))
	
	basis_set2toset1_matrix = basis_set1toset2_matrix.T
	
	
	if (swap):
		matrix1 = np.concatenate((basis_aParam2_matrix, basis_set1toset2_matrix))
		matrix2 = np.concatenate((basis_set2toset1_matrix, potential_onNuc_basis_matrix))
		matrix = np.concatenate((matrix1, matrix2), axis=1)
		
	else:
		matrix1 = np.concatenate((potential_onNuc_basis_matrix, basis_set2toset1_matrix))
		matrix2 = np.concatenate((basis_set1toset2_matrix, basis_aParam2_matrix))
		matrix = np.concatenate((matrix1, matrix2), axis=1)
		
	return matrix
Example #18
def pvalue(x):
    '''Cumulative distribution function for the standard normal distribution
    python
    import math
    x=2.33
    x=1.96
    1-(1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
    x=-2.33
    (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
    '''
    if x < 0:
        return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
    else:
        return 1-(1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
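A usage sketch for the one-sided p-value above, assuming import math as in the snippet:

print(pvalue(1.96))   # ~0.025
print(pvalue(-1.96))  # ~0.025 (symmetric by construction)
print(pvalue(2.33))   # ~0.0099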
Example #19
  def run(self):
    from dials.algorithms.profile_model.gaussian_rs import \
      PartialityCalculator3D
    from dials.array_family import flex

    calculator = PartialityCalculator3D(
      self.experiment.beam,
      self.experiment.goniometer,
      self.experiment.scan,
      self.sigma_m)

    predicted = flex.reflection_table.from_predictions_multi(self.experiments)
    predicted['bbox'] = predicted.compute_bbox(self.experiments)

    # Remove any touching edges of scan to get only fully recorded
    x0, x1, y0, y1, z0, z1 = predicted['bbox'].parts()
    predicted = predicted.select((z0 > 0) & (z1 < 100))
    assert(len(predicted) > 0)

    # Compute partiality
    partiality = calculator(
      predicted['s1'],
      predicted['xyzcal.px'].parts()[2],
      predicted['bbox'])

    # Should have all fully recorded
    assert(len(partiality) == len(predicted))
    from math import sqrt, erf
    three_sigma = 0.5 * (erf(3.0 / sqrt(2.0)) - erf(-3.0 / sqrt(2.0)))
    assert(partiality.all_gt(three_sigma))

    # Trim bounding boxes
    x0, x1, y0, y1, z0, z1 = predicted['bbox'].parts()
    z0 = z0 + 1
    z1 = z1 - 1
    predicted['bbox'] = flex.int6(x0, x1, y0, y1, z0, z1)
    predicted = predicted.select(z1 > z0)
    assert(len(predicted) > 0)

    # Compute partiality
    partiality = calculator(
      predicted['s1'],
      predicted['xyzcal.px'].parts()[2],
      predicted['bbox'])

    # Should have all partials
    assert(len(partiality) == len(predicted))
    assert(partiality.all_le(1.0) and partiality.all_gt(0))

    print('OK')
Example #20
def genDocument(cur):

	mu = random.randrange(0, g_wordsNum)
	sigma = random.randrange(1, 10*precision)/precision
	docSize = random.randrange(10, 100)
	cur.execute("INSERT INTO Documents (docName, docSize, mu, sigma)\
		VALUES(\"\",\"" + str(docSize)+ "\",\"" + str(mu)+ "\", \"" + str(sigma) + "\" )")
	docID = cur.lastrowid
	yFrom = (1 + math.erf((0 - mu)/ (sigma * math.sqrt(2)))) / 2
	yTo   = (1 + math.erf((g_wordsNum - mu)/ (sigma * math.sqrt(2)))) / 2

	for i in range(1, docSize):
		addInvertedID(docID, randomWord(mu, sigma, yFrom, yTo), cur)
	
	return docID
Example #21
    def prob_mean_larger(self, other):
        """
        Probability that actual mean of this dist is larger than of another.
        """
        if self.n == 0 or other.n == 0:
            return 0.5
        diff_mean = self.mean() - other.mean()

        sigma1 = self.sigma()
        sigma2 = other.sigma()
        # If we have no data about variance of one of the distributions,
        # we take it from another distribution.
        if other.n == 1:
            sigma2 = sigma1
        if self.n == 1:
            sigma1 = sigma2

        diff_sigma = sqrt(sigma1**2 + sigma2**2)

        if diff_sigma == 0:
            if diff_mean > 0:
                return 1
            elif diff_mean < 0:
                return 0
            else:
                return 0.5
        p = 0.5 * (1 + erf(diff_mean / (sqrt(2) * diff_sigma)))
        return p
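A class-free reduction of the same calculation (my own sketch, dropping the n-based sigma fallbacks): the difference of two independent Gaussian estimates is Gaussian with variance sigma1^2 + sigma2^2, so the probability comes from a single erf call.

from math import erf, sqrt

def prob_mean_larger(mean1, sigma1, mean2, sigma2):
    diff_sigma = sqrt(sigma1**2 + sigma2**2)
    if diff_sigma == 0:
        return 0.5 if mean1 == mean2 else float(mean1 > mean2)
    return 0.5 * (1 + erf((mean1 - mean2) / (sqrt(2) * diff_sigma)))

print(prob_mean_larger(10.0, 1.0, 9.0, 1.0))  # ~0.76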
Example #22
    def calc_stats(self, count, target, total):
        num = max(len(target), 1)
        r = {}
        for t in ('pgs', 'objects', 'bytes'):
            if total[t] == 0:
                r[t] = {
                    'avg': 0,
                    'stddev': 0,
                    'sum_weight': 0,
                    'score': 0,
                }
                continue

            avg = float(total[t]) / float(num)
            dev = 0.0

            # score is a measure of how uneven the data distribution is.
            # score lies between [0, 1), 0 means perfect distribution.
            score = 0.0
            sum_weight = 0.0

            for k, v in six.iteritems(count[t]):
                # adjust/normalize by weight
                if target[k]:
                    adjusted = float(v) / target[k] / float(num)
                else:
                    adjusted = 0.0

                # Overweighted devices and their weights are factors to calculate reweight_urgency.
                # One 10% underfilled device with 5 2% overfilled devices, is arguably a better
                # situation than one 10% overfilled with 5 2% underfilled devices
                if adjusted > avg:
                    '''
                    F(x) = 2*phi(x) - 1, where phi(x) = cdf of standard normal distribution
                    x = (adjusted - avg)/avg.
                    Since we're considering only over-weighted devices, x >= 0, so phi(x) lies in [0.5, 1).
                    To bring the range of F(x) into [0, 1), we need the above modification.

                    In general, we need a function F(x), where x = (adjusted - avg)/avg:
                    1. Bounded between 0 and 1, so that reweight_urgency is ultimately bounded too.
                    2. A larger value of x should imply more urgency to reweight.
                    3. The difference between F(x) values for large x should be minimal.
                    4. F(x) should approach 1 (highest urgency to reweight) steeply.

                    We could have used F(x) = (1 - e^(-x)), but it converges to 1 more slowly than the function currently in use.

                    cdf of standard normal distribution: https://stackoverflow.com/a/29273201
                    '''
                    score += target[k] * (math.erf(((adjusted - avg)/avg) / math.sqrt(2.0)))
                    sum_weight += target[k]
                dev += (avg - adjusted) * (avg - adjusted)
            stddev = math.sqrt(dev / float(max(num - 1, 1)))
            score = score / max(sum_weight, 1)
            r[t] = {
                'avg': avg,
                'stddev': stddev,
                'sum_weight': sum_weight,
                'score': score,
            }
        return r
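A worked example of the scoring function from the docstring, using F(x) = 2*phi(x) - 1 = erf(x/sqrt(2)) for x >= 0 (names here are mine):

import math

def urgency(adjusted, avg):
    x = (adjusted - avg) / avg
    return math.erf(x / math.sqrt(2.0))

print(urgency(1.1, 1.0))  # 10% overfilled: ~0.08
print(urgency(2.0, 1.0))  # 100% overfilled: ~0.68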
Example #23
 def CDF(self, x):
     if (self.sigma == 0.0) and (x < self.mu):
         return 0.0
     elif (self.sigma == 0.0) and (x >= self.mu):
         return 1.0
     else:
         return 0.5 * (1.0 + math.erf((x - self.mu)/(self.sigma * math.sqrt(2.0))))
Example #24
from math import erf, exp, gamma, sqrt

def gammainc_halfint(s, x):
    """ Lower incomplete gamma function =
            integral from 0 to x of t ** (s-1) exp(-t) dt divided by gamma(s),
        i.e., the fraction of gamma that you get if you integrate only until
            x instead of all the way to infinity.
        Implemented here only if s is a positive multiple of 0.5.
    """
    # scipy equivalent: scipy.special.gammainc(s,x)

    if s <= 0:
        raise ValueError('%s is not positive' % s)
    if x < 0:
        raise ValueError('%s < 0' % x)
    if s * 2 != int(s * 2):
        raise NotImplementedError('%s is not a multiple of 0.5' % s)

    # Handle integers analytically
    if s == int(s):
        term = 1
        total = 1
        for k in range(1, int(s)):
            term *= x / k
            total += term
        return 1 - exp(-x) * total

    # Otherwise s is integer + 0.5. Decrease to 0.5 using recursion formula:
    result = 0.0
    while s > 1:
        result -= x ** (s - 1) * exp(-x) / gamma(s)
        s = s - 1
    # Then use gammainc(0.5, x) = erf(sqrt(x))
    result += erf(sqrt(x))
    return result
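Two identities the implementation relies on make quick sanity checks: gammainc(0.5, x) = erf(sqrt(x)) and gammainc(1, x) = 1 - exp(-x).

from math import erf, exp, sqrt

x = 2.0
print(gammainc_halfint(0.5, x), erf(sqrt(x)))  # both ~0.9545
print(gammainc_halfint(1.0, x), 1 - exp(-x))   # both ~0.8647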
Example #25
import sys
import math

def main():
    iIndex = 0
    for line in sys.stdin:
        Coh,z,grain_size,tm,a,x1,x2,length,width,thick,strike,dip,zs= map(float, line.split())
        n = 1.0
        A = 1e6
        #Coh = 2000 
        r = 1.0
        Ea = 335e3
        p = 3330 * 9.8 * z * 1000
        Va = 4e-6
        R = 8.3144
        G = 30e3
        pp = 3
        d = grain_size

        Tm = tm
        Rm = 3330              # mantle density        (kg/m^3)
        Cp = 1171              # specific heat         (J/kg/K) 
        k = 3.138

        kappa = k / (Rm * Cp)
        T = (Tm * math.erf ((z-zs) / math.sqrt ( 4 * kappa * a * 3.15e7)))+273

        gamma0=((G**n)*A*(Coh**r)*(d**(-pp))* math.exp (-(Ea + p*Va)/(R*T)))
        if (not math.isnan(gamma0)):
            iIndex+=1
            print("%d %.6f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f" % (iIndex,gamma0,x1,x2,z,length,width,thick,strike,dip))
Example #26
    def sort_key(self):
        """Returns 1.0 for a ratio of 1, falling to 0.0 for extremely small
        or large values."""

        score = math.log10(self.score)
        cdf = (1.0 + math.erf(score / math.sqrt(2.0))) / 2.0
        return 1 - 2*math.fabs(0.5 - cdf)
Example #27
import math

def finite_mixture_model(fined_predictions, function_type):
    '''
    function_type: 0 pdf, 1 cdf
    note:
        mu and sigma is not mean and variance.
    return:
        plotxy:[plotx,ploty]
    '''
    # Assume every prediction carries a mean and a variance.
    # The lambdas capture `mean` and `var` as free variables, so they track
    # whichever tuple is currently being iterated over.
    mu = lambda: math.log(mean) - math.log(1 + var/(mean**2))/2
    sigma2 = lambda: math.log(1 + var/(mean**2))

    if function_type == 0:
        log_normal = lambda x:math.exp( -(math.log(x) - mu())**2/(2*sigma2()))/(x*math.sqrt(sigma2())*math.sqrt(2*math.pi))
    else:
        log_normal = lambda x:(1+math.erf((math.log(x) - mu())/(math.sqrt(sigma2()*2))))/2
    fined_prediction_tuple=[(fp['prior'], fp['mean'], fp['var']) for fp in fined_predictions]
    plotxy = [[0.01*i for i in range(1,101)]]
    #for x in plotxy[0]:
    #    #[prior*log_normal(x) for (prior,mean,var) in fined_prediction_tuple]
    #    for (prior,mean,var) in fined_prediction_tuple:
    #        print prior,mean,var,mu(),sigma2()
    plotxy.append([sum([prior*log_normal(x) for (prior,mean,var) in fined_prediction_tuple]) for x in plotxy[0]])
    return plotxy
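The mu/sigma2 lambdas implement the standard moment-matching for a log-normal: given the linear-scale mean and variance, mu = ln(mean) - ln(1 + var/mean^2)/2 and sigma^2 = ln(1 + var/mean^2). A standalone sketch of the conversion and one CDF evaluation (the sample values are mine):

import math

mean, var = 1.0, 0.5
mu = math.log(mean) - math.log(1 + var/mean**2) / 2
sigma2 = math.log(1 + var/mean**2)
cdf_at_1 = (1 + math.erf((math.log(1.0) - mu) / math.sqrt(2*sigma2))) / 2
print(mu, sigma2, cdf_at_1)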
Example #28
import sys
import math

def main():
    iIndex = 0
    for line in sys.stdin:
        Coh,z,a,x1,x2,length,width,thick,strike,dip,zs= map(float, line.split())
        n = 3.5
        A = 90
        #Coh = 2000 
        r = 1.2
        Ea = 462e3
        p = 3300 * 9.8 * z * 1000
        Va = 11e-6
        R = 8.3144
        G = 30e3

        Tm = 1350          # in K ( K = c + 273)
        Rm = 3300              # mantle density        (kg/m^3)
        Cp = 1171              # specific heat         (J/kg/K) 
        k = 3.138
        corr=3.14e7/((1e3)**(n-1))

        kappa = k / (Rm * Cp)
        T = (Tm * math.erf ((z-zs) / math.sqrt ( 4 * kappa * a * 3.15e7)))+273

        gamma0=corr*((G**n)*A*(Coh**r)* math.exp (-(Ea + p*Va)/(R*T)))
        if (not math.isnan(gamma0)):
            iIndex+=1
            #print(iIndex,gamma0,x1,x2,z,length,width,thick,strike,dip)
            print(x1,z,a)
Example #29
import math

def diffusion_percentage(length, diffusion_coefficient, time):
    L = length
    D = diffusion_coefficient
    t = time
    a = math.sqrt(D*t)
    return 1/L * (-2*math.pi*a**2*L*math.erf(L/(2*a))
                  + 2*math.pi*a**2*L
                  - 4*math.sqrt(math.pi)*a**3*math.exp(-L**2/(4*a**2))
                  + 4*math.sqrt(math.pi)*a**3)
Example #30
def test_save_output():
    filename = "function_values"
    x = np.arange(0, 100, 1)
    y = np.exp(x)
    y1 = y*math.erf(0.5)

    output.save_output(x, y, filename, q_or_2theta="Q", err=None,
                       dir_path=None)
    output.save_output(x, y, filename, q_or_2theta="2theta", ext=".dat",
                       err=None, dir_path=None)
    output.save_output(x, y, filename, q_or_2theta="2theta", ext=".xye",
                       err=y1, dir_path=None)

    Data_chi = np.loadtxt("function_values.chi", skiprows=7)
    Data_dat = np.loadtxt("function_values.dat", skiprows=7)
    Data_xye = np.loadtxt("function_values.xye", skiprows=7)

    assert_array_almost_equal(x, Data_chi[:, 0])
    assert_array_almost_equal(y, Data_chi[:, 1])

    assert_array_almost_equal(x, Data_dat[:, 0])
    assert_array_almost_equal(y, Data_dat[:, 1])

    assert_array_almost_equal(x, Data_xye[:, 0])
    assert_array_almost_equal(y, Data_xye[:, 1])
    assert_array_almost_equal(y1, Data_xye[:, 2])

    os.remove("function_values.chi")
    os.remove("function_values.dat")
    os.remove("function_values.xye")
Example #31
import math

def normal_cdf(x, mu=0, sigma=1):  # cumulative distribution function
    return (1 + math.erf((x - mu) / math.sqrt(2) / sigma)) / 2
Example #32
import math

def cdfnorm(x, mu, sigma):
    # sqrt(2), not 2**1/2 (which evaluates to 1.0 since ** binds tighter than /)
    return 0.5 * (1 + math.erf((x - mu) / (sigma * math.sqrt(2))))
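A couple of sanity checks for the normal CDF:

print(cdfnorm(0.0, 0.0, 1.0))   # 0.5
print(cdfnorm(1.96, 0.0, 1.0))  # ~0.975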
Example #33
 def g(self, x):
     #return np.tanh(x)
     return math.erf(x)
Example #34
 def norm_score(self):
     """Return 1.0 for a ratio of 1, falling to 0.0 for extremely small
     or large values."""
     score = math.log10(self.score)
     cdf = (1.0 + math.erf(score / math.sqrt(2.0))) / 2.0
     return 1 - 2 * math.fabs(0.5 - cdf)
Example #35
def normal_cdf(x, mu=0, sigma=1):
    return (1 + math.erf((x - mu) / math.sqrt(2) / sigma)) / 2
Example #36
import math

if __name__ == '__main__':
    mean, std = 70, 10
    cdf = lambda x: 0.5 * (1 + math.erf((x - mean) / (std * (2**0.5))))

    print(round((1 - cdf(80)) * 100, 2))
    print(round((1 - cdf(60)) * 100, 2))
    print(round((cdf(60)) * 100, 2))
Example #37
from math import erf

def comp_source_time_function(t, hdur):
    # quasi Heaviside, small Gaussian moment-rate tensor with hdur
    # (according to SPECFEM definition)
    val = 0.5 * (1.0 + erf(t / hdur))
    return val
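Two spot checks of the quasi-Heaviside shape, assuming the function above is in scope:

print(comp_source_time_function(0.0, 10.0))   # 0.5 exactly at t = 0
print(comp_source_time_function(50.0, 10.0))  # ~1.0 well after hdur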
Example #38
    def __init__(
        self,
        galaxy_pdf_list: List[Galaxy],
        grid: aa.Grid2D,
        mat_plot_1d: MatPlot1D = MatPlot1D(),
        visuals_1d: Visuals1D = Visuals1D(),
        include_1d: Include1D = Include1D(),
        mat_plot_2d: MatPlot2D = MatPlot2D(),
        visuals_2d: Visuals2D = Visuals2D(),
        include_2d: Include2D = Include2D(),
        sigma: Optional[float] = 3.0,
    ):
        """
        Plots the attributes of a list of `GalaxyProfile` objects using the matplotlib methods `plot()` and `imshow()`
        and many other matplotlib functions which customize the plot's appearance.

        Figures plotted by this object average over a list of galaxy profiles to compute the average value of each
        attribute with errors, where the 1D regions within the errors are plotted as a shaded region to show the range
        of plausible models. Therefore, the input list of galaxies is expected to represent the probability density
        function of an inferred model-fit.

        The `mat_plot_1d` and `mat_plot_2d` attributes wrap matplotlib function calls to make the figure. By default,
        the settings passed to every matplotlib function called are those specified in
        the `config/visualize/mat_wrap/*.ini` files, but a user can manually input values into `MatPlot2D` to
        customize the figure's appearance.

        Overlaid on the figure are visuals, contained in the `Visuals1D` and `Visuals2D` objects. Attributes may be
        extracted from the `GalaxyProfile` and plotted via the visuals object, if the corresponding entry is `True` in
        the `Include1D` or `Include2D` object or the `config/visualize/include.ini` file.

        Parameters
        ----------
        galaxy_pdf_list
            The list of galaxy profiles whose mean and error values the plotter plots.
        grid
            The 2D (y,x) grid of coordinates used to evaluate the galaxy profile quantities that are plotted.
        mat_plot_1d
            Contains objects which wrap the matplotlib function calls that make 1D plots.
        visuals_1d
            Contains 1D visuals that can be overlaid on 1D plots.
        include_1d
            Specifies which attributes of the `GalaxyProfile` are extracted and plotted as visuals for 1D plots.
        mat_plot_2d
            Contains objects which wrap the matplotlib function calls that make 2D plots.
        visuals_2d
            Contains 2D visuals that can be overlaid on 2D plots.
        include_2d
            Specifies which attributes of the `GalaxyProfile` are extracted and plotted as visuals for 2D plots.
        sigma
            The confidence interval in terms of a sigma value at which the errors are computed (e.g. a value of
            sigma=3.0 uses confidence intervals at ~0.00135 and ~0.99865 of the PDF).
        """
        super().__init__(
            galaxy=None,
            grid=grid,
            mat_plot_2d=mat_plot_2d,
            include_2d=include_2d,
            visuals_2d=visuals_2d,
            mat_plot_1d=mat_plot_1d,
            include_1d=include_1d,
            visuals_1d=visuals_1d,
        )

        self.galaxy_pdf_list = galaxy_pdf_list
        self.sigma = sigma
        self.low_limit = (1 - math.erf(sigma / math.sqrt(2))) / 2
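The final line converts the sigma level into the lower percentile of the PDF. A short standalone illustration:

import math

for sigma in (1.0, 2.0, 3.0):
    low_limit = (1 - math.erf(sigma / math.sqrt(2))) / 2
    print(sigma, low_limit)  # ~0.159, ~0.0228, ~0.00135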
Example #39
def calculate_pair(snp1, snp2, pop, web, request=None):

    # trim any whitespace
    snp1 = snp1.lower().strip()
    snp2 = snp2.lower().strip() 

    # Set data directories using config.yml
    with open('config.yml', 'r') as f:
        config = yaml.safe_load(f)
    env = config['env']
    api_mongo_addr = config['api']['api_mongo_addr']
    dbsnp_version = config['data']['dbsnp_version']
    pop_dir = config['data']['pop_dir']
    vcf_dir = config['data']['vcf_dir']
    mongo_username = config['database']['mongo_user_readonly']
    mongo_password = config['database']['mongo_password']
    mongo_port = config['database']['mongo_port']

    tmp_dir = "./tmp/"

    # Ensure tmp directory exists
    if not os.path.exists(tmp_dir):
        os.makedirs(tmp_dir)

    # Create JSON output
    output = {}

    # Connect to Mongo snp database
    if env == 'local':
        mongo_host = api_mongo_addr
    else: 
        mongo_host = 'localhost'
    if web:
        client = MongoClient('mongodb://' + mongo_username + ':' + mongo_password + '@' + mongo_host+'/admin', mongo_port)
    else:
        if env == 'local':
            client = MongoClient('mongodb://' + mongo_username + ':' + mongo_password + '@' + mongo_host+'/admin', mongo_port)
        else:
            client = MongoClient('localhost', mongo_port)
    db = client["LDLink"]

    def get_coords(db, rsid):
        rsid = rsid.strip("rs")
        query_results = db.dbsnp151.find_one({"id": rsid})
        query_results_sanitized = json.loads(json_util.dumps(query_results))
        return query_results_sanitized

    # Query genomic coordinates
    def get_rsnum(db, coord):
        temp_coord = coord.strip("chr").split(":")
        chro = temp_coord[0]
        pos = temp_coord[1]
        query_results = db.dbsnp151.find({"chromosome": chro.upper() if chro == 'x' or chro == 'y' else chro, "position": pos})
        query_results_sanitized = json.loads(json_util.dumps(query_results))
        return query_results_sanitized

    # Replace input genomic coordinates with variant ids (rsids)
    def replace_coord_rsid(db, snp):
        if snp[0:2] == "rs":
            return snp
        else:
            snp_info_lst = get_rsnum(db, snp)
            print("snp_info_lst")
            print(snp_info_lst)
            if snp_info_lst != None:
                if len(snp_info_lst) > 1:
                    var_id = "rs" + snp_info_lst[0]['id']
                    ref_variants = []
                    for snp_info in snp_info_lst:
                        if snp_info['id'] == snp_info['ref_id']:
                            ref_variants.append(snp_info['id'])
                    if len(ref_variants) > 1:
                        var_id = "rs" + ref_variants[0]
                        if "warning" in output:
                            output["warning"] = output["warning"] + \
                            ". Multiple rsIDs (" + ", ".join(["rs" + ref_id for ref_id in ref_variants]) + ") map to genomic coordinates " + snp
                        else:
                            output["warning"] = "Multiple rsIDs (" + ", ".join(["rs" + ref_id for ref_id in ref_variants]) + ") map to genomic coordinates " + snp
                    elif len(ref_variants) == 0 and len(snp_info_lst) > 1:
                        var_id = "rs" + snp_info_lst[0]['id']
                        if "warning" in output:
                            output["warning"] = output["warning"] + \
                            ". Multiple rsIDs (" + ", ".join(["rs" + ref_id for ref_id in ref_variants]) + ") map to genomic coordinates " + snp
                        else:
                            output["warning"] = "Multiple rsIDs (" + ", ".join(["rs" + ref_id for ref_id in ref_variants]) + ") map to genomic coordinates " + snp
                    else:
                        var_id = "rs" + ref_variants[0]
                    return var_id
                elif len(snp_info_lst) == 1:
                    var_id = "rs" + snp_info_lst[0]['id']
                    return var_id
                else:
                    return snp
            else:
                return snp
        return snp

    snp1 = replace_coord_rsid(db, snp1)
    snp2 = replace_coord_rsid(db, snp2)

    # Find RS numbers in snp database
    # SNP1
    snp1_coord = get_coords(db, snp1)
    if snp1_coord == None:
        output["error"] = snp1 + " is not in dbSNP build " + dbsnp_version + "."
        return(json.dumps(output, sort_keys=True, indent=2))

    # SNP2
    snp2_coord = get_coords(db, snp2)
    if snp2_coord == None:
        output["error"] = snp2 + " is not in dbSNP build " + dbsnp_version + "."
        return(json.dumps(output, sort_keys=True, indent=2))

    # Check if SNPs are on the same chromosome
    if snp1_coord['chromosome'] != snp2_coord['chromosome']:
        output["warning"] = snp1 + " and " + \
            snp2 + " are on different chromosomes"

    # Select desired ancestral populations
    pops = pop.split("+")
    pop_dirs = []
    for pop_i in pops:
        if pop_i in ["ALL", "AFR", "AMR", "EAS", "EUR", "SAS", "ACB", "ASW", "BEB", "CDX", "CEU", "CHB", "CHS", "CLM", "ESN", "FIN", "GBR", "GIH", "GWD", "IBS", "ITU", "JPT", "KHV", "LWK", "MSL", "MXL", "PEL", "PJL", "PUR", "STU", "TSI", "YRI"]:
            pop_dirs.append(pop_dir + pop_i + ".txt")
        else:
            output["error"] = pop_i + " is not an ancestral population. Choose one of the following ancestral populations: AFR, AMR, EAS, EUR, or SAS; or one of the following sub-populations: ACB, ASW, BEB, CDX, CEU, CHB, CHS, CLM, ESN, FIN, GBR, GIH, GWD, IBS, ITU, JPT, KHV, LWK, MSL, MXL, PEL, PJL, PUR, STU, TSI, or YRI."
            return(json.dumps(output, sort_keys=True, indent=2))

    get_pops = "cat " + " ".join(pop_dirs)
    proc = subprocess.Popen(get_pops, shell=True, stdout=subprocess.PIPE)
    pop_list = [x.decode('utf-8') for x in proc.stdout.readlines()]

    ids = [i.strip() for i in pop_list]
    pop_ids = list(set(ids))

    # Extract 1000 Genomes phased genotypes

    # SNP1
    vcf_file1 = vcf_dir + snp1_coord['chromosome'] + ".phase3_shapeit2_mvncall_integrated_v5.20130502.genotypes.vcf.gz"
    tabix_snp1_offset = "tabix {0} {1}:{2}-{2} | grep -v -e END".format(
        vcf_file1, snp1_coord['chromosome'], snp1_coord['position'])
    proc1_offset = subprocess.Popen(
        tabix_snp1_offset, shell=True, stdout=subprocess.PIPE)
    vcf1_offset = [x.decode('utf-8') for x in proc1_offset.stdout.readlines()]

    # SNP2
    vcf_file2 = vcf_dir + snp2_coord['chromosome'] + ".phase3_shapeit2_mvncall_integrated_v5.20130502.genotypes.vcf.gz"
    tabix_snp2_offset = "tabix {0} {1}:{2}-{2} | grep -v -e END".format(
        vcf_file2, snp2_coord['chromosome'], snp2_coord['position'])
    proc2_offset = subprocess.Popen(
        tabix_snp2_offset, shell=True, stdout=subprocess.PIPE)
    vcf2_offset = [x.decode('utf-8') for x in proc2_offset.stdout.readlines()]

    vcf1_pos = snp1_coord['position']
    vcf2_pos = snp2_coord['position']
    vcf1 = vcf1_offset
    vcf2 = vcf2_offset

    # Import SNP VCF files

    # SNP1
    if len(vcf1) == 0:
        output["error"] = snp1 + " is not in 1000G reference panel."
        return(json.dumps(output, sort_keys=True, indent=2))
    elif len(vcf1) > 1:
        geno1 = []
        for i in range(len(vcf1)):
            if vcf1[i].strip().split()[2] == snp1:
                geno1 = vcf1[i].strip().split()
        if geno1 == []:
            output["error"] = snp1 + " is not in 1000G reference panel."
            return(json.dumps(output, sort_keys=True, indent=2))
    else:
        geno1 = vcf1[0].strip().split()

    if geno1[2] != snp1:
        if "warning" in output:
            output["warning"] = output["warning"] + \
                ". Genomic position for query variant1 (" + snp1 + \
                ") does not match RS number at 1000G position (chr" + \
                geno1[0]+":"+geno1[1]+")"
        else:
            output["warning"] = "Genomic position for query variant1 (" + snp1 + \
                ") does not match RS number at 1000G position (chr" + \
                geno1[0]+":"+geno1[1]+")"
        snp1 = geno1[2]

    if "," in geno1[3] or "," in geno1[4]:
        output["error"] = snp1 + " is not a biallelic variant."
        return(json.dumps(output, sort_keys=True, indent=2))

    if len(geno1[3]) == 1 and len(geno1[4]) == 1:
        snp1_a1 = geno1[3]
        snp1_a2 = geno1[4]
    elif len(geno1[3]) == 1 and len(geno1[4]) > 1:
        snp1_a1 = "-"
        snp1_a2 = geno1[4][1:]
    elif len(geno1[3]) > 1 and len(geno1[4]) == 1:
        snp1_a1 = geno1[3][1:]
        snp1_a2 = "-"
    elif len(geno1[3]) > 1 and len(geno1[4]) > 1:
        snp1_a1 = geno1[3][1:]
        snp1_a2 = geno1[4][1:]

    allele1 = {"0|0": [snp1_a1, snp1_a1], "0|1": [snp1_a1, snp1_a2], "1|0": [snp1_a2, snp1_a1], "1|1": [
        snp1_a2, snp1_a2], "0": [snp1_a1, "."], "1": [snp1_a2, "."], "./.": [".", "."], ".": [".", "."]}

    # SNP2
    if len(vcf2) == 0:
        output["error"] = snp2 + " is not in 1000G reference panel."
        return(json.dumps(output, sort_keys=True, indent=2))
    elif len(vcf2) > 1:
        geno2 = []
        for i in range(len(vcf2)):
            if vcf2[i].strip().split()[2] == snp2:
                geno2 = vcf2[i].strip().split()
        if geno2 == []:
            output["error"] = snp2 + " is not in 1000G reference panel."
            return(json.dumps(output, sort_keys=True, indent=2))
    else:
        geno2 = vcf2[0].strip().split()

    if geno2[2] != snp2:
        if "warning" in output:
            output["warning"] = output["warning"] + \
                ". Genomic position for query variant2 (" + snp2 + \
                ") does not match RS number at 1000G position (chr" + \
                geno2[0]+":"+geno2[1]+")"
        else:
            output["warning"] = "Genomic position for query variant2 (" + snp2 + \
                ") does not match RS number at 1000G position (chr" + \
                geno2[0]+":"+geno2[1]+")"
        snp2 = geno2[2]

    if "," in geno2[3] or "," in geno2[4]:
        output["error"] = snp2 + " is not a biallelic variant."
        return(json.dumps(output, sort_keys=True, indent=2))

    if len(geno2[3]) == 1 and len(geno2[4]) == 1:
        snp2_a1 = geno2[3]
        snp2_a2 = geno2[4]
    elif len(geno2[3]) == 1 and len(geno2[4]) > 1:
        snp2_a1 = "-"
        snp2_a2 = geno2[4][1:]
    elif len(geno2[3]) > 1 and len(geno2[4]) == 1:
        snp2_a1 = geno2[3][1:]
        snp2_a2 = "-"
    elif len(geno2[3]) > 1 and len(geno2[4]) > 1:
        snp2_a1 = geno2[3][1:]
        snp2_a2 = geno2[4][1:]

    allele2 = {"0|0": [snp2_a1, snp2_a1], "0|1": [snp2_a1, snp2_a2], "1|0": [snp2_a2, snp2_a1], "1|1": [
        snp2_a2, snp2_a2], "0": [snp2_a1, "."], "1": [snp2_a2, "."], "./.": [".", "."], ".": [".", "."]}

    if geno1[1] != vcf1_pos:
        output["error"] = "VCF File does not match variant coordinates for SNP1."
        return(json.dumps(output, sort_keys=True, indent=2))

    if geno2[1] != vcf2_pos:
        output["error"] = "VCF File does not match variant coordinates for SNP2."
        return(json.dumps(output, sort_keys=True, indent=2))

    # Get headers
    tabix_snp1_h = "tabix -H {0} | grep CHROM".format(vcf_file1)
    proc1_h = subprocess.Popen(
        tabix_snp1_h, shell=True, stdout=subprocess.PIPE)
    head1 = [x.decode('utf-8') for x in proc1_h.stdout.readlines()][0].strip().split()

    tabix_snp2_h = "tabix -H {0} | grep CHROM".format(vcf_file2)
    proc2_h = subprocess.Popen(
        tabix_snp2_h, shell=True, stdout=subprocess.PIPE)
    head2 = [x.decode('utf-8') for x in proc2_h.stdout.readlines()][0].strip().split()

    # Combine phased genotypes
    geno = {}
    for i in range(9, len(head1)):
        geno[head1[i]] = [allele1[geno1[i]], ".."]

    for i in range(9, len(head2)):
        if head2[i] in geno:
            geno[head2[i]][1] = allele2[geno2[i]]

    # Extract haplotypes
    hap = {}
    for ind in pop_ids:
        if ind in geno:
            hap1 = geno[ind][0][0] + "_" + geno[ind][1][0]
            hap2 = geno[ind][0][1] + "_" + geno[ind][1][1]
            if hap1 in hap:
                hap[hap1] += 1
            else:
                hap[hap1] = 1

            if hap2 in hap:
                hap[hap2] += 1
            else:
                hap[hap2] = 1

    # Remove missing haplotypes
    keys = list(hap.keys())
    for key in keys:
        if "." in key:
            hap.pop(key, None)

    # Check all haplotypes are present
    if len(hap) != 4:
        snp1_a = [snp1_a1, snp1_a2]
        snp2_a = [snp2_a1, snp2_a2]
        haps = [snp1_a[0] + "_" + snp2_a[0], snp1_a[0] + "_" + snp2_a[1],
                snp1_a[1] + "_" + snp2_a[0], snp1_a[1] + "_" + snp2_a[1]]
        for i in haps:
            if i not in hap:
                hap[i] = 0

    # Sort haplotypes
    A = hap[sorted(hap)[0]]
    B = hap[sorted(hap)[1]]
    C = hap[sorted(hap)[2]]
    D = hap[sorted(hap)[3]]
    N = A + B + C + D
    tmax = max(A, B, C, D)

    hap1 = sorted(hap, key=hap.get, reverse=True)[0]
    hap2 = sorted(hap, key=hap.get, reverse=True)[1]
    hap3 = sorted(hap, key=hap.get, reverse=True)[2]
    hap4 = sorted(hap, key=hap.get, reverse=True)[3]

    delta = float(A * D - B * C)
    Ms = float((A + C) * (B + D) * (A + B) * (C + D))
    if Ms != 0:

        # D prime
        if delta < 0:
            D_prime = abs(delta / min((A + C) * (A + B), (B + D) * (C + D)))
        else:
            D_prime = abs(delta / min((A + C) * (C + D), (A + B) * (B + D)))

        # R2
        r2 = (delta**2) / Ms

        # P-value
        num = (A + B + C + D) * (A * D - B * C)**2
        denom = Ms
        chisq = num / denom
        p = 2 * (1 - (0.5 * (1 + math.erf(chisq**0.5 / 2**0.5))))

    else:
        D_prime = "NA"
        r2 = "NA"
        chisq = "NA"
        p = "NA"

    # Find Correlated Alleles
    if str(r2) != "NA" and float(r2) > 0.1:
        Ac=hap[sorted(hap)[0]]
        Bc=hap[sorted(hap)[1]]
        Cc=hap[sorted(hap)[2]]
        Dc=hap[sorted(hap)[3]]

        if ((Ac*Dc) / max((Bc*Cc), 0.01) > 1):
            corr1 = snp1 + "(" + sorted(hap)[0].split("_")[0] + ") allele is correlated with " + snp2 + "(" + sorted(hap)[0].split("_")[1] + ") allele"
            corr2 = snp1 + "(" + sorted(hap)[3].split("_")[0] + ") allele is correlated with " + snp2 + "(" + sorted(hap)[3].split("_")[1] + ") allele"
            corr_alleles = [corr1, corr2]
        else:
            corr1 = snp1 + "(" + sorted(hap)[1].split("_")[0] + ") allele is correlated with " + snp2 + "(" + sorted(hap)[1].split("_")[1] + ") allele"
            corr2 = snp1 + "(" + sorted(hap)[2].split("_")[0] + ") allele is correlated with " + snp2 + "(" + sorted(hap)[2].split("_")[1] + ") allele"
            corr_alleles = [corr1, corr2]
    else:
        corr_alleles = [snp1 + " and " + snp2 + " are in linkage equilibrium"]
        
    

    # Create JSON output
    snp_1 = {}
    snp_1["rsnum"] = snp1
    snp_1["coord"] = "chr" + snp1_coord['chromosome'] + ":" + \
        vcf1_pos

    snp_1_allele_1 = {}
    snp_1_allele_1["allele"] = sorted(hap)[0].split("_")[0]
    snp_1_allele_1["count"] = str(A + B)
    snp_1_allele_1["frequency"] = str(round(float(A + B) / N, 3))
    snp_1["allele_1"] = snp_1_allele_1

    snp_1_allele_2 = {}
    snp_1_allele_2["allele"] = sorted(hap)[2].split("_")[0]
    snp_1_allele_2["count"] = str(C + D)
    snp_1_allele_2["frequency"] = str(round(float(C + D) / N, 3))
    snp_1["allele_2"] = snp_1_allele_2
    output["snp1"] = snp_1

    snp_2 = {}
    snp_2["rsnum"] = snp2
    snp_2["coord"] = "chr" + snp2_coord['chromosome'] + ":" + \
        vcf2_pos

    snp_2_allele_1 = {}
    snp_2_allele_1["allele"] = sorted(hap)[0].split("_")[1]
    snp_2_allele_1["count"] = str(A + C)
    snp_2_allele_1["frequency"] = str(round(float(A + C) / N, 3))
    snp_2["allele_1"] = snp_2_allele_1

    snp_2_allele_2 = {}
    snp_2_allele_2["allele"] = sorted(hap)[1].split("_")[1]
    snp_2_allele_2["count"] = str(B + D)
    snp_2_allele_2["frequency"] = str(round(float(B + D) / N, 3))
    snp_2["allele_2"] = snp_2_allele_2
    output["snp2"] = snp_2

    two_by_two = {}
    cells = {}
    cells["c11"] = str(A)
    cells["c12"] = str(B)
    cells["c21"] = str(C)
    cells["c22"] = str(D)
    two_by_two["cells"] = cells
    two_by_two["total"] = str(N)
    output["two_by_two"] = two_by_two

    haplotypes = {}
    hap_1 = {}
    hap_1["alleles"] = hap1
    hap_1["count"] = str(hap[hap1])
    hap_1["frequency"] = str(round(float(hap[hap1]) / N, 3))
    haplotypes["hap1"] = hap_1

    hap_2 = {}
    hap_2["alleles"] = hap2
    hap_2["count"] = str(hap[hap2])
    hap_2["frequency"] = str(round(float(hap[hap2]) / N, 3))
    haplotypes["hap2"] = hap_2

    hap_3 = {}
    hap_3["alleles"] = hap3
    hap_3["count"] = str(hap[hap3])
    hap_3["frequency"] = str(round(float(hap[hap3]) / N, 3))
    haplotypes["hap3"] = hap_3

    hap_4 = {}
    hap_4["alleles"] = hap4
    hap_4["count"] = str(hap[hap4])
    hap_4["frequency"] = str(round(float(hap[hap4]) / N, 3))
    haplotypes["hap4"] = hap_4
    output["haplotypes"] = haplotypes

    statistics = {}
    if Ms != 0:
        statistics["d_prime"] = str(round(D_prime, 4))
        statistics["r2"] = str(round(r2, 4))
        statistics["chisq"] = str(round(chisq, 4))
        if p >= 0.0001:
            statistics["p"] = str(round(p, 4))
        else:
            statistics["p"] = "<0.0001"
    else:
        statistics["d_prime"] = D_prime
        statistics["r2"] = r2
        statistics["chisq"] = chisq
        statistics["p"] = p

    output["statistics"] = statistics

    output["corr_alleles"] = corr_alleles

    # Generate output file
    ldpair_out = open(tmp_dir + "LDpair_" + request + ".txt", "w")
    print("Query SNPs:", file=ldpair_out)
    print(output["snp1"]["rsnum"] + \
        " (" + output["snp1"]["coord"] + ")", file=ldpair_out)
    print(output["snp2"]["rsnum"] + \
        " (" + output["snp2"]["coord"] + ")", file=ldpair_out)
    print("", file=ldpair_out)
    print(pop + " Haplotypes:", file=ldpair_out)
    print(" " * 15 + output["snp2"]["rsnum"], file=ldpair_out)
    print(" " * 15 + \
        output["snp2"]["allele_1"]["allele"] + " " * \
        7 + output["snp2"]["allele_2"]["allele"], file=ldpair_out)
    print(" " * 13 + "-" * 17, file=ldpair_out)
    print(" " * 11 + output["snp1"]["allele_1"]["allele"] + " | " + output["two_by_two"]["cells"]["c11"] + " " * (5 - len(output["two_by_two"]["cells"]["c11"])) + " | " + output["two_by_two"]["cells"]["c12"] + " " * (
        5 - len(output["two_by_two"]["cells"]["c12"])) + " | " + output["snp1"]["allele_1"]["count"] + " " * (5 - len(output["snp1"]["allele_1"]["count"])) + " (" + output["snp1"]["allele_1"]["frequency"] + ")", file=ldpair_out)
    print(output["snp1"]["rsnum"] + " " * \
        (10 - len(output["snp1"]["rsnum"])) + " " * 3 + "-" * 17, file=ldpair_out)
    print(" " * 11 + output["snp1"]["allele_2"]["allele"] + " | " + output["two_by_two"]["cells"]["c21"] + " " * (5 - len(output["two_by_two"]["cells"]["c21"])) + " | " + output["two_by_two"]["cells"]["c22"] + " " * (
        5 - len(output["two_by_two"]["cells"]["c22"])) + " | " + output["snp1"]["allele_2"]["count"] + " " * (5 - len(output["snp1"]["allele_2"]["count"])) + " (" + output["snp1"]["allele_2"]["frequency"] + ")", file=ldpair_out)
    print(" " * 13 + "-" * 17, file=ldpair_out)
    print(" " * 15 + output["snp2"]["allele_1"]["count"] + " " * (5 - len(output["snp2"]["allele_1"]["count"])) + " " * 3 + output["snp2"]["allele_2"]["count"] + " " * (
        5 - len(output["snp2"]["allele_2"]["count"])) + " " * 3 + output["two_by_two"]["total"], file=ldpair_out)
    print(" " * 14 + "(" + output["snp2"]["allele_1"]["frequency"] + ")" + " " * (5 - len(output["snp2"]["allele_1"]["frequency"])) + \
        " (" + output["snp2"]["allele_2"]["frequency"] + ")" + \
        " " * (5 - len(output["snp2"]["allele_2"]["frequency"])), file=ldpair_out)
    print("", file=ldpair_out)
    print("          " + output["haplotypes"]["hap1"]["alleles"] + ": " + \
        output["haplotypes"]["hap1"]["count"] + \
        " (" + output["haplotypes"]["hap1"]["frequency"] + ")", file=ldpair_out)
    print("          " + output["haplotypes"]["hap2"]["alleles"] + ": " + \
        output["haplotypes"]["hap2"]["count"] + \
        " (" + output["haplotypes"]["hap2"]["frequency"] + ")", file=ldpair_out)
    print("          " + output["haplotypes"]["hap3"]["alleles"] + ": " + \
        output["haplotypes"]["hap3"]["count"] + \
        " (" + output["haplotypes"]["hap3"]["frequency"] + ")", file=ldpair_out)
    print("          " + output["haplotypes"]["hap4"]["alleles"] + ": " + \
        output["haplotypes"]["hap4"]["count"] + \
        " (" + output["haplotypes"]["hap4"]["frequency"] + ")", file=ldpair_out)
    print("", file=ldpair_out)
    print("          D': " + output["statistics"]["d_prime"], file=ldpair_out)
    print("          R2: " + output["statistics"]["r2"], file=ldpair_out)
    print("      Chi-sq: " + output["statistics"]["chisq"], file=ldpair_out)
    print("     p-value: " + output["statistics"]["p"], file=ldpair_out)
    print("", file=ldpair_out)
    if len(output["corr_alleles"]) == 2:
        print(output["corr_alleles"][0], file=ldpair_out)
        print(output["corr_alleles"][1], file=ldpair_out)
    else:
        print(output["corr_alleles"][0], file=ldpair_out)

    if "warning" in output:
        print("WARNING: " + output["warning"] + "!", file=ldpair_out)
    ldpair_out.close()

    # Return output
    return(json.dumps(output, sort_keys=True, indent=2))
Example #40
import numpy as np
from math import erf

def equation(d, x, t, cm, c):
    return (c - cm*(1.-erf(x/2./np.sqrt(d*t))))
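equation is a residual suited to a root finder. A hypothetical usage sketch (the parameter values and the use of scipy's brentq are my assumptions): find the depth x at which the erfc-style concentration profile drops to c.

from scipy.optimize import brentq

d, t, cm, c = 1e-9, 3600.0, 1.0, 0.5
x_root = brentq(lambda x: equation(d, x, t, cm, c), 1e-9, 1.0)
print(x_root)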
Example #41
 def cdf(self, x):
     'Cumulative distribution function.  P(X <= x)'
     if not self.sigma:
         raise StatisticsError('cdf() not defined when sigma is zero')
     return 0.5 * (1.0 + erf((x - self.mu) / (self.sigma * sqrt(2.0))))
Example #42
def cdf(x):
    return 0.5 * (1 + math.erf((x - mean) / (std * (2**0.5))))
Example #43
def standard_normal_cdf(x):
    result = (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
    return result
Example #44
from numba import cuda
import math

@cuda.jit
def math_erf(A, B):
    i = cuda.grid(1)
    B[i] = math.erf(A[i])
Example #45
import math

def normalcdf(x, m, sd):
    return (1 + math.erf((x-m) / math.sqrt(2) / sd)) / 2
Example #46
from math import erf, sqrt

capacity = 9800
boxes = 49
mean = 205 * boxes
sd = 15 * sqrt(boxes)

val1 = (1 + erf((capacity - mean) / (sd * sqrt(2)))) / 2
print(round(val1, 4))
Example #47
import math

def cumul(x, m, std):
    return 0.5 * (1 + math.erf((x - m) / (std * math.sqrt(2))))
Example #48
 def ufunc(x):
     return math.erf(x)
Example #49
def BPSO(prob_prams, pso_prams):
    #================================Problem definition===========================================
    nVar = prob_prams['nVar']
    #varSize=[1,prob_prams['nVar']]
    costFn = prob_prams['costFn']

    #===========================================================================
    #===================================Parameters of BPSO========================================

    MaxIt = pso_prams['MaxIt']  # Maximum number of iterations
    nPop = pso_prams['nPop']  # Number of particles
    w = pso_prams['w']  # Inertia coefficient
    wdamp = pso_prams['wdamp']  # Damping ratio
    c1 = pso_prams['c1']
    c2 = pso_prams['c2']
    Vmax = pso_prams['Vmax']
    trans_fn = pso_prams['trans_fn']
    s = np.zeros(nVar)

    #===========================================================================
    #=================================Initialization==========================================
    GlobalBest_Postion = []
    GlobalBest_Cost = np.inf

    p = [particle() for i in range(0, nPop)]
    for i in range(0, nPop):
        #Generate random position
        p[i].Position = np.random.randint(2, size=nVar)
        #Initialize velocity
        p[i].Velocity = np.zeros(nVar)

        #Update personal best
        p[i].Best['Position'] = p[i].Position.copy()
        p[i].Best['Cost'] = p[i].Cost

        #Update Global Best
        GlobalBest_Cost, GlobalBest_Postion = p[i].updateGbest(
            GlobalBest_Cost, GlobalBest_Postion)

    BestCosts = [0] * MaxIt
    gbests = [0] * MaxIt

    # ===========================Evaluation Iteration begins here==============================================
    for it in range(MaxIt):
        for i in range(nPop):
            #Evaluate the cost function
            p[i].Cost = costFn(p[i].Position)

            if p[i].updatePbest():
                GlobalBest_Cost, GlobalBest_Postion = p[i].updateGbest(
                    GlobalBest_Cost, GlobalBest_Postion)

            #Update Velocity
            p[i].updateVelocity(GlobalBest_Postion, c1, c2, w, nVar)

            p[i].Velocity = tools.boundary(p[i].Velocity, -Vmax, Vmax)
            #Update Position and Cost

            #Transfer functions
            for j in range(nVar):
                if trans_fn == 1:
                    s[j] = 1 / (1 + exp(-2 * p[i].Velocity[j]))
                if trans_fn == 2:
                    s[j] = 1 / (1 + exp(-p[i].Velocity[j]))
                if trans_fn == 3:
                    s[j] = 1 / (1 + exp(-p[i].Velocity[j] / 2))
                if trans_fn == 4:
                    s[j] = 1 / (1 + exp(-p[i].Velocity[j] / 3))

                if trans_fn <= 4:
                    if random() < s[j]:
                        p[i].Position[j] = 1
                    else:
                        p[i].Position[j] = 0
                if trans_fn == 5:
                    s[j] = abs(erf(((sqrt(pi) / 2) * p[i].Velocity[j])))
                if trans_fn == 6:
                    s[j] = abs(tanh(p[i].Velocity[j]))
                if trans_fn == 7:
                    s[j] = abs(p[i].Velocity[j] / sqrt(
                        (1 + p[i].Velocity[j]**2)))
                if trans_fn == 8:
                    s[j] = abs((2 / pi) * atan((pi / 2) * p[i].Velocity[j]))

                if trans_fn > 4:
                    if random() < s[j]:
                        p[i].Position[j] = tools.flip(p[i].Position[j])

        BestCosts[it] = GlobalBest_Cost
        gbests[it] = (GlobalBest_Postion, GlobalBest_Cost)

        if pso_prams['showItr']:
            print("Iteration", it, 'Best cost', BestCosts[it])

        # Damp the inertia coefficient
        w = w * wdamp
        #----------------------------------------------------------------------------------------------
    out_pso = {
        'BestCosts': BestCosts,
        'GlobalBest': (GlobalBest_Postion, GlobalBest_Cost),
        'gbests': gbests,
        'particle': p
    }
    return out_pso
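Transfer functions 1-4 are S-shaped (the sigmoid output is the probability of setting the bit to 1), while 5-8 are V-shaped (the output is the probability of flipping the current bit). A standalone sketch evaluating one of each at a sample velocity:

import math

v = 1.5
s_shape = 1 / (1 + math.exp(-2 * v))                # trans_fn == 1
v_shape = abs(math.erf(math.sqrt(math.pi)/2 * v))   # trans_fn == 5, same curve as Vfunction2 below
print(s_shape, v_shape)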
Example #50
import math

def Vfunction2(gamma):
    val = (math.pi)**(0.5)
    val /= 2
    val *= gamma
    val = math.erf(val)
    return abs(val)
Example #51
 def h(self, z):
     return 0.5 * z * np.sqrt(math.pi) * math.erf(z) + np.exp(-(z**2))
Example #52
    def set_initial_guess(self):
        """
        Set the initial guess for the solution. The initial guess is generated
        by assuming infinitely-fast chemistry.
        """

        super(CounterflowDiffusionFlame, self).set_initial_guess()

        moles = lambda el: (self.gas.elemental_mass_fraction(el) /
                            self.gas.atomic_weight(el))

        # Compute stoichiometric mixture composition
        Yin_f = self.fuel_inlet.Y
        self.gas.TPY = self.fuel_inlet.T, self.P, Yin_f
        mdotf = self.fuel_inlet.mdot
        u0f = mdotf / self.gas.density
        T0f = self.fuel_inlet.T

        sFuel = moles('O')
        if 'C' in self.gas.element_names:
            sFuel -= 2 * moles('C')
        if 'H' in self.gas.element_names:
            sFuel -= 0.5 * moles('H')

        Yin_o = self.oxidizer_inlet.Y
        self.gas.TPY = self.oxidizer_inlet.T, self.P, Yin_o
        mdoto = self.oxidizer_inlet.mdot
        u0o = mdoto / self.gas.density
        T0o = self.oxidizer_inlet.T

        sOx = moles('O')
        if 'C' in self.gas.element_names:
            sOx -= 2 * moles('C')
        if 'H' in self.gas.element_names:
            sOx -= 0.5 * moles('H')

        zst = 1.0 / (1 - sFuel / sOx)
        Yst = zst * Yin_f + (1.0 - zst) * Yin_o

        # get adiabatic flame temperature and composition
        Tbar = 0.5 * (T0f + T0o)
        self.gas.TPY = Tbar, self.P, Yst
        self.gas.equilibrate('HP')
        Teq = self.gas.T
        Yeq = self.gas.Y

        # estimate strain rate
        zz = self.flame.grid
        dz = zz[-1] - zz[0]
        a = (u0o + u0f)/dz
        kOx = (self.gas.species_index('O2') if 'O2' in self.gas.species_names else
               self.gas.species_index('o2'))
        f = np.sqrt(a / (2.0 * self.gas.mix_diff_coeffs[kOx]))

        x0 = np.sqrt(mdotf*u0f) * dz / (np.sqrt(mdotf*u0f) + np.sqrt(mdoto*u0o))
        nz = len(zz)

        Y = np.zeros((nz, self.gas.n_species))
        T = np.zeros(nz)
        for j in range(nz):
            x = zz[j] - zz[0]
            zeta = f * (x - x0)
            zmix = 0.5 * (1.0 - erf(zeta))
            if zmix > zst:
                Y[j] = Yeq + (Yin_f - Yeq) * (zmix - zst) / (1.0 - zst)
                T[j] = Teq + (T0f - Teq) * (zmix - zst) / (1.0 - zst)
            else:
                Y[j] = Yin_o + zmix * (Yeq - Yin_o) / zst
                T[j] = T0o + (Teq - T0o) * zmix / zst

        T[0] = T0f
        T[-1] = T0o
        zrel = (zz - zz[0])/dz

        self.set_profile('u', [0.0, 1.0], [u0f, -u0o])
        self.set_profile('V', [0.0, x0/dz, 1.0], [0.0, a, 0.0])
        self.set_profile('T', zrel, T)
        for k,spec in enumerate(self.gas.species_names):
            self.set_profile(spec, zrel, Y[:,k])
Example #53
def CDF(x):
    return 0.5 * (1 + math.erf((x - mean) / (sd * math.sqrt(2))))
Example #54
import sys
import math

# def erf(x, z):
# return ((2/math.sqrt(math.pi)) * (integrate(exp(-x), (x, 0, z))))

ms = input('Enter mean and std dev:   ')
l = input('Enter less than   ')
between = input('Enter between   ')

MS = list([int(i) for i in ms.split()])
less = float(l)
Between = list([int(i) for i in between.split()])

mean = MS[0]
std = MS[1]
lower = Between[0]
higher = Between[1]

#cdf1 = .5 * (1 + ((less - mean)/(std*math.sqrt(2))))
#cdf2 = 1 - (.5 * (1 + ((lower - mean)/(std*math.sqrt(2)))) + (1 - .5 * (1 + ((higher - mean)/(std*math.sqrt(2))))))
cdf1 = .5 * (1 + math.erf(((less - mean) / (std * math.sqrt(2)))))
cdf2 = 1 - (.5 * (1 + math.erf(
    ((lower - mean) / (std * math.sqrt(2))))) + (1 - .5 * (1 + math.erf(
        ((higher - mean) / (std * math.sqrt(2)))))))

print("{0:0.3f}".format(cdf1))
print("{0:0.3f}".format(cdf2))
Example #55
def erf(x):
    return math.erf(x)
Example #56
import math


def cmf(case, mean, std):
    z = (case - mean) / (std * (2**0.5))
    error = math.erf(z)
    return (1 + error) * 0.5


mean = 100 * 2.4
std = 10 * 2
case1 = 250
result1 = cmf(case1, mean, std)
print(round(result1, 4))
Example #57
 def g(self, z):
     return 1.0 * z * np.sqrt(math.pi) * math.erf(z) + np.exp(-(z**2))
Example #58
from math import erf, sqrt

def cdf(x, u, o):
    ''' u: mean
        o: standard deviation
    '''
    return (1 / 2) * (1 + erf((x - u) / (o * sqrt(2))))
Example #59
def normal_cdf(x, mean=0, std_dev=1) -> float:
    ''' Returns the normal CDF (cumulative distribution function) of x
        given the mean and standard deviation.
    '''
    return 0.5 * (1 + math.erf((x - mean) / (std_dev * math.sqrt(2))))
Example #60
 def norm_cdf(x):
     # Computes standard normal cumulative distribution function
     return (1. + math.erf(x / math.sqrt(2.))) / 2.