Example No. 1
def problem_3b():

    import numpy as np
    from scipy.integrate import quad as integrate

    def imf_N(mass=0, alpha=2.35):
        if mass <= 1:
            return (1/mass)*np.exp(-(np.log10(mass) -
                np.log10(0.22))**2 / (2*0.57**2))
        elif mass > 1:
            return mass**-alpha

    alpha = 2.35

    coeff_high = 25. / integrate(imf_N, 20, 100, args=(alpha,))[0]

    # Match the two branches at mass = 1 so the composite IMF is continuous
    mass = 1
    coeff_low = coeff_high * mass**-alpha / \
                ((1/mass)*np.exp(-(np.log10(mass) -
                np.log10(0.22))**2 / (2*0.57**2)))

    def imf_M(mass=0, alpha=2.35):
        if mass <= 1:
            return coeff_low*(mass/mass)*np.exp(-(np.log10(mass) -
                np.log10(0.22))**2 / (2*0.57**2))
        elif mass > 1:
            return coeff_high * mass**-alpha * mass

    mass = integrate(imf_M, 0.1, 100, args=(alpha,))[0]

    print('Mass of cluster: %s' % mass)
Example No. 2
def threshold_area(x, y, area_fraction=0.68):

    '''
    Finds the limits of a 1D array that enclose a given fraction of the
    integrated data.

    Parameters
    ----------
    x : array-like
        1D array of sample positions.
    y : array-like
        1D array of values at each position.
    area_fraction : float
        Fraction of the total integrated area to enclose.

    Returns
    -------
    x_peak, low_error, up_error : tuple
        Peak position and the distances from the peak to the lower and
        upper bounds that enclose the requested fraction of the area.

    '''

    import numpy as np
    from scipy.integrate import simps as integrate

    # Step for lowering threshold
    step = (np.max(y) - np.median(y)) / 1000.0

    # initial threshold
    threshold = np.max(y) - step
    threshold_area = 0.0

    # area under whole function
    area = integrate(y, x)

    # Lower the threshold until the enclosed area reaches the requested fraction
    while threshold_area < area * area_fraction:

        threshold_indices = np.where(y > threshold)[0]

        try:
            bounds_indices = (threshold_indices[0], threshold_indices[-1])
        except IndexError:
            bounds_indices = ()

        try:
            threshold_area = integrate(y[bounds_indices[0]:bounds_indices[1]],
                                       x[bounds_indices[0]:bounds_indices[1]])
            threshold_area += threshold * (x[bounds_indices[1]] - \
                                           x[bounds_indices[0]])
        except IndexError:
            threshold_area = 0

        threshold -= step

    x_peak = x[y == y.max()][0]
    low_error, up_error = x_peak - x[bounds_indices[0]], \
                          x[bounds_indices[1]] - x_peak

    return (x_peak, low_error, up_error)
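
A minimal usage sketch (hypothetical data, not from the original source): evaluate the function on a Gaussian-shaped curve, where the returned errors should approximate the width enclosing 68% of the area.

import numpy as np

x = np.linspace(-5, 5, 1000)
y = np.exp(-x**2 / 2.0)  # Gaussian peaked at x = 0 with unit width

x_peak, low_error, up_error = threshold_area(x, y, area_fraction=0.68)
print(x_peak, low_error, up_error)  # peak near 0, errors of order 1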
Example No. 3
def problem_3c():

    import numpy as np
    from scipy.integrate import quad as integrate


    # Compute Salpeter mass
    def imf_N(mass=0, alpha=2.35):
        return mass**-alpha

    alpha = 2.35

    coeff = 30. / integrate(imf_N, 20, 100, args=(alpha,))[0]

    def imf_M(mass=0, alpha=2.35):
        return mass**-alpha * mass

    mass_a = coeff*integrate(imf_M, 0.1, 100, args=(alpha,))[0]


    # Compute Chabrier mass
    def imf_N(mass=0, alpha=2.35):
        if mass <= 1:
            return (1/mass)*np.exp(-(np.log10(mass) -
                np.log10(0.22))**2 / (2*0.57**2))
        elif mass > 1:
            return mass**-alpha
    alpha = 2.35

    coeff_high = 25. / integrate(imf_N, 20, 100, args=(alpha,))[0]

    # Match the two branches at mass = 1 so the composite IMF is continuous
    mass = 1
    coeff_low = coeff_high * mass**-alpha / \
                ((1/mass)*np.exp(-(np.log10(mass) -
                np.log10(0.22))**2 / (2*0.57**2)))

    def imf_M(mass=0, alpha=2.35):
        if mass <= 1:
            return coeff_low*(mass/mass)*np.exp(-(np.log10(mass) -
                np.log10(0.22))**2 / (2*0.57**2))
        elif mass > 1:
            return coeff_high * mass**-alpha * mass

    mass_b = integrate(imf_M, 0.1, 100, args=(alpha,))[0]


    # Now calculate the velocity dispersion
    min_vel_disp = mass_a - mass_b

    print('Min velocity resolution: %s' % min_vel_disp)
Example No. 4
 def w(z):
     s, phi = z
     h = lambda u: Vf(np.maximum(G(x, phi), u)) * F.pdf(u)
     integral, err = integrate(h, a, b)
     q = pi(s) * integral + (1.0 - pi(s)) * Vf(G(x, phi))
     # == minus because we minimize == #
     return - x * (1.0 - phi - s) - beta * q
Example No. 5
    def gpc_make_prediction(self, tasks, x_star):
        task = tasks[0]
        y, f = self.Y[:,task], self.f
        num_tasks = len(tasks)
        # TODO: Review this line (it may fail):
        I = np.matrix(np.eye(num_cols(self.X[task])))
        W = -self.sigmoid.hessian_log_likelihood(y, f)
        # TODO: Review the next two lines:
        # self.K = self.cov_function.cov_matrix(self.hyperparameters, self.X[task])
        # K = self.K + eps*I
        K = self.cov_function.cov_matrix(self.hyperparameters, self.X[task])
        K = K + eps*I
        sqrt_W = np.sqrt(W)
        self.L = cholesky(I + sqrt_W*K*sqrt_W)
        L = self.L
        k_star = compute_k_star(self.cov_function.cov_function, self.hyperparameters, self.X[task], x_star)
        # f_mean = dot(k_star.T, self.sigmoid.gradient_log_likelihood(y, f))
        f_mean = (k_star.T)*(self.sigmoid.gradient_log_likelihood(y, f))
        v = backslash(L, sqrt_W*k_star)
        k_star_star = compute_k_star(self.cov_function.cov_function, self.hyperparameters, x_star, x_star)
        f_var = k_star_star - np.dot(v.T, v)

        # TODO: this could be done better
        def aux_fun(z, i):
            return self.sigmoid.evaluate(z)*scipy.stats.norm(f_mean[i,0], f_var[i,i]).pdf(z)

        # pi_star = integrate(aux_fun, -np.inf, np.inf)
        self.pi_star = []
        for i in range(num_cols(x_star)):
            self.pi_star.append(integrate(aux_fun, -np.inf, np.inf, args=(i,))[0])
        return self.pi_star
        """def aux_fun(f_i):
Example No. 6
def problem_3a():

    from scipy.integrate import quad as integrate

    def imf_N(mass=0, alpha=2.35):
        return mass**-alpha

    alpha = 2.35

    coeff = 25. / integrate(imf_N, 20, 100, args=(alpha,))[0]

    def imf_M(mass=0, alpha=2.35):
        return mass**-alpha * mass

    mass = coeff*integrate(imf_M, 0.1, 100, args=(alpha,))[0]

    print('Mass of cluster: %s' % mass)
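
As a sanity check (a sketch, not part of the original snippet): both integrals are pure power laws with closed-form antiderivatives, so quad can be verified directly.

from scipy.integrate import quad

alpha = 2.35

def powlaw_int(p, lo, hi):
    # Closed form of the definite integral of m**p from lo to hi
    return (hi**(p + 1) - lo**(p + 1)) / (p + 1)

num, _ = quad(lambda m: m**-alpha, 20, 100)
assert abs(num - powlaw_int(-alpha, 20, 100)) < 1e-6

num, _ = quad(lambda m: m**(1 - alpha), 0.1, 100)
assert abs(num - powlaw_int(1 - alpha, 0.1, 100)) < 1e-6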
Example No. 7
File: BHM.py Project: facom/BHMcalc
def AverageFlux(d,**args):
    args['D']=d
    intFlux=lambda x:Flux(x,**args)
    #print args
    F=integrate(intFlux,0.0,2*PI)[0]/(2*PI)
    #print F
    #exit(0)
    return F
Example No. 8
def ReadFiniteRadiusWaveform(n, filename, WaveformName, ChMass, InitialAdmEnergy, YLMRegex, LModes, DataType, Ws) :
    """
    This is just a worker function defined for ReadFiniteRadiusData,
    below, reading a single waveform from an h5 file of many
    waveforms.  You probably don't need to call this directly.

    """
    from scipy.integrate import cumtrapz as integrate
    from numpy import setdiff1d, empty, delete, sqrt, log, array
    from h5py import File
    import GWFrames
    try :
        f = File(filename, 'r')
    except IOError :
        print("ReadFiniteRadiusWaveform could not open the file '{0}'".format(filename))
        raise
    try :
        W = f[WaveformName]
        NTimes_Input = W['AverageLapse.dat'].shape[0]
        T = W['AverageLapse.dat'][:,0]
        Indices = MonotonicIndices(T)
        T = T[Indices]
        Radii = array(W['ArealRadius.dat'])[Indices,1]
        AverageLapse = array(W['AverageLapse.dat'])[Indices,1]
        CoordRadius = W['CoordRadius.dat'][0,1]
        YLMdata = [DataSet for DataSet in list(W) for m in [YLMRegex.search(DataSet)] if (m and int(m.group('L')) in LModes)]
        YLMdata = sorted(YLMdata, key=lambda DataSet : [int(YLMRegex.search(DataSet).group('L')), int(YLMRegex.search(DataSet).group('M'))])
        LM = sorted([[int(m.group('L')), int(m.group('M'))] for DataSet in YLMdata for m in [YLMRegex.search(DataSet)] if m])
        NModes = len(LM)
        # Lapse is given by 1/sqrt(-g^{00}), where g is the full 4-metric
        T[1:] = integrate(AverageLapse/sqrt(((-2.0*InitialAdmEnergy)/Radii) + 1.0), T) + T[0]
        T -= (Radii + (2.0*InitialAdmEnergy)*log((Radii/(2.0*InitialAdmEnergy))-1.0))
        Ws[n].SetTime(T/ChMass)
        # WRONG!!!: # Radii /= ChMass
        NTimes = Ws[n].NTimes()
        # Ws[n].SetFrame is not done, because we assume the inertial frame
        Ws[n].SetFrameType(GWFrames.Inertial) # Assumption! (but this should be safe)
        Ws[n].SetDataType(DataType)
        Ws[n].SetRIsScaledOut(True) # Assumption! (but it should be safe)
        Ws[n].SetMIsScaledOut(True) # We have made this true
        Ws[n].SetLM(LM)
        Data = empty((NModes, NTimes), dtype='complex')
        if(DataType == GWFrames.h) :
            UnitScaleFactor = 1.0 / ChMass
        elif(DataType == GWFrames.hdot) :
            UnitScaleFactor = 1.0
        elif(DataType == GWFrames.Psi4) :
            UnitScaleFactor = ChMass
        else :
            raise ValueError('DataType "{0}" is unknown.'.format(DataType))
        RadiusRatio = Radii / CoordRadius
        for m,DataSet in enumerate(YLMdata) :
            modedata = array(W[DataSet])
            Data[m,:] = (modedata[Indices,1] + 1j*modedata[Indices,2]) * RadiusRatio * UnitScaleFactor
        Ws[n].SetData(Data)
    finally :
        f.close()
    return Radii/ChMass
Example No. 9
def GL_coupling_coefficient(r, w_0, a, p):
	# Assumes a corrugated horn waveguide, where the E-field at the horn aperture is known analytically (HE_11 mode).
	# Goldsmith equation 7.34 is then used to calculate the coupling coefficients.
	x = 2.*r**2./w_0**2.
	s = w_0/a
	t = 1.7005
	integral = integrate(integrand,0.,2./s**2., args=(s,t,p))[0]
	c_p = 1.362*s*integral
	return c_p
Example No. 10
 def w(z):  # z = (s, phi)
     """
     Objective function, corresponding to the right-hand side of the
     Bellman equation. Negated because we will minimize.
     """
     s, phi = z
     integrand = lambda u: Vf(np.maximum(G(x, phi), u)) * F.pdf(u)
     integral, err = integrate(integrand, a, b)
     q = pi(s) * integral + (1 - pi(s)) * Vf(G(x, phi))
     return - x * (1 - phi - s) - beta * q
Example No. 11
def calc_avg_N_photons(mass_limits=(18,100), Gamma=-1.35):

    ''' Calculates the number of photons produced in a star's lifetime in a given
    mass range of the IMF.

    Parameters
    ----------
    mass_limits : tuple
        (lower limit, upper limit) in M_sun.
    Gamma : float
        Gamma in IMF

    Returns
    -------
    N_photons_avg : float
        Number of photons produced in a star's lifetime in a given
        mass range of the IMF.

    '''

    # import external modules
    import numpy as np
    from scipy.integrate import quad as integrate

    def integrand(mass=0, Gamma=Gamma):
        return calc_N_photons(mass) * calc_star_lifetime(mass)\
                * calc_N_stars(mass=mass, Gamma=Gamma)

    N_photons_total = integrate(integrand,
            mass_limits[0], mass_limits[1],
            args=(Gamma,))[0]

    N_stars = integrate(calc_N_stars,
            mass_limits[0], mass_limits[1],
            args=(Gamma,))[0]

    N_photons_avg = N_photons_total / N_stars * 365 * 24 * 3600

    return N_photons_avg
Example No. 12
def findArcLength(fromPoint,toPoint,definePt):
    # Rename the parameters given for brevity
    p0,p1,p2 = fromPoint, toPoint, definePt
    # Get the parameters that describe the parabola (ax^2+bx+c)
    (a,b,c) = findParabola(p0,p1,p2)
    # Import basic integrating tool
    from scipy.integrate import quad as integrate
    # Function for the arc length integrand: Sqrt(1+[y']^2)
    from numpy import sqrt
    func = lambda x : sqrt( 1 + (2*a*x + b) * (2*a*x + b) )
    # Integrate to find arc length
    arcLength = integrate(func,p0.x,p1.x)[0]
    return arcLength
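
A quick independent check of the arc-length integrand (hypothetical values, not from the original): for the parabola y = x^2 (a=1, b=0) from x=0 to x=1, the closed form is sqrt(5)/2 + asinh(2)/4.

from numpy import sqrt, arcsinh
from scipy.integrate import quad as integrate

a, b = 1.0, 0.0
func = lambda x: sqrt(1 + (2*a*x + b)**2)
numeric = integrate(func, 0.0, 1.0)[0]
exact = sqrt(5)/2 + arcsinh(2.0)/4
print(numeric, exact)  # both ~1.4789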
Example No. 13
File: Ewald.py Project: etano/pagen
    def CalcXk(self, v_r_spline, r_cut, k, r_max):
        # Tolerances
        abs_tol = 1.e-11
        rel_tol = 1.e-11

        # Integrand
        v_integrand = lambda r: r*sin(k*r)*v_r_spline(r)

        # Calculate x_k
        r_first = r_cut + ((pi/k)-(r_cut % (pi/k)))
        if (self.n_d == 2): # FIXME: This probably isn't right
            prefactor = -2.*pi
        elif (self.n_d == 3):
            prefactor = -4.*pi/k
        x_k = 0.
        if (r_max >= r_first):
            # First segment
            x_k += prefactor * integrate(v_integrand, r_cut, r_first, divmax=20)

            # Other segments
            if (int(k) != 0):
                n_pi_k = int(k)*pi/k # TODO: Manually fixed number currently
            else:
                n_pi_k = pi/k
            n_seg = max(int(floor((r_max-r_first)/n_pi_k)),1)
            for i in range(n_seg):
                x_k += prefactor * integrate(v_integrand, r_first+i*n_pi_k, r_first+(i+1)*n_pi_k, divmax=20)
            r_end = r_first + n_seg*n_pi_k
        elif (r_max >= r_cut):
            x_k += prefactor * integrate(v_integrand, r_cut, r_max, divmax=20)
            r_end = r_max
        else:
            r_end = r_cut

        # Add in analytic part after r_end
        x_k += self.CalcXkCoul(k,r_end)

        return x_k
Example No. 14
def calc_sfr_coeff(mass_limits_ionizing=(18,100), mass_limits_total=(0.1,100),
        Gamma=-1.35, N_photons_avg=2.52e63):

    ''' Calculates coefficient to convert Halpha luminosity into a SFR.

    Parameters
    ----------
    mass_limits_ionizing : tuple
        (lower limit, upper limit) in M_sun of stars creating Halpha.
    mass_limits_total : tuple
        (lower limit, upper limit) in M_sun of all stars.
    Gamma : float
        Gamma in IMF.
    N_photons_avg : float
        Average number of photons per star emitted in one year.

    Returns
    -------
    sfr_coeff : float
        SFR (M_sun) = sfr_coeff * Halpha luminosity

    '''

    # import external modules
    import numpy as np
    from scipy.integrate import quad as integrate


    N_stars_ionizing = integrate(calc_N_stars,
            mass_limits_ionizing[0], mass_limits_ionizing[1],
            args=(Gamma,))[0]

    print('N %s' % N_stars_ionizing)

    # Number of ionizing photons per second by massive stars
    efficiency = 2/3.
    sec2yr = 365 * 24 * 3600.
    n = 7.37e11 * efficiency / N_photons_avg * sec2yr

    sfr_coeff = n \
            / (mass_limits_ionizing[1]**Gamma - \
                    mass_limits_ionizing[0]**Gamma) \
            * (Gamma / (Gamma + 1)) \
            * (mass_limits_total[1]**(Gamma+1) - \
                    mass_limits_total[0]**(Gamma+1))

    return sfr_coeff
Example No. 15
def probdensity(func, x, x0=0, scale=True):
    """
    Probability Density Function.

    This function uses an integral to compute the probability
    density of a given function.

    Parameters
    ----------
    func:   function
            The function for which to calculate the PDF
    x:      numpy.ndarray
            The (array of) value(s) at which to calculate
            the PDF
    x0:     float, optional
            The lower-bound of the integral, starting point
            for the PDF to be calculated over, default=0
    scale:  bool, optional
            The scaling to be applied to the output,
            default=True

    Returns
    -------
    sumx:   numpy.ndarray
            The (array of) value(s) computed as the PDF at
            point(s) x
    """
    sumx = _np.array([])
    try:
        lx = len(x)  # Find length of input
    except TypeError:
        lx = 1  # Scalar input
        x = [x]  # Pack into list
    # Iteratively accumulate the integral up to each point
    for i in range(lx):
        sumx = _np.append(sumx, integrate(func, x0, x[i])[0])
    # Return only the 0-th value if there's only 1 value available
    if (len(sumx) == 1):
        sumx = sumx[0]
    else:
        if (scale == True):
            mx = sumx.max()
            sumx /= mx
        elif (scale != False):
            sumx /= scale
    return (sumx)
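
Despite its name, the function returns the cumulative integral of func from x0 to each x (scaled by default), not a density. A usage sketch, assuming the module-level imports this snippet relies on (numpy as _np and scipy's quad bound to integrate):

import numpy as _np
from scipy.integrate import quad as integrate

# Cumulative integral of a standard normal pdf from 0 to x, scaled to max 1
norm_pdf = lambda t: _np.exp(-t**2 / 2) / _np.sqrt(2 * _np.pi)
print(probdensity(norm_pdf, _np.linspace(0.5, 3.0, 4)))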
Example No. 16
def main(argv):
    """
    Generates random data using specified parameters, integrates and saves plots as PDFs
    """

    ### Integration
    t = sc.linspace(0, 50, 1000)
    R0 = 10
    C0 = 5
    RC0 = sc.array([R0, C0])  #pops input
    pops, infodict = integrate(dCR_dt, RC0, t, full_output=True)

    ### Plotting pop density over time
    f1 = p.figure()
    p.plot(t, pops[:, 0], 'g-', label='Resource density')
    p.plot(t, pops[:, 1], 'b-', label='Consumer density')
    p.grid()
    p.legend(loc='best')
    p.xlabel('Time')
    p.ylabel('Population density')
    p.title('Consumer-Resource population dynamics')
    if len(sys.argv) == 5:
        p.text(
            5 / 9 * max(t), 6 / 7 * max(pops[:, 0:1]), "r=" + sys.argv[1] +
            ", a=" + sys.argv[2] + ", z=" + sys.argv[3] + ", e=" + sys.argv[4])
    else:
        p.text(5 / 9 * max(t), 6 / 7 * max(pops[:, 0:1]),
               "r=1.0, a=0.1, z=1.5, e=0.75")
    f1.savefig('../results/LV_model2.pdf')
    print("Final predator and prey populations are",
          round(pops[len(t) - 1, 1], 2), "and", round(pops[len(t) - 1, 0], 2),
          "respectively.")

    ### Plotting consumer density by resource density
    f2 = p.figure()
    p.plot(pops[:, 0], pops[:, 1], 'r-', label='Resource density')  # Plot
    p.grid()
    p.xlabel('Resource density')
    p.ylabel('Consumer density')
    p.title('Consumer-Resource population dynamics')
    #p.show()# To display the figure
    f2.savefig('../results/LV_model2-1.pdf')
    p.close('all')

    return None
Example No. 17
def gausdist(x, mu=0, sigma=1):
    """
    Gaussian Distribution Function.

    This function is designed to calculate the generic
    distribution of a gaussian function with controls
    for mu and sigma.

    Parameters
    ----------
    x:      numpy.ndarray
            The input (array) x
    mu:     float, optional
            Optional control argument, default=0
    sigma:  float, optional
            Optional control argument, default=1

    Returns
    -------
    F:      numpy.ndarray
            Computed distribution of the gaussian function at the
            points specified by (array) x
    """

    # Define Integrand
    def integrand(sq):
        return (_np.exp(-sq**2 / 2))

    try:
        lx = len(x)  # Find length of input
    except TypeError:
        lx = 1  # Scalar input
        x = [x]  # Pack into list
    F = _np.zeros(lx, dtype=_np.float64)
    for i in range(lx):
        x_tmp = x[i]
        # Evaluate X (altered by mu and sigma)
        X = (x_tmp - mu) / sigma
        integral = integrate(integrand, -_np.inf, X)  # Integrate
        result = 1 / _np.sqrt(2 * _np.pi) * integral[0]  # Evaluate Result
        F[i] = result
    # Return only the 0-th value if there's only 1 value available
    if (len(F) == 1):
        F = F[0]
    return (F)
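
gausdist is effectively the standard normal CDF evaluated at (x - mu) / sigma, so it can be checked against scipy.stats (a sketch, assuming the module imports numpy as _np and quad as integrate):

import numpy as _np
from scipy.integrate import quad as integrate
from scipy.stats import norm

x = _np.array([-1.0, 0.0, 1.0, 2.0])
print(gausdist(x))   # ~[0.159, 0.5, 0.841, 0.977]
print(norm.cdf(x))   # should agree closely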
Example No. 18
    def _rho_s_eqn(self, rho_s):
        """
        Returns the value of the rho_s equation (25) with all terms on the LHS,
        used to find rho_s via root-finding.

        Args:
            rho_s : float
                The resource utilization level such that phi_ivp = pbar;
                parameter we search for via root-finding
        """
        lb = self.s
        ub = self.alpha_min * rho_s
        int_val = integrate(lambda nu: (nu**(self.s - 1)) * np.exp(-nu), lb,
                            ub)
        rat1 = (self.s**self.s) / np.exp(self.s)
        rat2 = self.pbar * (self.s**self.s) / (self.c_ub *
                                               np.exp(self.alpha_min * rho_s))
        val = int_val - (rat1 - rat2)
        return val
Example No. 19
    def _u_cdt_huc1_eqn(self, u_cdt):
        """
        Returns the value of the HUC1 equation with all terms on the LHS, used
        to find u_cdt via root-finding.

        Args:
            u_cdt : float
                The critical dividing threshold; parameter we search for via
                root-finding
        """
        lb = u_cdt * self.alpha_min
        ub = self.alpha_min
        int_val = integrate(lambda nu: (nu**(self.s - 1)) * np.exp(-nu), lb,
                            ub)
        rat1 = (self.s**self.s) / np.exp(u_cdt * self.alpha_min)
        rat2 = self.pbar * (self.s**
                            self.s) / (self.c_ub * np.exp(self.alpha_min))
        val = int_val - (rat1 - rat2)
        return val
Example No. 20
def rmse_of_non_central_chi_squared_polynomial_approximations():
    lambdas = [1, 5, 10, 50, 100, 200]
    nus = [1, 5, 10, 50, 100]
    poly_orders = [1, 3, 5]
    n_intervals = 16
    results = {
        poly_order: {nu: {}
                     for nu in nus}
        for poly_order in poly_orders
    }
    for poly_order in poly_orders:
        for nu in nus:
            ncx2_approx = construct_inverse_non_central_chi_squared_interpolated_polynomial_approximation(
                dof=nu,
                n_intervals=n_intervals + 1,
                polynomial_order=poly_order)
            discontinuities = sorted(
                [0.5**(i + 2) for i in range(n_intervals)] + [0.5] +
                [1.0 - 0.5**(i + 2) for i in range(n_intervals)])
            for l in lambdas:
                rmse = integrate(lambda u: (ncx2.ppf(
                    u, df=nu, nc=l) - ncx2_approx(u, non_centrality=l))**2,
                                 0,
                                 1,
                                 points=discontinuities,
                                 limit=50 + 10 * len(discontinuities))[0]**0.5
                results[poly_order][nu][l] = rmse

    for poly_order, result in results.items():
        df = pd.DataFrame(result)
        df.index = df.index.rename('lambda')
        df.columns = df.columns.rename('nu')
        print(poly_order, df.min().min(), df.max().max())
        print(round(df, 3))
        print('\n')
        print(
            round(df,
                  3).apply(lambda x: ' & '.join([str(i)
                                                 for i in list(x)]) + r' \\',
                           axis=1))
        print('\n' * 3)
Example No. 21
def funcrms(func, T):
    """
    Root-Mean-Square (RMS) Evaluator for Callable Functions.

    Integral-based RMS calculator; evaluates the RMS value
    of a repetitive signal (func) given the signal's
    period (T).

    Parameters
    ----------
    func:   callable
            The periodic function, a callable like f(t)
    T:      float
            The period of the function f, so that f(0)==f(T)

    Returns
    -------
    RMS:    The RMS value of the function (f) over the interval ( 0, T )
    """
    fn = lambda x: func(x)**2
    integral, _ = integrate(fn, 0, T)
    return _np.sqrt(1 / T * integral)
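
For example (a quick check, not from the original): the RMS of sin(t) over one period is 1/sqrt(2), assuming numpy is imported as _np and quad is bound to integrate at module level:

import numpy as _np
from scipy.integrate import quad as integrate

print(funcrms(_np.sin, 2 * _np.pi))  # ~0.7071
print(1 / _np.sqrt(2))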
Example No. 22
def prob1b():

    import numpy as np
    import pandas as pd
    from scipy.integrate import simps as integrate
    from scipy.integrate import cumtrapz

    print('\n---------------')
    print('1b')
    print('---------------\n')

    df = pd.read_csv('cluster_data.csv')

    Ms = df['Mass [Msun]']
    Ls = df['Luminosity [Lsun]']
    Ts = df['Teff [K]']

    A = 5.297e3
    Gamma = 1.3
    phi = lambda m: A * m**-(Gamma + 1.0)
    N_m = phi(Ms)  #* tau_ms(masses)

    L_bol = integrate(N_m * Ls, Ms)

    print('\nBolometric luminosity = {0:e} Lsun'.format(L_bol))

    L_cdf = cumtrapz(N_m * Ls, Ms)

    M_half = np.interp(0.5 * L_bol, L_cdf, Ms[1:])

    if 0:
        import matplotlib.pyplot as plt
        plt.close()
        plt.clf()
        plt.plot(Ms[1:], L_cdf / L_cdf.max())
        plt.show()

    print('\nHalf light mass = {0:.2f} Msun'.format(M_half))
Example No. 23
def plotError(f, bounds, n):

    # calculate the exact definite integral
    exact = integrate(f, min(bounds), max(bounds))[0]

    # create an array of the exact value to plot
    exact = [exact for i in range(1, n)]

    # generate an array of approximate integrals with increasing sample size
    approx = [integrate_mc(f, bounds, i) for i in range(1, n)]

    # plot the approximate and exact results
    plt.plot([i for i in range(1, n)], approx)
    plt.plot([i for i in range(1, n)], exact)

    # plot details
    plt.legend(['Monte Carlo', 'Exact Solution'])
    plt.title('Integral Approximation of 2*x^2 + 3*x + 1 from 0 to 10')
    plt.xlabel('Number of estimations')
    plt.ylabel('Area Under the Curve')

    # show the plot
    plt.show()
Example No. 24
def main(argv):
    """
    Generates random data using specified parameters, integrates and saves plots as PDFs
    """
    ### Integration
    t = sc.linspace(0, 15, 1000) # from 0 to 15 (units not relevant in this example), 1000 subdivisions
    R0 = 10
    C0 = 5
    RC0 = sc.array([R0, C0]) #pops input
    pops, infodict = integrate(dCR_dt, RC0, t, full_output=True)

    ### Plotting pop density over time
    f1 = p.figure() # open empty figure object
    p.plot(t, pops[:,0], 'g-', label='Resource density') # Plot
    p.plot(t, pops[:,1]  , 'b-', label='Consumer density')
    p.grid()
    p.legend(loc='best')
    p.xlabel('Time')
    p.ylabel('Population density')
    p.title('Consumer-Resource population dynamics')
    f1.savefig('../results/LV_model1.pdf')
    
    ### Plotting consumer density by resource density
    f2 = p.figure()  # open empty figure object
    p.plot(pops[:, 0], pops[:, 1], 'r-', label='Resource density')  # Plot
    p.grid()
    p.xlabel('Resource density')
    p.ylabel('Consumer density')
    p.title('Consumer-Resource population dynamics')
    #p.show()# To display the figures
    f2.savefig('../results/LV_model1-1.pdf')
    p.close('all')
    # Save and clear figures - prevents accumulation

    
    
    return None
Example No. 25
    def gpc_make_prediction(self, tasks, x_star):
        task = tasks[0]
        y, f = self.Y[:, task], self.f
        num_tasks = len(tasks)
        # TODO: Review this line (it may fail):
        I = np.matrix(np.eye(num_cols(self.X[task])))
        W = -self.sigmoid.hessian_log_likelihood(y, f)
        # TODO: Review the next two lines:
        # self.K = self.cov_function.cov_matrix(self.hyperparameters, self.X[task])
        # K = self.K + eps*I
        K = self.cov_function.cov_matrix(self.hyperparameters, self.X[task])
        K = K + eps * I
        sqrt_W = np.sqrt(W)
        self.L = cholesky(I + sqrt_W * K * sqrt_W)
        L = self.L
        k_star = compute_k_star(self.cov_function.cov_function,
                                self.hyperparameters, self.X[task], x_star)
        # f_mean = dot(k_star.T, self.sigmoid.gradient_log_likelihood(y, f))
        f_mean = (k_star.T) * (self.sigmoid.gradient_log_likelihood(y, f))
        v = backslash(L, sqrt_W * k_star)
        k_star_star = compute_k_star(self.cov_function.cov_function,
                                     self.hyperparameters, x_star, x_star)
        f_var = k_star_star - np.dot(v.T, v)

        # TODO: this could be done better
        def aux_fun(z, i):
            return self.sigmoid.evaluate(z) * scipy.stats.norm(
                f_mean[i, 0], f_var[i, i]).pdf(z)

        # pi_star = integrate(aux_fun, -np.inf, np.inf)
        self.pi_star = []
        for i in range(num_cols(x_star)):
            self.pi_star.append(
                integrate(aux_fun, -np.inf, np.inf, args=(i, ))[0])
        return self.pi_star
        """def aux_fun(f_i):
Example No. 26
def rms(f, T):
    """
    rms Function
    
    Integral-based RMS calculator; evaluates the RMS value
    of a repetitive signal (f) given the signal's
    period (T)

    Parameters
    ----------
    f:      callable
            The periodic function, a callable like f(t)
    T:      float
            The period of the function f, so that f(0)==f(T)

    Returns
    -------
    RMS:    The RMS value of the function (f) over the interval ( 0, T )

    """
    fn = lambda x: f(x)**2
    integral, _ = integrate(fn, 0, T)  # quad returns (value, abserr)
    RMS = np.sqrt(1/T*integral)
    return(RMS)
Example No. 27
File: BHM.py Project: facom/BHMcalc
def planckPower(lamb1,lamb2,T):
    R,dR=integrate(planckDistrib,lamb1,lamb2,args=(T,))
    return R
Example No. 28
from scipy.integrate import quadrature as integrate


def phi_l(xl, xr):

    return lambda x: (xr - x) / (xr - xl)


def phi_r(xl, xr):

    return lambda x: (x - xl) / (xr - xl)


def dphi_l(xl, xr):

    return lambda x: -1 / (xr - xl)


def dphi_r(xl, xr):

    return lambda x: 1 / (xr - xl)


l = phi_l(0, 1)
r = phi_r(0, 1)
dl = dphi_l(0, 1)
dr = dphi_r(0, 1)

print(integrate(lambda x: dr(x) * dl(x), 0, 1))
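
For these linear element shape functions the derivative product is the constant -1/(xr - xl)**2, so the integral over the element is -1/(xr - xl); on the unit element the quadrature should return -1.0 (up to the reported error). This is the familiar off-diagonal entry of the 1D linear-element stiffness matrix. A quick check:

xl, xr = 0.0, 1.0
print(-1.0 / (xr - xl))  # analytic value, matches the quadrature above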
Example No. 29
def _false_negative_probability(threshold, b, r):
    _probability = lambda s : 1 - (1 - (1 - s**float(r))**float(b))
    a, err = integrate(_probability, threshold, 1.0)
    return a
Example No. 30
def main():

    import numpy as np
    import numpy
    from scipy.integrate import quad as integrate

    # Parameters in script
    # --------------------------------------------------------------------------
    clouds = ('taurus', 'perseus',)# 'california')
    dec_ranges = ((35, 20), (35, 24), (44, 35))
    ra_ranges = ((5.5, 3.9), (4, 3), (70, 40))
    tspin_threshold = 200.0

    # Data locations in script
    # --------------------------------------------------------------------------
    filedir = '/home/ezbc/research/data/taurus/python_output/'

    # Read the data
    source_table = read_data(filedir + 'taurus_stanimirovic14_sources.txt')
    param_table = read_data(filedir + 'taurus_stanimirovic14_temps.txt')

    # Extract headers and data
    source_cols = source_table.colnames
    source_data = np.asarray(source_table._data)
    param_cols = param_table.colnames
    param_data = np.asarray(param_table._data)

    # Extract source names and RA / Dec
    source_decs = np.empty(source_data.shape)
    source_ras = np.empty(source_data.shape)
    source_names = ['' for x in range(source_data.shape[0])]
    source_names = np.empty(source_data.shape, dtype=object)

    for i, source in enumerate(source_data):
        source_dec = source[source_cols.index('Decl. (J2000)')]
        source_decs[i] = float(source_dec.split(':')[0])
        source_ra = source[source_cols.index('R.A. (J2000)')]
        source_ras[i] = float(source_ra.split(':')[0])
        source_names[i] = source[source_cols.index('Source')]

    # Find T_cnm temperatures for each cloud
    cloud_counts_list = []
    cloud_bins_list = []
    cloud_t_cnm_lists = []
    weights_list = []

    for i, cloud in enumerate(clouds):
        dec_range = dec_ranges[i]
        ra_range = ra_ranges[i]

        # Choose only sources within RA and Dec range
        indices = np.where((source_decs >= dec_range[1]) &\
                           (source_decs <= dec_range[0]) &\
                           (source_ras >= ra_range[1]) &\
                           (source_ras <= ra_range[0])
                           )

        cloud_decs = source_decs[indices]
        cloud_ras = source_ras[indices]
        cloud_sources = source_names[indices]

        # Get the spin temperatures of chosen sources
        t_cnm_list = []
        cloud_t_cnm_list = []
        source_list = []
        weights = []

        for j in xrange(len(param_data)):
            tspin = param_data[j][param_cols.index('Ts')]
            tau_error = param_data[j][param_cols.index('e_Ts')]
            if param_data[j][param_cols.index('Source')] in cloud_sources:
                if tspin < tspin_threshold and tau_error != 0.0:
                    cloud_t_cnm_list.append(tspin)

                    Tpeak = param_data[j][param_cols.index('TB')]
                    DelV = param_data[j][param_cols.index('DelV')]
                    VLSR = param_data[j][param_cols.index('VLSR')]

                    T_integ = integrate(gaussian,
                                        -1000,
                                        1000,
                                        args=(Tpeak,
                                              DelV/2.0,
                                              VLSR))

                    weights.append(T_integ[0])

            if tspin < tspin_threshold:
                t_cnm_list.append(tspin)

        if cloud == 'perseus':
            bins = 3
        else:
            bins = 8

        cloud_counts, cloud_bins = np.histogram(cloud_t_cnm_list, bins=bins)
        cloud_counts = cloud_counts / np.sum(cloud_counts, dtype=numpy.float)
        cloud_counts = np.append(cloud_counts, 0)

        cloud_counts_list.append(cloud_counts)
        cloud_bins_list.append(cloud_bins)
        cloud_t_cnm_lists.append(cloud_t_cnm_list)
        weights_list.append(weights)

        print cloud, np.median(cloud_t_cnm_list), 'Median CNM temp [K]'
        print cloud, np.average(cloud_t_cnm_list,
                                weights=weights), 'Intensity-weighted mean CNM temp [K]'
Example No. 31
def _false_positive_probability(threshold, b, r):
    def _probability(s):
        return 1 - (1 - s**float(r))**float(b)
    a, err = integrate(_probability, 0.0, threshold)
    return a
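
These two probabilities are typically combined to choose the LSH banding parameters: a sketch (mirroring how MinHash-LSH libraries such as datasketch tune b bands of r rows, not necessarily this project's exact code, and assuming both probability helpers and scipy's quad are in scope) that scans the factorizations of the permutation count and minimizes a weighted error at the target threshold.

def optimal_param(threshold, num_perm, fp_weight=0.5, fn_weight=0.5):
    # Try every (bands, rows) split of the available hash permutations and
    # keep the one minimizing the weighted false-positive/false-negative sum.
    best, best_error = None, float('inf')
    for b in range(1, num_perm + 1):
        for r in range(1, num_perm // b + 1):
            fp = _false_positive_probability(threshold, b, r)
            fn = _false_negative_probability(threshold, b, r)
            error = fp * fp_weight + fn * fn_weight
            if error < best_error:
                best, best_error = (b, r), error
    return best

print(optimal_param(0.5, 128))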
Example No. 32
def f(s,t):
    x,y,z = s[:N]
    r = np.sqrt(x*x+y*y+z*z)
    v = s[N:]
    V = np.sqrt(np.sum(v*v))
    b = V/(np.sqrt(2.0)*sigma)
    fx,fy,fz = -K*rho(r)*v*(erf(b)-2.0*b*np.exp(-0.5*b)/np.sqrt(np.pi))/(V*V*V)
    return np.array([v[0],v[1],v[2],(M(r)*Fx(x,y,z)+fx),(M(r)*Fy(x,y,z)+fy),(M(r)*Fz(x,y,z)+fz)])


# Initial conditions
vlct = np.array([200,300,400])*1.022
t = np.linspace(0,1000.0,n_iter)
for vel in vlct:
    print vel
    s_0 = np.array([1,0,0,0,0,vel]) #[x,y,z,v_x,v_y,v_z]
    s = integrate(f,s_0,t)
    #E = 0.5*m*np.sum(v*v,axis=1)+2.0*np.pi*G*rho_0*np.log(x[:,0]*x[:,0]+x[:,1]*x[:,1]+x[:,2]*x[:,2])
    x,v = s[:,:N],s[:,N:]
    pylab.plot(t*1E6,np.sqrt(x[:,0]*x[:,0]+x[:,1]*x[:,1]+x[:,2]*x[:,2]),label='$\mathrm{'+str(vel)+'\ Pc/Myr}$')

pylab.legend(loc=2)
pylab.xlabel('$\mathrm{Time\ (Years)}$',fontsize=16)
pylab.ylabel('$\mathrm{Radial\ Distance\ (Pc)}$',fontsize=16)
pylab.xscale('log')
pylab.yscale('log')
pylab.ylim(ymin=3)
pylab.xlim(xmin=2E4)
pylab.savefig('r.png',dpi=200)
pylab.close()
Example No. 33
        return 4*np.pi*(m_tot-m)*rho(x,y,z)*G*G*m*m*(v_s*v_s+u*u+v*v+w*w)**-1.5
    return 0

def D(s,t):
    x,y,z = s[:3]
    u,v,w = s[3:-1]
    m = s[-1]
    dm = mdot(x,y,z,u,v,w,m)
    a_x = (Hx(x,y,z)+(m_tot-m)*Gx(x,y,z)-dm*u)/m
    a_y = (Hy(x,y,z)+(m_tot-m)*Gy(x,y,z)-dm*v)/m
    a_z = (Hz(x,y,z)+(m_tot-m)*Gz(x,y,z)-dm*w)/m
    return np.array([u,v,w,a_x,a_y,a_z,dm])

t = np.linspace(0,10000,n_iter)
s_0 = np.array([1,0,0,200/np.sqrt(3),200/np.sqrt(3),200/np.sqrt(3),m_0]) #[x,y,z,u,v,w,m]
s = integrate(D,s_0,t)
x,y,z = np.transpose(s[:,:3])
u,v,w = np.transpose(s[:,3:-1])
m = np.transpose(s[:,-1])
r = (x*x+y*y+z*z)**0.5

pylab.plot(t,r)
pylab.xlabel('$t$')
pylab.ylabel('$r(t)$')
pylab.savefig('r.png',dpi=200)
pylab.show()

r = 1.5e4
theta = np.linspace(0,2*np.pi,100)

pylab.plot(x,z)
Example No. 34
 def g(self, v):
     integrand = lambda vPrime: self.B(vPrime) / self.integralRadical(
         v, vPrime)
     return integrate(integrand, -0.5, v - self.delta)[0] + (
         self.B(v) * self.correctionFactor(v))
Example No. 35
def main():

    import numpy as np
    import numpy
    from scipy.integrate import quad as integrate

    # Parameters in script
    # --------------------------------------------------------------------------
    clouds = (
        'taurus',
        'perseus',
    )  # 'california')
    dec_ranges = ((35, 20), (35, 24), (44, 35))
    ra_ranges = ((80, 60), (60, 40), (70, 40))
    tspin_threshold = 200.0

    # Data locations in script
    # --------------------------------------------------------------------------
    filedir = '/d/bip3/ezbc/multicloud/data/cnm_data/heiles03/'

    # Read the data
    source_table = read_data(filedir + 'heiles03_sources.tsv')
    param_table = read_data(filedir + 'heiles03_fit_params.tsv')

    # Extract headers and data
    source_cols = source_table.colnames
    source_data = np.asarray(source_table._data)
    param_cols = param_table.colnames
    param_data = np.asarray(param_table._data)

    # Extract source names and RA / Dec
    source_decs = np.empty(source_data.shape)
    source_ras = np.empty(source_data.shape)
    source_names = ['' for x in range(source_data.shape[0])]
    source_names = np.empty(source_data.shape, dtype=object)

    for i, source in enumerate(source_data):
        source_decs[i] = source[source_cols.index('_DE.icrs')]
        source_ras[i] = source[source_cols.index('_RA.icrs')]
        source_names[i] = source[source_cols.index('Name')]

    # Find T_cnm temperatures for each cloud
    cloud_counts_list = []
    cloud_bins_list = []
    cloud_t_cnm_lists = []
    weights_list = []

    for i, cloud in enumerate(clouds):
        dec_range = dec_ranges[i]
        ra_range = ra_ranges[i]
        # Choose only sources within RA and Dec range
        indices = np.where((source_decs > dec_range[1]) &\
                           (source_decs < dec_range[0]) &\
                           (source_ras > ra_range[1]) &\
                           (source_ras < ra_range[0])
                           )[0]

        cloud_decs = source_decs[indices]
        cloud_ras = source_ras[indices]
        cloud_sources = source_names[indices]

        # Get the spin temperatures of chosen sources
        t_cnm_list = []
        cloud_t_cnm_list = []
        source_list = []
        weights = []

        for j in xrange(len(param_data)):

            tspin = param_data[j][param_cols.index('Tspin')]
            tau_error = param_data[j][param_cols.index('e_tau')]
            if param_data[j][param_cols.index('Name')] in cloud_sources:
                if tspin < tspin_threshold and tau_error != 0.0:
                    cloud_t_cnm_list.append(tspin)

                    Tpeak = param_data[j][param_cols.index('Tpeak')]
                    DelV = param_data[j][param_cols.index('DelV')]
                    VLSR = param_data[j][param_cols.index('VLSR')]

                    T_integ = integrate(gaussian,
                                        -1000,
                                        1000,
                                        args=(Tpeak, DelV / 2.0, VLSR))

                    weights.append(T_integ[0])

            if tspin < tspin_threshold:
                t_cnm_list.append(tspin)

        if cloud == 'perseus':
            bins = 3
        else:
            bins = 8

        cloud_counts, cloud_bins = np.histogram(cloud_t_cnm_list, bins=bins)
        cloud_counts = cloud_counts / np.sum(cloud_counts, dtype=numpy.float)
        cloud_counts = np.append(cloud_counts, 0)

        cloud_counts_list.append(cloud_counts)
        cloud_bins_list.append(cloud_bins)
        cloud_t_cnm_lists.append(cloud_t_cnm_list)
        weights_list.append(weights)

        print cloud, np.median(cloud_t_cnm_list), 'Median CNM temp [K]'
        print cloud, np.average(
            cloud_t_cnm_list,
            weights=weights), 'Intensity-weighted mean CNM temp [K]'

    print weights, cloud_t_cnm_list

    global_counts, global_bins = np.histogram(t_cnm_list, bins=20)
    global_counts = global_counts / np.sum(global_counts, dtype=numpy.float)
    global_counts = np.append(global_counts, 0)

    # Plot the spin temperatures
    plot_spin_temps(cloud_counts_list, cloud_bins_list=cloud_bins_list,
            global_spin_temps=global_counts, global_bins=global_bins,
            clouds=clouds,
            filename=\
                '/d/bip3/ezbc/multicloud/figures/heiles03_spin_temp_hist.png')
Example No. 36
import matplotlib.pyplot as plt
from math import e
from numpy import array,sin,cos,exp
from numpy import arange
from sympy import *
from scipy.integrate import quad
from scipy.integrate import odeint  # note: aliasing odeint to 'integrate' is invalid syntax and would shadow sympy's integrate used below
x = Symbol('x')

y1 = cos(x)
y2 = sin(x)
gx = 1 + tan(x)
dy1 = y1.diff(x)
dy2 = y2.diff(x)

W = y1*dy2 - y2*dy1
W = simplify(W)

du1 = (y2*gx)/W

print du1
u1 = integrate(du1)
##func = lambda du1 : du1
##u1 = quad(func)

print u1
Example No. 37
 def w(z):  
     s, phi = z
     integrand = lambda u: Vf(np.maximum(G(x, phi), u)) * F.pdf(u)
     integral, err = integrate(integrand, a, b)
     q = pi(s) * integral + (1 - pi(s)) * Vf(G(x, phi))
     return - x * (1 - phi - s) - beta * q  # minus because we minimize
Example No. 38
File: Ewald.py Project: etano/pagen
    def OptimizedBreakup(self):
        print 'Performing optimized Ewald breakup...'

        # Read potential and spline it
        print '...reading potential...'
        data = np.loadtxt(self.prefix+'_sq_'+self.object_string+'_diag.dat')
        rs,r_min,r_max = data[:,0],data[0,0],data[-1,0]
        vs = data[:,1]
        v_r_spline = spline(rs, vs)

        # Set up basis
        print '...forming LPQHI basis...'
        basis = LPQHIBasis(self.L,self.r_cut,self.n_d,self.n_knots)

        # Extend k space to continuum
        print '...extending k space...'
        k_cont = 50.*self.k_avg
        k_max = 50.*pi/basis.delta
        self.ExtendKs(k_cont,k_max)
        n_k = len(self.opt_mag_ks)
        n_r = basis.GetNElements()

        # Determine r max from tolerances
        v_tol = 1.e-4 # TODO: This is fixed
        v_c = self.cofactor*self.z_1_z_2/r_max
        if (abs(v_r_spline(r_max) - v_c) > v_tol):
            print 'WARNING: |v(r_max) - v_{c}(r_max)| = ',abs(v_r_spline(r_max) - v_c),'>',v_tol,'with r_max =',r_max,' v(r_max) = ',v_r_spline(r_max),' v_{c}(r_max) = ',v_c
        else:
            i = 1
            r_max = rs[i]
            while (abs(v_r_spline(r_max) - self.cofactor*self.z_1_z_2/r_max) > v_tol) and (i+1 < len(rs)-1):
                i += 1
                r_max = rs[i]
        if (r_max < self.r_cut):
            r_max = self.r_cut
        v_c = self.cofactor*self.z_1_z_2/r_max
        print '...setting r_max = ',r_max,'with |v(r_max) - v_{c}(r_max)| <',v_tol,'...'

        # Calculate x_k
        print '...calculating Xk...'
        x_k = []
        tot_x_k = 0.
        percent_i = 1
        for k_i in range(n_k):
            k = self.opt_mag_ks[k_i][0]
            x_k.append(self.CalcXk(v_r_spline, self.r_cut, k, r_max)/self.vol)
            if (float(k_i)/float(n_k)) > percent_i*0.1:
                print '......', percent_i*10, '% complete...'
                percent_i += 1
            tot_x_k += x_k[k_i]

        # Fill in c_n_k
        print '...filling in c_n_k...'
        c_n_k = np.zeros((n_r,n_k))
        for n in range(n_r):
            for k_i in range(n_k):
                c_n_k[n,k_i] = basis.c(n,self.opt_mag_ks[k_i][0])

        # Fill in A and b
        print '...filling in A and b...'
        A = np.zeros((n_r,n_r))
        b = np.zeros((n_r))
        for l in range(n_r):
            for k_i in range(n_k):
                b[l] += self.opt_mag_ks[k_i][1] * x_k[k_i] * c_n_k[l,k_i]
                for n in range(n_r):
                    A[l,n] += self.opt_mag_ks[k_i][1] * c_n_k[l,k_i] * c_n_k[n,k_i]

        # Add constraints
        t = np.zeros((n_r))
        adjust = np.ones((n_r)) # TODO: Currently no constraints

        # Reduce for constraints
        n_r_c = n_r
        for i in range(n_r):
            if not adjust[i]:
                n_r_c -= 1

        # Build constrained A_c and b_c
        A_c = np.zeros((n_r_c,n_r_c))
        b_c = np.zeros((n_r_c))
        j = 0
        for col in range(n_r):
            if adjust[col]:
                i = 0
                for row in range(n_r):
                    if adjust[row]:
                        A_c[i,j] = A[row,col]
                        i += 1
                j += 1
            else:
                for row in range(n_r):
                    b[row] -= A[row,col]*t[col]
        j = 0
        for row in range(n_r):
            if adjust[row]:
                b_c[j] = b[row]
            j += 1

        # Do SVD
        print '...performing SVD...'
        U, S, V = np.linalg.svd(A_c, full_matrices=True)

        # Get maximum value in S
        s_max = S[0]
        for i in range(1,n_r_c):
            s_max = max(S[i],s_max)

        # Check for negative singular values
        for i in range(n_r_c):
            if S[i] < 0.:
                print 'WARNING: Negative singular value.'

        # Assign inverse S
        breakup_tol = 1.e-16
        i_S = np.zeros((n_r_c))
        n_singular = 0
        for i in range(n_r_c):
            if (S[i] < breakup_tol*s_max):
                i_S[i] = 0.
            else:
                i_S[i] = 1./S[i]
            if (i_S[i] == 0.):
                n_singular += 1
        if (n_singular > 0):
            print 'WARNING: There were',n_singular,'singular values.'

        # Compute t_n, removing singular values
        t_c = np.zeros((n_r_c))
        for i in range(n_r_c):
            coef = 0.
            for j in range(n_r_c):
                coef += U[j,i]*b_c[j]
            coef *= i_S[i]
            for k in range(n_r_c):
                t_c[k] += coef*V[i,k]

        # Copy t_c values into t
        j = 0
        for i in range(n_r):
            if adjust[i]:
                t[i] = t_c[j]
                j += 1

        # Calculate chi-squared
        chi_2 = 0.
        for k_i in range(n_k):
            y_k = x_k[k_i]
            for n in range(n_r):
                y_k -= c_n_k[n,k_i]*t[n]
            chi_2 += self.opt_mag_ks[k_i][1]*y_k*y_k
        print '...chi^2 = ', chi_2,'...'

        # Compose real space part
        print '...composing real space part...'
        v_l_0 = 0.
        for n in range(n_r):
            v_l_0 += t[n]*basis.h(n,0.)
        v_l = np.zeros((self.n_points))
        for i in range(self.n_points):
            r = self.rs[i]
            if (r <= self.r_cut):
                for n in range(n_r):
                    v_l[i] += t[n]*basis.h(n,r)
            else:
                v_l[i] = v_r_spline(r)
        v_l_spline = spline(self.rs, v_l)

        # Get k=0 components (short)
        print '...computing k=0 components...'
        def v_short_integrand(r):
            if r < self.r_min:
                r = self.r_min
            return r*r*(v_r_spline(r) - v_l_spline(r))
        f_v_s_0 = -integrate(v_short_integrand, 1.e-100, self.r_cut, divmax=100)
        if (self.n_d == 2):
            f_v_s_0 *= 2.*pi/self.vol # FIXME: Probably wrong for 2D
        elif (self.n_d == 3):
            f_v_s_0 *= 4.*pi/self.vol

        # Get k=0 components (long)
        def v_long_integrand(r):
            if r < self.r_min:
                r = self.r_min
            return r*r*v_l_spline(r)
        f_v_l_0 = -integrate(v_long_integrand, 1.e-100, self.r_cut, divmax=100)
        if (self.n_d == 2):
            f_v_l_0 *= 2.*pi/self.vol # FIXME: Probably wrong for 2D
        elif (self.n_d == 3):
            f_v_l_0 *= 4.*pi/self.vol

        # Write r space part to file
        f = open(self.prefix+'_sq_'+self.object_string+'_diag_r.dat','w')
        f.write('%.10E %.10E\n'%(0.,v_l_0))
        for i in range(self.n_points):
            r = self.rs[i]
            f.write('%.10E %.10E\n'%(r,v_l[i]))
        f.close()

        # Compose k space part
        f = open(self.prefix+'_sq_'+self.object_string+'_diag_k.dat','w')
        f.write('%.10E %.10E\n'%(0.,f_v_s_0))
        f_v_ls = []
        for k in self.ks:
            k_2 = np.dot(k,k)
            mag_k = np.sqrt(k_2)
            f_v_l = 0.
            for n in range(n_r):
                f_v_l += t[n]*basis.c(n,mag_k)
            f_v_l -= self.CalcXk(v_r_spline, self.r_cut, mag_k, self.r_max)/self.vol
            f_v_ls.append([mag_k, f_v_l])
        mag_k_prev = -1
        for [mag_k,f_v_l] in sorted(f_v_ls):
            if abs(mag_k-mag_k_prev) > 1.e-8:
                f.write('%.10E %.10E\n'%(mag_k,f_v_l))
            mag_k_prev = mag_k
        f.close()
Example No. 39
def continuous_entropy(p, start, end):
    answer, err = integrate(lambda x: p(x) * np.log(p(x)), start, end)
    return -answer
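
For instance (a quick check, not in the original): a uniform density p(x) = 1/(b - a) has differential entropy log(b - a), assuming numpy as np and quad as integrate in scope:

import numpy as np
from scipy.integrate import quad as integrate

a, b = 0.0, 4.0
p = lambda x: 1.0 / (b - a)
print(continuous_entropy(p, a, b))  # ~log(4) ~ 1.386
print(np.log(b - a))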
Example No. 40
    if disttype != 3:
        #comoving distance out to scale factor a0: integral(da'/(a'^2 H(a')), a0, 1)
        #H^2 a^4 = omegaR + omegaM a + omegaL a^4 + omegaK a^2
        def integrand(a, H0, R, M, L, K):  #1/(a^2 H)
            return (R + M * a + L * a**4 + K * a**2)**-0.5 / H0
    else:
        #lookback time
        def integrand(a, H0, R, M, L, K):  #1/(a^2 H)
            return a * (R + M * a + L * a**4 + K * a**2)**-0.5 / H0

    if isSequenceType(a0):
        integratevec = vectorize(
            lambda x: integrate(integrand,
                                x,
                                1,
                                args=(H0, omegaR, omegaM, omegaL, omegaK),
                                **intkwargs))
        res = integratevec(a0)
        intres, interr = res[0], res[1]
        try:
            if np.any(interr / intres > inttol):
                raise Exception(
                    'Integral fractional error for one of the integrals is beyond tolerance'
                )
        except ZeroDivisionError:
            pass

    else:
        res = integrate(integrand,
                        a0,
Example No. 41
def main():

    import numpy as np
    import numpy
    from scipy.integrate import quad as integrate
    import pickle

    # Parameters in script
    # --------------------------------------------------------------------------
    clouds = ('taurus', 'perseus', 'california')
    dec_ranges = ((37, 19), (37, 19), (37, 35))
    ra_ranges = ((5.5, 3.8), (3.8, 2.9), (5.5, 3.5))
    tspin_threshold = 500.0

    # Data locations in script
    # --------------------------------------------------------------------------
    filedir = '/d/bip3/ezbc/multicloud/data/cnm_data/stanimirovic14/'

    # Read the data
    source_table = read_data(filedir + 'stanimirovic14_sources.txt')
    param_table = read_data(filedir + 'stanimirovic14_temps.txt')
    avg_temp_table = read_data(filedir + 'stanimirovic14_avg_temps.txt',
                               data_start=0,
                               delimiter=' ')

    # Extract headers and data
    source_cols = source_table.colnames
    source_data = np.asarray(source_table._data)
    param_cols = param_table.colnames
    param_data = np.asarray(param_table._data)

    print('cnm temp columns', param_cols)
    print('avg temp columns', avg_temp_table.colnames)
    #avg_temp_cols = np.asarray(avg_temp_table.colnames)
    #avg_temp_data = np.asarray(avg_temp_table._data)

    avg_temp_sources = []
    avg_temp_data = []
    for row in avg_temp_table._data:
        avg_temp_sources.append(row[0])
        avg_temp_data.append(row[1])
    avg_temp_data = np.asarray(avg_temp_data)
    avg_temp_sources = np.asarray(avg_temp_sources)

    # Extract source names and RA / Dec
    source_decs = np.empty(source_data.shape)
    source_ras = np.empty(source_data.shape)
    source_names = ['' for x in range(source_data.shape[0])]
    source_names = np.empty(source_data.shape, dtype=object)

    for i, source in enumerate(source_data):
        source_dec = source[source_cols.index('Decl. (J2000)')]
        source_decs[i] = float(source_dec.split(':')[0])
        source_ra = source[source_cols.index('R.A. (J2000)')]
        source_ras[i] = float(source_ra.split(':')[0])
        source_names[i] = source[source_cols.index('Source')]

    # Find T_cnm temperatures for each cloud
    cloud_counts_list = []
    cloud_bins_list = []
    cloud_t_cnm_lists = []
    weights_list = []
    temp_dict = {}

    sources_taurus = [
        '4C+27.14',
        '3C133',
        '3C132',
        '4C+25.14',
        '3C108',
        'B20400+25',
    ]

    for i, cloud in enumerate(clouds):
        temp_dict[cloud] = {}

        dec_range = dec_ranges[i]
        ra_range = ra_ranges[i]

        # Choose only sources within RA and Dec range
        indices = np.where((source_decs >= dec_range[1]) &\
                           (source_decs <= dec_range[0]) &\
                           (source_ras >= ra_range[1]) &\
                           (source_ras <= ra_range[0])
                           )

        cloud_decs = source_decs[indices]
        cloud_ras = source_ras[indices]
        cloud_sources = source_names[indices]

        # Get the spin temperatures of chosen sources
        t_cnm_list = []
        t_int_list = []
        tk_list = []
        tk_error_list = []
        t_cnm_error_list = []
        avg_tspin_list = []
        avg_tspin_error_list = []
        cloud_t_cnm_list = []
        source_list = []
        weights = []
        TB_dict = {}
        tau_dict = {}
        velocities = np.linspace(-1000, 1000, 1000)

        for j in xrange(len(param_data)):
            tspin = param_data[j][param_cols.index('Ts')]
            TB = param_data[j][param_cols.index('TB')]
            tau = param_data[j][param_cols.index('tau')]
            tspin_error = param_data[j][param_cols.index('e_Ts')]
            source = param_data[j][param_cols.index('Source')]

            Tpeak = param_data[j][param_cols.index('TB')]
            Tk = param_data[j][param_cols.index('Tkmax')]
            Tk_error = Tk * 0.1
            DelV = param_data[j][param_cols.index('DelV')]
            VLSR = param_data[j][param_cols.index('VLSR')]

            if source in cloud_sources:
                if source not in source_list:
                    avg_temp_added = False
                else:
                    avg_temp_added = True
                source_list.append(source)

                #print source

                # add the source avg Tspin
                if 1:
                    #if not avg_temp_added:
                    print ''
                    print source
                    print 'TB:', TB
                    print 'tau:', tau
                    print 'T_kinetic:', Tk
                    print 'sigma:', DelV / 2.0
                    print 'vlsr:', VLSR
                    avg_temp = \
                        avg_temp_data[avg_temp_sources == source][0]

                    # for error see section 3.1 of Kim et al. (2014) who
                    # describe the harmonic mean temperature is within 10% of
                    # the observed optical depth weighted spin temperature. This
                    # may be due to multiple clouds with varying optical depths
                    # along the LOS.
                    avg_temp_error = 0.1 * avg_temp

                    avg_tspin_list.append(avg_temp)
                    avg_tspin_error_list.append(avg_temp_error)

                #avg_temp = np.mean(TB) / np.mean(tau)
                else:

                    # Calculate brightness temp and tau spectra
                    TB_spectrum = gaussian(velocities, TB, DelV / 2.0, VLSR)
                    tau_spectrum = gaussian(velocities, tau, DelV / 2.0, VLSR)

                    # add them to the source list
                    if source not in TB_dict:
                        TB_dict[source] = TB_spectrum
                        tau_dict[source] = tau_spectrum
                    else:
                        TB_dict[source] = \
                            TB_dict[source] + TB_spectrum
                        tau_dict[source] = \
                            tau_dict[source] + tau_spectrum

                #avg_tspin_list.append(avg_temp)

                if tspin < tspin_threshold and tspin_error != 0.0:
                    cloud_t_cnm_list.append(tspin)

                    T_integ = integrate(gaussian,
                                        -1000,
                                        1000,
                                        args=(Tpeak, DelV / 2.0, VLSR))

                    #weights.append(T_integ[0])

                    if tspin < tspin_threshold and VLSR < 15 and VLSR > -5:
                        t_cnm_list.append(tspin)
                        t_cnm_error_list.append(tspin_error)
                        t_int_list.append(Tk)

                    if source in sources_taurus:
                        print source, tspin

                if VLSR < 15 and VLSR > -5:
                    tk_list.append(Tk)
                    tk_error_list.append(Tk_error)

        # convert TB and tau to average spin temperature
        #avg_tspin_list = calc_avg_Ts(TB_dict, tau_dict, velocities)

        #print 'avg t spins:', avg_tspin_list

        temp_dict[cloud]['Ts_list'] = t_cnm_list
        temp_dict[cloud]['Ts_error_list'] = t_cnm_error_list
        #avg_tspin_list = []
        #for source in TB_dict:
        #    avg_tspin_list.append(TB_dict[source])
        temp_dict[cloud]['Ts_avg_list'] = avg_tspin_list
        temp_dict[cloud]['Ts_avg_error_list'] = avg_tspin_error_list
        temp_dict[cloud]['Tk_list'] = tk_list
        temp_dict[cloud]['Tk_error_list'] = tk_error_list

        if 0:
            if cloud == 'perseus':
                bins = 3
            else:
                bins = 8
            cloud_counts, cloud_bins = np.histogram(cloud_t_cnm_list,
                                                    bins=bins)
            cloud_counts = cloud_counts / np.sum(cloud_counts,
                                                 dtype=numpy.float)
            cloud_counts = np.append(cloud_counts, 0)

            cloud_counts_list.append(cloud_counts)
            cloud_bins_list.append(cloud_bins)
            cloud_t_cnm_lists.append(cloud_t_cnm_list)
            weights_list.append(weights)

            avg = np.average(cloud_t_cnm_list, weights=weights)
            med = np.median(cloud_t_cnm_list)

            print(cloud.capitalize())
            print('\tMedian CNM temp = ' + \
                  '{0:.2f} [K]'.format(med))
            print('\tIntensity-weighted mean CNM temp = ' + \
                  '{0:.2f} [K]\n'.format(avg))

    # Save data
    with open('/d/bip3/ezbc/multicloud/data/python_output/tables/' + \
              'stanimirovic14_temps.pickle', 'wb') as f:
        pickle.dump(temp_dict, f)

    filename = \
            '/d/bip3/ezbc/multicloud/data/python_output/tables/' + \
            'stanimirovic14_temps.npy'
    np.savetxt(filename, np.array(t_cnm_list))
    filename = \
            '/d/bip3/ezbc/multicloud/data/python_output/tables/' + \
            'stanimirovic14_int_temps.npy'
    np.savetxt(filename, np.array(t_int_list))
    filename = \
            '/d/bip3/ezbc/multicloud/data/python_output/tables/' + \
            'stanimirovic14_temp_errors.npy'
    np.savetxt(filename, np.array(t_cnm_error_list))
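
The gaussian model integrated in main() above is not defined in this snippet; a minimal stand-in (an assumption, matching the gaussian(velocities, TB, DelV / 2.0, VLSR) calls) would be:

import numpy as np

def gaussian(x, amplitude, sigma, center):
    # Hypothetical helper: Gaussian profile with the (x, amplitude,
    # sigma, center) argument order used by the calls above.
    return amplitude * np.exp(-(x - center)**2 / (2.0 * sigma**2))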
Example n. 42
0
 def test_gauss_pdf_integral(self):
     ''' Check that Gauss pdf integral is 1 '''
     result, _ = integrate(pylandau.get_gauss_pdf,
                           0, 10000, args=(10, 3))
     self.assertAlmostEqual(result, 1, delta=1e-3)
Example n. 43
0
import numpy as np
from scipy.integrate import quad as integrate
import matplotlib.pyplot as plt

a, b = 1, 2
p = lambda x: 1
f = lambda x: 0.8 * np.log(x) + 0.6 * np.cos(x)

basis = [(lambda power: lambda x: x**power)(power) for power in range(6)]

A = np.array([[
    integrate(lambda value: p(value) * x(value) * y(value), a, b)[0]
    for x in basis
] for y in basis])
B = np.array([
    integrate(lambda value: p(value) * x(value) * f(value), a, b)[0]
    for x in basis
])
coefs = np.linalg.solve(A, B)


def g(x):
    total = 0
    for i in range(len(coefs)):
        total += coefs[i] * basis[i](x)
    return total


xRange = np.linspace(a, b, (b - a) * 100)
print("Accuracy: %e" % np.max(np.abs(f(xRange) - g(xRange))))
Example n. 44
0
#!/usr/bin/env python

import math
from scipy.integrate import quad as integrate

if __name__ == '__main__':
    print(integrate(lambda x: math.exp(-x)*(2.99792458e18/(x*x)), 1.0, 2.0))
    print(integrate(lambda x: 2.0*(math.sin(10.0*x) + 1.0), 1.0, 2.0))
Example n. 45
0
def other(ks, n1, n2=float('inf')):
    # The probability of the two distributions taking on the necessary
    # values. Assumes normal_pdf(x, mu, var) and np are in scope.
    prob_val = lambda val, truth: (normal_pdf(val, truth, truth*(1-truth)) *
                                   normal_pdf(val+ks, truth, truth*(1-truth)))
    prob_truth = lambda truth: integrate(lambda v: prob_val(v, truth), 0, 1-ks)[0]
    return 2 * prob_val(.5 - (ks/2), .5) / np.sqrt(n1*n2)
def main():

    import numpy as np
    from scipy.integrate import quad as integrate
    import pickle

    # Parameters in script
    # --------------------------------------------------------------------------
    clouds = ('taurus', 'perseus', 'california')
    dec_ranges = ((37, 19), (37, 19), (37, 35))
    ra_ranges = ((5.5, 3.8), (3.8, 2.9), (5.5, 3.5))
    tspin_threshold = 500.0

    # Data locations in script
    # --------------------------------------------------------------------------
    filedir = '/d/bip3/ezbc/multicloud/data/cnm_data/stanimirovic14/'

    # Read the data
    source_table = read_data(filedir + 'stanimirovic14_sources.txt')
    param_table = read_data(filedir + 'stanimirovic14_temps.txt')
    avg_temp_table = read_data(filedir + 'stanimirovic14_avg_temps.txt',
                               data_start=0,
                               delimiter=' ')

    # Extract headers and data
    source_cols = source_table.colnames
    source_data = np.asarray(source_table._data)
    param_cols = param_table.colnames
    param_data = np.asarray(param_table._data)


    print('cnm temp columns', param_cols)
    print('avg temp columns', avg_temp_table.colnames)
    #avg_temp_cols = np.asarray(avg_temp_table.colnames)
    #avg_temp_data = np.asarray(avg_temp_table._data)

    avg_temp_sources = []
    avg_temp_data = []
    for row in avg_temp_table._data:
        avg_temp_sources.append(row[0])
        avg_temp_data.append(row[1])
    avg_temp_data = np.asarray(avg_temp_data)
    avg_temp_sources = np.asarray(avg_temp_sources)

    # Extract source names and RA / Dec
    source_decs = np.empty(source_data.shape)
    source_ras = np.empty(source_data.shape)
    source_names = np.empty(source_data.shape, dtype=object)

    for i, source in enumerate(source_data):
        source_dec = source[source_cols.index('Decl. (J2000)')]
        source_decs[i] = float(source_dec.split(':')[0])
        source_ra = source[source_cols.index('R.A. (J2000)')]
        source_ras[i] = float(source_ra.split(':')[0])
        source_names[i] = source[source_cols.index('Source')]

    # Find T_cnm temperatures for each cloud
    cloud_counts_list = []
    cloud_bins_list = []
    cloud_t_cnm_lists = []
    weights_list = []
    temp_dict = {}


    sources_taurus = ['4C+27.14',
                      '3C133',
                      '3C132',
                      '4C+25.14',
                      '3C108',
                      'B20400+25',]

    for i, cloud in enumerate(clouds):
        temp_dict[cloud] = {}

        dec_range = dec_ranges[i]
        ra_range = ra_ranges[i]

        # Choose only sources within RA and Dec range
        indices = np.where((source_decs >= dec_range[1]) &
                           (source_decs <= dec_range[0]) &
                           (source_ras >= ra_range[1]) &
                           (source_ras <= ra_range[0])
                           )

        cloud_decs = source_decs[indices]
        cloud_ras = source_ras[indices]
        cloud_sources = source_names[indices]

        # Get the spin temperatures of chosen sources
        t_cnm_list = []
        t_int_list = []
        tk_list = []
        tk_error_list = []
        t_cnm_error_list = []
        avg_tspin_list = []
        avg_tspin_error_list = []
        cloud_t_cnm_list = []
        source_list = []
        weights = []
        TB_dict = {}
        tau_dict = {}
        velocities = np.linspace(-1000, 1000, 1000)

        for j in range(len(param_data)):
            tspin = param_data[j][param_cols.index('Ts')]
            TB = param_data[j][param_cols.index('TB')]
            tau = param_data[j][param_cols.index('tau')]
            tspin_error = param_data[j][param_cols.index('e_Ts')]
            source = param_data[j][param_cols.index('Source')]

            Tpeak = param_data[j][param_cols.index('TB')]
            Tk = param_data[j][param_cols.index('Tkmax')]
            Tk_error = Tk * 0.1
            DelV = param_data[j][param_cols.index('DelV')]
            VLSR = param_data[j][param_cols.index('VLSR')]

            if source in cloud_sources:
                if source not in source_list:
                    avg_temp_added = False
                else:
                    avg_temp_added = True
                source_list.append(source)

                #print source

                # add the source avg Tspin
                if 1:
                #if not avg_temp_added:
                    print('')
                    print(source)
                    print('TB:', TB)
                    print('tau:', tau)
                    print('T_kinetic:', Tk)
                    print('sigma:', DelV / 2.0)
                    print('vlsr:', VLSR)
                    avg_temp = \
                        avg_temp_data[avg_temp_sources == source][0]

                    # For the error, see Section 3.1 of Kim et al. (2014),
                    # who show that the harmonic mean temperature is within
                    # 10% of the observed optical-depth-weighted spin
                    # temperature. This may be due to multiple clouds with
                    # varying optical depths along the LOS.
                    avg_temp_error = 0.1 * avg_temp

                    avg_tspin_list.append(avg_temp)
                    avg_tspin_error_list.append(avg_temp_error)

                #avg_temp = np.mean(TB) / np.mean(tau)
                else:

                    # Calculate brightness temp and tau spectra
                    TB_spectrum = gaussian(velocities, TB, DelV/2.0, VLSR)
                    tau_spectrum = gaussian(velocities, tau, DelV/2.0, VLSR)

                    # add them to the source list
                    if source not in TB_dict:
                        TB_dict[source] = TB_spectrum
                        tau_dict[source] = tau_spectrum
                    else:
                        TB_dict[source] = \
                            TB_dict[source] + TB_spectrum
                        tau_dict[source] = \
                            tau_dict[source] + tau_spectrum

                #avg_tspin_list.append(avg_temp)

                if tspin < tspin_threshold and tspin_error != 0.0:
                    cloud_t_cnm_list.append(tspin)

                    T_integ = integrate(gaussian,
                                        -1000,
                                        1000,
                                        args=(Tpeak,
                                              DelV/2.0,
                                              VLSR))

                    #weights.append(T_integ[0])

                    if tspin < tspin_threshold and VLSR < 15 and VLSR > -5:
                        t_cnm_list.append(tspin)
                        t_cnm_error_list.append(tspin_error)
                        t_int_list.append(Tk)

                    if source in sources_taurus:
                        print(source, tspin)

                if VLSR < 15 and VLSR > -5:
                    tk_list.append(Tk)
                    tk_error_list.append(Tk_error)

        # convert TB and tau to average spin temperature
        #avg_tspin_list = calc_avg_Ts(TB_dict, tau_dict, velocities)

        #print 'avg t spins:', avg_tspin_list

        temp_dict[cloud]['Ts_list'] = t_cnm_list
        temp_dict[cloud]['Ts_error_list'] = t_cnm_error_list
        #avg_tspin_list = []
        #for source in TB_dict:
        #    avg_tspin_list.append(TB_dict[source])
        temp_dict[cloud]['Ts_avg_list'] = avg_tspin_list
        temp_dict[cloud]['Ts_avg_error_list'] = avg_tspin_error_list
        temp_dict[cloud]['Tk_list'] = tk_list
        temp_dict[cloud]['Tk_error_list'] = tk_error_list

        if 0:
            if cloud == 'perseus':
                bins = 3
            else:
                bins = 8
            cloud_counts, cloud_bins = np.histogram(cloud_t_cnm_list, bins=bins)
            cloud_counts = cloud_counts / np.sum(cloud_counts, dtype=float)
            cloud_counts = np.append(cloud_counts, 0)

            cloud_counts_list.append(cloud_counts)
            cloud_bins_list.append(cloud_bins)
            cloud_t_cnm_lists.append(cloud_t_cnm_list)
            weights_list.append(weights)

            avg = np.average(cloud_t_cnm_list,
                             weights=weights)
            med = np.median(cloud_t_cnm_list)

            print(cloud.capitalize())
            print('\tMedian CNM temp = ' + \
                  '{0:.2f} [K]'.format(med))
            print('\tIntensity-weighted mean CNM temp = ' + \
                  '{0:.2f} [K]\n'.format(avg))

    # Save data
    with open('/d/bip3/ezbc/multicloud/data/python_output/tables/' + \
              'stanimirovic14_temps.pickle', 'wb') as f:
        pickle.dump(temp_dict, f)

    filename = \
            '/d/bip3/ezbc/multicloud/data/python_output/tables/' + \
            'stanimirovic14_temps.npy'
    np.savetxt(filename, np.array(t_cnm_list))
    filename = \
            '/d/bip3/ezbc/multicloud/data/python_output/tables/' + \
            'stanimirovic14_int_temps.npy'
    np.savetxt(filename, np.array(t_int_list))
    filename = \
            '/d/bip3/ezbc/multicloud/data/python_output/tables/' + \
            'stanimirovic14_temp_errors.npy'
    np.savetxt(filename, np.array(t_cnm_error_list))
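
read_data is not defined in this snippet; a minimal stand-in (an assumption, inferred from the .colnames and ._data attributes used above) wraps astropy's ASCII reader:

def read_data(filename, **kwargs):
    # Hypothetical helper: read an ASCII table into an astropy Table,
    # forwarding options such as data_start and delimiter.
    from astropy.io import ascii
    return ascii.read(filename, **kwargs)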
Example n. 47
0
 def w(z):
     s, phi = z
     integrand = lambda u: Vf(np.maximum(G(x, phi), u)) * F.pdf(u)
     integral, err = integrate(integrand, a, b)
     q = pi(s) * integral + (1 - pi(s)) * Vf(G(x, phi))
     return -x * (1 - phi - s) - beta * q  # minus because we minimize
Example n. 48
0
def _false_negative_probability(threshold, b, r):
    def _probability(s):
        return 1 - (1 - (1 - s**float(r))**float(b))
    a, err = integrate(_probability, threshold, 1.0)
    return a
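
A brief usage sketch (assuming the function above is in scope and quad is imported as integrate, as elsewhere in these snippets): for a MinHash LSH scheme with 20 bands of 5 rows, the integral estimates the probability mass of pairs above the similarity threshold that the banding scheme would miss.

fn = _false_negative_probability(0.5, b=20, r=5)
print('False-negative area above threshold: %.4f' % fn)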
Example n. 49
0
#!/usr/bin/python

# 2c

import numpy as np
from math import pi,exp
from scipy.integrate import quad as integrate

# SI units
e = 1.602e-19
me = 9.109e-31
c = 2.998e8
h = 6.626e-34
k = 1.38e-23
T = 5e3 * e / k
energy_low = 0.5e3 * e
energy_high = 3e3 * e
nu_low = energy_low / h
nu_high = energy_high / h
r = 1.52e22

constant = 4*pi*r**2 * 4*pi*8*e**6/(h*3*me*c**3) * (2/(pi*k*me))**0.5 * T**-0.5

def integrand(nu, T):
    # Photon-count integrand: exponential bremsstrahlung cutoff divided
    # by the photon frequency
    return exp(-h*nu/(k*T)) / nu

result = integrate(integrand, nu_low, nu_high, args=(T,))[0]

# Scale by the constant prefactor and report the result
print(constant * result)

Example n. 50
0
def _false_positive_probability(threshold, b, r):
    _probability = lambda s : 1 - (1 - s**float(r))**float(b)
    a, err = integrate(_probability, 0.0, threshold) 
    return a
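
Together with _false_negative_probability above, this integral can drive a datasketch-style parameter search; a hedged sketch (assuming both helpers are in scope):

def _optimal_param(threshold, num_perm, fp_weight=0.5, fn_weight=0.5):
    # Try every (b, r) with b * r <= num_perm and keep the pair that
    # minimizes the weighted sum of false-positive and false-negative
    # areas.
    min_error = float('inf')
    opt = (0, 0)
    for b in range(1, num_perm + 1):
        for r in range(1, num_perm // b + 1):
            fp = _false_positive_probability(threshold, b, r)
            fn = _false_negative_probability(threshold, b, r)
            error = fp_weight * fp + fn_weight * fn
            if error < min_error:
                min_error = error
                opt = (b, r)
    return opt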
Example n. 51
0
def calc_symmetric_error(x, y=None, alpha=0.05):

    '''
    Calculates the median of a distribution and the lower and upper errors
    enclosing a (1 - alpha) confidence interval.

    Parameters
    ----------
    x : array-like
        Sample values, or the abscissa of the PDF if y is provided.
    y : array-like, optional
        If provided, treated as the PDF of x.
    alpha : float
        Significance level; the interval encloses a fraction (1 - alpha)
        of the total area.

    Returns
    -------
    median, high_error, low_error : float
        Central value and upper / lower interval half-widths.

    '''


    import warnings

    import numpy as np
    from scipy.integrate import simps as integrate


    if len(x) < 4:
        #raise ValueError('x and y must have lengths > 3')
        return x[0], 0, 0

    # If no PDF is given, treat x as sorted samples with uniform weights
    if y is None:
        x = np.sort(x)
        y = np.ones(x.shape)

    if np.any(y < 0):
        raise ValueError('y values must be non-negative')

    confidence = (1.0 - alpha)

    # area under whole function
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore",category=DeprecationWarning)
        area = integrate(y, x)

    # Get weighted average of PDF
    mid_pos = np.argmin(np.abs(x - np.average(x, weights=y)))
    #mid_pos = np.argmin(np.abs(x - np.median(x, weights=y)))
    #mid_pos = np.interp

    # If multiple positions match the weighted average, take the one in
    # the middle.
    try:
        if len(mid_pos) > 1:
            mid_pos = mid_pos[len(mid_pos) // 2]
    except TypeError:
        pass

    # Lower error
    pos = mid_pos - 1
    low_area = -np.inf

    #area = integrate(y[0:mid_pos], x[0:mid_pos])

    while low_area <= area * confidence / 2.0 and pos > 0:
        y_clip = y[pos:mid_pos]
        x_clip = x[pos:mid_pos]

        low_area = integrate(y_clip, x_clip)

        # Guard against stepping past the lower bound
        if pos < 0:
            pos = 0
            break

        pos -= 1

    # set result to lower position
    low_pos = pos

    if pos == 0:
        low_pos = np.min(np.where(y != 0))

    # higher error
    pos = mid_pos + 1
    max_pos = len(x)
    high_area = -np.inf

    #area = integrate(y[mid_pos:-1], x[mid_pos:-1])
    while high_area <= area * confidence / 2.0 and pos < max_pos:
        y_clip = y[mid_pos:pos]
        x_clip = x[mid_pos:pos]

        high_area = integrate(y_clip, x_clip)

        if pos > max_pos:
            pos = max_pos
            break

        pos += 1

    high_pos = pos

    if pos >= max_pos:
        high_pos = np.max(np.where(y != 0))

    median = x[mid_pos]
    low_error = x[mid_pos] - x[low_pos]
    high_error = x[high_pos] - x[mid_pos]

    return median, high_error, low_error
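
A usage sketch (assuming calc_symmetric_error above is in scope): for a unit Gaussian PDF with alpha = 0.32, the symmetric interval should recover roughly one standard deviation on each side.

import numpy as np

x = np.linspace(-5, 5, 1001)
y = np.exp(-x**2 / 2.0) / np.sqrt(2.0 * np.pi)

median, high_error, low_error = calc_symmetric_error(x, y, alpha=0.32)
print(median, high_error, low_error)  # expect roughly 0, 1, 1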
Example n. 52
0
def threshold_area(x, y, area_fraction=0.68):
    '''
    Finds the limits of a 1D array which includes a given fraction of the
    integrated data.

    Parameters
    ----------
    x : array-like
        Abscissa of the data.
    y : array-like
        1D array to integrate.
    area_fraction : float
        Fraction of the total area to enclose.

    Returns
    -------
    limits : tuple
        Peak position and the lower and upper offsets enclosing the
        fraction of the area.

    '''

    import numpy as np
    from scipy.integrate import simps as integrate

    # A single data point has nothing to integrate
    if x.size == 1:
        return x[0], 0, 0

    # Step for lowering threshold
    step = (np.max(y) - np.median(y)) / 10000.0

    # initial threshold
    threshold = np.max(y) - step
    threshold_area = 0.0

    # area under whole function
    area = integrate(y, x)

    # Lower the threshold until the enclosed area reaches the requested
    # fraction of the total area, or the threshold drops below zero
    while threshold_area < area * area_fraction and threshold > 0:

        threshold_indices = np.where(y > threshold)[0]

        try:
            bounds_indices = (threshold_indices[0], threshold_indices[-1])
        except IndexError:
            bounds_indices = ()

        try:
            threshold_area = integrate(y[bounds_indices[0]:bounds_indices[1]],
                                       x[bounds_indices[0]:bounds_indices[1]])
            threshold_area += threshold * (x[bounds_indices[1]] - \
                                           x[bounds_indices[0]])
        except IndexError:
            threshold_area = 0

        threshold -= step

    if threshold < 0:
        bounds_indices = (0, len(x) - 1)

    x_peak = x[y == y.max()][0]
    low_error, up_error = x_peak - x[bounds_indices[0]], \
                          x[bounds_indices[1]] - x_peak

    return (x_peak, low_error, up_error)
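
A usage sketch (assuming threshold_area above is in scope): for a unit-amplitude Gaussian the default 68% fraction should return bounds near one standard deviation on either side of the peak.

import numpy as np

x = np.linspace(-5, 5, 1001)
y = np.exp(-x**2 / 2.0)

x_peak, low_error, up_error = threshold_area(x, y)
print(x_peak, low_error, up_error)  # expect roughly 0, 1, 1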
Example n. 53
0
    def __init__(self, val_func, pbar):
        """
        Args:
            val_func : ValuationFunc
                Valuation function instance we want optimal pricing for
            pbar : float
                The maximum possible bidder valuation
        """
        self.pbar = pbar

        # Copy params from cost function
        self.s = val_func.s
        self.a = val_func.a
        logger.debug(f'Creating optimal power pricing func '
                     f'(a = {self.a}, s = {self.s}, pbar = {pbar})')

        # Initialize params we'll need later
        self.c_lb = val_func.get_min_marginal_cost()
        self.c_ub = val_func.get_max_marginal_cost()
        assert self.pbar > self.c_lb, f'Need pbar={self.pbar} > {self.c_lb}'
        self._is_luc = (pbar > self.c_lb) and (pbar <= self.c_ub)
        self.alpha_min = self.s**(self.s / (self.s - 1))
        logger.debug(f'\tLUC = {self._is_luc}, alpha_min = {self.alpha_min}')
        logger.debug(f'\tc_lb = {self.c_lb}, c_ub = {self.c_ub}')

        # Compute Cs (quad returns (value, abserr); keep only the value)
        int_val = integrate(lambda nu: (nu**(self.s - 1)) * np.exp(-nu),
                            self.s, self.alpha_min)[0]
        self.Cs = self.c_ub
        self.Cs *= (1 / np.exp(self.s)) - (int_val / (self.s**self.s))
        self.Cs *= np.exp(self.alpha_min)
        logger.debug(f'\tCs = {self.Cs}')

        # Detect operating regime (LUC or HUC) + one-time setup actions
        if self._is_luc:
            self.w = val_func.inverse(pbar / val_func.s)
            self.v = val_func.inverse(pbar)
            assert self.w > 0, 'Error: w > 0 must hold'
            assert self.w < self.v, 'Error: w < v must hold'
            assert self.v <= 1, 'Error: v <= 1 must hold'
            # TODO: what's a good init for m?
            self.set_m((self.v + self.w) / 2)
            logger.debug(f'\tDone LUC init. (w, v) = ({self.w}, {self.v})')
        else:
            # Determine whether HUC1 or HUC2
            if self.pbar > self.Cs:
                self._huc_mode = 2
            else:
                self._huc_mode = 1
            logger.debug(f'\tHUC mode {self._huc_mode}')

            # Compute CDT.
            # The following computations are prone to numerical instabilities
            # near the open end points...so try to avoid them for now and
            # assume it won't be a problem...
            #
            # WARNING: if brentq is complaining about the lb and ub not having
            # different signs, it is likely that the root we are searching for
            # is very close to the open end point (and not within the selected
            # epsilon tolerance). To compensate for this, you could do two
            # things:
            #  1. Decrease epsilon (not recommended, for numerical reasons)
            #  2. Perturb pbar. Likely, pbar is too close to Cs if you are
            #     experiencing this
            self.u_s = (1 / self.s)**(1 / (self.s - 1))
            EPS = 5e-3
            if abs(self.Cs - self.pbar) < self.Cs / 10:
                # If we detect that Cs and pbar are "very close", try
                # decreasing EPS to avoid the brentq complaint explained
                # above. This will NOT work flawlessly for all cases;
                # ideally we would set EPS adaptively, but that is beyond
                # the current scope
                EPS /= 10
            logger.debug(f'\tStarting u_cdt search. u_s = {self.u_s}')
            if self._huc_mode == 1:
                lb = self.u_s
                ub = 1 - EPS
                logger.debug(f'\t\tlb = {lb}, ub = {ub}')
                self.u_cdt, r = brentq(self._u_cdt_huc1_eqn,
                                       lb,
                                       ub,
                                       full_output=True)
                assert r.converged, 'Root finder failed to converge'
                logger.debug(f'\t\tu_cdt = {self.u_cdt}')
            else:
                lb = EPS
                ub = self.u_s - EPS
                logger.debug(f'\t\tlb = {lb}, ub = {ub}')
                self.u_cdt, r = brentq(self._u_cdt_huc2_eqn,
                                       lb,
                                       ub,
                                       full_output=True)
                assert r.converged, 'Root finder failed to converge'
                logger.debug(f'\t\tu_cdt = {self.u_cdt}')

            # Find rho_s if HUC1. rho_s is a resource utilization level, so it
            # lies in [0, 1] (or maybe the open interval?)
            if self._huc_mode == 1:
                lb = 0
                ub = 1
                logger.debug(f'\tStarting rho_s search. lb = {lb}, ub = {ub}')
                self.rho_s, r = brentq(self._rho_s_eqn,
                                       lb,
                                       ub,
                                       full_output=True)
                assert r.converged, 'Root finder failed to converge'
                logger.debug(f'\t\trho_s = {self.rho_s}')

            # Set u parameter if HUC1
            if self._huc_mode == 1:
                # TODO: what's a good init for u?
                self.set_u((self.u_s + self.u_cdt) / 2)
Example n. 54
0
def planckPhotons(lamb1, lamb2, T):
    N, dN = integrate(planckPhotonDistrib, lamb1, lamb2, args=(T,))
    return N
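
planckPhotonDistrib is not defined in this snippet; a plausible stand-in (an assumption, not the original) is the photon-count form of the Planck law per unit wavelength:

import numpy as np

h = 6.626e-34    # Planck constant [J s]
c = 2.998e8      # speed of light [m / s]
kB = 1.381e-23   # Boltzmann constant [J / K]

def planckPhotonDistrib(lamb, T):
    # Spectral photon exitance: radiant exitance 2*pi*h*c**2 / lamb**5
    # divided by the photon energy h*c / lamb
    return (2.0 * np.pi * c / lamb**4) / (np.exp(h * c / (lamb * kB * T)) - 1.0)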
Example n. 55
0
 def test_gauss_pdf_integral(self):
     ''' Check that Gauss pdf integral is 1 '''
     result, _ = integrate(pylandau.get_gauss_pdf,
                           0, 10000, args=(10, 3))
     self.assertAlmostEqual(result, 1, delta=1e-3)
 def find_coefficient_b(k):
     return 4 / period * integrate(
         lambda t, k: input_signal_function(t, t_i, amplitude, period) * sin(omega * k * t),
         0, period / 2, args=(k,))[0]
def main():

    import numpy as np
    from scipy.integrate import quad as integrate

    # Parameters in script
    # --------------------------------------------------------------------------
    clouds = ('taurus', 'perseus',)# 'california')
    dec_ranges = ((35, 20), (35, 24), (44, 35))
    ra_ranges = ((80, 60), (60, 40), (70, 40))
    tspin_threshold = 200.0

    # Data locations in script
    # --------------------------------------------------------------------------
    filedir = '/d/bip3/ezbc/multicloud/data/cnm_data/heiles03/'

    # Read the data
    source_table = read_data(filedir + 'heiles03_sources.tsv')
    param_table = read_data(filedir + 'heiles03_fit_params.tsv')

    # Extract headers and data
    source_cols = source_table.colnames
    source_data = np.asarray(source_table._data)
    param_cols = param_table.colnames
    param_data = np.asarray(param_table._data)

    # Extract source names and RA / Dec
    source_decs = np.empty(source_data.shape)
    source_ras = np.empty(source_data.shape)
    source_names = np.empty(source_data.shape, dtype=object)

    for i, source in enumerate(source_data):
        source_decs[i] = source[source_cols.index('_DE.icrs')]
        source_ras[i] = source[source_cols.index('_RA.icrs')]
        source_names[i] = source[source_cols.index('Name')]

    # Find T_cnm temperatures for each cloud
    cloud_counts_list = []
    cloud_bins_list = []
    cloud_t_cnm_lists = []
    weights_list = []

    for i, cloud in enumerate(clouds):
        dec_range = dec_ranges[i]
        ra_range = ra_ranges[i]

        # Choose only sources within RA and Dec range
        indices = np.where((source_decs > dec_range[1]) &
                           (source_decs < dec_range[0]) &
                           (source_ras > ra_range[1]) &
                           (source_ras < ra_range[0])
                           )[0]

        cloud_decs = source_decs[indices]
        cloud_ras = source_ras[indices]
        cloud_sources = source_names[indices]

        # Get the spin temperatures of chosen sources
        t_cnm_list = []
        cloud_t_cnm_list = []
        source_list = []
        weights = []

        for j in range(len(param_data)):

            tspin = param_data[j][param_cols.index('Tspin')]
            tau_error = param_data[j][param_cols.index('e_tau')]
            if param_data[j][param_cols.index('Name')] in cloud_sources:
                if tspin < tspin_threshold and tau_error != 0.0:
                    cloud_t_cnm_list.append(tspin)

                    Tpeak = param_data[j][param_cols.index('Tpeak')]
                    DelV = param_data[j][param_cols.index('DelV')]
                    VLSR = param_data[j][param_cols.index('VLSR')]

                    T_integ = integrate(gaussian,
                                        -1000,
                                        1000,
                                        args=(Tpeak,
                                              DelV/2.0,
                                              VLSR))

                    weights.append(T_integ[0])

            if tspin < tspin_threshold:
                t_cnm_list.append(tspin)

        if cloud == 'perseus':
            bins = 3
        else:
            bins = 8

        cloud_counts, cloud_bins = np.histogram(cloud_t_cnm_list, bins=bins)
        cloud_counts = cloud_counts / np.sum(cloud_counts, dtype=float)
        cloud_counts = np.append(cloud_counts, 0)

        cloud_counts_list.append(cloud_counts)
        cloud_bins_list.append(cloud_bins)
        cloud_t_cnm_lists.append(cloud_t_cnm_list)
        weights_list.append(weights)

        print(cloud, np.median(cloud_t_cnm_list), 'Median CNM temp [K]')
        print(cloud, np.average(cloud_t_cnm_list, weights=weights),
              'Intensity-weighted mean CNM temp [K]')

    print(weights, cloud_t_cnm_list)

    global_counts, global_bins = np.histogram(t_cnm_list, bins=20)
    global_counts = global_counts / np.sum(global_counts, dtype=float)
    global_counts = np.append(global_counts, 0)

    # Plot the spin temperatures
    plot_spin_temps(cloud_counts_list, cloud_bins_list=cloud_bins_list,
            global_spin_temps=global_counts, global_bins=global_bins,
            clouds=clouds,
            filename=\
                '/d/bip3/ezbc/multicloud/figures/heiles03_spin_temp_hist.png')
Example n. 58
0
def get_integrated_conductivity(material, T1, T2):
  model, Tlow, Thigh = conductivity_model(material)

  if type(T1) in [list, np.ndarray]:
    T1 = np.array(T1)

    if (T1 < Tlow).sum() or (T1 > Thigh).sum():
      pass
      #print 'WARNING: Low temperatures are outside model\'s range of ' \
      #      + 'validity: %.3f - %.3f K' %(Tlow,Thigh)

    if type(T2) in [list, np.ndarray]:
      T2 = np.array(T2)

      if (T2 < Tlow).sum() or (T2 > Thigh).sum():
        pass
        #print 'WARNING: High temperatures are outside model\'s range of ' \
        #      + 'validity: %.3f - %.3f K' %(Tlow,Thigh)

      K = np.zeros_like(T1)

      for i in range(len(T1)):
        K[i] = integrate(model, T1[i], T2[i],  epsabs = 0.0, epsrel = 1E-7)[0]

      return K

    else:
      K = np.zeros_like(T1)

      for i in range(len(T1)):
        K[i] = integrate(model, T1[i], T2,  epsabs = 0.0, epsrel = 1E-7)[0]

      return K

  elif type(T2) in [list, np.ndarray]:
    T2 = np.array(T2)

    if (T2 < Tlow).sum() or (T2 > Thigh).sum():
      pass
      #print 'WARNING: High temperatures are outside model\'s range of ' \
      #      + 'validity: %.3f - %.3f K' %(Tlow,Thigh)

    K = np.zeros_like(T2)

    for i in range(len(T2)):
      K[i] = integrate(model, T1, T2[i],  epsabs = 0.0, epsrel = 1E-7)[0]

    return K

  else:
    if T1 < Tlow or T1 > Thigh:
      pass
      #print 'WARNING: Low Temperature is outside model\'s range of validity: ' \
      #      + '%.3f - %.3f K' %(Tlow,Thigh)

    if T2 < Tlow or T2 > Thigh:
      pass
      #print 'WARNING: High Temperature is outside model\'s range of ' \
      #      + 'validity: %.3f - %.3f K' %(Tlow,Thigh)

    return integrate(model, T1, T2,  epsabs = 0.0, epsrel = 1E-7)[0]
Example n. 59
0
 def __get_sr_t_no_o4(self):
     new_matrix = self.__matrix[:self.m+1, :self.m+1]
     ftu = sum([sum([self.__matrix[i, j]
                     for j in range(self.m+1, self.m+self.n+1)])
                for i in range(self.m+1)])
     tu = integrate(lambda x: x * ftu, 0, np.inf)
     return tu[0]