Example #1
def energy(powers, default_timestep=8):
    """Compute the energy associated to a list of measures (in W)
    and associated timestamps (in s).
    """
    energy = {"night_rate": 0, "day_rate": 0, "value": 0}
    if len(powers) == 1:
        if powers[0].night_rate == 1:
            energy["night_rate"] = powers[0].value / 1000 * default_timestep / 3600
        else:
            energy["day_rate"] = powers[0].value / 1000 * default_timestep / 3600
        energy["value"] = energy["day_rate"] + energy["night_rate"]
    else:
        x = []
        day_rate = []
        night_rate = []
        for i in powers:
            x.append(i.timestamp)
            if i.night_rate == 1:
                night_rate.append(i.value)
                day_rate.append(0)
            else:
                day_rate.append(i.value)
                night_rate.append(0)
        energy["night_rate"] = numpy.trapz(night_rate, x) / 1000 / 3600
        energy["day_rate"] = numpy.trapz(day_rate, x) / 1000 / 3600
        energy["value"] = energy["day_rate"] + energy["night_rate"]
    return energy
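
A minimal usage sketch for the function above (the Power record type here is a hypothetical stand-in for the measurement objects, which are not shown; the snippet assumes numpy is imported):

import collections
import numpy

Power = collections.namedtuple('Power', ['timestamp', 'value', 'night_rate'])

measures = [Power(timestamp=0, value=1000.0, night_rate=0),
            Power(timestamp=3600, value=1000.0, night_rate=0)]
print(energy(measures))  # a constant 1 kW over one hour -> 1.0 kWh on the day rate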
Example #2
def particlesInVolumeLogNorm(vol_frac, mu, sigma, particle_diameters):
    '''
    Calculate particle number densities in a volume element.
    The particle diameters are log-normally distributed (sigma, mu)
    and the particles occupy a given volume fraction.
    '''

    D = particle_diameters

    # Log-normal distribution of the particle diameters
    N = lognorm(sigma, scale=np.exp(mu))
    # Weight factors of each particle size
    pdf = N.pdf(D)

    # Volume of a sphere with diameter D [um^3]
    Vsph = 4.0 / 3.0 * np.pi * (D / 2.0) ** 3.0

    # Particle volumes multiplied by weight factors => volume distribution
    WV = pdf * Vsph

    # Total volume of the volume distribution
    Vtot = np.trapz(WV, D)
    # Number of particles per um^3
    n_part = vol_frac / Vtot

    print('Number of particles in cubic micrometer = %.18f' % n_part)

    # Check, should give the volume fraction in %
    print("Volume fraction was: %.1f %%" %
          (np.trapz(n_part * pdf * Vsph, D) * 100))
    bins = pdf * (D[1] - D[0])
    # print(bins.sum())
    return n_part * bins
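
A usage sketch under the imports the snippet assumes (numpy as np and scipy.stats.lognorm); the parameter values are illustrative only:

import numpy as np
from scipy.stats import lognorm

diameters = np.linspace(0.01, 2.0, 200)   # particle diameters in um
densities = particlesInVolumeLogNorm(vol_frac=0.01, mu=np.log(0.5),
                                     sigma=0.4, particle_diameters=diameters)
print(densities.sum())                    # approximately n_part (particles per um^3)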
Example #3
def averageAroundMidpoints(x,y,newX):
	if x.size != y.size:
		raise ValueError("X and Y have to be the same size")
	if newX.size > x.size:
		raise ValueError("newX has to have fewer elements than X")
	newY = []

	endpointA = x[0]
	endpointB = newX[0] + (newX[1]-newX[0])/2.0

	interpolatorFunc = interp1d(x,y)

	s = x < endpointB
	yends = interpolatorFunc([endpointA,endpointB])
	newY.append(np.trapz( np.concatenate( ([yends[0]],y[s],[yends[1]]) ) , np.concatenate( ([endpointA],x[s],[endpointB]) ) )/(endpointB-endpointA))
	#print "Endpoints: %s"%( [endpointA,endpointB] )
	for i in range(1,newX.size-1):
		endpointA = newX[i] - (newX[i]-newX[i-1])/2.0
		endpointB = newX[i] + (newX[i+1]-newX[i])/2.0
		s = np.logical_and(x >= endpointA,x < endpointB )
		yends = interpolatorFunc([endpointA,endpointB])
		newY.append(np.trapz( np.concatenate( ([yends[0]],y[s],[yends[1]]) ) , np.concatenate( ([endpointA],x[s],[endpointB]) ) )/(endpointB-endpointA))
		#print "Endpoints: %s"%( [endpointA,endpointB] )

	endpointA = newX[-1] - (newX[-1] - newX[-2])/2.0
	endpointB = x[-1]
	#print "Endpoints: %s"%( [endpointA,endpointB] )
	s = x >= endpointA
	yends = interpolatorFunc([endpointA,endpointB])
	newY.append(np.trapz( np.concatenate( ([yends[0]],y[s],[yends[1]]) ) , np.concatenate( ([endpointA],x[s],[endpointB]) ) )/(endpointB-endpointA))

	return np.array(newY)
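
A usage sketch (assumes numpy as np and scipy.interpolate.interp1d, on which the snippet relies):

import numpy as np
from scipy.interpolate import interp1d

x = np.linspace(0.0, 10.0, 101)
y = np.sin(x)
newX = np.linspace(0.5, 9.5, 10)
print(averageAroundMidpoints(x, y, newX))  # bin-averaged values of sin(x)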
Example #4
def synchroints(sdip, xtwissdip, disperdip, rho, E0):
    # SOME SYNCHROTRON CONSTANTS
    hbar = hb/qe
    Cq = 55*hbar*cl/32/sqrt(3)/E0            # Chao: 3.8319e-13 m
    Ca = cl*re/3/E0**3                       # Chao: 2.113e-24 m²/(eV)³/s
    Cy = 4*pi/cl*Ca                          # Chao: 8.846e-32 m/(eV)³

    # H FUNCTION: H(s) = beta*D'^2 + 2*alpha*D*D' + gamma*D^2
    Hsx = (xtwissdip[1, 1, :]*disperdip[0, :]**2
           - 2*xtwissdip[0, 1, :]*disperdip[0, :]*disperdip[1, :]
           + xtwissdip[0, 0, :]*disperdip[1, :]**2)

    # SYNCHROTRON INTEGRALS
    SYNIN1 = trapz(disperdip[0, :], sdip)/rho
    SYNIN2 = 2*pi/rho
    SYNIN3 = 2*pi/rho**2
    SYNIN4x = SYNIN1/rho/rho
    SYNIN4y = 0
    SYNIN5x = trapz(Hsx, sdip)/rho/rho/rho

    # DAMPING PARTITION NUMBERS
    Jx = 1 - SYNIN4x/SYNIN2
    Jy = 1 - SYNIN4y/SYNIN2
    Js = 2 + (SYNIN4x+SYNIN4y)/SYNIN2
    return SYNIN1, SYNIN2, SYNIN3, SYNIN4x, SYNIN4y, SYNIN5x, Jx, Jy, Js, Cq, Ca
Example #5
    def beam_phase_sharpWindow(self):
        '''
        *Beam phase measured at the main RF frequency and phase. The beam is
        averaged over a window. The coefficients of the sine and cosine
        components determine the beam phase, projected to the range
        Pi/2 to 3/2 Pi. Note that this beam phase is already w.r.t. the
        instantaneous RF phase.*
        '''
        
        # Main RF frequency at the present turn
        omega_rf = self.rf_station.omega_rf[0,self.rf_station.counter[0]]
        phi_rf = self.rf_station.phi_rf[0,self.rf_station.counter[0]]
        
        if self.alpha != 0.0:
            left_boundary = self.profile.bin_centers \
                >= self.time_offset - np.pi/omega_rf
            right_boundary = self.profile.bin_centers \
                <= -1/self.alpha + self.time_offset - 2*np.pi/omega_rf
            indexes = left_boundary * right_boundary
        else:
            indexes = np.ones(self.profile.n_slices, dtype=bool)
        
        # Convolve with window function
        scoeff = np.trapz( np.sin(omega_rf*self.profile.bin_centers[indexes]\
                                   + phi_rf) \
                           *self.profile.n_macroparticles[indexes],
                           dx=self.profile.bin_size )
        ccoeff = np.trapz( np.cos(omega_rf*self.profile.bin_centers[indexes]\
                                   + phi_rf) \
                           *self.profile.n_macroparticles[indexes],
                           dx=self.profile.bin_size )

        # Project beam phase to (pi/2,3pi/2) range
        self.phi_beam = np.arctan(scoeff/ccoeff) + np.pi
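
Stripped of the class context, the core projection can be sketched standalone with a synthetic Gaussian bunch profile (all values here are illustrative assumptions):

import numpy as np

omega_rf, phi_rf = 2 * np.pi * 400e6, 0.3      # toy RF angular frequency and phase
t = np.linspace(-2e-9, 2e-9, 1000)             # bin centres
profile = np.exp(-0.5 * (t / 5e-10) ** 2)      # macroparticle histogram
dt = t[1] - t[0]
scoeff = np.trapz(np.sin(omega_rf * t + phi_rf) * profile, dx=dt)
ccoeff = np.trapz(np.cos(omega_rf * t + phi_rf) * profile, dx=dt)
phi_beam = np.arctan(scoeff / ccoeff) + np.pi  # projected to (pi/2, 3pi/2)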
Example #6
def list_of_tuples_rm_flat_avg_signal(ret_values,lot_list,average_signal,discr_coefficient):
	oper_list=list()
	if len(ret_values[0,:]) >= len(average_signal):
		v_length=len(average_signal)
	else:
		v_length=len(ret_values[0,:])
	for x in ret_values:
		oper_list.append(np.trapz(np.nan_to_num(np.abs(x[:v_length])),x=np.arange(v_length)))

	i=lot_list[0][1][0]
	lot_rm_index=list()
	lotcopy=list()
	count=0
	#rmcount=0
	#rescaling
	average_signal=average_signal[:v_length]
	#integrating average signal
	avg_integral=np.trapz(np.nan_to_num(np.abs(average_signal)),x=np.arange(v_length))

	for a in lot_list:
		j=a[1][1]
		if ( np.abs(avg_integral - oper_list[j])/oper_list[j] > discr_coefficient) :
			lot_rm_index.append(count)
			#rmcount+=1
			count+=1
			continue
		lotcopy.append(a)  # i.e. if the row is caught by the coefficient threshold it is not written to the new list; otherwise it is simply copied over (the row with a matching oper value)
		count+=1
	#print 'Removed a total of ', rmcount, ' signals'
	#print 'Reached count:', count
	return lotcopy
Example #7
 def __init__(self, fct, makenorm=True, makeunlog=False, fct_log=False,
              npts=1000, *args, **kwargs):
     if not callable(fct):
         raise Exception("Not callable")
     self.fct = fct
     self.args = args
     self.kwargs = kwargs
     # apply exp on a ln-function at callback or not
     self.makeunlog = makeunlog is True
     # renorm at callback or not
     self.makenorm = makenorm is True
     # whether to add or divide the normalization
     self.fct_log = fct_log is not False and makeunlog is False
     self.prior_log = kwargs.get('prior_log', False)  # Jeffreys prior
     if kwargs.get('prior_bounds', None) is None:
         raise Exception("No prior bounds found")
     if self.prior_log:
         self.x = np.logspace(np.log(kwargs.get('prior_bounds')[0]),
                              np.log(kwargs.get('prior_bounds')[1]),
                              npts,
                              base=np.e)
     else:
         self.x = np.linspace(kwargs.get('prior_bounds')[0],
                              kwargs.get('prior_bounds')[1],
                              npts)
     if makenorm is True:
         resfct = self.fct(self.x, *self.args, **self.kwargs)
         if self.fct_log:
             self.normval = np.log(np.trapz(np.exp(resfct), self.x))
         elif self.makeunlog:
             self.normval = np.trapz(np.exp(resfct), self.x)
         else:
             self.normval = np.trapz(resfct, self.x)
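
The normalization idea in isolation: integrating a callable over its prior bounds with np.trapz (a sketch, independent of the class machinery above):

import numpy as np

fct = lambda x: np.exp(-0.5 * x**2)  # unnormalized density
x = np.linspace(-5.0, 5.0, 1000)     # prior_bounds = (-5, 5)
normval = np.trapz(fct(x), x)
print(normval)                       # ~sqrt(2*pi) ~ 2.5066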
Example #8
def writeVpEq(par, tstart):
    """
    This function computes the time-averaged surface zonal flow (and Rolc) and
    formats the output.

    >>> # Reads all the par.* files from the current directory
    >>> par = MagicTs(field='par', iplot=False, all=True)
    >>> # Time-average
    >>> st = writeVpEq(par, tstart=2.1)
    >>> print(st)

    :param par: a :py:class:`MagicTs <magic.MagicTs>` object containing the par file
    :type par: :py:class:`magic.MagicTs`
    :param tstart: the starting time of the averaging
    :type tstart: float
    :returns: a formatted string
    :rtype: str
    """
    mask = np.where(abs(par.time - tstart) == min(abs(par.time - tstart)), 1, 0)
    ind = np.nonzero(mask)[0][0]
    fac = 1.0 / (par.time.max() - par.time[ind])
    avgReEq = fac * np.trapz(par.reEquat[ind:], par.time[ind:])
    roEq = avgReEq * par.ek * (1.0 - par.radratio)
    avgRolC = fac * np.trapz(par.rolc[ind:], par.time[ind:])
    st = "%10.3e%5.2f%6.2f%11.3e%11.3e%11.3e" % (par.ek, par.strat, par.pr, par.ra, roEq, avgRolC)
    return st
Example #9
def test_spectra_get_flux_contributions():
    timestepmin = 40
    timestepmax = 80
    dfspectrum = at.spectra.get_spectrum(
        specfilename, timestepmin=timestepmin, timestepmax=timestepmax, fnufilterfunc=None)

    integrated_flux_specout = np.trapz(dfspectrum['f_lambda'], x=dfspectrum['lambda_angstroms'])

    specdata = pd.read_csv(specfilename, delim_whitespace=True)
    arraynu = specdata.loc[:, '0'].values
    arraylambda_angstroms = const.c.to('angstrom/s').value / arraynu

    contribution_list, array_flambda_emission_total = at.spectra.get_flux_contributions(
        modelpath, timestepmin=timestepmin, timestepmax=timestepmax)

    integrated_flux_emission = -np.trapz(array_flambda_emission_total, x=arraylambda_angstroms)

    # total spectrum should be equal to the sum of all emission processes
    print(f'Integrated flux from spec.out:     {integrated_flux_specout}')
    print(f'Integrated flux from emission sum: {integrated_flux_emission}')
    assert math.isclose(integrated_flux_specout, integrated_flux_emission, rel_tol=4e-3)

    # check each bin is not out by a large fraction
    diff = [abs(x - y) for x, y in zip(array_flambda_emission_total, dfspectrum['f_lambda'].values)]
    print(f'Max f_lambda difference {max(diff) / integrated_flux_specout}')
    assert max(diff) / integrated_flux_specout < 2e-3
Example #10
    def get_entropy(self, temperature, verbose=True):
        """Returns the entropy, in eV/K, of crystalline solid
        at a specified temperature (K)."""

        self.verbose = verbose
        write = self._vprint
        fmt = '%-15s%13.7f eV/K%13.4f eV'
        if self.formula_units == 0:
            write('Entropy components at '
                  'T = %.2f K,\non a per-unit-cell basis:' % temperature)
        else:
            write('Entropy components at '
                  'T = %.2f K,\non a per-formula-unit basis:' % temperature)
        write('=' * 49)
        write('%15s%13s     %13s' % ('', 'S', 'T*S'))

        omega_e = self.phonon_energies
        dos_e = self.phonon_DOS
        if omega_e[0] == 0.:
            omega_e = np.delete(omega_e, 0)
            dos_e = np.delete(dos_e, 0)

        B = 1. / (units.kB * temperature)
        S_vib = (omega_e / (temperature * (np.exp(omega_e * B) - 1.))
                 - units.kB * np.log(1. - np.exp(-omega_e * B)))
        if self.formula_units == 0:
            S = np.trapz(S_vib * dos_e, omega_e)
        else:
            S = np.trapz(S_vib * dos_e, omega_e) / self.formula_units

        write('-' * 49)
        write(fmt % ('S', S, S * temperature))
        write('=' * 49)
        return S
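
A toy sketch of the same vibrational-entropy integral with a synthetic Debye-like DOS (the real method takes phonon energies and DOS from a calculation; kB here is the Boltzmann constant in eV/K, standing in for units.kB):

import numpy as np

kB = 8.617333e-5                             # Boltzmann constant in eV/K
omega = np.linspace(1e-4, 0.1, 500)          # phonon energies in eV
dos = omega**2 / np.trapz(omega**2, omega)   # toy Debye-like DOS, normalized
T = 300.0
B = 1.0 / (kB * T)
S_vib = (omega / (T * (np.exp(omega * B) - 1.0))
         - kB * np.log(1.0 - np.exp(-omega * B)))
print(np.trapz(S_vib * dos, omega))          # entropy in eV/K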
Example #11
def avgField(time, field, tstart=None, std=False):
    """
    This subroutine computes the time-average (and the std) of a time series

    >>> ts = MagicTs(field='misc', iplot=False, all=True)
    >>> nuavg = avgField(ts.time, ts.topnuss, 0.35)
    >>> print(nuavg)

    :param time: time
    :type time: numpy.ndarray
    :param field: the time series of a given field
    :type field: numpy.ndarray
    :param tstart: the starting time of the averaging
    :type tstart: float
    :param std: when set to True, the standard deviation is also calculated
    :type std: bool
    :returns: the time-averaged quantity
    :rtype: float
    """
    if tstart is not None:
        mask = np.where(abs(time - tstart) == min(abs(time - tstart)), 1, 0)
        ind = np.nonzero(mask)[0][0]
    else:  # the whole input array is taken!
        ind = 0
    fac = 1.0 / (time[-1] - time[ind])
    avgField = fac * np.trapz(field[ind:], time[ind:])

    if std:
        stdField = np.sqrt(fac * np.trapz((field[ind:] - avgField) ** 2, time[ind:]))
        return avgField, stdField
    else:
        return avgField
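
A quick usage sketch with a synthetic time series:

import numpy as np

time = np.linspace(0.0, 10.0, 1001)
field = np.sin(time) ** 2
avg, std = avgField(time, field, tstart=2.0, std=True)
print(avg, std)  # roughly 0.5 and 0.35 for sin^2 over several periods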
Example #12
  def massLossRate(self, dat, phToSHow=1):

     # inner x boundary
     mdot_a = nm.zeros(dat.nz)
     mdot_a[:] = 2.*nm.pi*dat.Rsc*dat.x[dat.js]*dat.Mx[:, phToSHow, dat.js]*dat.Usc*dat.Dsc
     mdotTot_a = -nm.trapz(mdot_a, dat.z*dat.Rsc)

     # outer x boundary (the momentum flux is sampled at dat.je to match the
     # radius dat.x[dat.je]; the original snippet indexed dat.js here)
     mout1 = nm.zeros(dat.nz)
     mout1[:] = 2.*nm.pi*dat.Rsc*dat.x[dat.je]*dat.Mx[:, phToSHow, dat.je]*dat.Usc*self.Dsc
     mout1[:] = [0. if x < 0. else x for x in mout1]

     mdotTot1 = nm.trapz(mout1, dat.z*dat.Rsc)
     mout2 = nm.zeros(dat.nx)

     # upper z boundary
     mout2[:] = 2.*nm.pi*dat.Rsc*dat.x[:]*self.Mz[dat.ie, phToSHow, :]*self.Usc*self.Dsc
     mdotTotUp = nm.trapz(mout2, dat.x*dat.Rsc)

     mout2[:] = 0.
     # lower z boundary
     mout2[:] = -2.*nm.pi*dat.Rsc*dat.x[:]*self.Mz[dat.i_s, phToSHow, :]*self.Usc*self.Dsc
     mdotTotBot = nm.trapz(mout2, dat.x*dat.Rsc)

     return (mdotTot1, mdotTotUp, mdotTotBot, mdotTot_a)
Example #13
def get_cracking_history():
    # XK = [0.]  # position of the first crack
    #     sig_c_K = [0., 3.0]
    #     eps_c_K = [0., 3.0 / (vf * Ef + (1 - vf) * Em)]

    XK = []
    sig_c_K = [0.]
    eps_c_K = [0.]

    cs = 20.

    # introduce the predefined cracks
    cracks = cs * np.arange(L / cs + 1)
    d = np.abs(x[:, None] - cracks[None, :])
    min_idx = np.argmin(d, axis=0)
    crack_position = x[min_idx]

    for i, crack in enumerate(crack_position):
        XK.append(crack)
        # add a small number to the min matrix strength to avoid numerical
        # problems
        sig_c_K.append(3.0 + i * 1e-8)
        eps_c_K.append(
            np.trapz(cb(get_z_x(x, XK), 3.0 + i * 1e-8)[1], x) / 1000.)  # Eq. (10)

    for sig_c in np.linspace(3, sig_cu, 1000):
        sig_c_K.append(sig_c)
        eps_c_K.append(np.trapz(cb(get_z_x(x, XK), sig_c)[1], x) / 1000.)
    return sig_c_K, eps_c_K
Example #14
def kcorr(l_o, fl_o, band, z, axis=0):
    '''
    Compute the K-correction of a spectrum observed through an SDSS band at
    redshift z: K_QR = -2.5 log10(F / (1 + z)).
    '''
    # read in filter table
    band_tab = t.Table.read('filters/{}_SDSS.res'.format(band),
                            names=['lam', 'f'], format='ascii')

    # set up interpolator
    band_interp = interp1d(x=band_tab['lam'].quantity.value,
                           y=band_tab['f'], fill_value=0.,
                           bounds_error=False)
    l_o = l_o.to('AA')
    l_e = l_o / (1. + z)

    R_o = band_interp(l_o)
    R_e = band_interp(l_e)

    fl_e_ = interp1d(x=l_e, y=fl_o,
                     bounds_error=False, fill_value='extrapolate')
    fl_o_ = interp1d(x=l_o, y=fl_o,
                     bounds_error=False, fill_value='extrapolate')

    n = np.trapz(x=l_o,
                 y=(R_o * l_o * fl_o_(l_o / (1. + z))),
                 axis=axis)
    d = np.trapz(x=l_e,
                 y=(R_e * l_e * fl_e_(l_e)),
                 axis=axis)

    F = n / d

    K_QR = -2.5 * np.log10(F.to('').value / (1. + z))

    return K_QR
Example #15
def qini(y_ordered, percent_targeted = np.linspace(0,1,10), y_rand = None):
    '''
    :param y_ordered: the target variable ordered by uplift model score
    :param percent_targeted:
    :param y_rand: if we want to test a different model, place those ordered outcomes here. If none,
                    plot random outcome
    :return:
    '''

    if y_rand is None:
        y_rand = y_ordered

    nsamp = len(y_ordered)

    lift = []
    random = []
    for pt in percent_targeted:

        n_targ = int(np.round(nsamp * pt))

        yl = y_ordered[:n_targ].sum()
        yr = y_rand[np.random.randint(0,nsamp,n_targ)].sum()

        lift.append(yl)
        random.append(yr)

    lift = np.array(lift)
    random = np.array(random)

    q = np.trapz(lift, x = percent_targeted) - np.trapz(random, x = percent_targeted)

    return q
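
A usage sketch with synthetic outcomes (the seed and sizes are arbitrary); a model whose score is informative about the outcome should yield a positive Qini value:

import numpy as np

np.random.seed(0)
scores = np.random.rand(1000)
y = (np.random.rand(1000) < scores).astype(float)  # outcome correlated with score
y_ordered = y[np.argsort(-scores)]                 # order by descending model score
print(qini(y_ordered))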
Example #16
 def test_marginal_likelihood(self):
     """
     Test that the maximum likelihood estimates for the marginal likelihood
     pdfs are correct.
     """
     m = np.linspace(2, 8, 31)
     r = np.linspace(0, 2.0, 21)
     M, R = np.meshgrid(m, r)
     pos = np.empty(M.shape + (2,))
     pos[:, :, 0] = R
     pos[:, :, 1] = M
     mean = np.zeros((2))
     cov = np.zeros((2, 2))
     self.g.process(self.fbdata, 0.5, 0)
     self.g.get_mean_cov(mean, cov)
     rv = stats.multivariate_normal(mean, cov)
     p = rv.pdf(pos)
     mp = np.trapz(p, x=r, axis=0)
     rp = np.trapz(p, x=m, axis=1)
     mp_normed = mp / np.trapz(mp, m)
     rp_normed = rp / np.trapz(rp, r)
     mhat = m[np.argmax(mp_normed)]
     rhat = r[np.argmax(rp_normed)]
     np.testing.assert_almost_equal(mhat, 5.8, decimal=1)
     np.testing.assert_almost_equal(rhat, 1.6, decimal=1)
Example #17
def test_AR_LD():
    """

    Test the Levinson-Durbin estimate of the AR coefficients against the
    expected PSD

    """
    arsig,_,_ = utils.ar_generator(N=512)
    avg_pwr = (arsig*arsig.conjugate()).real.mean()
    order = 8
    ak, sigma_v = tsa.AR_est_LD(arsig, order)
    w, psd = tsa.AR_psd(ak, sigma_v)

    # the psd is a one-sided power spectral density, which has been
    # multiplied by 2 to preserve the property that
    # 1/2pi int_{-pi}^{pi} Sxx(w) dw = Rxx(0)

    # evaluate this integral numerically from 0 to pi
    dw = np.pi/len(psd)
    avg_pwr_est = np.trapz(psd, dx=dw) / (2*np.pi)
    npt.assert_almost_equal(avg_pwr, avg_pwr_est, decimal=0)

    # Test for providing the autocovariance as an input:
    ak,sigma_v = tsa.AR_est_LD(arsig, order, utils.autocov(arsig))
    w, psd = tsa.AR_psd(ak, sigma_v)
    avg_pwr_est = np.trapz(psd, dx=dw) / (2*np.pi)
    npt.assert_almost_equal(avg_pwr, avg_pwr_est, decimal=0)
Example #18
def domfreq(st, win=None):
    """
    Calculate the dominant frequency of a time series using the Douma and Snieder (2006) definition, which is equivalent to a weighted mean; the variance is estimated with the weighted variance formula (amplitudes are weights)
    USAGE
    fd = domfreq(st, win=None)
    INPUTS
    st = obspy stream object, or trace object
    win = tuple of time window in seconds (e.g. win=(3., 20.)) over which to compute dominant frequency, None computes for entire time window
    OUTPUTS
    fd = numpy array of dominant frequencies (Hz)
    """
    st = Stream(st)  # turn into a stream object in case st is a trace
    fd = np.empty(len(st))  # preallocate
    #var = np.empty(len(st))
    #fd2 = np.empty(len(st))
    for i, trace in enumerate(st):
        tvec = maketvec(trace)[:-1]  # Time vector
        vel = trace.data[:-1]
        acc = np.diff(trace.data)*trace.stats.sampling_rate
        if win is not None:
            if win[1] > tvec.max() or win[0] < tvec.min():
                print('Time window specified is not compatible with the length of the time series')
                return
            vel = vel[(tvec >= win[0]) & (tvec <= win[1])]
            acc = acc[(tvec >= win[0]) & (tvec <= win[1])]
            tvec = tvec[(tvec >= win[0]) & (tvec <= win[1])]
        fd[i] = (np.sqrt(np.trapz(acc**2, tvec)/np.trapz(vel**2, tvec)))/(2*np.pi)
    return fd
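
The underlying definition without the obspy wrapping: for v(t) = sin(2*pi*f*t), the ratio of the two integrals recovers f (a self-contained sketch):

import numpy as np

f = 5.0
t = np.linspace(0.0, 2.0, 4001)
vel = np.sin(2 * np.pi * f * t)
acc = np.gradient(vel, t)
fd = np.sqrt(np.trapz(acc**2, t) / np.trapz(vel**2, t)) / (2 * np.pi)
print(fd)  # ~5.0 Hz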
Example #19
def integrate_3D(x, y, z, xlin, ylin, zlin, csd):
    # TODO: NOT YET WORKING AS EXPECTED
    X, Y, Z = np.meshgrid(xlin, ylin, zlin)
    Nz = zlin.shape[0]
    Ny = ylin.shape[0]

    m = np.sqrt((x - xlin)**2 + (y - ylin)**2 + (z - zlin)**2)
    m[m < 0.00001] = 0.00001
    csd = csd / m
    #print 'CSD:'
    #print csd

    J = np.zeros((Ny, Nz))
    for i in range(Nz):
        J[:, i] = np.trapz(csd[:, :, i], zlin)

    #print '1st integration'
    #print J

    Ny = ylin.shape[0]
    I = np.zeros(Ny)
    for i in range(Ny):
        I[i] = np.trapz(J[:, i], ylin)

    #print '2nd integration'
    #print I

    norm = np.trapz(I, xlin)
    
    #print '3rd integration'
    #print norm
    
    return norm
Example #20
	def solve( self, t, D, VdotO20 ):
		self.Time_Grid = t
		self.Demand = D
		self.VdotO2aerobic = self.solveAerobic( VdotO20 )
		self.VdotO2anaerobic = self.solveAnaerobic()
		self.VO2aerobic = trapz( self.VdotO2aerobic, x=self.Time_Grid )
		self.VO2anaerobic = trapz( self.VdotO2anaerobic, x=self.Time_Grid )
Example #21
    def effective_wavelength(self, binned=True, wavelengths=None,
                             mode='efflerg'):
        """Calculate :ref:`effective wavelength <synphot-formula-effwave>`.

        Parameters
        ----------
        binned : bool
            Sample data in native wavelengths if `False`.
            Else, sample binned data (default).

        wavelengths : array-like, `~astropy.units.quantity.Quantity`, or `None`
            Wavelength values for sampling.
            If not a Quantity, assumed to be in Angstrom.
            If `None`, ``self.waveset`` or `binset` is used, depending
            on ``binned``.

        mode : {'efflerg', 'efflphot'}
            Flux is first converted to the unit below before calculation:

                * 'efflerg' - FLAM
                * 'efflphot' - PHOTLAM (deprecated)

        Returns
        -------
        eff_lam : `~astropy.units.quantity.Quantity`
            Observation effective wavelength.

        Raises
        ------
        synphot.exceptions.SynphotError
            Invalid mode.

        """
        mode = mode.lower()
        if mode == 'efflerg':
            flux_unit = units.FLAM
        elif mode == 'efflphot':
            warnings.warn(
                'Usage of EFFLPHOT is deprecated.', AstropyDeprecationWarning)
            flux_unit = units.PHOTLAM
        else:
            raise exceptions.SynphotError(
                'mode must be "efflerg" or "efflphot"')

        if binned:
            x = self._validate_binned_wavelengths(wavelengths).value
            y = self.sample_binned(wavelengths=x, flux_unit=flux_unit).value
        else:
            x = self._validate_wavelengths(wavelengths).value
            y = units.convert_flux(x, self(x), flux_unit).value

        num = np.trapz(y * x ** 2, x=x)
        den = np.trapz(y * x, x=x)

        if den == 0.0:  # pragma: no cover
            eff_lam = 0.0
        else:
            eff_lam = abs(num / den)

        return eff_lam * self._internal_wave_unit
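
The effective-wavelength formula in isolation, eff_lam = trapz(f*lam^2) / trapz(f*lam), with a toy Gaussian spectrum (values are illustrative):

import numpy as np

lam = np.linspace(4000.0, 7000.0, 500)             # Angstrom
flux = np.exp(-0.5 * ((lam - 5500.0) / 400.0)**2)  # toy flux in FLAM-like units
eff_lam = np.trapz(flux * lam**2, x=lam) / np.trapz(flux * lam, x=lam)
print(eff_lam)                                     # close to 5500 Angstrom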
Example #22
    def set_angle_width(self, width):
        # Calculates the normalizing coefficient for the distribution. From
        # Bringi & Chandrasekar 2001, pg. 68 (which cite Mardia 1972) for an
        # axial distribution.
        self._angle_width = width
        if width <= 0.:
            return
        t = np.linspace(0, 1, 500)
        self._angle_b = 1. / (2. * np.trapz(
            np.exp(-self._angle_width * t**2), t))

        # We neglect here the factor of 1/2pi, as without it:
        # * The graphs on page 69 can be reproduced correctly
        # * The distribution is normalized (sums to 1)
        # Size 90 below *must* match the number returned from T-matrix
        self._angle_vals = np.linspace(np.pi/2, np.pi, TMATRIX_ANGLES)
        self._angle_weights = self._angle_b * (np.exp(
            -self._angle_width * np.cos(self._angle_vals)**2)
            * np.sin(self._angle_vals))

        # Since the distribution and scattering calculations are symmetric, we
        # just calculate the right half of the distribution and double the
        # weights
        self._angle_weights[1:] *= 2
        norm_factor = np.trapz(self._angle_weights, self._angle_vals)
        self._angle_weights /= norm_factor
Example #23
    def get_cracking_history():
        XK = []  # positions of the cracks
        sig_c_K = [0.]
        eps_c_K = [0.]

        idx_0 = np.argmin(sig_mu_x)
        XK.append(x[idx_0])
        sig_c_0 = sig_mu_x[idx_0] * Ec / Em
        sig_c_K.append(sig_c_0)
        eps_c_K.append(sig_mu_x[idx_0] / Em)

        while True:
            z_x = get_z_x(x, XK)
            sig_c_k, y_i = get_sig_c_K(z_x, sig_c_K[-1])
            if sig_c_k == sig_cu:
                break
            XK.append(y_i)
            sig_c_K.append(sig_c_k)
            eps_c_K.append(
                np.trapz(eps_f(get_z_x(x, XK), sig_c_k), x) / np.amax(x))  # Eq. (10)

            # save the figure
#             plt.figure()
#             plt.plot(x, sig_m(get_z_x(x, XK), sig_c_k))
#             plt.plot(x, sig_mu_x)
#             plt.savefig("D:\\cracking_history\\" + str(len(sig_c_K)) + ".png")
#             plt.close()

        sig_c_K.append(sig_cu)
        eps_c_K.append(np.trapz(eps_f(get_z_x(x, XK), sig_cu), x) / np.amax(x))
        return sig_c_K, eps_c_K
Example #24
def dDCR_moments(SED1, SED2, bandpass):
    zenith_angle = np.pi/4.0 * galsim.radians
    R500 = galsim.dcr.get_refraction(500, zenith_angle)

    # analytic first moment differences
    R = lambda w:(galsim.dcr.get_refraction(w, zenith_angle) - R500) / galsim.arcsec
    x1 = np.union1d(bandpass.wave_list, SED1.wave_list)
    x1 = x1[(x1 >= bandpass.blue_limit) & (x1 <= bandpass.red_limit)]
    x2 = np.union1d(bandpass.wave_list, SED2.wave_list)
    x2 = x2[(x2 >= bandpass.blue_limit) & (x2 <= bandpass.red_limit)]
    numR1 = np.trapz(R(x1) * bandpass(x1) * SED1(x1), x1)
    numR2 = np.trapz(R(x2) * bandpass(x2) * SED2(x2), x2)
    den1 = SED1.calculateFlux(bandpass)
    den2 = SED2.calculateFlux(bandpass)

    R1 = numR1/den1
    R2 = numR2/den2
    dR_analytic = R1 - R2

    # analytic second moment differences
    V1_kernel = lambda w:(R(w) - R1)**2
    V2_kernel = lambda w:(R(w) - R2)**2
    numV1 = np.trapz(V1_kernel(x1) * bandpass(x1) * SED1(x1), x1)
    numV2 = np.trapz(V2_kernel(x2) * bandpass(x2) * SED2(x2), x2)
    V1 = numV1/den1
    V2 = numV2/den2
    dV_analytic = V1 - V2

    return dR_analytic, dV_analytic, len(x2)
Example #25
def run(data, sweep_rate=20.,
        c_range=(0.4, 0.6), co_range=(0.6, 0.9),
        exe='', graph=True, baseline=False, copy=False):
    # INIT data
    params = {}
    area_CO = None
    area_H = None

    cycle_CO = data.get_scan(1)
    cycle_baseline = data.get_scan(2)

    if data.sweep_rate:
        sr = data.sweep_rate
    else:
        sr = sweep_rate

    # RUN stuff
    if "CO" in exe:
        params["CO"] = CO(cycle_CO, cycle_baseline, c_range, co_range, add_baseline=baseline, copy=copy)
        x, y = params["CO"][0]
        Q_CO = np.trapz(y, x)  # V C / s
        factor_CO = 420e-6 * sr  # C V / s cm2
        area_CO = Q_CO / factor_CO  # cm2
    if "H" in exe:
        params["H"] = H(cycle_CO, cycle_baseline, co_range[0], copy)
        x, y = params["H"]
        Q_H = np.trapz(y, x)  # V C / s
        factor_H = 210e-6 * sr * 1.e-3  # C V / s cm2
        area_H = Q_H / factor_H  # cm2
    if graph:
        plot(cycle_CO, cycle_baseline, paramsCO=params.get("CO"),
             paramsH=params.get("H"), exe=exe, graph=graph)

    return area_CO, area_H
Example #26
    def getIQAmplitudes(self):
        """Calculate complex signal from data and reference"""
        # get parameters
        dFreq = self.getValue('Modulation frequency')
        skipStart = self.getValue('Skip start')
        nSegment = int(self.getValue('Number of segments'))
        skipIndex = int(round(skipStart/self.dt))
        nTotLength = self.lTrace[0].size
        length = 1 + int(round(self.getValue('Length')/self.dt))
        length = min(length, nTotLength//nSegment - skipIndex)
        bUseRef = bool(self.getValue('Use Ch2 as reference'))
        # define data to use, put in 2d array of segments
        vData = np.reshape(self.lTrace[0], (nSegment, nTotLength//nSegment))
        # calculate cos/sin vectors, allow segmenting
        vTime = self.dt * (skipIndex + np.arange(length, dtype=float))
        vCos = np.cos(2*np.pi * vTime * dFreq)
        vSin = np.sin(2*np.pi * vTime * dFreq)
        # calc I/Q
        dI = 2. * np.trapz(vCos * vData[:,skipIndex:skipIndex+length]) / float(length-1)
        dQ = 2. * np.trapz(vSin * vData[:,skipIndex:skipIndex+length]) / float(length-1)
        signal = dI + 1j*dQ
        if bUseRef:
            vRef = np.reshape(self.lTrace[1], (nSegment, nTotLength//nSegment))
            dIref = 2. * np.trapz(vCos * vRef[:,skipIndex:skipIndex+length]) / float(length-1)
            dQref = 2. * np.trapz(vSin * vRef[:,skipIndex:skipIndex+length]) / float(length-1)
            # subtract the reference angle
            dAngleRef = np.arctan2(dQref, dIref)
            signal /= (np.cos(dAngleRef) + 1j*np.sin(dAngleRef))
#        elif nSegment>1:
#            # return absolute value if segmenting without reference
#            signal = np.abs(signal)
#        signal = np.mean(signal)
        return signal
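
The trapz-based I/Q demodulation at the heart of this routine, as a standalone sketch with a synthetic tone of known phase (parameters are illustrative; roughly ten full periods are sampled so the averages converge):

import numpy as np

dFreq, dt = 10e6, 1e-9                       # tone frequency and sample spacing
t = dt * np.arange(1000)                     # ~10 full periods
vData = np.cos(2 * np.pi * dFreq * t + 0.7)  # signal with known phase 0.7 rad
vCos = np.cos(2 * np.pi * dFreq * t)
vSin = np.sin(2 * np.pi * dFreq * t)
dI = 2. * np.trapz(vCos * vData) / float(len(t) - 1)
dQ = 2. * np.trapz(vSin * vData) / float(len(t) - 1)
print(np.arctan2(-dQ, dI))                   # recovers ~0.7 rad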
Example #27
    def solve_nonlinear(self, params, unknowns, resids):
        '''A VERY coarse approximation that takes the speed profile from the original proposal as given. It just serves as a placeholder for now; it's better than nothing, but a real analysis is needed here'''
        t1 = (300 - 0) * 1609 / (9.81 * 0.5) / 3600 # time needed to accelerate
        t2 = (555 - 300) * 1609 / (9.81 * 0.5) / 3600 # time needed to accelerate
        t3 = (params['max_velocity'] - 555) * 1609 / (9.81 * 0.5) / 3600 # time needed to accelerate
        #speed profile data from hyperloop alpha proposal, pg43
        dataStart = np.array([
            [0, 0],
            [t1, 300 * 1.609], # (300[mi/h] * 1609[m/mi]) / (9.81[m/s] * 0.5) / 3600[s/h]
            [167, 300 * 1.609],
            [167 + t2, 555 * 1.609], # (555 - 300[mi/h] * 1609[m/mi]) / (9.81[m/s] * 0.5) / 3600[s/h]
            [435, 555 * 1.609],
            [435 + t3, params['max_velocity']]])
        startUp = np.trapz(dataStart[:, 1] / 3600, dataStart[:, 0]) * 1000 # km covered during start up Los Angeles Grapevine
        dataEnd = np.array([
            [0, params['max_velocity']],
            [t3, 555 * 1.609],
            [t3 + 100, 555 * 1.609],
            [t3 + 100 + t2, 300 * 1.609],
            [t3 + 100 + t2 + 400, 300 * 1.609],
            [t3 + 100 + t2 + 400 + t1, 0]])
        windDown = np.trapz(dataEnd[:, 1] / 3600, dataEnd[:, 0]) * 1000 # km covered during wind down along I-580 to SF      
        len_middle = params['tube_length'] - (startUp + windDown)
        time_middle = len_middle / params['max_velocity']
        unknowns['time_mission'] = time_middle + 435 + t3 + t3 + 100 + t2 + 400 + t1

        unknowns['energy'] = (params['pwr_req'] * unknowns['time_mission'] / 3600.0) * (1 + params['pwr_marg']) # convert to hours
Example #28
def solve_linalg(k, T, F0, F1, f):
    N, h = len(X), L/len(X)
    I = np.eye(N)
    S,Y = np.meshgrid(X,X)
    abs_func = np.vectorize(apply_abs)
    F0, F1 = partial(F0, k), partial(F1, k)
    G0 = lambda i,j: abs_func(i, j, F0, F0) - b*h
    G1 = lambda i,j: abs_func(i, j, F1, lambda x: -F1(x)) - b*h*h*(i+j-.5)
    A = weight_matrix(
        lambda i,j: (j-i)*G0(-i,j) - G1(-i,j)/h,
        lambda i,j: G1(-i,j)/h - (j-i-1)*G0(-i,j)
    )
    B = weight_matrix(
        lambda i,j: (j+i)*G0(i,j) - G1(i,j)/h,
        lambda i,j: G1(i,j)/h - (j+i-1)*G0(i,j)
    )
    #splot(X, A*T(np.abs(S-Y)/k)/k)
    #splot(X, B*T((S+Y)/k)/k)
    #py.show()
    phi = solve(a*I - A*T(np.abs(S-Y)/k)/k + B*T((S+Y)/k)/k, f(X))
    p_xy = -(k*(T2(0)-T2(1./k)) + np.trapz((T1((L-X)/k) - T1((L+X)/k))*phi, X))*2/a
    Phi = np.outer(phi,np.ones(N))
    Q = np.trapz(T2((L-X)/k)-T2((L+X)/k) + np.trapz((T1(np.abs(S-Y)/k) - T1((S+Y)/k))*Phi, X)/k, X)/2/a
    #splot(X, K(*XX))
    #py.plot(X, phi)
    w = np.vectorize(lambda x: 0 if x==0 else x*np.log(x)/a)
    ww = lambda x: k*x*x*(2*np.log(x)-1)/4/a
    #print >> sys.stderr, k, np.trapz(phi, X), np.trapz(phi - w((L-X)/k), X) + ww(L/k)
    print(k, p_xy, np.trapz(phi, X)/2, Q, file=sys.stderr)
    #np.savetxt(sys.stdout, np.transpose((X, phi)), fmt='%1.4e')
    return k, p_xy, np.trapz(phi, X)/2, Q
Example #29
def get_band_phot( spectrum , bandpass, z, a=1, zero_point=48.6 ):
    import numpy as np

    c = (3.0e18)      #  Angs / s

  # Load the spectrum and bandpass from text files if paths were given

    if isinstance( spectrum , type('string') ): spectrum = np.loadtxt(spectrum)
    if isinstance( bandpass , type('string') ): bandpass = np.loadtxt(bandpass)

    interp_flux = np.interp(bandpass[:,0], spectrum[:,0]*(1.0+z),
        spectrum[:,a] )

    weighted_flux = []
    nu_arr = []
    weights = []

    nu_arr = c / bandpass[:,0]
    weighted_flux = interp_flux * bandpass[:,1] * bandpass[:,0]/nu_arr/nu_arr
    weights = bandpass[:,1] / nu_arr

    numer = np.trapz( weighted_flux , x=nu_arr )
    denom = np.trapz( weights , x=nu_arr)

    #return -2.5*np.log10( numer / denom ) + zero_point   # <--- "MAGNITUDE"
    return -2.5*np.log10(numer/denom/zero_point)  # - zero_point   # <--- "MAGNITUDE"
Example #30
def plotRocCurves(file_legend):
	pylab.clf()
	pylab.figure(1)
	pylab.xlabel('1 - Specificity', fontsize=12)
	pylab.ylabel('Sensitivity', fontsize=12)
	pylab.title("Need for Referral")
	pylab.grid(True, which='both')
	pylab.xticks([i/10.0 for i in range(1,11)])
	pylab.yticks([i/10.0 for i in range(0,11)])
	pylab.tick_params(axis="both", labelsize=15)

	for file, legend in file_legend:
		points = open(file,"rb").readlines()
		x = [float(p.split()[0]) for p in points]
		y = [float(p.split()[1]) for p in points]
		dev = [float(p.split()[2]) for p in points]
		x = [0.0] + x
		y = [0.0] + y
		dev = [0.0] + dev
	
		auc = np.trapz(y, x) * 100
		aucDev = np.trapz(dev, x) * 100

		pylab.grid()
		pylab.errorbar(x, y, yerr = dev, fmt='-')
		pylab.plot(x, y, '-', linewidth = 1.5, label = legend + u" (AUC = {0:0.1f}% \xb1 {1:0.1f}%)".format(auc,aucDev))

	pylab.legend(loc = 4, borderaxespad=0.4, prop={'size':12})
	pylab.savefig("referral/referral-curves.pdf", format='pdf')
Example #31
        fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(18, 8))
        ax1.imshow(data[0, :, :, 0], aspect='auto')
        ax2.imshow(mask, aspect='auto')
        ax3.imshow(pred, aspect='auto')

        np.save(pred_dir + fname + '_mask', mask)
        np.save(pred_dir + fname + '_pred', pred)

        plt.subplots_adjust(left=0.04, right=0.99, top=0.99, bottom=0.04)
        plt.savefig(pred_dir + fname + '.jpg', dpi=30)
        plt.close()

        y_true = mask.reshape(-1).astype(int)
        y_score = pred.reshape(-1)
        y_score /= y_score.max()

        recall, precision = prc(y_true, y_score, trsh)
        pr_list.append(np.stack([recall, precision]).T)

        fpr, tpr = rocc(y_true, y_score, trsh)
        roc_list.append(np.stack([fpr, tpr]).T)
        auc_list.append(np.trapz(tpr, fpr))

        mcc_list.append(matthews_corrcoef(y_true, y_score.round()))

    np.save(res_file + '_pr', np.array(pr_list))
    np.save(res_file + '_roc', np.array(roc_list))
    np.save(res_file + '_mcc', np.array(mcc_list))
    print(np.mean(auc_list), np.mean(mcc_list))
Example #32
x, p = plot_histogram(energy, weight[:, 0], generated, plot=plt.loglog)
x2, p2 = plot_histogram(energy,
                        weight[:, 1],
                        generated,
                        plot=plt.loglog,
                        new=False,
                        clr="r")
earthskimming = sum(p2) > 0.

plt.xlabel(r"energy, E$_\tau$ (GeV)")
plt.ylabel(r"E$_\tau \times$ rate (a$^{-1}$)")
plt.savefig("tau-energy.png")

plt.figure()
plt.semilogx(x, cumtrapz(p, x, initial=0.) / numpy.trapz(p, x), "k-")
if earthskimming:
    plt.semilogx(x2, cumtrapz(p2, x2, initial=0.) / numpy.trapz(p2, x2), "r-")
plt.xlabel(r"energy limit, E$_\tau$ (GeV)")
plt.ylabel(r"ratio")
plt.savefig("tau-ratio.png")

plot_histogram(theta, weight[:, 0], generated)
plt.xlabel(r"zenith, $\theta_\tau$ (deg)")
plt.ylabel(r"rate (deg$^{-1}$ a$^{-1}$)")
plt.savefig("tau-zenith.png")

plot_histogram(phi, weight[:, 0], generated)
plt.xticks(numpy.linspace(-180., 180., 5))
if earthskimming:
    plt.axis((-200., 200., 0., 2E-02))
Example #33
def runOkushiri(par, n):
    # ------------------------------------------------------------------------------
    # Setup computational domain
    # ------------------------------------------------------------------------------
    xleft = 0
    xright = 5.448
    ybottom = 0
    ytop = 3.402

    # rectangular cross mesh
    points, vertices, boundary = anuga.rectangular_cross(
        int(n), int(n), xright - xleft, ytop - ybottom, (xleft, ybottom))

    newpoints = points.copy()

    # make refinement in x direction
    x = np.multiply([0., 0.1, 0.2, 0.335, 0.925, 1.], max(points[:, 0]))
    y = [0., 3., 4.25, 4.7, 5.3, max(points[:, 0])]
    f1 = interp1d(x, y, kind='quadratic')
    newpoints[:, 0] = f1(points[:, 0])

    # make refinement in y direction
    x = np.multiply([0., .125, .3, .7, .9, 1.], max(points[:, 1]))
    y = [0., 1.25, 1.75, 2.15, 2.65, max(points[:, 1])]
    f2 = interp1d(x, y, kind='quadratic')
    newpoints[:, 1] = f2(points[:, 1])

    c = abs(newpoints[:, 0] - 5.0) + .5 * abs(newpoints[:, 1] - 1.95)
    c = 0.125 * c

    points[:, 0] = c * points[:, 0] + (1 - c) * newpoints[:, 0]
    points[:, 1] = c * points[:, 1] + (1 - c) * newpoints[:, 1]

    # create domain
    domain = anuga.Domain(points, vertices, boundary)

    # don't store .sww file
    # domain.set_quantities_to_be_stored(None)

    # ------------------------------------------------------------------------------
    # Initial Conditions
    # ------------------------------------------------------------------------------
    domain.set_quantity('friction', 0.01)  # 0.0
    domain.set_quantity('stage', 0.0)
    domain.set_quantity(
        'elevation',
        filename='/home/rehmemk/git/anugasgpp/Okushiri/data/bathymetry.pts',
        alpha=0.02)

    # ------------------------------------------------------------------------------
    # Set simulation parameters
    # ------------------------------------------------------------------------------
    domain.set_name('output_okushiri')  # Output name
    domain.set_minimum_storable_height(0.001)  # Don't store w < 0.001m
    domain.set_flow_algorithm('DE0')

    # ------------------------------------------------------------------------------
    # Modify input wave
    # ------------------------------------------------------------------------------
    # rescale input parameter
    try:
        len(par)
    except TypeError:  # a scalar was passed; wrap it in a list
        par = [par]
    par = np.dot(2, par)

    # load wave data
    # shutil.copyfile('boundary_wave_header.txt', 'boundary_wave_input.txt')
    data = np.loadtxt(
        '/home/rehmemk/git/anugasgpp/Okushiri/data/boundary_wave_original.txt',
        skiprows=1)
    t = data[:, 0]
    y = data[:, 1]
    energy = np.trapz(y**2, t)

    # define bumps [create input wave based on parameters]
    def bump(c):
        theta = c[0]
        position = c[1]
        weight = c[2]
        ybump = weight * np.exp(-.5 * (t - position)**2 * theta**-2)
        return ybump

    nbump = len(par)
    residual = y.copy()
    c = np.zeros((nbump, 3))
    for k in range(nbump):
        maxid = np.argmax(np.abs(residual))
        c0 = np.array([1.5, t[maxid], residual[maxid]])

        def cost(c):
            ybump = bump(c)
            cost = np.sqrt(np.mean((ybump - residual)**2))
            return cost

        c[k, :] = fmin(cost, c0, disp=False)
        residual -= bump(c[k, :])

    # deform wave
    ynew = residual.copy()
    for k in range(nbump):
        ynew += par[k] * bump(c[k, :])
    energynew = np.trapz(ynew**2, t)
    ynew = np.sqrt(energy / energynew) * ynew

    # write data
    data[:, 1] = ynew.copy()
    import scipy
    wave_function = scipy.interpolate.interp1d(t,
                                               ynew,
                                               kind='zero',
                                               fill_value='extrapolate')

    # MR: uncomment to plot input wave
    # points = np.linspace(-10, 30, 10000)
    # evals = np.zeros(len(points))
    # for i in range(len(evals)):
    #     evals[i] = wave_function(points[i])
    # plt.figure()
    # # plt.plot(points, evals)
    # # plt.plot(t, residual, 'r')
    # for k in range(nbump):
    #     plt.plot(t, par[k]*bump(c[k, :]), label='bum {}'.format(k))
    # plt.title('Okushiri Input Wave')
    # plt.show()

    # ------------------------------------------------------------------------------
    # Setup boundary conditions
    # ------------------------------------------------------------------------------

    # Create boundary function from input wave [replaced by wave function]

    # Create and assign boundary objects
    Bts = anuga.Transmissive_momentum_set_stage_boundary(domain, wave_function)
    Br = anuga.Reflective_boundary(domain)
    domain.set_boundary({'left': Bts, 'right': Br, 'top': Br, 'bottom': Br})

    # ------------------------------------------------------------------------------
    # Evolve system through time
    # ------------------------------------------------------------------------------

    # area for gulleys
    x1 = 4.85
    x2 = 5.25
    y1 = 2.05
    y2 = 1.85

    # gauges
    gauges = [[4.521, 1.196], [4.521, 1.696], [4.521, 2.196]]

    # index in gulley area
    x = domain.centroid_coordinates[:, 0]
    y = domain.centroid_coordinates[:, 1]
    v = np.sqrt((x - x1) ** 2 + (y - y1) ** 2) + \
        np.sqrt((x - x2) ** 2 + (y - y2) ** 2) < 0.5

    dplotter = Domain_plotter(domain, min_depth=0.001)

    k = 0
    # original number of timesteps is 451
    numTimeSteps = int(
        np.loadtxt(
            '/home/rehmemk/git/anugasgpp/Okushiri/data/numTimeSteps.txt'))
    meanstage = np.nan * np.ones((1, numTimeSteps))
    yieldstep = 0.05
    finaltime = (numTimeSteps - 1) * yieldstep
    meanlayer = 0

    # Do the actual calculation
    # for t in domain.evolve(yieldstep=yieldstep, finaltime=finaltime):
    #     # domain.write_time()

    #     # stage [=height of water]
    #     stage = domain.quantities['stage'].centroid_values[v]
    #     # averaging for smoothness
    #     meanstage[0, k] = np.mean(stage)
    #     # k is time
    #     k += 1

    # # PLOTTING

    # # Make movie of each timestep
    #     dplotter.save_depth_frame()
    # anim = dplotter.make_depth_animation()
    # anim.save('okushiri_%i.mp4' % n)
    # meanlayer = meanstage - meanstage[0, 0]

    # Plot the domain
    plt.figure()
    xya = np.loadtxt(
        '/home/rehmemk/git/anugasgpp/Okushiri/plots/Benchmark_2_Bathymetry.xya',
        skiprows=1,
        delimiter=',')
    X = xya[:, 0].reshape(393, 244)
    Y = xya[:, 1].reshape(393, 244)
    Z = xya[:, 2].reshape(393, 244)
    # Remove the white part of the seismic
    # Steve's original code uses cmap('gist_earth')
    from matplotlib.colors import LinearSegmentedColormap
    interval = np.hstack([np.linspace(0.0, 0.3), np.linspace(0.5, 1.0)])
    colors = plt.cm.seismic(interval)
    my_cmap = LinearSegmentedColormap.from_list('name', colors)
    # Multiply heights by 400 so that we get real scale, not model scale
    Z *= 400
    plt.contourf(X, Y, Z, 20, cmap=my_cmap)
    # plt.title('Bathymetry')
    cbar = plt.colorbar()
    cbar.ax.tick_params(labelsize=colorbarfontsize)
    # cbar.set_label('elevation', rotation=270)
    import matplotlib.patches
    from matplotlib.patches import Ellipse
    # plt.plot(x1, y1, 'o')
    # plt.plot(x2, y2, 'o')
    ellipse = Ellipse(((x2 + x1) / 2., (y1 + y2) / 2.),
                      width=0.5,
                      height=0.2,
                      angle=-20,
                      edgecolor='k',
                      fill=False,
                      label='area of interest',
                      linewidth=4)
    plt.gca().add_patch(ellipse)
    # plt.plot(gauges[0][0], gauges[0][1], 'ok')
    # plt.plot(gauges[1][0], gauges[1][1], 'ok')
    # plt.plot(gauges[2][0], gauges[2][1], 'ok', markersize=8, label='gauge')

    # plt.axis('off')
    plt.legend(loc='upper left', fontsize=legendfontsize)
    plt.gca().tick_params(axis='both', which='major', labelsize=tickfontsize)
    plt.tight_layout()
    # ---------------- hack to get ellpse shaped ellipse in legend------------
    import matplotlib.patches as mpatches
    from matplotlib.legend_handler import HandlerPatch
    colors = ["k"]
    texts = ["area of interest"]

    class HandlerEllipse(HandlerPatch):
        def create_artists(self, legend, orig_handle, xdescent, ydescent,
                           width, height, fontsize, trans):
            center = 0.5 * width - 0.5 * xdescent, 0.5 * height - 0.5 * ydescent
            p = mpatches.Ellipse(xy=center,
                                 width=width + xdescent,
                                 height=height + ydescent)
            self.update_prop(p, orig_handle, legend)
            p.set_transform(trans)
            return [p]

    c = [
        mpatches.Circle((0.5, 0.5),
                        1,
                        facecolor='None',
                        edgecolor='k',
                        linewidth=3) for i in range(len(texts))
    ]
    plt.legend(c,
               texts,
               bbox_to_anchor=(0., 1.),
               loc='upper left',
               ncol=1,
               fontsize=16,
               handler_map={mpatches.Circle: HandlerEllipse()})
    # ----------------------------
    plt.savefig('okushiri_domain.pdf')

    # Plot the triangle mesh
    plt.figure()
    mittelblau = (0. / 255, 81. / 255, 158. / 255)
    plt.triplot(dplotter.triang, linewidth=0.3, color=mittelblau)
    plt.axis('off')
    plt.tight_layout()
    plt.savefig('okushiri_mesh_%i.pdf' % n)
    # Plot the domain and the triangle mesh
    plt.figure()
    plt.tripcolor(dplotter.triang,
                  facecolors=dplotter.elev,
                  edgecolors='k',
                  cmap='gist_earth')
    plt.colorbar()
    plt.tight_layout()
    plt.savefig('okushiri_domainandmesh_%i.pdf' % n)

    # make video from sww file
    # swwplotter = SWW_plotter('output_okushiri.sww', min_depth=0.001)
    # lilo = len(swwplotter.time)
    # for k in range(lilo):
    #     if k % 10 == 0:
    #         print ' '
    #     swwplotter.save_stage_frame(frame=k, vmin=-0.02, vmax=0.1)
    #     print '(', swwplotter.time[k], k, ')',
    # print ' '
    # swwanim = swwplotter.make_stage_animation()
    # swwanim.save('okushiri_fromswwfile.mp4')

    return meanlayer
Example #34
    def calculate_mass(A, ri=np.arange(0, 6., 0.05), beta=0.25, r200=2.0, crit=2.2e11, conc1=None, fbr=None):
        """
        calculate_mass(A,ri,r200,conc1=None,crit,beta=None,fbr=None)


        input:
            ri : rgrid values
               
            A : caustic profile values
            
            r200 = 2.0 : critical radius of cluster. Default is 2.0, but advised to take the output r200 and rerun
                        the analysis with this better estimate.

            conc1 = None : concentration of cluster. If None given then calculated from relationship
            
            crit = 2.2e11 : Critical density of the Universe. crit ~ 2.7745946e11*(self.cosmo.h)**2.0*(0.25*(1+clus_z)**3.0 + 0.75)

            beta = 0.25 : Anisotropy parameter. Default value is 0.25, but a profile can be given that has the same rvalues as ri.

            fbr = None : An exact guess of Fbeta by whatever means. Usually not used.
        
        returns:
            mass_info: MassInfo data object
                variables: g_b, conc, f_beta, massprofile, avg_density, r200_est, M200

        """
        "Calculate the mass profile"
        # vdisp = self.gal_vdisp
        G = astconsts.G.value
        solmass = astconsts.M_sun.value
        mass_info = MassInfo()
        r2 = ri[ri >= 0]
        A2 = A[ri >= 0]
        Mpc2km = astunits.Mpc.to(astunits.km)
        sumtot = np.zeros(A2.size)
        # print 'Using beta = %.2f'%(beta)
        if conc1 is None:
            # conc = 4.0*(vdisp/700.0)**(-0.306)
            conc = 5.0 + np.random.normal(0, 2.0)
            if conc <= 0: conc = 5.0
        else:
            conc = conc1
        beta = 0.5 * (ri / (ri + r200 / conc))
        mass_info.g_b = (3 - 2.0 * beta) / (1 - beta)
        if fbr is None:
            f_beta = 0.5 * ((r2 / r200 * conc) ** 2) / ((1 + ((r2 / r200 * conc))) ** 2 * np.log(1 + ((r2 / r200 * conc)))) * mass_info.g_b
            f_beta[0] = 0
            for i in range(A2.size - 1):
                i += 1
                sumtot[i] = np.trapz(f_beta[1:i + 1] * (A2[1:i + 1] * 1000) ** 2, (r2[1:i + 1]) * Mpc2km * 1000)
                # sum[i] = np.trapz((A2[:i+1]*1000)**2,(r2[:i+1])*Mpc2km*1000)
                # sum = integrate.cumtrapz(f_beta*(A2[:f_beta.size]*1000)**2,r2[:f_beta.size]*Mpc2km*1000,initial=0.0)
        else:
            if type(fbr) == float or type(fbr) == int or type(fbr) == np.float64:
                f_beta = np.zeros(A2.size) + fbr * 1.0
            else:
                f_beta = fbr
            f_beta[0] = 0
            for i in range(A2.size - 1):
                i += 1
                sumtot[i] = np.trapz(f_beta[1:i + 1] * (A2[1:i + 1] * 1000) ** 2, (r2[1:i + 1]) * Mpc2km * 1000)
                # sum[i] = np.trapz((A2[:i+1]*1000)**2,(r2[:i+1])*Mpc2km*1000)
                # sum = integrate.cumtrapz(f_beta*(A2[:f_beta.size]*1000)**2,r2[:f_beta.size]*Mpc2km*1000,initial=0.0)
        mass_info.massprofile = sumtot / (G * solmass)
        f_beta_size = f_beta.size
        # return the caustic r200
        mass_info.avg_density = mass_info.massprofile / (4.0 / 3.0 * np.pi * (ri[:f_beta_size]) ** 3.0)
        try:
            # mass_info.r200_est = (ri[:f_beta_size])[np.where(mass_info.avg_density >= 200*crit)[0]+1][-1]
            finterp = interp1d(mass_info.avg_density[::-1], ri[:f_beta_size][::-1])
            mass_info.r200_est = finterp(np.asarray(np.asarray(200 * crit)))
            mass_info.r500_est = finterp(np.asarray(np.asarray(500 * crit)))
        except IndexError:
            mass_info.r200_est = 0.0
            mass_info.r500_est = 0.0
        # mass_info.M200_est = mass_info.massprofile[np.where(ri[:f_beta_size] <= mass_info.r200_est)[0][-1]]
        finterp = interp1d(ri[:f_beta_size], mass_info.massprofile)
        mass_info.M200_est = finterp(mass_info.r200_est)
        mass_info.M500_est = finterp(mass_info.r500_est)
        mass_info.M200 = mass_info.massprofile[np.where(ri[:f_beta_size] <= r200)[0][-1]]
        mass_info.f_beta = f_beta
        mass_info.conc = conc
        return mass_info
Example #35
def petrov_real_tim_rk4_mat(phi, mu, r, dr, dt, N, V, int_gas, t_steps, mode):
    # GPE COEFFICIENTS
    if int_gas == 0:
        int_coef = 0
        LHY_coef = 0
    elif int_gas == 1:
        int_coef = -3 * N
        LHY_coef = (5 / 2) * N**(3 / 2)

    # DIFFERENTIAL OPERATORS
    # first order derivative in the form of a sparse matrix (centrally defined)
    Dr = (1 / (2 * dr)) * (-1 * eye(phi.size - 2, phi.size, k=0, dtype=float) +
                           eye(phi.size - 2, phi.size, k=2, dtype=float))
    # second order derivative in the form of a sparse matrix (centrally defined 3-point formula)
    Dr2 = (1 / dr**2) * (eye(phi.size - 2, phi.size, k=0, dtype=float) -
                         2 * eye(phi.size - 2, phi.size, k=1, dtype=float) +
                         eye(phi.size - 2, phi.size, k=2, dtype=float))

    # INITIALISING ARRAYS
    H_KE = np.zeros(phi.size).astype(complex)
    H_LHY = np.zeros(phi.size).astype(complex)
    H_int = np.zeros(phi.size).astype(complex)
    H_trap = np.zeros(phi.size).astype(complex)
    KE = np.zeros(phi.size).astype(complex)
    k1 = np.zeros(phi.size).astype(complex)
    k2 = np.zeros(phi.size).astype(complex)
    k3 = np.zeros(phi.size).astype(complex)
    k4 = np.zeros(phi.size).astype(complex)
    phi = phi.astype(
        complex
    )  # set groundstate wavefunction to now be complex rather than real-valued

    # INITIALISING TIME AND DATA SAVING ARRAYS
    t_save = 100  # save density, phase, etc. every t_save steps
    t = 0  # initial time
    count = 0  # initialise counter
    spacetime = np.zeros((r.size, (t_steps // t_save))).astype(
        complex)  # array to save snapshots of wavefunction
    phase = np.zeros((r.size, (t_steps // t_save))).astype(
        complex)  # array to save snapshots of the real time phase
    t_array = np.zeros((t_steps // t_save))  # array to save time stamps
    mean_r2 = np.zeros((t_steps // t_save))  # observable used here <r^2>
    # swap to a smaller time step in real time
    dt = 0.1 * dr**2

    # invoke breathing mode
    if mode == 1:
        lamb = 1e-4  # small constant
        phi = np.exp(
            1j * lamb * r**2
        ) * phi  # small phase imprint of the form exp(i*lambda*F) where F = r^2 for breathing mode

    for l in range(0, t_steps):
        # k1 CALCULATION
        KE[1:-1] = (2 / r[1:-1]) * (
            Dr @ phi) + Dr2 @ phi  # Kinetic Energy derivatives
        # HAMILTONIAN TERMS
        H_KE[1:-1] = -0.5 * KE[1:-1]  # KE term
        H_LHY[1:-1] = LHY_coef * np.abs(phi[1:-1])**3 * phi[1:-1]  # LHY term
        H_int[1:-1] = int_coef * np.abs(
            phi[1:-1])**2 * phi[1:-1]  # s-wave term
        H_trap[1:-1] = V[1:-1] * phi[1:-1]  # potential term

        k1[1:-1] = -1j * dt * (H_KE[1:-1] + H_trap[1:-1] + H_LHY[1:-1] +
                               H_int[1:-1] - mu * phi[1:-1])

        # Neumann Boundary Conditions
        k1[0] = k1[1]
        k1[-1] = k1[-2]

        # k2 CALCULATION
        KE[1:-1] = (2 / r[1:-1]) * (Dr @ phi) + Dr2 @ phi + 0.5 * (
            (2 / r[1:-1]) * (Dr @ k1) + Dr2 @ k1)  # Kinetic Energy derivatives
        # HAMILTONIAN TERMS
        H_KE[1:-1] = -0.5 * KE[1:-1]  # KE term
        H_LHY[1:-1] = LHY_coef * np.abs(phi[1:-1] + k1[1:-1] / 2)**3 * (
            phi[1:-1] + k1[1:-1] / 2)  # LHY term
        H_int[1:-1] = int_coef * np.abs(phi[1:-1] + k1[1:-1] / 2)**2 * (
            phi[1:-1] + k1[1:-1] / 2)  # s-wave term
        H_trap[1:-1] = V[1:-1] * (phi[1:-1] + k1[1:-1] / 2)  # potential term

        k2[1:-1] = -1j * dt * (H_KE[1:-1] + H_trap[1:-1] + H_LHY[1:-1] +
                               H_int[1:-1] - mu * (phi[1:-1] + k1[1:-1] / 2))

        # Neumann Boundary Conditions
        k2[0] = k2[1]
        k2[-1] = k2[-2]

        # k3 CALCULATION
        KE[1:-1] = (2 / r[1:-1]) * (Dr @ phi) + Dr2 @ phi + 0.5 * (
            (2 / r[1:-1]) * (Dr @ k2) + Dr2 @ k2)  # Kinetic Energy derivatives
        # HAMILTONIAN TERMS
        H_KE[1:-1] = -0.5 * KE[1:-1]  # KE term
        H_LHY[1:-1] = LHY_coef * np.abs(phi[1:-1] + k2[1:-1] / 2)**3 * (
            phi[1:-1] + k2[1:-1] / 2)  # LHY term
        H_int[1:-1] = int_coef * np.abs(phi[1:-1] + k2[1:-1] / 2)**2 * (
            phi[1:-1] + k2[1:-1] / 2)  # s-wave term
        H_trap[1:-1] = V[1:-1] * (phi[1:-1] + k2[1:-1] / 2)  # potential term

        k3[1:-1] = -1j * dt * (H_KE[1:-1] + H_trap[1:-1] + H_LHY[1:-1] +
                               H_int[1:-1] - mu * (phi[1:-1] + k2[1:-1] / 2))

        # Neumann Boundary Conditions
        k3[0] = k3[1]
        k3[-1] = k3[-2]

        # k4 CALCULATION
        KE[1:-1] = (2 / r[1:-1]) * (Dr @ phi) + Dr2 @ phi + (
            (2 / r[1:-1]) * (Dr @ k3) + Dr2 @ k3)  # Kinetic Energy derivatives
        # HAMILTONIAN TERMS
        H_KE[1:-1] = -0.5 * KE[1:-1]  # KE term
        H_LHY[1:-1] = LHY_coef * np.abs(phi[1:-1] + k3[1:-1])**3 * (
            phi[1:-1] + k3[1:-1])  # LHY term
        H_int[1:-1] = int_coef * np.abs(phi[1:-1] + k3[1:-1])**2 * (
            phi[1:-1] + k3[1:-1])  # s-wave term
        H_trap[1:-1] = V[1:-1] * (phi[1:-1] + k3[1:-1])  # potential term

        k4[1:-1] = -1j * dt * (H_KE[1:-1] + H_trap[1:-1] + H_LHY[1:-1] +
                               H_int[1:-1] - mu * (phi[1:-1] + k3[1:-1]))

        # Neumann Boundary Conditions
        k4[0] = k4[1]
        k4[-1] = k4[-2]

        # FINAL RUNGE-KUTTA STEP
        phi[1:-1] = phi[1:-1] + (1. / 6) * (k1[1:-1] + 2 * k2[1:-1] +
                                            2 * k3[1:-1] + k4[1:-1])

        # NEUMANN BOUNDARY CONDITIONS
        # phi(j+1) - phi(j) = 0
        phi[0] = phi[1]
        phi[-1] = phi[-2]

        # SAVING DATA AND OBSERVABLES
        if (l % t_save == 0):
            spacetime[:, l // t_save] = phi  # save current wavefunction
            phase[:, l // t_save] = np.angle(phi)  # save current phase
            t_array[l // t_save] = t  # save current time
            mean_r2[l // t_save] = 4 * pi * np.trapz(
                r**4 * np.abs(phi)**2, dx=dr)  # save current observable <r^2>

        # ITERATE TIME
        t = t + dt

        # ITERATE COUNTER
        count = count + 1

    return phi, spacetime, t_array, mean_r2
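Each k above already folds the factor -1j * dt * (H - mu) into the increment, so the final update is the classical fourth-order Runge-Kutta combination; the intermediate kinetic-energy lines rely on the linearity of the derivative operators to evaluate them at phi + k/2 (or phi + k3). A minimal generic sketch of the same pattern, with the Hamiltonian abstracted into a callable right-hand side:

import numpy as np

def rk4_step(rhs, phi, dt):
    """One classical RK4 step; rhs(phi) returns d(phi)/dt."""
    k1 = dt * rhs(phi)
    k2 = dt * rhs(phi + k1 / 2)
    k3 = dt * rhs(phi + k2 / 2)
    k4 = dt * rhs(phi + k3)
    return phi + (k1 + 2 * k2 + 2 * k3 + k4) / 6

# e.g. a pure phase rotation d(phi)/dt = -1j * phi, the analogue of the
# -1j * (H - mu) * phi right-hand side inlined in the loop above
phi_next = rk4_step(lambda p: -1j * p, np.ones(4, dtype=complex), 1e-3)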
Exemplo n.º 36
0
def Equilibrium_Temperature(c_abs,
                            lbda,
                            F_nu_r,
                            F_dust_emi,
                            F_dust_sca,
                            scatter=False):
    '''
    Subroutine to obtain equilibrium temperature as a function of dust 
    size and shell radius. Dust emission is also returned.

    Parameters
    ----------
    c_abs : 2-D array
        c_abs[i, j] means absorption cross section of i-th size at j-th 
        wavelength. (unit: cm^2)
    lbda : array_like
        Wavelength in cm.
    F_nu_r : 2-D array
        F_nu_r[i, j] means central star flux at j-th wavelength received 
        by grains in i-th layer. (unit: erg/s/cm^3)
    F_dust_emi, F_dust_sca : 2-D array
        Dust emission and scattering flux. Both have the same dimensions
        as F_nu_r. (unit: erg/s/cm^3)
    scatter : bool
        If True, F_dust_sca is added to the total incident flux. Default
        `False`.

    Returns
    -------
    T_dust : 2-D array
        Equilibrium temperature of grains of j-th size in i-th layer
        is stored in T_dust[i, j]. (unit: K)
    F_nu_r_a_dust : 3-D array
        Wavelength dependent flux from grains of j-th size in i-th layer.
        Wavelength is the third axis. (unit: erg/s/cm^3)
    '''
    from scipy.optimize import brentq

    F_nu_r_tot = F_nu_r + F_dust_emi

    if scatter:
        F_nu_r_tot += F_dust_sca

    # c_abs[i, j] (size i, wavelength j) times F_nu_r_tot[k, j] (layer k,
    # wavelength j) -> heating_rate_nu[k, i, j]
    heating_rate_nu = np.einsum('ij, kj -> kij', c_abs,
                                F_nu_r_tot)  # erg/s/cm at each wavelength
    # Integrate over wavelength
    heating_rate = np.trapz(heating_rate_nu, lbda, axis=2)  # erg/s

    # Initialization
    T_dust = np.zeros([heating_rate.shape[0], heating_rate.shape[1]])
    F_nu_r_a_dust = np.zeros(
        [heating_rate.shape[0], heating_rate.shape[1], lbda.shape[0]])
    for i in range(heating_rate.shape[0]):  # Iter over shell radius
        for j in range(heating_rate.shape[1]):  # Iter over dust size
            # Look for zero point
            T_dust[i, j] = brentq(delta_rate,
                                  args=(lbda, heating_rate[i, j], c_abs[j, :]),
                                  a=10,
                                  b=4000,
                                  xtol=1e-8,
                                  maxiter=100,
                                  disp=True)
            F_nu_r_a_dust[i, j, :] = np.pi * blackbody(lbda, T_dust[i, j])

    return T_dust, F_nu_r_a_dust
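`blackbody` and `delta_rate` are used above but not shown in this snippet. A hypothetical sketch of what they might look like, assuming CGS units and that a grain at temperature T cools at 4*pi * integral(c_abs * B_lambda) d(lambda); the actual implementations in the original source may differ:

import numpy as np

h = 6.62607015e-27   # Planck constant [erg s]
c = 2.99792458e+10   # speed of light [cm/s]
k_B = 1.380649e-16   # Boltzmann constant [erg/K]

def blackbody(lbda, T):
    # Planck function B_lambda(T) in erg/s/cm^2/cm/sr, with lbda in cm.
    return (2.0 * h * c**2 / lbda**5) / (np.exp(h * c / (lbda * k_B * T)) - 1.0)

def delta_rate(T, lbda, heating_rate, c_abs_j):
    # Hypothetical residual (heating minus cooling); brentq drives it to zero.
    cooling_rate = 4.0 * np.pi * np.trapz(c_abs_j * blackbody(lbda, T), lbda)
    return heating_rate - cooling_rate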
Exemplo n.º 37
0
    def satellite_intensity(self, nu, zs, mhalos):
        # Subsample the halo-mass grid (every 4th point here; use ::2 or ::
        # for higher resolution) just to speed up the integration.
        satellite_masses = mhalos.copy()[::4]

        dndms = self.subhalo_mass_function(satellite_masses,
                                           mhalos[:, np.newaxis])

        # Integrate the subhalo mass function against the central intensity
        # profile over log-mass.
        return np.trapz(
            dndms[:, :, np.newaxis] *
            self.Scentral(nu, zs, satellite_masses[:, np.newaxis])[np.newaxis, :, :],
            np.log(satellite_masses),
            axis=1)
Exemplo n.º 38
0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import netCDF4 as nc
import sys
import numpy as np

if __name__ == "__main__":
    B0 = 0.005  # Surface buoyancy flux in simulation units.
    N0 = 3.**0.5  # Buoyancy frequency in the free atmosphere, in simulation units.
    L0 = (B0 / N0**3.)**0.5
    #
    sdata = nc.Dataset(sys.argv[1], 'r')
    sdata.set_auto_mask(False)
    it = sdata.variables['it'][:]
    t = sdata.variables['t'][:]
    y = sdata.variables['y'][:]
    b = sdata.variables['rS'][:, :]
    b_bg = N0**2.0 * y  # linear background buoyancy profile
    # Encroachment height z_enc: the depth a well-mixed layer must reach for
    # its buoyancy excess over the background to match the integral below.
    z_enc = np.sqrt(
        np.trapz(b[:, :] - b_bg[None, :], y, axis=1) * 2.0 / N0**2.0)
    #
    for i in range(np.size(it)):
        print(it[i], t[i], z_enc[i] / L0)
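The z_enc expression can be sanity-checked on a synthetic profile: for a layer perfectly mixed up to height h (b = N0**2 * h below h, relaxing to the background N0**2 * y above), the integral of b - b_bg is N0**2 * h**2 / 2, so the formula returns h. A minimal check, where h is a made-up mixed-layer height:

import numpy as np

N0 = 3.**0.5
y = np.linspace(0.0, 10.0, 2001)
h = 4.0  # hypothetical mixed-layer height
b = N0**2.0 * np.maximum(y, h)  # mixed up to h, linear background above
b_bg = N0**2.0 * y

z_enc = np.sqrt(np.trapz(b - b_bg, y) * 2.0 / N0**2.0)
print(z_enc)  # ~4.0, recovering h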
Exemplo n.º 39
0
def column_density_grid(r_grid,
                        theta_grid,
                        radial_dist,
                        normalization,
                        dist_params,
                        delta=0.1):
    '''
    Subroutine to obtain optical depth that outgoing light ray
    experiences from each radial layer.

    Parameters
    ----------
    r_grid : array_like
        Shell radius in cm.
    theta_grid : array_like
        Angle.
    radial_dist : function
        Radial number density distribution n(r).
    normalization : Scalar
        Normalization factor for radial_dist which guarantees 
        ∫ n(r) * dr = 1.
    dist_params : array_like
        Distribution parameters passed into radial_dist.
    delta : Scalar
        Differential element along |r - r'| in au. Default 0.1.

    Returns
    -------
    N_grid_normed : 3-D array
        N_grid_normed[i, j, k] stores unit column density along |r - r'| where
        r and r' are vectors of length r_grid[i] and r_grid[k], respectively.
        The angle between the two vectors is theta_grid[j].
    r_to_r : 3-D array
        |r - r'|.
    '''

    # Convert delta into CGS unit
    au = 1.49597871e+13  # cm per au
    delta *= au

    # Scaling
    scaling_factor = r_grid.min()
    r_grid_scaled = r_grid / scaling_factor
    delta_scaled = delta / scaling_factor

    # Initialize tau grid
    N_grid_scaled = np.zeros(
        (r_grid.shape[0], theta_grid.shape[0], r_grid.shape[0]))
    # r to r'
    r_to_r = np.zeros((r_grid.shape[0], theta_grid.shape[0], r_grid.shape[0]))

    # Cos(theta)
    cos_theta_grid = np.cos(theta_grid)

    for i, r_strt in enumerate(r_grid_scaled):
        for j, cos_theta in enumerate(cos_theta_grid):
            for k, r_end in enumerate(r_grid_scaled):
                if (r_strt == r_end) & (cos_theta == 1.0):
                    N_grid_scaled[i, j, k] = 0.0
                    r_to_r[i, j, k] = 0.0
                else:
                    r_to_r[i, j, k] = np.sqrt(r_strt**2 + r_end**2 -
                                              2 * r_strt * r_end * cos_theta)
                    cos_phi = (r_strt - r_end * cos_theta) / r_to_r[i, j, k]
                    d = np.arange(delta_scaled, r_to_r[i, j, k], delta_scaled)
                    r2 = r_strt**2 + (r_to_r[i, j, k] - d)**2 - 2 * r_strt * (
                        r_to_r[i, j, k] - d) * cos_phi
                    r2 *= (r2 > 0)
                    r = np.sqrt(r2)
                    N_grid_scaled[i, j,
                                  k] = np.trapz(radial_dist(r, dist_params), d)

    return N_grid_scaled * scaling_factor * normalization, r_to_r * scaling_factor
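A hypothetical usage sketch (none of these values come from the original source). Note that the routine rescales radii by `r_grid.min()` before calling `radial_dist`, so the profile and its normalization are most simply expressed in those scaled units:

import numpy as np

au = 1.49597871e+13  # cm per au

def radial_dist(r, params):
    # Hypothetical truncated power law n(r) ~ r**-p (scaled-radius units).
    p, r_in, r_out = params
    return np.where((r >= r_in) & (r <= r_out), r**(-p), 0.0)

r_grid = np.linspace(10.0, 100.0, 20) * au  # 10-100 au shell
theta_grid = np.linspace(0.0, np.pi, 10)

dist_params = (2.0, 1.0, 10.0)  # index, inner/outer cut-offs in scaled units
rr = np.linspace(1.0, 10.0, 1000)
normalization = 1.0 / np.trapz(radial_dist(rr, dist_params), rr)

N_grid, r_to_r = column_density_grid(r_grid, theta_grid, radial_dist,
                                     normalization, dist_params)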
Exemplo n.º 40
0
def area_between(f, g, dx):
    """Integrated relative deviation of curve f from reference g,
    computed with the composite trapezoidal rule (sample spacing dx)."""
    h = abs(g - f) / g
    A = np.trapz(h, dx=dx)

    return A
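For example, comparing a perturbed curve against a reference (the values here are illustrative):

import numpy as np

x = np.linspace(0.0, 1.0, 101)
g = np.exp(x)                           # reference curve
f = g * (1.0 + 0.01 * np.sin(20 * x))   # ~1% oscillating perturbation
print(area_between(f, g, dx=x[1] - x[0]))  # roughly 0.006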
Exemplo n.º 41
0
    def fitCurve(self, startpot, endpot, forward, scanrate):
        #FIXME: get all lines
        #line = self.canvas.theplot[0]
        xdata, ydata = self.canvas.theplot[0].get_data()

        ydata = savgol_filter(np.array(ydata).astype(float), 11, 1)


        #FIXME: when not cyclic voltammetry (same number of forward/backward data points)
        if forward:
            limit0 = 0
            limit1 = int(len(xdata)/2)
            offset = len(xdata) - limit1
        else:
            limit0 = int(len(xdata)/2)
            limit1 = len(xdata)
            offset = limit1-limit0


        #limit0 = 0
        #limit1 = len(xdata)
        #offset = limit1-limit0

        print("limits: ", limit0, "  ", limit1)
        print("potentials: ", startpot, "  ", endpot)

        ind1, indvalue1 = find_nearest(np.array(xdata[limit0:limit1]).astype(float), float(startpot))
        ind2, indvalue2 = find_nearest(np.array(xdata[limit0:limit1]).astype(float), float(endpot))

        print (ind1, indvalue1)
        print (ind2, indvalue2)

        if not forward:
            ind1 += offset
            ind2 += offset

        print("ind1, ind2: ", ind1, ind2)

        xdata = xdata[ind1:ind2]
        ydata = ydata[ind1:ind2]

        xdata = np.array(xdata).astype(float)
        ydata = np.array(ydata).astype(float)

        if not forward:
            ydata = -ydata

        baseline = peakutils.baseline(ydata, 1)
        diff = np.array(ydata) - np.array(baseline)

        # Compute the charge with the composite trapezoidal rule:
        # Q = integral of i dt, converting the potential axis to time via
        # the scan rate.
        xdatatime = np.array(xdata).astype(float) / float(scanrate)  # convert to s
        print("potential range: ", xdata)
        print("scanrate: ", scanrate)
        print("time: ", xdatatime)
        ydataamps = np.array(diff).astype(float) / 1000  # convert to A
        print("currents: ", ydataamps)
        area = np.trapz(y=ydataamps, x=xdatatime)
        print("area = %.6f C" % area)


        indexes = peakutils.indexes(ydata, thres=0.01, min_dist=0.01)
        print(indexes)

        peaks_x = peakutils.interpolate(xdata, ydata, ind=indexes)
        print("peaks: ", peaks_x)

        peakheights = []

        for i in range(len(indexes)):
            height = ydata[indexes[i]] - baseline[indexes[i]]
            peakheights.append(height)

        peakpotentials = xdata[indexes]

        print("peak potentials: ", peakpotentials)
        print("peak heights: ", peakheights)

        if not forward:
            ydata = -ydata
            diff = -diff
            baseline = -baseline




        fig, axis = plt.subplots(1, 1)
        line2, = axis.plot(xdata, ydata, ls="-", label="data")
        line3, = axis.plot(xdata, baseline, ls=":", label="baseline")
        line4, = axis.plot(xdata, diff, ls=":", label="corrected")
        axis.set_xlabel("E")
        axis.set_ylabel("i")
        plt.title("Charge = %.6f C" % area, fontsize=12)
        leg = axis.legend(loc='best', shadow=False)
        fig.tight_layout()
        fig.show()
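`find_nearest` is called above but not shown. A hypothetical sketch consistent with how it is used here, returning the index and value of the array element closest to a target:

import numpy as np

def find_nearest(array, value):
    # Index and value of the element of `array` nearest to `value`.
    idx = int(np.argmin(np.abs(array - value)))
    return idx, array[idx]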
Exemplo n.º 42
0
def integralColossusM2(z, Mmin):
    # Log-spaced mass grid from 10**Mmin to 10**20.
    marray = 10**np.arange(Mmin, 20, 0.05)
    to_integrate = np.array([m * MfunctionColossusLog(m, z) for m in marray])
    return np.trapz(to_integrate, marray)
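Since `marray` is log-spaced, the same integral can also be taken in log-mass via the change of variables ∫ f(m) dm = ∫ m f(m) d(ln m), which trapz typically handles more accurately on such grids. A toy comparison (the power-law integrand is a stand-in, not the real mass function):

import numpy as np

def f(m):
    return m**-2.0  # toy stand-in for m * MfunctionColossusLog(m, z)

marray = 10**np.arange(8.0, 20.0, 0.05)

I_lin = np.trapz(f(marray), marray)                   # integrate over m
I_log = np.trapz(marray * f(marray), np.log(marray))  # integrate over ln m
print(I_lin, I_log)  # exact value is ~1e-8; the log-space result is closer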
Exemplo n.º 43
0
    # Boundary for surface of interest
    bndAve_sig_zz[k] = (1 / S) * (
        ((zcent) * np.trapz(srf_sig_zz[0, :], dx=dx)) +
        (-(-zcent) * np.trapz(srf_sig_zz[-1, :], dx=dx)) + (-np.trapz(
            (srf_sig_xz[:, 0] * kkkk_z), dx=dx)) + (np.trapz(
                (srf_sig_xz[:, -1] * kkkk_z), dx=dx)))

    bndAve_sig_xz[k] = (1 / (2 * S)) * (
        ((zcent) * np.trapz(srf_sig_xz[0, :], dx=dx)) + (np.trapz(
            (srf_sig_zz[0, :] * (-kkkk)), dx=dx)) + (-np.trapz(
                (srf_sig_xx[:, 0] * (kkkk_z)), dx=dz)) +
        (-(-xi / 2) * np.trapz(srf_sig_xz[:, 0], dx=dz)) +
        (-(-zcent) * np.trapz(srf_sig_xz[-1, :], dx=dx)) + (-np.trapz(
            (srf_sig_zz[-1, :] * (-kkkk)), dx=dx)) + (np.trapz(
                (srf_sig_xx[:, -1] * (kkkk_z)), dx=dz)) +
        ((xi / 2) * np.trapz(srf_sig_xz[:, -1], dx=dz)))

    uz2[k] = np.trapz(U_z[it, 0:len(U_z[0, :]) // 2], xcT) - np.trapz(