Example #1
    def calc_stream(self):
        """computes and returns the stream function

        only makes sense with vp fields
        """
        # should add test if vp fields or not
        vphi = self.fields['v'][:, :, 0]
        # interpolate to the same phi
        vph2 = -0.5 * (vphi + np.roll(vphi, 1, 1))
        v_r = self.fields['w'][:, :, 0]
        n_r, nph = np.shape(v_r)
        stream = np.zeros(np.shape(vphi))
        # integrate first on phi
        stream[0, 1:nph - 1] = self.rcmb * \
            integrate.cumtrapz(v_r[0, 0:nph - 1], self._ph_coord)
        stream[0, 0] = 0
        # use r coordinates where vphi is defined
        rcoord = self.rcmb + np.array(
            self.rgeom[0:np.shape(self.rgeom)[0] - 1:2])
        for iph in range(0, np.shape(vph2)[1] - 1):
            stream[1:n_r, iph] = stream[0, iph] + \
                integrate.cumtrapz(vph2[:, iph], rcoord)  # integrate on r
        stream = stream - np.mean(stream[n_r // 2, :])
        # remove some typical value.
        # Would be better to compute the global average
        # taking into account variable grid spacing
        return stream
Example #2
    def calculate_pressure_perturbation(self):
        """Perturbation pressure divided by density.
        Assumes hydrostatic balance.
        See: Kunze et al. 2002 JPO.
        See: Nash et al. 2005 JPO."""

        self.Pprime = self.P.copy()

        for i in range(len(self.hpid)):

            nans = np.isnan(self.P[:, i])

            z = self.z[~nans, i]
            b = self.b[~nans, i]

            # z should be increasing.
            if z[0] > z[-1]:
                z = np.flipud(z)
                b = np.flipud(b)

                bi = cumtrapz(b, z, initial=0.)
                bii = cumtrapz(bi, z, initial=0.)

                Pprime = bi + (bii[0] - bii[-1])/(-z[0])

                self.Pprime[~nans, i] = np.flipud(Pprime)

            else:
                bi = cumtrapz(b, z, initial=0.)
                bii = cumtrapz(bi, z, initial=0.)

                self.Pprime[~nans, i] = bi + (bii[0] - bii[-1])/(-z[0])
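
The constant added to bi in this example removes the depth mean of the integrated buoyancy (cf. the Kunze/Nash references above). A minimal standalone sketch of the same construction, using a hypothetical synthetic buoyancy profile with the surface at z = 0:

import numpy as np
from scipy.integrate import cumtrapz

z = np.linspace(-1000.0, 0.0, 501)          # depth grid, increasing, surface at z = 0
b = 1e-6 * np.sin(2 * np.pi * z / 500.0)    # synthetic buoyancy perturbation

bi = cumtrapz(b, z, initial=0.)             # first integral of b
bii = cumtrapz(bi, z, initial=0.)           # second integral, used for the depth mean
Pprime = bi + (bii[0] - bii[-1]) / (-z[0])  # same construction as above

# The depth average of Pprime is ~0, i.e. the constant term removed the mean.
print(np.trapz(Pprime, z) / (z[-1] - z[0]))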
 def geo_amin( self, damage, Lmax ):
         l_ez = self.sorted_l_ez
         depsmax = self.sorted_depsf[0]
         a = np.linspace( 0, np.min( ( self.amin_it[-1] * 1.2, Lmax + 1e-6 ) ) , self.discr_amin )
         Kf = self.Kf
         a_shaped = a.reshape( len( a ), 1 )
         l_ez2D = l_ez.reshape( 1, len( l_ez ) )
         m_p = a_shaped / l_ez2D
         p = np.abs( H( 1 - m_p ) * ( 1 - m_p ) )
         ####
         muT = np.sum( self.sorted_depsf * ( 1 - damage ) * Kf * p , 1 )
         Kf_intact = np.sum( ( 1 - damage ) * Kf * ( 1 - p ) , 1 ) 
         Kf_broken = np.sum( Kf * damage )
         Emtrx = ( 1. - self.V_f_tot ) * self.E_m + Kf_broken + Kf_intact
         depsm = muT / Emtrx
         em = np.hstack( ( 0, cumtrapz( depsm, a ) ) )
         um = np.hstack( ( 0, cumtrapz( em , a ) ) )
         ######
         # Aphi = pi * 0.03 ** 2 / np.cos( self.sorted_phi )
         # llambda = ( 0.03 - 0.03 / np.cos( self.sorted_phi) ) / ( 0.03 + 0.03/ np.cos( self.sorted_phi ) )
         # Uphi = pi * ( 0.03 + 0.03/ np.cos( self.sorted_phi ) ) * ( 1. + 3.*llambda ** 2. / ( 10. + ( 4 - 3. * llambda ** 2. ) ) )
         ######
         condI = self.w / 2. - ( depsmax * a ** 2. / 2. + em * a - um )
         ip_f = interp1d( condI[::-1], a[::-1], bounds_error = False, fill_value = a[-1] )
         cut_at = np.nonzero( H( ip_f( 0 ) - a ) )
         
         a_new = np.hstack( ( a[cut_at], ip_f( 0 ) ) )
         amin = a_new
         self.amin_it = a_new
         # interp depsm and em
         ind = len( amin )
         interp_data = ( a_new[-1] - a[ind - 2] ) / ( a[ind - 1 ] - a[ind - 2] )
         diff = depsm[ind - 1 ] - depsm[ind - 2]
         depsm[ind - 1] = depsm[ind - 2] + interp_data * diff
         return amin, depsm[:len( amin ) ], Emtrx[:len( amin ) ]
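
The np.hstack((0, cumtrapz(...))) pattern used in geo_amin keeps the cumulative integral the same length as the abscissa; cumtrapz's initial=0 argument is an equivalent spelling. A small sketch with synthetic data (names are illustrative only):

import numpy as np
from scipy.integrate import cumtrapz

a = np.linspace(0.0, 1.0, 50)
depsm = np.exp(-a)                               # stand-in for a strain gradient

em_hstack = np.hstack((0, cumtrapz(depsm, a)))   # pattern used in geo_amin
em_initial = cumtrapz(depsm, a, initial=0)       # equivalent call

print(np.allclose(em_hstack, em_initial))        # True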
Example #4
def prepare_mf(mpart, grid, mf_kwargs):
    M = np.linspace(np.log10(mpart), np.log10(grid.max()), 2000)
    mf_obj = MassFunction(M=M, **mf_kwargs)

    mf = mf_obj.dndm
    m_outside_range = mf_obj.mltm[0] + mf_obj.mgtm[-1]

    cumfunc = cumtrapz(10 ** M * mf, M, initial=0) * np.log(10)

    cdf = spline(M, cumfunc, k=3)
    icdf = spline(cumfunc, M, k=3)

    if MAKE_PLOTS:
        plt.clf()
        plt.plot(M, cumfunc)

        plt.plot(M, cdf(M))
        plt.savefig("cumfunc.pdf")

        plt.clf()
        mcumfunc = cumtrapz(10 ** (2 * M) * mf, dx=M[1] - M[0], initial=1e-20) * np.log(10)
        plt.plot(M, mcumfunc)
        plt.savefig("mcumfunc.pdf")

        # How much mass is above 10**12.5
        minvcumfunc = cumtrapz(10 ** (2 * M[::-1]) * mf[::-1], dx=M[1] - M[0]) * np.log(10)
        minvcumfunc = np.concatenate((np.array([minvcumfunc[0]]), minvcumfunc))
        minvcumfunc /= minvcumfunc[-1]
        plt.clf()
        plt.plot(M, minvcumfunc[::-1])
        plt.yscale('log')
        plt.grid(True)
        plt.savefig("minvcumfunc.pdf")

    return cdf, icdf, M, mf, m_outside_range
 def amin_i( self, demsi, depsf0, depsf_smaller, a1, damage, idx ):
         if a1 > np.max( self.sorted_l_ez[self.sf_mask] ):
             amin = 1. / ( demsi + depsf0 ) * ( ( demsi + depsf0 ) * ( demsi + depsf_smaller ) ) ** ( .5 ) * a1
         else:
             l_ez_behind_a1 = self.sorted_l_ez[idx + 1:] 
             a = np.linspace( a1, 1.2 * self.amin_i_guess  , 500 )
             Kf = self.Kf
             a_shaped = a.reshape( len( a ), 1 )
             l_ez_behind_a1 = l_ez_behind_a1.reshape( 1, len( l_ez_behind_a1 ) )
             m_p = a_shaped / l_ez_behind_a1
             p = np.abs( H( 1 - m_p ) * ( 1 - m_p ) )
             ####
             muT = np.sum( self.sorted_depsf[idx + 1:] * ( 1 - damage[idx + 1:] ) * Kf[idx + 1:] * p, 1 )
             Kf_intact = np.sum( ( 1 - damage[idx + 1:] ) * Kf[idx + 1:] * ( 1 - p ) , 1 ) + np.sum( ( 1 - damage[:idx + 1] ) * Kf[:idx + 1] ) 
             Kf_broken = np.sum( Kf * damage )
             Emtrx = ( 1. - self.V_f_tot ) * self.E_m + Kf_broken + Kf_intact
             depsm = muT / Emtrx
             em = np.hstack( ( 0, cumtrapz( depsm, a ) ) )
             um = np.hstack( ( 0, cumtrapz( em , a ) ) )
             condI = a ** 2. / 2. * depsf0 + a * em - um - a1 ** 2 / 2.*depsf_smaller
             ip_f = interp1d( condI, a, bounds_error = False, fill_value = a[-1] )
             cut_at = np.nonzero( H( ip_f( 0 ) - a ) )
             amin = np.hstack( ( a[cut_at], ip_f( 0 ) ) )
             amin = amin[-1]
             self.amin_i_guess = amin
         return amin
 def geo_amin(self, damage, Lmax):
     phi = self.sorted_phi
     Vf = self.sorted_V_f
     lf = self.sorted_lf
     al = self.sorted_l_ez
     depsmax = self.sorted_depsf[0]
     Ef = self.sorted_E_f
     a = np.linspace(0, np.min((self.amin_it[-1] * 1.2, Lmax + 1e-6)), self.discr_amin)
     Kf = Vf * self.sorted_nu_r * self.sorted_stats_weights * Ef * self.soc
     a_shaped = a.reshape(len(a), 1)
     al = al.reshape(1, len(al))
     m_p = a_shaped / al
     p = np.abs(H(1 - m_p) * (1 - m_p))
     ####
     muT = np.sum(self.sorted_depsf * (1 - damage) * Kf * p, 1)
     Kf_intact = np.sum((1 - damage) * Kf * (1 - p), 1)
     Kf_broken = np.sum(Kf * damage)
     Emtrx = (1.0 - self.V_f_tot) * self.E_m + Kf_broken + Kf_intact
     depsm = muT / Emtrx
     em = np.hstack((0, cumtrapz(depsm, a)))
     um = np.hstack((0, cumtrapz(em, a)))
     condI = self.w / 2.0 - (depsmax * a ** 2.0 / 2.0 + em * a - um)
     ip_f = interp1d(condI[::-1], a[::-1], bounds_error=False, fill_value=a[-1])
     cut_at = np.nonzero(H(ip_f(0) - a))
     a_new = np.hstack((a[cut_at], ip_f(0)))
     amin = a_new
     self.amin_it = a_new
     # interp depsm and em
     ind = len(amin)
     interp_data = (a_new[-1] - a[ind - 2]) / (a[ind - 1] - a[ind - 2])
     diff = depsm[ind - 1] - depsm[ind - 2]
     depsm[ind - 1] = depsm[ind - 2] + interp_data * diff
     return amin, depsm[: len(amin)]
 def geo_amin_lmin(self, damage, depsf, umLmin, emLmin, Lmin, idx):
     phi = self.sorted_phi
     Vf = self.sorted_V_f
     depsmax = depsf
     Ef = self.sorted_E_f
     al = self.sorted_l_ez
     a = np.linspace(Lmin, Lmin * 1.5, self.discr_amin)
     Kf = Vf * self.sorted_nu_r * self.sorted_stats_weights * Ef * self.soc
     a_shaped = a.reshape(len(a), 1)
     al = al.reshape(1, len(al))
     m_p = a_shaped / al
     p = np.abs(H(1 - m_p) * (1 - m_p))
     ####
     muT = np.sum(self.sorted_depsf[idx:] * (1 - damage[idx:]) * Kf[idx:] * p[:, idx:], 1)
     Kf_intact = np.sum(((1 - damage) * Kf * (1 - p))[:, idx:], 1)
     Kf_broken = np.sum(Kf * damage)
     Emtrx = (1.0 - self.V_f_tot) * self.E_m + Kf_broken + Kf_intact
     depsm = muT / Emtrx
     emr = np.hstack((emLmin, cumtrapz(depsm, a)))
     umr = np.hstack((umLmin, cumtrapz(emr, a)))
     condI = self.w - (
         Lmin * (emr - emLmin + (a - Lmin) * depsmax)
         + depsmax * Lmin ** 2.0 / 2.0
         + emLmin * Lmin
         - umLmin
         + (depsmax * a ** 2.0 / 2.0 + emr * a - umr)
     )
     ip_f = interp1d(condI[::-1], a[::-1], bounds_error=False, fill_value=a[-1])
     cut_at = np.nonzero(H(ip_f(0) - a))
     amin = np.hstack((a[cut_at], ip_f(0)))
     return amin[-1]
def cumtrapzmid(x, y, c):
    """
    cumulative trapezoidal numerical integration taken from midpoint

    :param x: vector of size N describing the time samples
    :param y: vector of size N describing the function
    :param c: midpoint

    :rtype: vector
    :return fa: cumulative integration

    """
    a = x.shape[0]
    mid = int(round(a / 2.))

    # case < mid
    fa = zeros(a)
    tmpx = x[0:mid]
    tmpy = y[0:mid]
    tmp = c + cumtrapz(tmpy[::-1], tmpx[::-1], initial=0)
    fa[0:mid] = tmp[::-1]

    # case >= mid
    fa[mid:a] = c + cumtrapz(y[mid - 1:a - 1], x[mid - 1:a - 1], initial=0)

    return fa
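
A usage sketch for cumtrapzmid, assuming it is in scope together with the module-level numpy zeros and scipy cumtrapz it relies on; the data below are hypothetical:

import numpy as np
from numpy import zeros
from scipy.integrate import cumtrapz

x = np.linspace(0.0, 2 * np.pi, 201)
y = np.cos(x)
c = 0.0                                  # value assigned at the midpoint

fa = cumtrapzmid(x, y, c)
mid = int(round(x.shape[0] / 2.))
# Below the midpoint, fa tracks the antiderivative sin(x) - sin(x[mid - 1])
# to within trapezoidal error (the upper half is offset by one sample).
print(np.max(np.abs(fa[:mid] - (np.sin(x[:mid]) - np.sin(x[mid - 1])))))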
 def amin_i(self, demsi, depsf0, depsf_smaller, a1, damage, idx):
     Vf = self.sorted_V_f
     Ef = self.sorted_E_f
     al = self.sorted_l_ez[idx + 1 :]
     # print al
     a = np.linspace(a1, self._a_long[idx + 1] * 1.2, self.discr_amin)
     Kf = Vf * self.sorted_nu_r * self.sorted_stats_weights * Ef * self.soc
     a_shaped = a.reshape(len(a), 1)
     al = al.reshape(1, len(al))
     m_p = a_shaped / al
     p = np.abs(H(1 - m_p) * (1 - m_p))
     # print p
     ####
     muT = np.sum(self.sorted_depsf[idx + 1 :] * (1 - damage[idx + 1 :]) * Kf[idx + 1 :] * p, 1)
     # print muT
     Kf_intact = np.sum((1 - damage[idx + 1 :]) * Kf[idx + 1 :] * (1 - p), 1) + np.sum(
         (1 - damage[: idx + 1]) * Kf[: idx + 1]
     )
     Kf_broken = np.sum(Kf * damage)
     Emtrx = (1.0 - self.V_f_tot) * self.E_m + Kf_broken + Kf_intact
     depsm = muT / Emtrx
     em = np.hstack((0, cumtrapz(depsm, a)))
     um = np.hstack((0, cumtrapz(em, a)))
     condI = a ** 2.0 / 2.0 * depsf0 + a * em - um - a1 ** 2 / 2.0 * depsf_smaller
     ip_f = interp1d(condI, a, bounds_error=False, fill_value=a[-1])
     cut_at = np.nonzero(H(ip_f(0) - a))
     amin = np.hstack((a[cut_at], ip_f(0)))
     # print self.sorted_depsf[self.sf_mask][0]
     # print self.sorted_depsf[self.c_mask][0]
     return amin[-1]
Example #10
	def simulateBackward(self, direction=1):
		'''
		Propagate the signal in the backward direction using the population
		found in the previous forward iteration. Since N2 and N1 are constant
		we can solve each equation with a simple integration.
		'''

		# Get the initial conditions
		Pp_ini = self.P_p_b[:,-1]
		Ps_ini = self.P_s_b[:,-1]
		Pase_ini = self.P_ase_b[:,-1]
		
		self.invSptProfil()

		for m in arange(self.nbrPump):
			integrant = sign(direction)*(-self.sigma_abs_p[m]*self.N1[::-1] - self.alpha_p) * self.dopedFiber.pumpOverlap(self.pumpWL[m])
			self.P_p_b[m,::-1] = r_[Pp_ini[m], Pp_ini[m]*exp(integrate.cumtrapz(integrant, self.z))]

		for l in arange(self.nbrSignal):
			integrant = sign(direction)*(self.sigma_em_s[l]*self.N2[::-1] - self.sigma_abs_s[l]*self.N1[::-1] - self.alpha_s)
			integrant *= self.dopedFiber.modeOverlap(self.signalWL[l], self.sigDC[l])
			self.P_s_b[l,::-1] = r_[Ps_ini[l], Ps_ini[l]*exp(integrate.cumtrapz(integrant, self.z))]

		for v in arange(self.nbrAse):
			integrant = sign(direction)*(self.sigma_em_ase[v]*self.N2[::-1] - self.sigma_abs_ase[v]*self.N1[::-1] - self.alpha_ase)
			integrant *= self.dopedFiber.modeOverlap(self.aseWL[v])
			integrant2 = sign(direction)*2*(h*c/(self.aseWL[v]*1E-6)) * self.delta_nu[v] * self.sigma_em_ase[v]*self.N2[::-1]
			integrant2 *= self.dopedFiber.modeOverlap(self.aseWL[v])

			sol = integrate.cumtrapz(integrant, self.z)
			solTerme1 = exp(sol)
			solTerme1b = r_[1.0, exp(-sol)]
			solTerme2 = solTerme1 * integrate.cumtrapz(integrant2*solTerme1b, self.z)
			self.P_ase_b[v,::-1] = r_[Pase_ini[v], Pase_ini[v]*solTerme1 + solTerme2]
def exchange(a, b, grid, end_point=-1):
    abc, asc, al, aj = a  # radial and angular for a
    bbc, bsc, bl, bj = b  # radial and angular for b
    jmax, jmin = max(aj, bj), min(aj, bj)
    ro, w, h = grid  # radial grid and weights
    end_ind = end_point < 0 and len(ro) or (sc.where(ro > end_point)[0][0]+1)
    cabc = abc[:end_ind]
    casc = asc[:end_ind]
    cbbc = bbc[:end_ind]
    cbsc = bsc[:end_ind]
    cro = ro[:end_ind]
    cw = w[:end_ind]
    k = jmax - jmin
    if (k+al+bl) % 2 != 0:
        k += 1
    dens0 = cabc*cbbc+casc*cbsc

    dens_kg = dens0/cro**(k+1)
    dens_kl = dens0*cro**k
    res = 0e0
    while (k <= (jmax+jmin)):
        dens_g_int = cumtrapz(dens_kg*cw, initial=0e0)*h
        # outer integration from r to infinity
        dens_g_int = dens_g_int[-1] - dens_g_int
        dens_l_int = cumtrapz(dens_kl*cw, initial=0e0)*h
        res_k = sc.trapz((dens_kg*dens_l_int+dens_kl*dens_g_int)*cw)*h
        res += res_k*w3js0[(jmax, jmin, k)]
        k += 2
        dens_kg = dens_kg/cro**2
        dens_kl = dens_kl*cro**2
    return res
Example #12
 def test_kernels_stddev(self):
     """
     Test that the standard deviation calculated from the kernel (almost)
     equals the parameter sigma with which the kernel was constructed.
     """
     sigma = 0.5 * pq.s
     kernel_resolution = sigma / 50.0
     for invert in (False, True):
         kernel_list = [kernel_type(sigma, invert) for
                        kernel_type in self.kernel_types]
         for kernel in kernel_list:
             b = kernel.boundary_enclosing_area_fraction(self.fraction).magnitude
             restric_defdomain = \
                 np.linspace(-b, b, int(2*b/kernel_resolution.magnitude)) * \
                 sigma.units
             kern = kernel(restric_defdomain)
             av_integr = kern * restric_defdomain
             average = spint.cumtrapz(y=av_integr.magnitude,
                                      x=restric_defdomain.magnitude)[-1] * \
                       sigma.units
             var_integr = (restric_defdomain-average)**2 * kern
             variance = spint.cumtrapz(y=var_integr.magnitude,
                                       x=restric_defdomain.magnitude)[-1] * \
                        sigma.units**2
             stddev = np.sqrt(variance)
             self.assertAlmostEqual(stddev, sigma, delta=0.01*sigma)
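
The test above relies on the fact that the last element returned by cumtrapz equals the full trapezoidal integral. A minimal sketch of the same moment calculation for a plain Gaussian pdf, without the quantities/units machinery (synthetic data only):

import numpy as np
from scipy.integrate import cumtrapz

sigma = 0.5
x = np.linspace(-5 * sigma, 5 * sigma, 2001)
pdf = np.exp(-x ** 2 / (2 * sigma ** 2)) / (sigma * np.sqrt(2 * np.pi))

average = cumtrapz(x * pdf, x)[-1]                    # first moment, ~0
variance = cumtrapz((x - average) ** 2 * pdf, x)[-1]  # second central moment
print(np.sqrt(variance))                              # ~0.5, recovers sigma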
Example #13
 def str_str(self, f):
     """
     Gets str_str.out file and analyze it
     """
     lines = f.read()
     lines = lines.split('\n')
     EVM, SVM, E11, E22, E33, S11, S22 = [], [], [], [], [], [], []
     nline = len(lines) - 1
     for i in range(nline + 5):
         try:
             EVM.append(float(lines[i+1].split()[0]))
             SVM.append(float(lines[i+1].split()[1]))
             E11.append(float(lines[i+1].split()[2]))
             E22.append(float(lines[i+1].split()[3]))
             E33.append(float(lines[i+1].split()[4]))
             S11.append(float(lines[i+1].split()[8]))
             S22.append(float(lines[i+1].split()[9]))
         except IndexError:
             break
     workx = integrate.cumtrapz(y=S11, x=E11)
     worky = integrate.cumtrapz(y=S22, x=E22)
     workTotal = []
     for k in range(len(workx)):
         workTotal.append(workx[k]+worky[k])
     return S11, S22, E11, E22, workTotal, workx, worky
Example #14
    def plot(self, wlrange=[1.2e-6, 1.6e-6]):
        wls = np.linspace(wlrange[0], wlrange[1], 100)
        omega = 2 * pi * 3e8 / (wls)

        betas = np.array([self.beta(wl, [1, 2, 3]) for wl in wls])
        gammas = np.array([self.gamma(wl) for wl in wls])

        pl.subplot(311)
        pl.plot(wls / u_, betas[:, 0], "b--", label=r"$\beta_1$")
        pl.subplot(312)
        pl.plot(wls / u_, betas[:, 1], "g--", label=r"$\beta_2$")
        pl.subplot(313)
        pl.plot(wls / u_, betas[:, 2], "r--", label=r"$\beta_3$")

        # Integrate:
        D = 0.25 * self.S0 * (wls - self.lambda0 ** 4 / wls ** 3)
        b1_calc = integrate.cumtrapz(D, wls)
        b1_calc = np.append(b1_calc, b1_calc[-1])

        b_calc = integrate.cumtrapz(b1_calc, omega)
        b_calc = np.append(b_calc, b_calc[-1])
        print(b1_calc + betas[0, 0])

        # betaw = betas[:,0]*omega+betas[:,1]*omega**2+betas[:,2]*omega**3
        pl.subplot(311)
        pl.plot(wls / u_, b1_calc + betas[0, 0], "r-", label=r"$\beta_1 calc$")
 def geo_amin( self, lf, phi, depsmax, Kc, Ef, Vf, damage ):
         # print self.amin_it
         a = np.linspace( 0, self.amin_it, 300 )
         Kf = Vf * self.sorted_nu_r * self.sorted_stats_weights * Ef
         a_shaped = a.reshape( len( a ), 1 )
         phi = phi.reshape( 1, len( phi ) )
         m_p = a_shaped * 2 / np.cos( phi ) / lf
         mask1 = m_p > 0
         m_p = m_p * mask1
         p = np.abs( H( 1 - m_p ) * ( 1 - m_p ) )
         muT = np.sum( self.sorted_depsf * ( 1 - damage ) * Kf * p , 1 )
         Kf_intact = np.sum( ( 1 - damage ) * Kf * p , 1 )[::-1]
         Kf_broken = np.sum( Kf * damage )
         Emtrx = ( 1. - self.V_f_tot ) * self.E_m + Kf_broken + Kf_intact
         depsm = muT / Emtrx
         # print depsm
         em = np.hstack( ( 0, cumtrapz( depsm, a ) ) )
         um = np.hstack( ( 0, cumtrapz( em , a ) ) )
         plt.plot( a, em )
         plt.plot( a, um )
         plt.show()
         ind = np.argmin( np.abs( self.w - depsmax * a ** 2. / 2. + em * a - um ) )
         amin = a[:ind + 1]
         self.amin_it = 1.2 * a[ind]
         # print amin
         return amin, em[:ind + 1]
Example #16
def properdistance(z,omegam=0.3,omegax=0.7,w0=-1,w1=0,wz=None):
    """
    Gives the proper distance in the defined cosmology.
    The c/H0 factor is omitted.
    Returns dist(z), w(z), omegax(z), H(z), curvature
    """
    # if no wz is given on input, then calculate it from w0 and w1
    if wz is None: wz=w0+(z*1./(1.+z))*w1
    # calculate evolution of omegax accounting for its equation of state
    omegaxz=zeros(z.size)
    omegaxz[0]=omegax
    omegaxz[1:z.size]=omegax*exp(3*integrate.cumtrapz((1.+wz)/(1.+z),x=z))

    # curvature
    omega=omegam+omegax
    
    # calculation of H(z)
    hz=sqrt((1.-omega)*(1+z)**2+omegaxz+omegam*(1+z)**3)

    # calculate chi
    chi=zeros(z.size)
    chi[1:z.size]=integrate.cumtrapz(1./hz,x=z)

    #calculate proper distance
    if omega>1: curv=1
    if omega<1: curv=-1
    if omega==1: curv=0
    kk=abs(1.-omega)
    if curv==1: dist=sin(sqrt(kk)*chi)/sqrt(kk)
    if curv==-1: dist=sinh(sqrt(kk)*chi)/sqrt(kk)
    if curv==0: dist=chi

    return dist,wz,omegaxz,hz,curv,chi
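
A usage sketch for properdistance, assuming the function is defined in the same module so that the numpy/scipy names it uses are in scope; the cosmology values are just illustrative:

import numpy as np
from numpy import zeros, exp, sqrt, sin, sinh   # names properdistance expects at module level
from scipy import integrate

z = np.linspace(0.0, 2.0, 500)
dist, wz, omegaxz, hz, curv, chi = properdistance(z, omegam=0.3, omegax=0.7)

# With these defaults omega = omegam + omegax = 1, so curv is 0 and dist
# coincides with chi; both carry an implicit factor of c/H0 (see docstring).
print(curv, dist[-1], hz[-1])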
def compute_jdot_grav_profile(snapshot,rad_list,code="AREPO", alpha = 0.1, h0 = 0.1):
    
    X0, Y0 = 0.5 * snapshot.header.boxsize, 0.5 * snapshot.header.boxsize
    vol = snapshot.gas.MASS/snapshot.gas.RHO
    # add gradients
    jdotdens = -snapshot.gas.RHO * ((snapshot.gas.POS[:,0] - X0) * (-snapshot.gas.ACCE[:,1]) - \
                                    (snapshot.gas.POS[:,1] - Y0) * (-snapshot.gas.ACCE[:,0])) 
    ind = snapshot.gas.ID > -2
    jdotdens_av=compute_profiles(jdotdens[ind],snapshot.gas.R[ind],vol[ind],rad_list)
    jdot_av = 2 * np.pi *  cumtrapz((jdotdens_av[:] * rad_list[:])[::-1],x = -rad_list[::-1],initial=0)[::-1]
    #jdot_av = 2 * np.pi *  cumtrapz((jdotdens_av[:] * rad_list[:]),x = rad_list,initial=0)
    
    return np.array(jdot_av)



    
    # Compute the cell-centered quantities
    jdotdens_per_cell = -snapshot.gas.RHO * ((snapshot.gas.POS[:,0] - X0) * (-snapshot.gas.ACCE[:,1]) - \
                                             (snapshot.gas.POS[:,1] - Y0) * (-snapshot.gas.ACCE[:,0])) 
    snapshot.add_data(jdotdens_per_cell,'TORQUEDENS')
    # interpolate onto the grid
    jdotdens_interp = disk_interpolate_primitive_quantities(snapshot,[gridX,gridY],\
                                                            quantities=['TORQUEDENS'],method = 'nearest')[0]

    del snapshot.gas.TORQUEDENS
    
    # In the case of gravity, we need to carry out an additional integration step
    gridR = grid.R.mean(axis=0)
    jdot_interp = cumtrapz((jdotdens_interp * grid.R)[:,::-1],x = -gridR[::-1],initial=0,axis=1)[:,::-1] / grid.R

    
    return jdot_interp
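
The reversed cumtrapz calls above implement a cumulative integral taken from each radius out to the outer boundary, F(r) = integral of f from r to R_max. A minimal sketch of that pattern with a synthetic integrand (names are hypothetical):

import numpy as np
from scipy.integrate import cumtrapz

r = np.linspace(1.0, 10.0, 2000)
f = 1.0 / r ** 2

# Integrate inward from the outer edge, then flip back so F[i] = int_{r[i]}^{r[-1]} f dr.
F = cumtrapz(f[::-1], x=-r[::-1], initial=0)[::-1]

print(np.allclose(F, 1.0 / r - 1.0 / r[-1], atol=1e-4))   # matches the analytic result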
Example #18
def estimatelogn(a,draw):
	de=loadtxt('exp/bc_x_0p5/pdf.csv',delimiter=',')[:,0]*1e-6
	pe=loadtxt('exp/bc_x_0p5/pdf.csv',delimiter=',')[:,1]*1e6
	
	E=a[0]
	V=a[1]**2
	
	mu=log(E)-0.5*log(1+V/E**2)
	sd=sqrt(log(1.+V/E**2))
	
	dn=linspace(0,max(de),200)

	pn=1./(dn*sd*sqrt(2*pi))*exp(-(log(dn)-mu)**2/(2.*sd**2))
	#clogn=0.5*sp.erfc(-(log(dn)-mu)/sqrt(2)/sd)
	
	#DD=12.
	#q=3.
	#logn=q*xp**(q-1.)/DD**q*exp(-(xp/DD)**q)
	#clogn=1.-exp(-(xp/DD)**q)
	
	#pn=logn
	cpn=spi.cumtrapz(pn,x=dn)
	z=array([0])
	cpn=hstack((z,cpn))
	
	cpe=spi.cumtrapz(pe,x=de)
	cpe=hstack((z,cpe))
	
	smd=sum(dot(diff(cpn),diff(dn))*dn[1:]**3)/sum(dot(diff(cpn),diff(dn))*dn[1:]**2)
	smde=sum(dot(diff(cpe),diff(de))*de[1:]**3)/sum(dot(diff(cpe),diff(de))*de[1:]**2)
	
	print(' --------------------')
	print('sum(pn) = ', cpn[-1])
	print('sum(pe) = ', cpe[-1])
	print('E = ', E)
	print('V = ', V)
	print('SD = ', sqrt(V))
	print('mu = ', mu)
	print('var = ', sd**2)
	print('sd = ', sd)
	print('smd  = ', smd)
	print('smde = ', smde)
	print('--------------------')
	
	delta=interp(dn,de,pe)-pn
	
	if draw==1:
		fig = figure()
		ax = fig.add_subplot(1,1,1)
		subplots_adjust( left=0.2, bottom=0.15 )
		ax.plot(dn*1e6,pn,'-k')
		#ax.plot(de*1e6,pe,'s-r')
		ylabel('lognormal pdf ($1/m$)')
		xlabel('Droplet diameter ($\mu m$)')
		#legend(('fitted', 'experiment'))
		grid(0)
	
	return sum(delta**2)/dn[-1]**2 # (smd-smde)**2/smd**2
Example #19
def estimatelogn_plot(a):
	de=loadtxt('exp/bc_x_0p5/pdf.csv',delimiter=',')[:,0]*1e-6
	pe=loadtxt('exp/bc_x_0p5/pdf.csv',delimiter=',')[:,1]*1e6
	
	E=a[0]
	V=a[1]
	
	mu=log(E)-0.5*log(1+V/E**2)
	sd=sqrt(log(1.+V/E**2))
	
	dn=linspace(8e-6,50e-6,200)

	pn=1./(dn*sd*sqrt(2*pi))*exp(-(log(dn)-mu)**2/(2.*sd**2))
	#clogn=0.5*sp.erfc(-(log(dn)-mu)/sqrt(2)/sd)
	
	#DD=12.
	#q=3.
	#logn=q*xp**(q-1.)/DD**q*exp(-(xp/DD)**q)
	#clogn=1.-exp(-(xp/DD)**q)
	
	#pn=logn
	cpn=spi.cumtrapz(pn,x=dn)
	z=array([0])
	cpn=hstack((z,cpn))
	pn=pn/cpn[-1]
	cpn=cpn/cpn[-1]
	
	cpe=spi.cumtrapz(pe,x=de)
	cpe=hstack((z,cpe))
	pe=pe/cpe[-1]
	cpe=cpe/cpe[-1]
	
	smd=sum(pn*dn**3)/sum(pn*dn**2)
	smde=sum(pe*de**3)/sum(pe*de**2)
	
	print(' --------------------')
	print('sum(pn) = ', cpn[-1])
	print('sum(pe) = ', cpe[-1])
	print('E = ', E)
	print('V = ', V)
	print('SD = ', sqrt(V))
	print('mu = ', mu)
	print('var = ', sd**2)
	print('sd = ', sd)
	print('smd  = ', smd)
	print('smde = ', smde)
	print('--------------------')
	
	delta=interp(dn,de,pe)-pn
	
	figure()
	plot(dn*1e6,pn,'-k')
	ylabel('lognormal pdf')
	xlabel('Droplet diameter ($\mu m$)')
	legend(('fitted', 'experiment'))
	grid(1)
	
	return (smd-smde)**2
Example #20
    def test_1d(self):
        x = np.linspace(-2, 2, num=5)
        y = x
        y_int = cumtrapz(y, x, initial=0)
        y_expected = [0., -1.5, -2., -1.5, 0.]
        assert_allclose(y_int, y_expected)

        y_int = cumtrapz(y, x, initial=None)
        assert_allclose(y_int, y_expected[1:])
Example #21
def numerical_potential(r,dens):
    '''Integrates the density profile to solve for the potential profile. This is the two-integral method.
    Returned units are in Mpc^2/s^2.
    '''
    deriv1 = dens*r**2
    deriv2 = dens*r
    inner = cumtrapz(deriv1,r)
    outer = -cumtrapz(deriv2[::-1],r[::-1])
    return -4*np.pi*G*(1.0/r[1:-1]*inner[:-1] + outer[::-1][1:])
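
A usage sketch for numerical_potential, assuming it is defined in the same namespace and that G is the gravitational constant in units consistent with r and dens (the docstring quotes Mpc^2/s^2); the density profile here is a toy power law:

import numpy as np
from scipy.integrate import cumtrapz

G = 4.518e-48                          # gravitational constant in Mpc^3 Msun^-1 s^-2 (approximate)
r = np.logspace(-2, 1, 500)            # radii in Mpc
dens = 1e15 / (4 * np.pi * r ** 2)     # toy density profile in Msun / Mpc^3

phi = numerical_potential(r, dens)     # potential at r[1:-1], in Mpc^2 / s^2
print(phi.shape)                       # (498,)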
Example #22
    def __add__(self,fld):
        newfld = copy.deepcopy(self)
        newfld.vx += fld.vx
        newfld.vy += fld.vy
        newfld.vz += fld.vz
        newfld.vx2 += fld.vx2
        newfld.vy2 += fld.vy2
        newfld.vz2 += fld.vz2
        newfld.KE += fld.KE
        newfld.dTdz += fld.dTdz
        newfld.T += fld.T
        newfld.T2 += fld.T2
        newfld.Fk += fld.Fk
        newfld.Fc += fld.Fc
        newfld.Fe += fld.Fe
        newfld.kap += fld.kap
        newfld.vort += fld.vort
        newfld.area += fld.area
        newfld.vortx += fld.vortx
        newfld.vorty += fld.vorty
        newfld.visc3 += fld.visc3
        newfld.therm += fld.therm
        newfld.P += fld.P
        newfld.dzP += fld.dzP
        newfld.Pflux += fld.Pflux
        newfld.TdzP += fld.TdzP
        newfld.trip1 += fld.trip1
        newfld.trip2 += fld.trip2
        newfld.rey12 += fld.rey12
        newfld.rey13 += fld.rey13
        newfld.rey23 += fld.rey23
        newfld.vflux += fld.vflux
        newfld.keflux += fld.keflux
        newfld.tflux += fld.tflux
        newfld.thermflux += fld.thermflux
        newfld.Fcflux1 += fld.Fcflux1
        newfld.Fcflux2 += fld.Fcflux2
        newfld.visc1 += fld.visc1
        newfld.visc2 += fld.visc2



        newfld.thermflux *= newfld.kap[:,-1][:,np.newaxis]
        newfld.Fb = -newfld.vz*newfld.delad*newfld.z[:,np.newaxis]
        newfld.Nu = newfld.Fc/newfld.Fk

        newfld.Ftot = newfld.Fk + newfld.Fc
        newfld.vrms = np.sqrt(newfld.vz2)

        newfld.s = newfld.T - newfld.delad*newfld.z[:,np.newaxis]
        newfld.dsdz = newfld.dTdz - newfld.delad
        newfld.sk = cumtrapz(-1./newfld.kap[:,-1]-newfld.delad,x=newfld.z,initial=0)
        tmp = cumtrapz((-1./newfld.kap[:,-1]-newfld.delad)[::-1],x=newfld.z[::-1],initial=0)[::-1]
        newfld.sk_t = newfld.s[-1,:] + tmp[:,np.newaxis]
        return newfld
def background_evolution_splines(zmax=10., nsamples=500):
    """
    Get interpolation functions for background functions of redshift:
      * H(z), Hubble rate in km/s/Mpc
      * r(z), comoving distance in Mpc
      * D(z), linear growth factor
      * f(z), linear growth rate
    """
    cosmo = {'omega_M_0':        0.316,
             'omega_lambda_0':   0.684,
             'omega_b_0':        0.049,
             'N_eff':            3.046,
             'h':                0.67,
             'ns':               0.962,
             'sigma_8':          0.834,
             'gamma':            0.55,
             'w0':               -1.,
             'wa':               0.,
             'sigma_nl':         7.}
    _z = linspace(0., zmax, nsamples)
    a = 1. / (1. + _z)
    H0 = (100.*cosmo['h']); w0 = cosmo['w0']; wa = cosmo['wa']
    om = cosmo['omega_M_0']; ol = cosmo['omega_lambda_0']
    ok = 1. - om - ol
    C= 5e3
    # Sample Hubble rate H(z) and comoving dist. r(z) at discrete points
    omegaDE = ol * exp(3.*wa*(a - 1.)) / a**(3.*(1. + w0 + wa))
    E =sqrt( om * a**(-3.) + ok * a**(-2.) + omegaDE )
    _H = H0 * E
    
    r_c = concatenate( ([0.], cumtrapz(1./E, _z)) )
    if ok > 0.:
        _r = C/(H0*sqrt(ok)) * sinh(r_c * sqrt(ok))
    elif ok < 0.:
        _r = C/(H0*sqrt(-ok)) * sin(r_c * sqrt(-ok))
    else:
        _r = (C/H0) * r_c
    
    # Integrate linear growth rate to find linear growth factor, D(z)
    # N.B. D(z=0) = 1.
    a = 1. / (1. + _z)
    Oma = cosmo['omega_M_0'] * (1.+_z)**3. * (100.*cosmo['h']/_H)**2.
    _f = Oma**cosmo['gamma']
  #  print _f
    _D = concatenate( ([0.,], cumtrapz(_f, log(a))) )
    _D = exp(_D)
    
    # Construct interpolating functions and return
    r = interpolate.interp1d(_z, _r, kind='linear', bounds_error=False)
    H =interpolate.interp1d(_z, _H, kind='linear', bounds_error=False)
    D = interpolate.interp1d(_z, _D, kind='linear', bounds_error=False)
    f = interpolate.interp1d(_z, _f, kind='linear', bounds_error=False)
    return  _z, H, r, D, f
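
A usage sketch for background_evolution_splines, assuming the function and its numpy/scipy imports are in scope; the printed values follow from the cosmology hard-coded above:

_z, H, r, D, f = background_evolution_splines(zmax=3.0, nsamples=300)

print(H(0.0))          # ~67 km/s/Mpc, i.e. 100*h with the h = 0.67 set above
print(r(0.0), D(0.0))  # r(0) = 0 Mpc and D(0) = 1 by construction
print(f(1.0))          # linear growth rate at z = 1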
Example #24
def estimate_plot(a):
	de=loadtxt('exp/bc_x_0p5/pdf.csv',delimiter=',')[:,0]*1e-6
	pe=loadtxt('exp/bc_x_0p5/pdf.csv',delimiter=',')[:,1]*1e6
	
	#E=a[0]
	#V=a[1]
	
	DD=a[0]
	q=a[1]
	
	#mu=log(E)-0.5*log(1+V/E**2)
	#sd=sqrt(log(1.+V/E**2))
	
	dn=linspace(min(de),max(de),200)
	#logn=1./(dn*sd*sqrt(2*pi))*exp(-(log(dn)-mu)**2/(2.*sd**2))
	#clogn=0.5*sp.erfc(-(log(dn)-mu)/sqrt(2)/sd)
	rl=q*dn**(q-1.)/DD**q*exp(-(dn/DD)**q)
	
	#DD=12.
	#q=3.
	#logn=q*xp**(q-1.)/DD**q*exp(-(xp/DD)**q)
	#clogn=1.-exp(-(xp/DD)**q)
	
	#pn=logn
	pn=rl
	cpn=spi.cumtrapz(pn,x=dn)
	z=array([0])
	cpn=hstack((z,cpn))
	
	cpe=spi.cumtrapz(pe,x=de)
	cpe=hstack((z,cpe))
	
	
	print(' --------------------')
	print('sum(pn) = ', cpn[-1])
	print('sum(pe) = ', cpe[-1])
#	print 'E = ',E
#	print 'V = ',V
#	print 'mu = ',mu
#	print 'sd = ',sd
	print('DD = ', DD)
	print('q  = ', q)
	print('smd  = ', sum(pn*dn**3)/sum(pn*dn**2))
	print('smde = ', sum(pe*de**3)/sum(pe*de**2))
	print('--------------------')
	
	figure()
	plot(dn*1e6,pn,'-k')
	plot(de*1e6,pe,'sr')
	ylabel('Rosin-Rammler pdf')
	xlabel('Droplet diameter ($\mu m$)')
	legend(('fitted', 'experiment'))
	grid(1)
Example #25
def estimate(a):
	de=loadtxt('exp/bc_x_0p5/pdf.csv',delimiter=',')[:,0]*1e-6
	pe=loadtxt('exp/bc_x_0p5/pdf.csv',delimiter=',')[:,1]*1e6
	
	#E=a[0]
	#V=a[1]
	
	DD=a[0]
	q=a[1]
	
	#mu=log(E)-0.5*log(1+V/E**2)
	#sd=sqrt(log(1.+V/E**2))
	
	dn=linspace(8e-6,50e-6,200)

	#logn=1./(dn*sd*sqrt(2*pi))*exp(-(log(dn)-mu)**2/(2.*sd**2))
	#clogn=0.5*sp.erfc(-(log(dn)-mu)/sqrt(2)/sd)
	rl=q*dn**(q-1.)/DD**q*exp(-(dn/DD)**q)
	
	#DD=12.
	#q=3.
	#logn=q*xp**(q-1.)/DD**q*exp(-(xp/DD)**q)
	#clogn=1.-exp(-(xp/DD)**q)
	
	#pn=logn
	pn=rl
	cpn=spi.cumtrapz(pn,x=dn)
	z=array([0])
	cpn=hstack((z,cpn))
	
	cpe=spi.cumtrapz(pe,x=de)
	cpe=hstack((z,cpe))
	
	smd=sum(pn*dn**3)/sum(pn*dn**2)
	smde=sum(pe*de**3)/sum(pe*de**2)
	
	print(' --------------------')
	print('sum(pn) = ', cpn[-1])
	print('sum(pe) = ', cpe[-1])
#	print 'E = ',E
#	print 'V = ',V
#	print 'mu = ',mu
#	print 'sd = ',sd
	print('DD = ', DD)
	print('q  = ', q)
	print('smd  = ', smd)
	print('smde = ', smde)
	print('--------------------')
	
	delta=interp(dn,de,pe)-pn
	
	return (smd-smde)**2 + 1e7*sum(delta**2)/max(dn)**2
def plotIntegrals(angles, samplingFreq):
    anglesLeft = [getPos(a) for a in angles]
    anglesRight = [getNeg(a) for a in angles]
    leftTurnIntegral = cumtrapz(anglesLeft, dx=(1/samplingFreq), axis=0)
    rightTurnIntegral = cumtrapz(anglesRight, dx=(1/samplingFreq), axis=0)
    totalIntegral = cumtrapz(angles, dx=(1/samplingFreq), axis=0)
    xVect = np.array(range(len(leftTurnIntegral))) / samplingFreq

    plt.ylim([-180, 180])
    plt.plot(xVect, leftTurnIntegral, color='b', label='Left turns')
    plt.plot(xVect, rightTurnIntegral, color='r', label='Right turns')
    plt.plot(xVect, totalIntegral, color='g', label='Both turns')
    plt.legend(loc='best')
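
getPos and getNeg are not shown in this snippet; a minimal sketch of the same integration, assuming they return the positive and negative parts of each sample (all names and data here are hypothetical):

import numpy as np
from scipy.integrate import cumtrapz

samplingFreq = 100.0                               # Hz
t = np.arange(0, 10, 1 / samplingFreq)
angles = 30 * np.sin(2 * np.pi * 0.2 * t)          # synthetic angular-rate signal

anglesLeft = np.clip(angles, 0, None)              # positive part ("left turns")
anglesRight = np.clip(angles, None, 0)             # negative part ("right turns")

leftTurnIntegral = cumtrapz(anglesLeft, dx=1 / samplingFreq, axis=0)
rightTurnIntegral = cumtrapz(anglesRight, dx=1 / samplingFreq, axis=0)
totalIntegral = cumtrapz(angles, dx=1 / samplingFreq, axis=0)

# cumtrapz is linear, so the two one-sided integrals sum to the total one.
print(np.allclose(totalIntegral, leftTurnIntegral + rightTurnIntegral))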
Example #27
 def prof(self):
   r = np.linspace(0,0.1, 1001)
   mr = self.Mgas - cumtrapz(2*np.pi*r*self.gas_SD(r), x=r)
   rc = (r[:-1]+r[1:])/2.
   self.radius = np.interp(0, -mr, rc)
   r = np.linspace(0,self.radius, 1001)
   mr = self.Mgas - cumtrapz(2*np.pi*r*self.gas_SD(r), x=r)
   rc = (r[:-1]+r[1:])/2.
   self.rc = rc
   self.mr = mr
   zc = np.logspace(-8,-2,101)
   rr, zz = np.meshgrid(rc,zc)
   f_r = self.dphidz(rr,zz).max(axis=0)
   self.fr = f_r
Example #28
def estimatelogn(a):
	de=loadtxt('exp/bc_x_0p5/pdf.csv',delimiter=',')[:,0]*1e-6
	pe=loadtxt('exp/bc_x_0p5/pdf.csv',delimiter=',')[:,1]*1e6
	
	E=a[0]
	V=a[1]
	
	mu=log(E)-0.5*log(1+V/E**2)
	sd=sqrt(log(1.+V/E**2))
	
	dn=linspace(8e-6,50e-6,200)

	pn=1./(dn*sd*sqrt(2*pi))*exp(-(log(dn)-mu)**2/(2.*sd**2))
	#clogn=0.5*sp.erfc(-(log(dn)-mu)/sqrt(2)/sd)
	
	#DD=12.
	#q=3.
	#logn=q*xp**(q-1.)/DD**q*exp(-(xp/DD)**q)
	#clogn=1.-exp(-(xp/DD)**q)
	
	#pn=logn
	cpn=spi.cumtrapz(pn,x=dn)
	z=array([0])
	cpn=hstack((z,cpn))
	pn=pn/cpn[-1]
	cpn=cpn/cpn[-1]
	
	cpe=spi.cumtrapz(pe,x=de)
	cpe=hstack((z,cpe))
	pe=pe/cpe[-1]
	cpe=cpe/cpe[-1]
	
	smd=sum(pn*dn**3)/sum(pn*dn**2)
	smde=sum(pe*de**3)/sum(pe*de**2)
	
	print(' --------------------')
	print('sum(pn) = ', cpn[-1])
	print('sum(pe) = ', cpe[-1])
	print('E = ', E)
	print('V = ', V)
	print('mu = ', mu)
	print('sd = ', sd)
	print('smd  = ', smd)
	print('smde = ', smde)
	print('--------------------')
	
	delta=interp(dn,de,pe)-pn
	
	return (smd-smde)**2
Example #29
    def linear_integral(self, vector, from_zero=False):
        """
        .. math::
          a_n = \int_{r_0}^{r_n} f(r) dr

        from_zero starts to integrate from zero, instead of starting between
        the first two points
        """
        r = self.get_coors()
        if from_zero:
            v = cumtrapz(vector, r, initial=vector[0] / 2 * max(0.0, r[0]))
        else:
            v = cumtrapz(vector, r)
            r = r[1:]
        return RadialVector(r, v)
Example #30
def get_velocity_displacement(time_step, acceleration, units="cm/s/s"):
    """
    Returns the velocity and displacement time series using simple integration
    :param float time_step:
        Time-series time-step (s)
    :param numpy.ndarray acceleration:
        Acceleration time-history
    :returns:
        velocity - Velocity Time series (cm/s)
        displacement - Displacement Time series (cm)
    """
    acceleration = convert_accel_units(acceleration, units)
    velocity = time_step * cumtrapz(acceleration, initial=0.0)
    displacement = time_step * cumtrapz(velocity, initial=0.0)
    return velocity, displacement
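
convert_accel_units is defined elsewhere in that module; a minimal standalone sketch of the same integration step on a synthetic constant-acceleration record, checked against the closed-form solution:

import numpy as np
from scipy.integrate import cumtrapz

time_step = 0.01                          # s
t = np.arange(0, 10, time_step)
acceleration = np.full_like(t, 50.0)      # cm/s/s, constant for the check

velocity = time_step * cumtrapz(acceleration, initial=0.0)
displacement = time_step * cumtrapz(velocity, initial=0.0)

print(np.allclose(velocity, 50.0 * t))            # v = a * t
print(np.allclose(displacement, 25.0 * t ** 2))   # d = a * t^2 / 2 (exact here: v is linear)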
        for j in range(Ncols):
            matrixdata[i, j] = float(line[j])

    print ""
    print "Performing integration using:"
    print "x    = ", header2[cols[0]]
    print "f(x) = ", header2[cols[1]]
    xes.append(header2[cols[0]])
    fofxes.append(header2[cols[1]])

    x = matrixdata[:, cols[0]]
    f = matrixdata[:, cols[1]]
    f2 = f * cumf[n]  # multiplied by input factor

    Etot = integrate.simps(f2, x)
    Ecum = integrate.cumtrapz(f2, x, initial=0)

    print "Etot(x) = ", Etot
    print ""

    plt.plot(x, Ecum, label=str(name))
    Ecums[n] = Ecum

Emean = np.mean(Ecums, axis=0)
plt.plot(x, Emean, label='E(x)')
plt.legend()
#plt.show()

##############################################################
# -- Write output file:
if args.output is None:
        paramTest[i][j*len(paramTest[i])/4 + 0] = p_input[i]
     
    ParamLists += [paramTest]

for b in basis:
    prob.setBasis(b)
    for l,p in enumerate(param_deriv):
        #print p
        #print type(p),type(l)
        Output_p =[]
        Output =[]
        #print len(ParamLists[l])
        for param in ParamLists[l]:
            #print param
            Output_p +=[EnergyFunction.function(param,"TargetMapping",prob, p,0)] 
            Output += [EnergyFunction.function(param,"TargetMapping",prob, "")]
       # print len(Output_p),len(p_input)
        #print Output_p,p_input
        #print Output_p,Output
        # print list(p_input), "***" ,Output_p[i][j]
        
        S_Output = integrate.cumtrapz(Output_p, list(p_input))
        # print len(list(S_Output)),"****",len(Output[i][j])
        #print Output[0], S_Output
        S_Output = list(map(lambda x: x + Output[0], S_Output))
        S_Output = [Output[0]] + S_Output
        #print Output_p, Output, S_Output
        #map(associate,list(S_Output),Output[i][j])
        e = Error(list(S_Output),Output)
        print(b, p, e)
Example #33
comsol_time = com_fluxGE[0]
comsol_fluxGE = com_fluxGE[1]
comsol_fluxGE1cm = com_fluxGE1cm[1]
comsol_fluxGE1p25cm = com_fluxGE1p25cm[1]

comsol_fluxMid = com_fluxMid[1]
comsol_fluxMid1cm = com_fluxMid1cm[1]
comsol_fluxMid1p25cm = com_fluxMid1p25cm[1]
#comsol_currentMid1cm = com_currentMid1cm[1]

comsol_fluxOut = com_fluxOut[3]
comsol_fluxOut1cm = com_fluxOut1cm[1]
comsol_fluxOut1p25cm = com_fluxOut1p25cm[1]

flux_middle1 = sp.cumtrapz(emf_middle1, time_ms1)
flux_gunEdge1 = sp.cumtrapz(emf_gunEdge1, time_ms1)
flux_outside1 = sp.cumtrapz(emf_outside1, time_ms1)

#fig1, (ax1) = plt.subplots(1)
fig1, (ax1, ax2, ax3) = plt.subplots(3)
#fig2, (ax4, ax5, ax6) = plt.subplots(3, sharex=True)

#Flux Middle 0.625cm Inner Electrode
ax1.plot(time_ms1[1:210716]+1.4, flux_middle1[:210715], color='red', label='Inner Coil - Middle')
ax1.plot(comsol_time, comsol_fluxMid1cm, color='black', label='1cm COM Middle')
ax1.set_ylabel(r'Magnetic Flux (mWb)')
ax1.set_xlabel(r'Time (ms)')
ax1.legend(loc='best', frameon=False)
#plt.savefig('C:\\Users\\Josh0\\Documents\\1. Josh Documents\\Graduate School - Bryn Mawr College\\Plasma Lab (BMX) Research\\Analysis\\Plots\\08182021\\08182021_1p25cm_middle.pdf', dpi=600)
Example #34
coefficients = np.polyfit(xx, yy, 1)
##plt.plot(t1,DSC1,label='DSC signal')
##plt.plot(t,coefficients[0]*t+coefficients[1],label='baseline')
##plt.plot(t1,DSC1)
##plt.xlabel('Time (s)')
##plt.ylabel('DSC signal (microW)')
##plt.title('Exothermic Peak')
##plt.figure()
baseline = coefficients[0] * t1 + coefficients[1]

### Calculations of total heat of reaction and partial heats ###
t1Integration = t1 * 60
DeltaHStatic = integrate.trapz(DSC1 - baseline, t1Integration)
DeltaHStaticNorm = (DeltaHStatic * 10**(-3)) / (sample_mass)  #normalized
dBeta = (DSC1 - baseline) / DeltaHStatic
alpha = (DeltaHStaticNorm / DeltaHDynamic) * integrate.cumtrapz(
    dBeta, t1Integration, initial=0)
###Plot alpha vs t ###
plt.plot(t1Integration, alpha)
plt.plot(t1Integration, len(alpha) * [1])
plt.xlabel('time (s)')
plt.ylabel('degree of cure \u03B1 (-)')
plt.grid(True)
plt.figure()
plt.show()
DSC1Norm = (DSC1 * 10**(-3)) / (sample_mass)
N = [0.5, 1, 1.5, 2, 2.5]
op = 'n'
while op != 'y':
    m = float(input('Order of reaction m ?\n'))
    x = alpha**m
    for n in N:
Example #35
                  skiprows=1)
# data = np.loadtxt(os.path.join('wmap.spline'), skiprows = 1)
m = data[skip:, 0]
sigma = data[skip:, 1]
alpha = data[skip:, 3]  # d ln \sigma / d ln m
dn = dn_dm(m, sigma, alpha)
M_max = simps(dn * m, m) * V
print(sum((dn[:-1] + dn[1:]) / 2 * (m[1:] - m[:-1]) * m[1:]) * V)
print(V * trapz(dn * m, m))
print(simps(dn * m, m) * V)
print(rho_m * V)
input("Press enter to continue")

x = m[::-1]
dn = dn[::-1]
cx = cumtrapz(dn, x=x)
cx = np.hstack([0, cx])
cx = cx / cx[-1]
"""plt.plot(x, cx)
plt.xscale('log')
plt.yscale('log')
"""
X_of_Xi = interp1d(cx, x)

M_sum = 0
gal_list = []
gal_list_low = []

while M_sum <= M_max:
    m_curr = sample(X_of_Xi, n)
    M_sum = M_sum + sum(m_curr)
Example #36
    def Delta_Sigma(
            self,
            rp,
            DS0,  #PM
            Ds,
            Dl,
            zl,
            rhom,  #background/bin-dependent quantities
            pi=np.linspace(0, 100, 100 + 1),
            rpMin=1.,
            num_rpint=1000,
            wantGrad=False):
        """Delta Sigma GGL statistic. Lens redshift is assumed to be the CrossCorrelator attribute z.
        Simple top-hat redshift distribution.
        Parameters:
        -----------
        rp: array (float)
            2D abscissa values for wp/DS
        DS0: float
            Point mass marginalization parameter for Delta Sigma (see Singh ++ 2018)
        Ds: float
            Comoving distance (flat) to source
        Dl: float
            Comoving distance (flat) to lens
        zl: float
            Lens redshift
        rhom: float
            Mean matter density at z=0, in Msun^1 pc^-2 h^1 (pc, NOT Mpc)
        pi_bins: array (float),optional
            Array must be of size that divides evenly into r - projection window, tophat for now
        rpMin: float,optional
            Minimum scale to which model is trusted, below this activate point mass.
        wantGrad : boolean,optional
            Whether to return the function value, or the tuple (val,grad).
        Returns:
        ----------
        (array (float), array (float), [array (float)])
            projected radius rp, projected correlation function wp, gradient if wanted
        """
        #FIXME: do not actually need this (it works though)- we are not computing tangential shear...might later
        # def inv_Sigma_crit(Ds,Dl,zl):
        #     c=2.99792e5 #km/s
        #     G=4.30071e-9 #Mpc (km/s)**2 * M_sun^-1
        #     #Assume Ds>Dl
        #     #can come back to computing Ds, DL internally  after caching distances in gzpt object, for now require input
        #     if(Ds>Dl):
        #         Dfactor = Dl*(Ds-Dl)/Ds #Mpc/h
        #     else:
        #         Dfactor = 0.
        #     pre = (1+zl)*(4*np.pi*G)/c**2 #Mpc *Msun^-1
        #     return pre*Dfactor # #Mpc^2 Msun^-1 h^-1, what we need to match units of Sigma

        #FIXME: internally check units on this, works in the example though

        if (wantGrad):
            wpgm, grad_wpgm = self.wp(rp, pi=pi, wantGrad=True)

        else:
            wpgm = self.wp(rp, pi=pi)
        #rp interpolation pts
        rpint = np.logspace(np.log10(rp.min()), np.log10(
            rp.max()), num_rpint)  #FIXME probably not necessary to use 1000

        if (wantGrad):
            I_grad_hzpt = np.zeros((len(rpint), len(self.params)))
            I_grad_PM = np.zeros(len(rpint))

        wint = np.interp(rpint, rp, wpgm)
        rpint_c = rpint[rpint >= rpMin]
        wint_c = wint[rpint >= rpMin]
        s_int = rhom * np.interp(rpint, rp, wpgm)
        s = rhom * wpgm
        sint_c = rhom * wint_c
        t1 = np.zeros(len(rpint))
        t1[rpint < rpMin] = 0.
        #Using Singh 2018 eqn 29 - there is a typo so add missing factor of 2
        t1[rpint >= rpMin] = (2. / rpint_c**2) * cumtrapz(
            rpint_c * sint_c, x=rpint_c, initial=0)
        t2 = -s_int
        S0 = np.interp(rpMin, rpint, s_int)
        t3 = (rpMin / rpint)**2 * (DS0 + S0
                                   )  #compute Sigma_0 from Singh 18 eqn. 30
        DS = t1 + t2 + t3

        if (wantGrad):  #FIXME: test for this
            grad_wpgm_int_c = np.zeros((len(rpint_c), grad_wpgm.shape[1]))
            for i in range(grad_wpgm.shape[1]):
                grad_wpgm_int[:, i] = np.interp(rpint, rp, grad_wpgm[:, i])
                grad_wpgm_int_c[:, i] = grad_wpgm_int[:, i][rpint >= rpMin]
                term1_grad[:, i] = (2. / rpint**2) * cumtrapz(
                    rpint_c * rhom * grad_wpgm_int_c,
                    x=rpint_c,
                    axis=0,
                    initial=0)
            term2_grad = -rhom * grad_wpgm
            I_grad_hzpt = term1_grad + term2_grad
            I_grad_PM = 1 / rpint**2

        if (wantGrad):
            grad_DS = np.concatenate(
                [I_grad_hzpt, np.atleast_2d(I_grad_PM).T], axis=1)
            return DS, grad_DS
        else:
            return DS
def Interfacial_resistance(Temp_and_energy_file,logfile,IR_file,\
 fit_range1=120,fit_range2=4,timestep=5e-4,inter_step=100,Figure=True):
    '''Interfacial resistance is calculated from the time integral of the temperature
    difference and the recorded total energy.
    Format of each line:
    step Temperature_up Temperature_down kinetic_energy potential_energy'''
    #**********variable**********#
    global i
    inter_time = timestep * inter_step * (1e-12)  #s
    ev2J = 1.60217662e-19  #ev2J
    #**********read data**********#
    data = np.loadtxt(Temp_and_energy_file)
    # print(data.shape)
    log = open(logfile, 'a')
    #**********output of result**********#
    IR = open(IR_file, 'a')  #Interfacial Resistance
    #**********step 2 time**********#
    step = (data[:, 0] - data[0, 0]) / inter_step
    step = list(map(int, step))
    step = np.array(step)
    time = inter_time * step  #s
    #**********Plot temperature profile**********#
    Temperature_up = data[:, 1]  #high temperature
    Temperature_down = data[:, 2]  #low temperature
    plt.figure(num=1, figsize=(8, 6))
    plt.plot(time, Temperature_up, time, Temperature_down)
    plt.title("Temperature")
    plt.xlabel("Time (s)")
    plt.ylabel("Temperature (K)")
    plt.savefig(str(i) + "Temperature profile.png")
    if Figure == True:
        plt.show()
    plt.close()
    #**********Plot Total energy profile**********#
    kinetic_energy = data[:, 3]
    potential_energy = data[:, 4]
    total_energy = (kinetic_energy + potential_energy) * ev2J
    plt.figure(num=2, figsize=(8, 6))
    plt.plot(time, total_energy)
    plt.title("Total energy")
    plt.xlabel("Time (s)")
    plt.ylabel("Energy (J)")
    plt.savefig(str(i) + "Total energy profile.png")
    if Figure == True:
        plt.show()
    plt.close()

    # print(step)

    #**********define temperature difference function**********#
    def Temp_Diff(x):
        temp_diff = (Temperature_up[x] - Temperature_down[x])
        return temp_diff

    #**********Integrate**********#
    y = Temp_Diff(step)
    DT_integrate = integrate.cumtrapz(y)

    #**********Fitting and plot**********#
    x1 = DT_integrate
    y1 = total_energy[1:]
    #******control the fitting interval******#
    Fit_minx2 = int(len(step) / fit_range1)
    # print(Fit_minx2)
    Fit_maxx2 = int(len(step) / fit_range2)
    # print(Fit_maxx2)
    x2 = DT_integrate[Fit_minx2:Fit_maxx2]
    y2 = total_energy[Fit_minx2:Fit_maxx2]
    # x2 = DT_integrate[20:1600]
    # y2 = total_energy[20:1600]

    fit = np.polyfit(x2, y2, 1)
    fit_fn1 = np.poly1d(fit)
    print("Formula of Heat flux Fitting:y2 = ", fit_fn1, file=log)
    #Fitting slope
    Area_R = fit[0] / inter_time  #change unit to K.s
    R = -area / Area_R
    print('R' + str(i) + '=', R, 'Km^2/W\n', file=log)

    plt.figure(num=3, figsize=(8, 6))
    plt.plot(x1, y1, linewidth=6.0)
    plt.plot(x2, fit_fn1(x2), "r-", linewidth=3.0)
    plt.title("Total energy")
    plt.xlabel("DT (K.step)")
    plt.ylabel("Energy (J)")
    plt.savefig(str(i) + "Total energy profile-DT.png")
    if Figure == True:
        plt.show()
    plt.close()
    #output Interfacial resistance
    IR.write(str(R))
    IR.write('  ')
    IR.close()
    log.close()
    return print('**********Interfacial_resistance done!**********'),\
    print('R'+str(i)+'=',R,'Km^2/W\n\n')
Example #38
        z_str = list(map(lambda x: "%.6E" % (x), z))
        confg.set_altitude_grid(z_str)

        ## get atmosphere profile
        atm_profile = SReader.atm_profile()
        ray_df, unit = ip.rayleight(atm_profile, bands)
        ray_df.to_csv("../out/extinction_rayleigh_(%s).csv" % (unit))
        z_ray = ray_df['z[km]'].values * 1e3  # convert from km to m
        ray_extinct = ray_df['extinction_coe_%d' % bands_int[
            bandindex]].values * 1e2  # convert from cm^-1 it into m^-1
        f = inp.interp1d(z_ray, ray_extinct)
        ray_extinct = f(z)
        confg.set_rayleigh(map(lambda x: "%.6E" % (x), ray_extinct),
                           ['1.000' for i in range(ray_extinct.shape[0])])

        ray_scaled = integrate.cumtrapz(ray_extinct, z, initial=0)[-1]
        # print(taua_scaled)
        # assert (round(ray_scaled,1)==0.1)
        print(ray_scaled)

        ## get temperature from atmosphere profile
        temp_k = ray_df['t[K]'].values
        f = inp.interp1d(z_ray, temp_k)
        temp_k = f(z)
        confg.set_tempprofile(map(lambda x: "%.6E" % (x), temp_k))

        ## get gas extinction profile
        gas_profile = SReader.gas_profile()
        no2_crosssect = SReader.no2_cross_section()
        o3_crosssect = SReader.ozone_cross_section()
        gas_abs, unit = ip.gas_absorption(bands=bands,
Example #39
    def set_params(self, params, t_bounds):
        """
        Method to set the parameters for lightcurve
        model.

        Parameters
        ----------
        params : dict
            Dictionary mapping parameter names to their values
        t_bounds : list
            [upper bound, lower bound] pair for time values
        """
        self.sigma = params["sigma"]
        ### conversion factors and physical constants
        Msun = 1.988409870698051e33  # g
        c = 2.99792458e10  # cm/s
        sigmaSB = 5.67e-5  # erg cm^-2 s^-1 K^-4
        d2s = 24.0 * 3600.0

        ### initialize time arrays
        tmin = 1.0e-6
        tmax = t_bounds[1]
        n = 1000
        tdays = np.logspace(np.log10(tmin), np.log10(tmax), n)
        self.tdays = tdays
        t = tdays * d2s

        ### constants
        t0 = 1.3  # s
        sigma = 0.11  # s
        beta = 13.7

        ### empty lists to hold calculated photosphere radius and temperature
        self.R_photo = []
        self.T_photo = []

        ### for each component, compute photosphere radius and temperature
        for mej, vej, Tc, kappa in zip(
            [params["mej_red"], params["mej_purple"], params["mej_blue"]],
            [params["vej_red"], params["vej_purple"], params["vej_blue"]],
            [params["Tc_red"], params["Tc_purple"], params["Tc_blue"]],
            [10.0, 3.0, 0.5]):
            a = self.fa([mej, vej])[0]
            b = self.fb([mej, vej])[0]
            d = self.fd([mej, vej])[0]
            vej *= c
            td = np.sqrt(2.0 * kappa * (mej * Msun) / (beta * vej * c))
            L_in = 4.0e18 * (mej * Msun) * (0.5 - np.arctan(
                (t - t0) / sigma) / np.pi)**1.3
            e_th = 0.36 * (np.exp(-a * tdays) + np.log1p(2.0 * b * tdays**d) /
                           (2.0 * b * tdays**d))
            L_in *= e_th

            integrand = L_in * t * np.exp((t / td)**2) / td
            L_bol = np.empty(t.size)
            L_bol[1:] = cumtrapz(integrand, t)
            L_bol[0] = L_bol[1]
            L_bol *= 2.0 * np.exp(-(t / td)**2) / td

            _T_photo = (L_bol / (4.0 * np.pi * sigmaSB * vej**2 * t**2))**0.25
            _R_photo = (L_bol / (4.0 * np.pi * sigmaSB * Tc**4))**0.5

            mask = _T_photo < Tc
            _T_photo[mask] = Tc
            mask = np.logical_not(mask)
            _R_photo[mask] = vej * t[mask]
            self.R_photo.append(_R_photo)
            self.T_photo.append(_T_photo)
    #b.set_array('J_integral', np.logical_and(r > params.eval_r1,
    #                                         r < params.eval_r2))
    #ase.io.write('eval_'+fn, b, format='extxyz')

    last_a = a

epot_cluster = np.array(epot_cluster)-epot_cluster[0]
work = np.cumsum(work)

tip_x = np.array(tip_x)
tip_y = np.array(tip_y)
bond_length = np.array(bond_length)
print('tip_x =', tip_x)

# Integrate true potential energy.
epot = -cumtrapz(bond_force, bond_length, initial=0.0)

print('epot =', epot)

savetbl('{}_eval.out'.format(prefix),
        bond_length=bond_length,
        bond_force=bond_force,
        epot=epot,
        epot_cluster=epot_cluster,
        work=work,
        tip_x=tip_x,
        tip_y=tip_y,
        J_int=J_int)

# Fit and subtract first energy minimum
i = 1
Example #41
z = []

for line in (slopes):
    columns = line.split()
    x.append(float(columns[0]))
    z.append(float(columns[1]))
file00.close()

x = np.array(x)
z = np.array(z)
h = np.zeros(len(z))

coefs = np.polyfit(x, z, 1)
p = np.poly1d(coefs)

yy = integrate.cumtrapz(z, x, initial=0)

# Subtract line
line = (yy[yy.size - 1] - z[0]) / (x[x.size - 1] - x[0]) * (x - x[0]) + yy[0]
yy = yy - line

a = fit_ellipseFitzgibbon(x, yy).flatten()

y_fit = np.zeros(x.size)

for i in range(0, x.size):
    aa = a[2]
    bb = a[1] * x[i] + a[4]
    cc = (a[0] * x[i] + a[3]) * x[i] + a[5]
    y_fit[i] = (-bb + np.sqrt(bb**2 - 4 * cc * aa)) / (2.0 * aa)
def matched_from_distribution_function(beam,
                                       full_ring_and_RF,
                                       distribution_function_input=None,
                                       distribution_user_table=None,
                                       main_harmonic_option='lowest_freq',
                                       TotalInducedVoltage=None,
                                       n_iterations=1,
                                       n_points_potential=1e4,
                                       n_points_grid=int(1e3),
                                       dt_margin_percent=0.40,
                                       extraVoltageDict=None,
                                       seed=None,
                                       distribution_exponent=None,
                                       distribution_type=None,
                                       emittance=None,
                                       bunch_length=None,
                                       bunch_length_fit=None,
                                       distribution_variable='Hamiltonian',
                                       process_pot_well=True,
                                       turn_number=0):
    '''
    *Function to generate a beam by inputting the distribution function (by
    choosing the type of distribution and the emittance).
    The potential well is preprocessed to check for the min/max and center
    the frame around the separatrix.
    An error will be raised if there is not a full potential well (2 max
    and 1 min at least), or if there are several wells (more than 2 max and
    1 min, this case will be treated in the future).
    An adjustable margin (40% by default) is applied in order to be able to
    catch the min/max of the potential well that might be on the edge of the
    frame. The slippage factor should be updated to take the higher orders.
    Outputs should be added in order for the user to check step by step if
    his bunch is going to be well generated. More detailed 'step by step'
    documentation should be implemented
    The user can input a custom distribution function by setting the parameter
    distribution_type = 'user_input' and passing the function in the
    parameter distribution_options['function'], with the following definition:
    distribution_function(action_array, dist_type, length, exponent=None).
    The user can also add an input table by setting the parameter
    distribution_type = 'user_input_table',
    distribution_options['user_table_action'] = array of action (in H or in J)
    and distribution_options['user_table_distribution']*
    '''

    # Loading the distribution function if provided by the user
    if distribution_function_input is not None:
        distribution_function_ = distribution_function_input
    else:
        distribution_function_ = distribution_function

    # Initialize variables depending on the accelerator parameters
    slippage_factor = full_ring_and_RF.RingAndRFSection_list[0].eta_0[
        turn_number]
    beta = full_ring_and_RF.RingAndRFSection_list[0].rf_params.beta[
        turn_number]
    energy = full_ring_and_RF.RingAndRFSection_list[0].rf_params.energy[
        turn_number]

    eom_factor_dE = abs(slippage_factor) / (2 * beta**2. * energy)
    eom_factor_potential = (
        np.sign(slippage_factor) * beam.Particle.charge /
        (full_ring_and_RF.RingAndRFSection_list[0].t_rev[turn_number]))

    #: *Number of points to be used in the potential well calculation*
    n_points_potential = int(n_points_potential)
    # Generate potential well
    full_ring_and_RF.potential_well_generation(
        turn=turn_number,
        n_points=n_points_potential,
        dt_margin_percent=dt_margin_percent,
        main_harmonic_option=main_harmonic_option)
    potential_well = full_ring_and_RF.potential_well
    time_potential = full_ring_and_RF.potential_well_coordinates

    induced_potential = 0

    # Extra potential from previous bunches (for multi-bunch generation)
    extra_potential = 0
    if extraVoltageDict is not None:
        extra_voltage_time_input = extraVoltageDict['time_array']
        extra_voltage_input = extraVoltageDict['voltage_array']
        extra_potential_input = -(eom_factor_potential * cumtrapz(
            extra_voltage_input,
            dx=extra_voltage_time_input[1] - extra_voltage_time_input[0],
            initial=0))
        extra_potential = np.interp(time_potential, extra_voltage_time_input,
                                    extra_potential_input)

    total_potential = potential_well + induced_potential + extra_potential

    if not TotalInducedVoltage:
        n_iterations = 1
    else:
        induced_voltage_object = copy.deepcopy(TotalInducedVoltage)
        profile = induced_voltage_object.profile

    dE_trajectory = np.zeros(n_points_potential)
    for i in range(n_iterations):
        old_potential = copy.deepcopy(total_potential)

        # Adding the induced potential to the RF potential
        total_potential = (potential_well + induced_potential +
                           extra_potential)

        sse = np.sqrt(np.sum((old_potential - total_potential)**2))

        print('Matching the bunch... (iteration: ' + str(i) + ' and sse: ' +
              str(sse) + ')')

        # Process the potential well in order to take a frame around the separatrix
        if process_pot_well == False:
            time_potential_sep, potential_well_sep = time_potential, total_potential
        else:
            time_potential_sep, potential_well_sep = potential_well_cut(
                time_potential, total_potential)

        # Potential is shifted to put the minimum on 0
        potential_well_sep = potential_well_sep - np.min(potential_well_sep)

        # Compute deltaE frame corresponding to the separatrix
        max_potential = np.max(potential_well_sep)
        max_deltaE = np.sqrt(max_potential / eom_factor_dE)

        # Initializing the grids by reducing the resolution to a
        # n_points_grid*n_points_grid frame
        time_potential_low_res = np.linspace(float(time_potential_sep[0]),
                                             float(time_potential_sep[-1]),
                                             n_points_grid)
        time_resolution_low = (time_potential_low_res[1] -
                               time_potential_low_res[0])
        deltaE_coord_array = np.linspace(-float(max_deltaE), float(max_deltaE),
                                         n_points_grid)
        potential_well_low_res = np.interp(time_potential_low_res,
                                           time_potential_sep,
                                           potential_well_sep)
        time_grid, deltaE_grid = np.meshgrid(time_potential_low_res,
                                             deltaE_coord_array)
        potential_well_grid = np.meshgrid(potential_well_low_res,
                                          potential_well_low_res)[0]

        # Computing the action J by integrating the dE trajectories
        J_array_dE0 = np.zeros(n_points_grid)

        full_ring_and_RF2 = copy.deepcopy(full_ring_and_RF)
        for j in range(n_points_grid):
            # Find left and right time coordinates for a given hamiltonian
            # value
            time_indexes = np.where(
                potential_well_low_res <= potential_well_low_res[j])[0]
            left_time = time_potential_low_res[np.max((0, time_indexes[0]))]
            right_time = time_potential_low_res[np.min(
                (time_indexes[-1], n_points_grid - 1))]
            # Potential well calculation with high resolution in that frame
            time_potential_high_res = np.linspace(float(left_time),
                                                  float(right_time),
                                                  n_points_potential)
            full_ring_and_RF2.potential_well_generation(
                n_points=n_points_potential,
                time_array=time_potential_high_res,
                main_harmonic_option=main_harmonic_option)
            pot_well_high_res = full_ring_and_RF2.potential_well

            if TotalInducedVoltage is not None and i != 0:
                induced_potential_hires = np.interp(time_potential_high_res,
                                                    time_potential,
                                                    induced_potential +
                                                    extra_potential,
                                                    left=0,
                                                    right=0)
                pot_well_high_res += induced_potential_hires
                pot_well_high_res -= pot_well_high_res.min()

            # Integration to calculate action
            dE_trajectory[pot_well_high_res <= potential_well_low_res[j]] = \
                np.sqrt((potential_well_low_res[j] -
                         pot_well_high_res[pot_well_high_res <=
                                           potential_well_low_res[j]]) / eom_factor_dE)
            dE_trajectory[pot_well_high_res > potential_well_low_res[j]] = 0

            J_array_dE0[j] = 1 / np.pi * np.trapz(
                dE_trajectory,
                dx=time_potential_high_res[1] - time_potential_high_res[0])

        # Sorting the H and J functions to be able to interpolate J(H)
        H_array_dE0 = potential_well_low_res
        sorted_H_dE0 = H_array_dE0[H_array_dE0.argsort()]
        sorted_J_dE0 = J_array_dE0[H_array_dE0.argsort()]

        # Calculating the H and J grid
        H_grid = eom_factor_dE * deltaE_grid**2 + potential_well_grid
        J_grid = np.interp(H_grid,
                           sorted_H_dE0,
                           sorted_J_dE0,
                           left=0,
                           right=np.inf)

        # Choice of either H or J as the variable used
        if distribution_variable == 'Action':
            sorted_X_dE0 = sorted_J_dE0
            X_grid = J_grid
        elif distribution_variable == 'Hamiltonian':
            sorted_X_dE0 = sorted_H_dE0
            X_grid = H_grid
        else:
            # DistributionError
            raise RuntimeError('The distribution_variable option was not ' +
                               'recognized')

        # Computing bunch length as a function of H/J if needed
        # Bunch length can be calculated as 4-rms, Gaussian fit, or FWHM
        if bunch_length is not None:
            X0 = X0_from_bunch_length(bunch_length, bunch_length_fit, X_grid,
                                      sorted_X_dE0, n_points_grid,
                                      time_potential_low_res,
                                      distribution_function_,
                                      distribution_type, distribution_exponent,
                                      beam, full_ring_and_RF)

        elif emittance is not None:
            if distribution_variable == 'Action':
                X0 = emittance / (2 * np.pi)
            elif distribution_variable == 'Hamiltonian':
                X0 = np.interp(emittance / (2 * np.pi), sorted_J_dE0,
                               sorted_H_dE0)

        # Computing the density grid
        if distribution_user_table is None:
            density_grid = distribution_function_(X_grid, distribution_type,
                                                  X0, distribution_exponent)
        else:
            density_grid = np.interp(
                X_grid, distribution_user_table['user_table_action'],
                distribution_user_table['user_table_distribution'])

        # Normalizing the grid
        density_grid[H_grid > np.max(H_array_dE0)] = 0
        density_grid = density_grid / np.sum(density_grid)

        # Calculating the line density
        line_density_ = np.sum(density_grid, axis=0)
        line_density_ *= beam.n_macroparticles / np.sum(line_density_)

        # Induced voltage contribution
        if TotalInducedVoltage is not None:
            # Inputting new line density
            profile.cut_options.cut_left = time_potential_low_res[0] - \
                0.5*time_resolution_low
            profile.cut_options.cut_right = time_potential_low_res[-1] + \
                0.5*time_resolution_low
            profile.cut_options.n_slices = n_points_grid
            profile.cut_options.cuts_unit = 's'
            profile.cut_options.set_cuts()
            profile.set_slices_parameters()
            profile.n_macroparticles = line_density_

            # Re-calculating the sources of wakes/impedances according to this
            # slicing
            induced_voltage_object.reprocess()

            # Calculating the induced voltage
            induced_voltage_object.induced_voltage_sum()
            induced_voltage = induced_voltage_object.induced_voltage

            # Calculating the induced potential
            induced_potential_low_res = -(eom_factor_potential * cumtrapz(
                induced_voltage, dx=time_resolution_low, initial=0))
            induced_potential = np.interp(time_potential,
                                          time_potential_low_res,
                                          induced_potential_low_res,
                                          left=0,
                                          right=0)
        del full_ring_and_RF2
        gc.collect()
    # Populating the bunch
    populate_bunch(beam, time_grid, deltaE_grid, density_grid,
                   time_resolution_low,
                   deltaE_coord_array[1] - deltaE_coord_array[0], seed)

    if TotalInducedVoltage is not None:
        return [time_potential_low_res, line_density_], induced_voltage_object
    else:
        return [time_potential_low_res, line_density_]
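The voltage-to-potential conversion used several times above follows a single pattern. Below is a minimal, self-contained sketch with a synthetic voltage and a placeholder scaling factor (assuming a SciPy version that still exposes scipy.integrate.cumtrapz): the potential well is minus the cumulative time integral of the voltage, scaled by eom_factor_potential, then shifted so its minimum is at zero.

import numpy as np
from scipy.integrate import cumtrapz

# hypothetical, uniformly spaced time axis and a single-harmonic RF voltage
time_array = np.linspace(0., 2.5e-9, 1000)                  # [s]
voltage = 1e6 * np.sin(2 * np.pi * 400.79e6 * time_array)   # [V]
eom_factor_potential = 1.0                                  # placeholder scaling

# same pattern as the extra/induced potentials above:
# potential = -(scaling * cumulative time integral of the voltage)
potential = -(eom_factor_potential *
              cumtrapz(voltage, dx=time_array[1] - time_array[0], initial=0))

# shift so that the minimum of the well sits at zero, as done before matching
potential -= potential.min()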
def matched_from_line_density(beam,
                              full_ring_and_RF,
                              line_density_input=None,
                              main_harmonic_option='lowest_freq',
                              TotalInducedVoltage=None,
                              plot=False,
                              figdir='fig',
                              half_option='first',
                              extraVoltageDict=None,
                              n_iterations=100,
                              n_points_potential=1e4,
                              n_points_grid=int(1e3),
                              dt_margin_percent=0.40,
                              n_points_abel=1e4,
                              bunch_length=None,
                              line_density_type=None,
                              line_density_exponent=None,
                              seed=None,
                              process_pot_well=True):
    '''
    *Function to generate a beam by inputting the line density. The distribution
    function is then reconstructed with the Abel transform and the particles
    randomly generated.*
    '''

    # Initialize variables depending on the accelerator parameters
    slippage_factor = full_ring_and_RF.RingAndRFSection_list[0].eta_0[0]

    eom_factor_dE = abs(slippage_factor) / (2 * beam.beta**2. * beam.energy)
    eom_factor_potential = (
        np.sign(slippage_factor) * beam.Particle.charge /
        (full_ring_and_RF.RingAndRFSection_list[0].t_rev[0]))

    #: *Number of points to be used in the potential well calculation*
    n_points_potential = int(n_points_potential)
    # Generate potential well
    full_ring_and_RF.potential_well_generation(
        n_points=n_points_potential,
        dt_margin_percent=dt_margin_percent,
        main_harmonic_option=main_harmonic_option)
    potential_well = full_ring_and_RF.potential_well
    time_potential = full_ring_and_RF.potential_well_coordinates

    extra_potential = 0

    if extraVoltageDict is not None:
        extra_voltage_time_input = extraVoltageDict['time_array']
        extra_voltage_input = extraVoltageDict['voltage_array']
        extra_potential_input = -(eom_factor_potential * cumtrapz(
            extra_voltage_input,
            dx=extra_voltage_time_input[1] - extra_voltage_time_input[0],
            initial=0))
        extra_potential = np.interp(time_potential, extra_voltage_time_input,
                                    extra_potential_input)

    if line_density_type != 'user_input':
        # Time coordinates for the line density
        n_points_line_den = int(1e4)
        time_line_den = np.linspace(float(time_potential[0]),
                                    float(time_potential[-1]),
                                    n_points_line_den)
        line_den_resolution = time_line_den[1] - time_line_den[0]

        # Normalizing the line density
        line_density_ = line_density(
            time_line_den,
            line_density_type,
            bunch_length,
            exponent=line_density_exponent,
            bunch_position=(time_potential[0] + time_potential[-1]) / 2)

        line_density_ -= np.min(line_density_)
        line_density_ *= beam.n_macroparticles / np.sum(line_density_)

    elif line_density_type == 'user_input':
        # Time coordinates for the line density
        time_line_den = line_density_input['time_line_den']
        n_points_line_den = len(time_line_den)
        line_den_resolution = time_line_den[1] - time_line_den[0]

        # Normalizing the line density
        line_density_ = line_density_input['line_density']
        line_density_ -= np.min(line_density_)
        line_density_ *= beam.n_macroparticles / np.sum(line_density_)
    else:
        # GenerationError
        raise RuntimeError('The input for the matched_from_line_density ' +
                           'function was not recognized')

    induced_potential_final = 0

    if TotalInducedVoltage is not None:
        # Calculating the induced voltage
        induced_voltage_object = copy.deepcopy(TotalInducedVoltage)
        profile = induced_voltage_object.profile

        # Inputting new line density
        profile.cut_options.cut_left = time_line_den[0] - \
            0.5*line_den_resolution
        profile.cut_options.cut_right = time_line_den[-1] + \
            0.5*line_den_resolution
        profile.cut_options.n_slices = n_points_line_den
        profile.cut_options.cuts_unit = 's'
        profile.cut_options.set_cuts()
        profile.set_slices_parameters()
        profile.n_macroparticles = line_density_

        # Re-calculating the sources of wakes/impedances according to this
        # slicing
        induced_voltage_object.reprocess()

        # Calculating the induced voltage
        induced_voltage_object.induced_voltage_sum()
        induced_voltage = induced_voltage_object.induced_voltage

        # Calculating the induced potential
        induced_potential = -(eom_factor_potential * cumtrapz(
            induced_voltage, dx=profile.bin_size, initial=0))

    # Centering the bunch in the potential well
    for i in range(0, n_iterations):
        if TotalInducedVoltage is not None:
            # Interpolating the potential well
            induced_potential_final = np.interp(time_potential,
                                                profile.bin_centers,
                                                induced_potential)

        # Induced voltage contribution
        total_potential = (potential_well + induced_potential_final +
                           extra_potential)

        # Potential well calculation around the separatrix
        if not process_pot_well:
            time_potential_sep, potential_well_sep = time_potential, total_potential
        else:
            time_potential_sep, potential_well_sep = potential_well_cut(
                time_potential, total_potential)

        minmax_positions_potential, minmax_values_potential = \
            minmax_location(time_potential_sep, potential_well_sep)
        minmax_positions_profile, minmax_values_profile = \
            minmax_location(time_line_den[line_density_ != 0],
                            line_density_[line_density_ != 0])

        n_minima_potential = len(minmax_positions_potential[0])
        n_maxima_profile = len(minmax_positions_profile[1])

        # Warnings
        if n_maxima_profile > 1:
            print(
                'Warning: the profile has several maxima, the highest one ' +
                'is taken. Be sure the profile has a single peak and is not too noisy.'
            )
            max_profile_pos = minmax_positions_profile[1][np.where(
                minmax_values_profile[1] == minmax_values_profile[1].max())]
        else:
            max_profile_pos = minmax_positions_profile[1]
        if n_minima_potential > 1:
            print(
                'Warning: the potential well has several minima, the deepest ' +
                'one is taken. The induced potential is probably splitting ' +
                'the potential well.')
            min_potential_pos = minmax_positions_potential[0][np.where(
                minmax_values_potential[0] ==
                minmax_values_potential[0].min())]
        else:
            min_potential_pos = minmax_positions_potential[0]

        # Moving the bunch (not for the last iteration if intensity effects
        # are present)
        if TotalInducedVoltage is None:
            time_line_den -= max_profile_pos - min_potential_pos
            max_profile_pos -= max_profile_pos - min_potential_pos
        elif i != n_iterations - 1:
            time_line_den -= max_profile_pos - min_potential_pos
            # Update profile
            profile.cut_options.cut_left -= max_profile_pos - min_potential_pos
            profile.cut_options.cut_right -= max_profile_pos - min_potential_pos
            profile.cut_options.set_cuts()
            profile.set_slices_parameters()

    # Taking the first/second half of line density and potential
    n_points_abel = int(n_points_abel)

    abel_both_step = 1
    if half_option == 'both':
        abel_both_step = 2
        distribution_function_average = np.zeros((n_points_abel, 2))
        hamiltonian_average = np.zeros((n_points_abel, 2))

    for abel_index in range(0, abel_both_step):
        if half_option == 'first':
            half_indexes = np.where((time_line_den >= time_line_den[0]) *
                                    (time_line_den <= max_profile_pos))
        if half_option == 'second':
            half_indexes = np.where((time_line_den >= max_profile_pos) *
                                    (time_line_den <= time_line_den[-1]))
        if half_option == 'both' and abel_index == 0:
            half_indexes = np.where((time_line_den >= time_line_den[0]) *
                                    (time_line_den <= max_profile_pos))
        if half_option == 'both' and abel_index == 1:
            half_indexes = np.where((time_line_den >= max_profile_pos) *
                                    (time_line_den <= time_line_den[-1]))

        line_den_half = line_density_[half_indexes]
        time_half = time_line_den[half_indexes]
        potential_half = np.interp(time_half, time_potential_sep,
                                   potential_well_sep)
        potential_half = potential_half - np.min(potential_half)

        # Derivative of the line density
        line_den_diff = np.diff(line_den_half) / line_den_resolution

        time_line_den_diff = time_half[:-1] + line_den_resolution / 2
        line_den_diff = np.interp(time_half,
                                  time_line_den_diff,
                                  line_den_diff,
                                  left=0,
                                  right=0)

        # Interpolating the line density derivative and potential well for
        # Abel transform
        time_abel = np.linspace(float(time_half[0]), float(time_half[-1]),
                                n_points_abel)
        line_den_diff_abel = np.interp(time_abel, time_half, line_den_diff)
        potential_abel = np.interp(time_abel, time_half, potential_half)

        distribution_function_ = np.zeros(n_points_abel)
        hamiltonian_coord = np.zeros(n_points_abel)

        # Abel transform
        warnings.filterwarnings("ignore")

        if (half_option == 'first') or (half_option == 'both'
                                        and abel_index == 0):
            for i in range(0, n_points_abel):
                integrand = (
                    line_den_diff_abel[:i + 1] /
                    np.sqrt(potential_abel[:i + 1] - potential_abel[i]))

                if len(integrand) > 2:
                    integrand[-1] = integrand[-2] + (integrand[-2] -
                                                     integrand[-3])
                elif len(integrand) > 1:
                    integrand[-1] = integrand[-2]
                else:
                    integrand = np.array([0])

                distribution_function_[i] = (
                    np.sqrt(eom_factor_dE) / np.pi *
                    np.trapz(integrand, dx=line_den_resolution))

                hamiltonian_coord[i] = potential_abel[i]

        if (half_option == 'second') or (half_option == 'both'
                                         and abel_index == 1):
            for i in range(0, n_points_abel):
                integrand = (line_den_diff_abel[i:] /
                             np.sqrt(potential_abel[i:] - potential_abel[i]))

                if len(integrand) > 2:
                    integrand[0] = integrand[1] + (integrand[1] - integrand[2])
                elif len(integrand) > 1:
                    integrand[0] = integrand[1]
                else:
                    integrand = np.array([0])

                distribution_function_[i] = -(
                    np.sqrt(eom_factor_dE) / np.pi *
                    np.trapz(integrand, dx=line_den_resolution))
                hamiltonian_coord[i] = potential_abel[i]

        warnings.filterwarnings("default")

        # Cleaning the distribution function from unphysical results
        distribution_function_[np.isnan(distribution_function_)] = 0
        distribution_function_[distribution_function_ < 0] = 0

        if half_option == 'both':
            hamiltonian_average[:, abel_index] = hamiltonian_coord
            distribution_function_average[:, abel_index] = \
                distribution_function_

    if half_option == 'both':
        hamiltonian_coord = hamiltonian_average[:, 0]
        distribution_function_ = (
            distribution_function_average[:, 0] +
            np.interp(hamiltonian_coord, hamiltonian_average[:, 1],
                      distribution_function_average[:, 1])) / 2

    # Compute deltaE frame corresponding to the separatrix
    max_potential = np.max(potential_half)
    max_deltaE = np.sqrt(max_potential / eom_factor_dE)

    # Initializing the grids by reducing the resolution to a
    # n_points_grid*n_points_grid frame
    time_for_grid = np.linspace(float(time_line_den[0]),
                                float(time_line_den[-1]), n_points_grid)
    deltaE_for_grid = np.linspace(-float(max_deltaE), float(max_deltaE),
                                  n_points_grid)
    potential_well_for_grid = np.interp(time_for_grid, time_potential_sep,
                                        potential_well_sep)
    potential_well_for_grid = (potential_well_for_grid -
                               potential_well_for_grid.min())

    time_grid, deltaE_grid = np.meshgrid(time_for_grid, deltaE_for_grid)
    potential_well_grid = np.meshgrid(potential_well_for_grid,
                                      potential_well_for_grid)[0]

    hamiltonian_grid = eom_factor_dE * deltaE_grid**2 + potential_well_grid

    # Sort the distribution function and generate the density grid
    hamiltonian_argsort = np.argsort(hamiltonian_coord)
    hamiltonian_coord = hamiltonian_coord.take(hamiltonian_argsort)
    distribution_function_ = distribution_function_.take(hamiltonian_argsort)
    density_grid = np.interp(hamiltonian_grid, hamiltonian_coord,
                             distribution_function_)

    density_grid[np.isnan(density_grid)] = 0
    density_grid[density_grid < 0] = 0
    # Normalizing density
    density_grid = density_grid / np.sum(density_grid)
    reconstructed_line_den = np.sum(density_grid, axis=0)

    # Plotting the result
    if plot:
        plt.figure('Generated bunch')
        plt.plot(time_line_den, line_density_)
        plt.plot(
            time_for_grid, reconstructed_line_den /
            np.max(reconstructed_line_den) * np.max(line_density_))
        plt.title('Line densities')
        if plot == 'show':
            plt.show()
        elif plot == 'savefig':
            fign = figdir + '/generated_bunch.png'
            plt.savefig(fign)

    # Populating the bunch
    populate_bunch(beam, time_grid, deltaE_grid, density_grid,
                   time_for_grid[1] - time_for_grid[0],
                   deltaE_for_grid[1] - deltaE_for_grid[0], seed)

    if TotalInducedVoltage is not None:
        # Inputting new line density
        profile.cut_options.cut_left = time_for_grid[0] - \
            0.5*(time_for_grid[1]-time_for_grid[0])
        profile.cut_options.cut_right = time_for_grid[-1] + 0.5 * (
            time_for_grid[1] - time_for_grid[0])
        profile.cut_options.n_slices = n_points_grid
        profile.cut_options.set_cuts()
        profile.set_slices_parameters()
        profile.n_macroparticles = reconstructed_line_den * beam.n_macroparticles

        # Re-calculating the sources of wakes/impedances according to this
        # slicing
        induced_voltage_object.reprocess()

        # Calculating the induced voltage
        induced_voltage_object.induced_voltage_sum()
        gc.collect()
        return [hamiltonian_coord, distribution_function_], \
            induced_voltage_object
    else:
        gc.collect()
        return [hamiltonian_coord, distribution_function_],\
               [time_line_den, line_density_]
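The Abel-inversion step above can be reduced to a short, self-contained sketch (toy line density and potential, placeholder eom_factor_dE; this mirrors the first-half branch, it is not the library routine itself): for each point the singular endpoint of the integrand is replaced by a linear extrapolation before integrating with np.trapz.

import numpy as np

# toy first-half profile on a potential that decreases towards the bunch centre
n = 200
time_axis = np.linspace(-1e-9, 0., n)                     # [s], edge -> centre
potential = 1e3 * time_axis**2                            # toy potential well
line_density = np.exp(-time_axis**2 / (2 * (0.3e-9)**2))  # toy line density
line_den_diff = np.gradient(line_density, time_axis)
eom_factor_dE = 1e-18                                     # placeholder scaling

distribution = np.zeros(n)
for i in range(n):
    with np.errstate(divide='ignore', invalid='ignore'):
        integrand = (line_den_diff[:i + 1] /
                     np.sqrt(potential[:i + 1] - potential[i]))
    if len(integrand) > 2:
        # replace the singular endpoint by a linear extrapolation, as above
        integrand[-1] = integrand[-2] + (integrand[-2] - integrand[-3])
    elif len(integrand) > 1:
        integrand[-1] = integrand[-2]
    else:
        integrand = np.array([0.])
    distribution[i] = (np.sqrt(eom_factor_dE) / np.pi *
                       np.trapz(integrand, dx=time_axis[1] - time_axis[0]))

# clean unphysical values, as in the function above
distribution[np.isnan(distribution)] = 0.
distribution[distribution < 0] = 0.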
Exemple #44
0
def compute_velocity_dispersions_bulge(force_grid, p, u):
    R, RplusdR, z, = force_grid['R'], force_grid['RplusdR'], force_grid['z']
    z_list = force_grid['z_list']
    Dphi_R, Dphi_z, Dphi_z_dR = force_grid['Dphi_R'], force_grid[
        'Dphi_z'], force_grid['Dphi_z_dR']
    VelVc2, epi_gamma2 = force_grid['VelVc2'], force_grid['epi_gamma2']

    rho = compute_rho_bulge(R, z, p, u)

    # Compute velocity dispersion in R/z directions
    # NOTE: We are computing rho * vel dispersion, since this makes computing phi vel dispersion
    # easier. Later, we will divide by rho.
    VelDispRz_bulge = cumtrapz(rho * Dphi_z, z_list, initial=0, axis=1)
    VelDispRz_bulge = np.transpose(
        (VelDispRz_bulge[:, -1] - np.transpose(VelDispRz_bulge)))
    VelDispRz_bulge[np.isnan(VelDispRz_bulge)] = 0.0

    # Now compute derivative of velocity dispersion in R/z direction wrt R
    rho_dR = compute_rho_bulge(RplusdR, z, p, u)

    VelDispRz_dR_bulge = cumtrapz(rho_dR * Dphi_z_dR,
                                  z_list,
                                  initial=0,
                                  axis=1)
    VelDispRz_dR_bulge = np.transpose(
        (VelDispRz_dR_bulge[:, -1] - np.transpose(VelDispRz_dR_bulge)))
    VelDispRz_dR_bulge[np.isnan(VelDispRz_dR_bulge)] = 0.0

    dVDispRz_R = (VelDispRz_dR_bulge - VelDispRz_bulge) / (RplusdR - R)
    dVDispRz_R[0, :] = 0.0

    # Now compute velocity dispersion in phi direction, first just the deriv term
    # Recall that dVDispRz_R is actually the derivative of rho * vel disp
    VelDispPhi_bulge = (R / rho) * dVDispRz_R
    VelDispPhi_bulge[np.isnan(VelDispPhi_bulge)] = 0.0
    VelDispPhi_bulge[np.isinf(VelDispPhi_bulge)] = 0.0

    # Divide by rho for the vel disp RZ
    VelDispRz_bulge /= rho
    VelDispRz_bulge[np.isnan(VelDispRz_bulge)] = 0.0
    VelDispRz_bulge[np.isinf(VelDispRz_bulge)] = 0.0

    # Add other terms for velocity dispersion in phi direction
    VelDispPhi_bulge += VelDispRz_bulge + VelVc2

    # Set streaming velocity and then convert from avg(vphi^2) to sigma(vphi)^2
    VelStreamPhi_bulge = np.zeros(np.shape(VelDispPhi_bulge))

    VelDispPhi_bulge = VelDispPhi_bulge - np.square(VelStreamPhi_bulge)
    VelDispPhi_bulge[VelDispPhi_bulge < 0.0] = 0.0

    VelDispRz_bulge[0, :] = 0.0
    VelDispPhi_bulge[0, :] = 0.0

    VelDispRz_bulge[np.logical_or(VelDispRz_bulge < 0.0,
                                  np.isnan(VelDispRz_bulge))] = 0.0
    VelDispPhi_bulge[np.logical_or(VelDispPhi_bulge < 0.0,
                                   np.isnan(VelDispPhi_bulge))] = 0.0

    # Now put into a nice dict
    force_grid['VelDispRz_bulge'] = VelDispRz_bulge
    force_grid['VelDispPhi_bulge'] = VelDispPhi_bulge
    force_grid['VelStreamPhi_bulge'] = VelStreamPhi_bulge

    return force_grid
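A small sketch (toy 1-D arrays, not the bulge model itself) of the integration trick used above: cumtrapz integrates from the lower limit upwards, so the integral from z to z_max is obtained as the total integral minus the cumulative one; the transposes in the function apply the same subtraction along axis=1 of a 2-D (R, z) grid.

import numpy as np
from scipy.integrate import cumtrapz

z_list = np.linspace(0., 10., 101)                  # vertical grid
integrand = np.exp(-z_list) * (1.0 + 0.1 * z_list)  # toy rho * dPhi/dz

forward = cumtrapz(integrand, z_list, initial=0)    # integral from 0 to z
from_z_to_top = forward[-1] - forward               # integral from z to z_max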
Exemple #45
0
def aggregate_dataframe(dataframe,
                        period=86400,
                        interval=3600,
                        label='middle'):
    """
    Function to calculate the aggregated average of a timeseries by
    period (typically a day) in bins of interval seconds (default = 3600 s).

    label = 'left', 'middle' or 'right'.
    'left' means that label i contains data from
    i till i+1, 'right' means that label i contains data from i-1 till i.

    Returns a new dataframe with period/interval values, one for each interval
    of the period.

    A few limitations of the method:
        - the period has to be a multiple of the interval
        - for correct results, the timespan of the timeseries has to be a
          multiple of the period

    Example of usefulness: suppose the timeseries contains 15-minute values of,
    e.g., the electricity consumption of a building for one year.
    - To see what a typical daily profile looks like, by 15 minutes:
      ==> period=86400, interval=900
    - To see what a typical weekly profile looks like, by hour:
      ==> period=7*86400, interval=3600

    """
    #pdb.set_trace()
    # first, create cumulative integrated signals for every column, put these
    # in a new dataframe called cum

    if np.round(np.remainder(period, interval), 7) != 0:
        raise ValueError(
            'Aggregation will lead to wrong results if period is no multiple of interval'
        )

    # There's a problem with the join() operation on dataframes with duplicate
    # time index.  The current method returns a cartesian product instead of
    # what we want.  We circumvent this by joining the aggregated values.

    for i, c in enumerate(dataframe.columns):
        # we need to remove the empty values for the cumtrapz function to work

        ts = dataframe[c].dropna()
        #cumdata misses one value.  We add a zero in front.
        cumdata = np.zeros(len(ts.values))
        cumdata[1:] = cumtrapz(ts.values, ts.index.asi8 / 1e9)
        tscum = pd.DataFrame(data=cumdata, index=ts.index, columns=[c])

        # then, resample the dataframe by the given interval
        # We convert it to milliseconds in order to obtain integer values for most cases
        interval_string = str(int(interval * 1000)) + 'L'
        ts_resampled = tscum.resample(interval_string,
                                      how='last',
                                      closed='right',
                                      label='right')
        if i == 0:
            # first run, result is the ts
            df_resampled = deepcopy(ts_resampled)
        else:
            df_resampled = df_resampled.join(ts_resampled)

    # create dataframe with the average signal during each interval
    df_diff = pd.DataFrame(index=df_resampled.index[:-1])
    for c in df_resampled.columns:

        reshaped_array = df_resampled[c].values.reshape(len(df_resampled))
        diffdata = np.diff(reshaped_array) / interval
        df_diff[c] = diffdata

    # now create bins for the groupby() method
    # time in seconds
    time_s = df_diff.index.asi8 / 1e9
    time_s -= time_s[0]
    try:
        df_diff['bins'] = np.mod(time_s, period)
    except (KeyError):
        df_diff = pd.DataFrame(df_resampled)
        df_diff['bins'] = np.mod(time_s, period)

    df_aggr = df_diff.groupby('bins').mean()

    # pdb.set_trace()
    # replace the bins by a real datetime index
    if label == 'left':
        df_aggr.index = df_diff.index[:len(df_aggr)]
    elif label == 'right':
        df_aggr.index = df_diff.index[1:1 + len(df_aggr)]
    elif label == 'middle':
        df_aggr.index = df_diff.index[:len(df_aggr)]
        df_aggr = df_aggr.tshift(int(interval * 500), 'L')

    return df_aggr
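A minimal sketch (synthetic, irregularly sampled series) of the core idea in aggregate_dataframe: cumulatively integrate the signal with cumtrapz, read the cumulative value at the bin edges, then difference and divide by the interval to obtain interval averages that respect irregular sampling.

import numpy as np
from scipy.integrate import cumtrapz

rng = np.random.default_rng(0)
t = np.sort(rng.uniform(0., 4 * 3600., 500))   # irregular timestamps [s]
signal = 2.0 + np.sin(t / 600.)                # toy measurement

cumulative = np.zeros_like(signal)
cumulative[1:] = cumtrapz(signal, t)           # running time integral

interval = 3600.                               # 1 h bins
edges = np.arange(0., t[-1], interval)
cum_at_edges = np.interp(edges, t, cumulative)

hourly_average = np.diff(cum_at_edges) / interval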
Exemple #46
0
def calc_QPos(R_initialOrientation, omega, initialPosition, accMeasured, rate):
    ''' Reconstruct position and orientation, from angular velocity and linear acceleration.
    Assumes a start in a stationary position. No compensation for drift.

    Parameters
    ----------
    omega : ndarray(N,3)
        Angular velocity, in [rad/s]
    accMeasured : ndarray(N,3)
        Linear acceleration, in [m/s^2]
    initialPosition : ndarray(3,)
        initial Position, in [m]
    R_initialOrientation: ndarray(3,3)
        Rotation matrix describing the initial orientation of the sensor,
        except for a mis-orientation with respect to gravity
    rate : float
        sampling rate, in [Hz]

    Returns
    -------
    q : ndarray(N,3)
        Orientation, expressed as a quaternion vector
    pos : ndarray(N,3)
        Position in space [m]

    Example
    -------
    >>> q1, pos1 = calc_QPos(R_initialOrientation, omega, initialPosition, acc, rate)

    '''

    # Transform recordings to angVel/acceleration in space --------------

    # Orientation of \vec{g} as seen by the sensor in the "R_initialOrientation" frame
    g = 9.81
    g0 = np.linalg.inv(R_initialOrientation).dot(r_[0,0,g])

    # for the remaining deviation, assume the shortest rotation to there
    q0 = vector.qrotate(accMeasured[0], g0)    
    R0 = quat.quat2rotmat(q0)

    # combine the two, to form a reference orientation. Note that the sequence
    # is very important!
    R_ref = R_initialOrientation.dot(R0)
    q_ref = quat.rotmat2quat(R_ref)

    # Calculate orientation q by "integrating" omega -----------------
    q = quat.vel2quat(omega, q_ref, rate, 'bf')

    # Acceleration, velocity, and position ----------------------------
    # From q and the measured acceleration, get the \frac{d^2x}{dt^2}
    g_v = r_[0, 0, g] 
    accReSensor = accMeasured - vector.rotate_vector(g_v, quat.quatinv(q))
    accReSpace = vector.rotate_vector(accReSensor, q)

    # Make the first position the reference position
    q = quat.quatmult(q, quat.quatinv(q[0]))

    # compensate for drift
    #drift = np.mean(accReSpace, 0)
    #accReSpace -= drift*0.7

    # Position and Velocity through integration, assuming 0-velocity at t=0
    vel = np.nan*np.ones_like(accReSpace)
    pos = np.nan*np.ones_like(accReSpace)

    for ii in range(accReSpace.shape[1]):
        vel[:,ii] = cumtrapz(accReSpace[:,ii], dx=1./rate, initial=0)
        pos[:,ii] = cumtrapz(vel[:,ii],        dx=1./rate, initial=initialPosition[ii])

    return (q, pos)
    def interpret_astra_data(self, xemit, yemit, zemit):
        z, t, mean_x, rms_x, rms_xp, exn, mean_xxp = np.transpose(xemit)
        z, t, mean_y, rms_y, rms_yp, eyn, mean_yyp = np.transpose(yemit)
        z, t, e_kin, rms_z, rms_e, ezn, mean_zep = np.transpose(zemit)
        e_kin = 1e6 * e_kin
        t = 1e-9 * t
        exn = 1e-6 * exn
        eyn = 1e-6 * eyn
        rms_x, rms_xp, rms_y, rms_yp, rms_z, rms_e = 1e-3 * np.array(
            [rms_x, rms_xp, rms_y, rms_yp, rms_z, rms_e])
        rms_e = 1e6 * rms_e
        self.append('z', z)
        self.append('t', t)
        self.append('kinetic_energy', e_kin)
        gamma = 1 + (e_kin / self.E0_eV)
        self.append('gamma', gamma)
        cp = np.sqrt(e_kin *
                     (2 * self.E0_eV + e_kin)) * constants.elementary_charge
        self.append('cp', cp)
        self.append('cp_eV', cp / constants.elementary_charge)
        p = cp * self.q_over_c
        self.append('p', p)
        self.append('enx', exn)
        ex = exn / gamma
        self.append('ex', ex)
        self.append('eny', eyn)
        ey = eyn / gamma
        self.append('ey', ey)
        self.append('enz', ezn)
        ez = ezn / gamma
        self.append('ez', ez)
        self.append('beta_x', rms_x**2 / ex)
        self.append('gamma_x', rms_xp**2 / ex)
        self.append('alpha_x', (-1 * np.sign(mean_xxp) * rms_x * rms_xp) / ex)
        # self.append('alpha_x', (-1 * mean_xxp * rms_x) / ex)
        self.append('beta_y', rms_y**2 / ey)
        self.append('gamma_y', rms_yp**2 / ey)
        self.append('alpha_y', (-1 * np.sign(mean_yyp) * rms_y * rms_yp) / ey)
        self.append('beta_z', rms_z**2 / ez)
        self.append('gamma_z', rms_e**2 / ez)
        self.append('alpha_z', (-1 * np.sign(mean_zep) * rms_z * rms_e) / ez)
        self.append('sigma_x', rms_x)
        self.append('sigma_y', rms_y)
        self.append('sigma_z', rms_z)
        beta = np.sqrt(1 - (gamma**-2))
        self.append('sigma_t', rms_z / (beta * constants.speed_of_light))
        self.append('sigma_p', (rms_e / e_kin))
        self.append('sigma_cp', (rms_e / e_kin) * p)
        self.append('sigma_cp_eV', (rms_e))
        # print('astra = ', rms_e[-1])
        self.append(
            'mux',
            integrate.cumtrapz(x=self['z'], y=1 / self['beta_x'], initial=0))
        self.append(
            'muy',
            integrate.cumtrapz(x=self['z'], y=1 / self['beta_y'], initial=0))
        self.append('eta_x', np.zeros(len(z)))
        self.append('eta_xp', np.zeros(len(z)))

        self.append('ecnx', exn)
        self.append('ecny', eyn)
        self.append('element_name', np.zeros(len(z)))
        self.append('eta_x_beam', np.zeros(len(z)))
        self.append('eta_xp_beam', np.zeros(len(z)))
        self.append('eta_y_beam', np.zeros(len(z)))
        self.append('eta_yp_beam', np.zeros(len(z)))
        self.append('beta_x_beam', rms_x**2 / ex)
        self.append('beta_y_beam', rms_y**2 / ey)
        self.append('alpha_x_beam',
                    (-1 * np.sign(mean_xxp) * rms_x * rms_xp) / ex)
        self.append('alpha_y_beam',
                    (-1 * np.sign(mean_yyp) * rms_y * rms_yp) / ey)
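A short sketch (constant toy beta function) of the phase-advance accumulation above, mu_x(z) = integral of dz / beta_x(z): for a constant beta_x the cumulative trapezoidal integral reduces to z / beta_x.

import numpy as np
from scipy import integrate

z = np.linspace(0., 10., 201)      # longitudinal position [m]
beta_x = np.full_like(z, 5.0)      # toy, constant beta function [m]

mux = integrate.cumtrapz(y=1.0 / beta_x, x=z, initial=0)
assert np.allclose(mux, z / 5.0)   # exact for a constant integrand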
def run_opensees(BIM_file, EVENT_file, event_path, model_script,
                 model_script_path, ndm, getRV):

    sys.path.insert(0, os.getcwd())

    # load the model builder script
    with open(BIM_file, 'r') as f:
        BIM_in = json.load(f)

    model_params = BIM_in['GI']
    model_units = BIM_in['units']

    # convert units if necessary
    if model_units['length'] in ['inch', 'in']:
        model_params['Vs30'] = model_params['Vs30'] * 0.0254
        model_params['DepthToRock'] = model_params['DepthToRock'] * 0.3048
    elif model_units['length'] in ['foot', 'ft', 'feet']:
        model_params['Vs30'] = model_params['Vs30'] * 0.0254
        model_params['DepthToRock'] = model_params['DepthToRock'] * 0.3048

    sys.path.insert(0, model_script_path)

    # Create input motion from SimCenterEvent
    if getRV:
        write_RV(BIM_file, EVENT_file, event_path)
    else:
        get_records(BIM_file, EVENT_file, event_path)
        # load the event file
        with open(EVENT_file, 'r') as f:
            EVENT_in_All = json.load(f)
            EVENT_in = EVENT_in_All['Events'][0]

        event_list = EVENT_in['timeSeries']
        pattern_list = EVENT_in['pattern']

        fileNames = ['xInput', 'yInput']
        # define the time series
        for evt_i, event in enumerate(event_list):

            acc = event['data']
            vel = integrate.cumtrapz(acc, dx=event['dT']) * gravityG
            vel = np.insert(vel, 0, 0.0)
            disp = integrate.cumtrapz(vel, dx=event['dT'])
            disp = np.insert(disp, 0, 0.0)
            time = np.arange(0, event['dT'] * len(acc), event['dT'])
            np.savetxt(fileNames[evt_i] + '.acc', acc)
            np.savetxt(fileNames[evt_i] + '.vel', vel)
            np.savetxt(fileNames[evt_i] + '.disp', disp)
            np.savetxt(fileNames[evt_i] + '.time', time)

        # run the analysis
        shutil.copyfile(os.path.join(model_script_path, model_script),
                        os.path.join(os.getcwd(), model_script))

        build_model(model_params, int(ndm) - 1)

        subprocess.Popen('OpenSees ' + model_script, shell=True).wait()

        # update Event file with acceleration recorded at surface
        acc = np.loadtxt('accelerationElasAct.out')
        acc_surf_x = acc[:, -3] / gravityG
        EVENT_in_All['Events'][0]['timeSeries'][0]['data'] = acc_surf_x.tolist(
        )
        if int(ndm) == 3:
            acc_surf_z = acc[:, -1] / gravityG
            EVENT_in_All['Events'][0]['timeSeries'][1][
                'data'] = acc_surf_z.tolist()

        # EVENT_file2 = 'EVENT2.json' for debug
        with open(EVENT_file, 'w') as f:
            json.dump(EVENT_in_All, f, indent=2)
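A self-contained sketch (synthetic acceleration record in units of g, hypothetical time step and gravity constant) of the double integration above: cumtrapz turns acceleration into velocity and velocity into displacement, inserting a zero sample so the motion starts from rest.

import numpy as np
from scipy import integrate

dt = 0.01                                        # hypothetical time step [s]
t = np.arange(0., 10., dt)
acc = 0.1 * np.sin(2 * np.pi * 1.0 * t)          # toy acceleration record [g]
gravityG = 9.81                                  # assumed gravity constant [m/s^2]

vel = integrate.cumtrapz(acc, dx=dt) * gravityG  # [m/s]
vel = np.insert(vel, 0, 0.0)                     # start from rest
disp = integrate.cumtrapz(vel, dx=dt)            # [m]
disp = np.insert(disp, 0, 0.0)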
    tFill_s=tFill_s,
    tauSRxy_s=tauSRxy_s,
    tauSRl_s=tauSRl_s,
    sigmaBOff_m2=sigmaBOff_m2,
    VRF_V=VRF_V,
    IBSON=1,
    emitBU=emitBU,
    BoffON=1,
    nIPs=2.,
    dt_s=15 * 60.,
    tau_empirical_h=tau_empirical_h,
    tau_empirical_v=tau_empirical_v)

na = np.array

integrated_luminosity_inv_fb = cumtrapz(y=Luminosity_invm2s, x=tt_s) * 1e-43

Luminosity_invcm2s = na(Luminosity_invm2s) * 1e-4
sp = None
pl.close('all')
fig_h = pl.figure(3, figsize=(16, 8))
fig_h.patch.set_facecolor('w')
sp = pl.subplot(2, 3, 1, sharex=sp)
pl.plot(tt_s / 3600., na(bunch_intensity_p1), linewidth=2.)
pl.plot(tt_s / 3600., na(bunch_intensity_p2), linewidth=2.)
pl.gca().ticklabel_format(style='sci', scilimits=(0, 0), axis='y')
pl.ylabel('Bunch intensity [p]')
pl.xlabel('Time [h]')
pl.grid()

sp = pl.subplot(2, 3, 2, sharex=sp)
t = np.array(real_data['drive_lengths'])
t_sim = np.array([x * dt / us for x in range(len(sim_results[0]))])

factor = 1  #omega1*omega2*max_drive_strength**2/(2*np.pi)

hs_1_dot = h1_dot(m,
                  eta,
                  omega1,
                  np.pi / 10,
                  t_sim * us,
                  max_drive_strength,
                  ramp_time=real_data['ramp_time'])
hs_1_dot = np.array([hs_1_dot[0], hs_1_dot[1], hs_1_dot[2]])
E1_dot = np.sum(hs_1_dot * sim_results, axis=0)
W1 = cumtrapz(E1_dot, x=t_sim * us, initial=0)

plt.plot(t_sim, W1 / factor, '-', c=tuple(0.8 * nice_blue))

#### plot real 1 #########################

correct_results_real = real_data['corrected results']
pure_results_real = pure_results(correct_results_real)

times = np.array(real_data['drive_lengths']) * us

pure_results = np.array(
    [pure_results_real['x'], pure_results_real['y'], pure_results_real['z']])

hs_1_dot = h1_dot(m,
                  eta,
    def get_number_of_effective_electrons(self, nat, cumulative=False):
        r"""Compute the number of effective electrons using the Bethe f-sum
        rule.

        The Bethe f-sum rule gives rise to two definitions of the effective
        number (see [*]_), neff1 and neff2:

        .. math::

            n_{\mathrm{eff_{1}}} = n_{\mathrm{eff}}\left(-\Im\left(\epsilon^{-1}\right)\right)

        and:

        .. math::

            n_{\mathrm{eff_{2}}} = n_{\mathrm{eff}}\left(\epsilon_{2}\right)

        This method computes and return both.

        Parameters
        ----------
        nat: float
            Number of atoms (or molecules) per unit volume of the sample.

        Returns
        -------
        neff1, neff2: Signal1D
            Signal1D instances containing neff1 and neff2. The signal and
            navigation dimensions are the same as the current signal if
            `cumulative` is True, otherwise the signal dimension is 0
            and the navigation dimension is the same as the current
            signal.

        Notes
        -----
        .. [*] Ray Egerton, "Electron Energy-Loss Spectroscopy 
           in the Electron Microscope", Springer-Verlag, 2011.

        """

        m0 = constants.value("electron mass")
        epsilon0 = constants.epsilon_0  # Vacuum permittivity [F/m]
        hbar = constants.hbar  # Reduced Planck constant [J·s]
        k = 2 * epsilon0 * m0 / (np.pi * nat * hbar**2)

        axis = self.axes_manager.signal_axes[0]
        if cumulative is False:
            dneff1 = k * simps((-1. / self.data).imag * axis.axis,
                               x=axis.axis,
                               axis=axis.index_in_array)
            dneff2 = k * simps(self.data.imag * axis.axis,
                               x=axis.axis,
                               axis=axis.index_in_array)
            neff1 = self._get_navigation_signal(data=dneff1)
            neff2 = self._get_navigation_signal(data=dneff2)
        else:
            neff1 = self._deepcopy_with_new_data(k * cumtrapz(
                (-1. / self.data).imag * axis.axis,
                x=axis.axis,
                axis=axis.index_in_array,
                initial=0))
            neff2 = self._deepcopy_with_new_data(
                k * cumtrapz(self.data.imag * axis.axis,
                             x=axis.axis,
                             axis=axis.index_in_array,
                             initial=0))

        # Prepare return
        neff1.metadata.General.title = (
            r"$n_{\mathrm{eff}}\left(-\Im\left(\epsilon^{-1}\right)\right)$ "
            "calculated from " + self.metadata.General.title +
            " using the Bethe f-sum rule.")
        neff2.metadata.General.title = (
            r"$n_{\mathrm{eff}}\left(\epsilon_{2}\right)$ "
            "calculated from " + self.metadata.General.title +
            " using the Bethe f-sum rule.")

        return neff1, neff2
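A minimal numpy/scipy sketch (toy loss function, placeholder prefactor k) of the two branches above: with cumulative=False a single number is obtained with simps, while cumulative=True returns the running integral of Im(epsilon)·E via cumtrapz with initial=0.

import numpy as np
from scipy.integrate import cumtrapz, simps

energy_eV = np.linspace(0.5, 50., 500)              # energy-loss axis
eps2 = 2.0 * np.exp(-(energy_eV - 15.)**2 / 20.)    # toy Im(epsilon)
k = 1.0                                             # placeholder prefactor

neff_total = k * simps(eps2 * energy_eV, x=energy_eV)
neff_running = k * cumtrapz(eps2 * energy_eV, x=energy_eV, initial=0)
# the last element of neff_running approaches neff_total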
y_totaldisp = np.zeros(num_samples)
z_totaldisp = np.zeros(num_samples)
x_totalrot = np.zeros(num_samples)
y_totalrot = np.zeros(num_samples)
z_totalrot = np.zeros(num_samples)
x_peakvel = np.zeros(num_samples)
y_peakvel = np.zeros(num_samples)
z_peakvel = np.zeros(num_samples)
x_peakrot = np.zeros(num_samples)
y_peakrot = np.zeros(num_samples)
z_peakrot = np.zeros(num_samples)

classlabels = np.zeros(num_samples, dtype=int)

for i in range(num_samples):
    x_velocity = cumtrapz(movement_rawdata_collected[i]["x acceleration"][:])
    x_peakvel[i] = abs(max(x_velocity, key=abs))
    x_totaldisp[i] = simps(x_velocity)
    y_velocity = cumtrapz(movement_rawdata_collected[i]["y acceleration"][:])
    y_peakvel[i] = abs(max(y_velocity, key=abs))
    y_totaldisp[i] = simps(y_velocity)
    z_velocity = cumtrapz(movement_rawdata_collected[i]["z acceleration"][:])
    z_peakvel[i] = abs(max(z_velocity, key=abs))
    z_totaldisp[i] = simps(z_velocity)

    x_rot_velocity = cumtrapz(movement_rawdata_collected[i]["x gyroscope"][:])
    x_peakrot[i] = abs(max(x_rot_velocity, key=abs))
    x_totalrot[i] = simps(x_rot_velocity)
    y_rot_velocity = cumtrapz(movement_rawdata_collected[i]["y gyroscope"][:])
    y_peakrot[i] = abs(max(y_rot_velocity, key=abs))
    y_totalrot[i] = simps(y_rot_velocity)
Bz = domain.new_field(name='Bz')
B = domain.new_field(name='B')
V['g'] = Shmag * (z)
Vz['g'] = Shmag * (
    z - z + 1
)  # Note this assumes no horizontal variation (i.e. won't work for the non-uniform case)
Bt = np.zeros([nz])
Bz['g'] = np.array(Bzmag * np.ones([nz]))
#zind = np.floor( next((x[0] for x in enumerate(z) if x[1]>BLH)))
#Bz['g'][0:zind] = BzmagBL

tpoint = int(np.floor(next((x[0] for x in enumerate(z) if x[1] > BLH))))
Bstr = -0.5 * (np.tanh((-z + z[tpoint]) / 40) + 1)

Bz['g'] = Bz['g'] * 10**(2 * Bstr)
Bt[1:nz] = integrate.cumtrapz(Bz['g'], z)
B['g'] = Bt

problem = de.EVP(domain,
                 variables=['u', 'v', 'w', 'b', 'p'],
                 eigenvalue='omg',
                 tolerance=1e-10)
problem.parameters['tht'] = tht
problem.parameters['V'] = V
problem.parameters['Uz'] = Uz
problem.parameters['Vz'] = Vz
problem.parameters['NS'] = Bz
problem.parameters['f'] = f
problem.parameters['tht'] = tht
problem.parameters['Pr'] = Pr
problem.parameters['k'] = 0.  # will be set in loop
Exemple #54
0
def load_picoscope(shot_number,
                   maxrange=1,
                   scopenum=4,
                   time_range=[-2.0, 198.0],
                   location='',
                   plot=False):
    def butter_highpass(cutoff, fs, order=5):
        nyq = 0.5 * fs
        normal_cutoff = cutoff / nyq
        b, a = signal.butter(order,
                             normal_cutoff,
                             btype='highpass',
                             analog=False)
        return b, a

    def butter_highpass_filter(data, cutoff, fs, order=5):
        b, a = butter_highpass(cutoff, fs, order=order)
        y = signal.filtfilt(b, a, data)
        return y

    if (type(scopenum) == int):
        if scopenum == 1:
            scopename = '03102020pico1/'
        elif scopenum == 2:
            scopename = '03102020pico2/'
        elif scopenum == 3:
            scopename = '03102020pico3/'
        elif scopenum == 4:
            scopename = '03102020pico4/'
        elif scopenum == 5:
            scopename = '03102020pico5/'
        elif scopenum == 6:
            scopename = '03102020pico6/'
        elif scopenum == 7:
            scopename = '03102020pico7/'
        else:
            scopename = '03102020pico8/'
    else:
        print(f'scopenum is not an int, {scopenum}')
        sys.exit()

    probe_dia = 0.003175  #m (1/8'' probe)
    probe_dia = 0.00158755  #m (1/16'' probe)
    ##hole_sep = 0.001016     #m (1/16''probe)  ## Apparently unused variable
    r_probe_area = np.pi * (probe_dia / 2)**2
    #tz_probe_area = probe_dia*hole_sep  ## Apparently unused variable
    startintg_index = 0  #3000
    meancutoff = 1000
    ##### load file
    # The location and filename lines must be updated to your system.
    location = '/Volumes/CarFlor/Research/Data/2020/03102020/'
    filename = '20200310-0001 ('

    print(location + scopename + filename + str(shot_number) + ').txt')
    try:
        data = np.loadtxt(location + scopename + filename + str(shot_number) +
                          ').txt',
                          skiprows=2,
                          unpack=True)
    except OSError as err:
        print(
            "Double-check that you have updated the location variable for your "
            "OS (mac, pc): ", err)
    ##### return data
    dataraw = data

    print(dataraw.shape)
    Bdotraw1 = dataraw[1, :]
    Bdotraw2 = dataraw[2, :]
    Bdotraw3 = dataraw[3, :]
    #Bdotraw4 = dataraw[4, :]
    data = data[:, startintg_index:]

    time_ms = data[0, :]
    time_s = time_ms * 1e-6
    timeB_s = time_s[1:]
    timeB_ms = time_ms[1:]
    timeraw = dataraw[0, :]

    Bdot1 = data[1, :] - np.mean(data[1, 0:meancutoff])
    neginfs = np.isneginf(Bdot1)
    Bdot1[np.where(neginfs)] = -maxrange
    posinfs = np.isposinf(Bdot1)
    Bdot1[np.where(posinfs)] = maxrange

    Bdot2 = data[2, :] - np.mean(data[2, 0:meancutoff])
    neginfs = np.isneginf(Bdot2)
    Bdot2[np.where(neginfs)] = -maxrange
    posinfs = np.isposinf(Bdot2)
    Bdot2[np.where(posinfs)] = maxrange

    Bdot3 = data[3, :] - np.mean(data[3, 0:meancutoff])
    neginfs = np.isneginf(Bdot3)
    Bdot3[np.where(neginfs)] = -maxrange
    posinfs = np.isposinf(Bdot3)
    Bdot3[np.where(posinfs)] = maxrange

    #### 03102020 does not use the fourth pico port for magnetic data
    """Bdot4 = data[4,:] - np.mean(data[4, 0:meancutoff])
    neginfs = np.isneginf(Bdot4)
    Bdot4[np.where(neginfs)] = -maxrange
    posinfs = np.isposinf(Bdot4)
    Bdot4[np.where(posinfs)] = maxrange"""

    B1 = sp.cumtrapz(Bdot1 / r_probe_area, time_s) * 1e4  #Gauss
    B2 = sp.cumtrapz(Bdot2 / r_probe_area, time_s) * 1e4  #Gauss
    B3 = sp.cumtrapz(Bdot3 / r_probe_area, time_s) * 1e4  #Gauss
    #B4 = sp.cumtrapz(Bdot4/r_probe_area,time_s)*1e4 #Gauss
    #Bt7 = 3.162*sp.cumtrapz(Btdot7/tz_probe_area,time_s)*1e4#Gauss
    #Bt9 = 3.162*sp.cumtrapz(Btdot9/tz_probe_area,time_s)*1e4#Gauss
    #Bz7 = sp.cumtrapz(Bzdot7/tz_probe_area,time_s)*1e4#Gauss
    #Bz9 = sp.cumtrapz(Bzdot9/tz_probe_area,time_s)*1e4#Gauss
    #filtering

    #fps = 30
    #sine_fq = 10 #Hz
    #duration = 10 #seconds
    #sine_5Hz = sine_generator(fps,sine_fq,duration)
    #sine_fq = 1 #Hz
    #duration = 10 #seconds
    #sine_1Hz = sine_generator(fps,sine_fq,duration)

    #sine = sine_5Hz + sine_1Hz

    #filtered_sine = butter_highpass_filter(sine.data,10,fps)

    #Integration and Calibration
    #Bx =sp.cumtrapz(Bxdot/probe_area,time_s)
    #Bx = 3.162*Bx/1.192485591065652224e-03

    #By =sp.cumtrapz(Bydot/probe_area,time_s)
    #By = 3.162*By/1.784763055992550198e-03

    #Bz =sp.cumtrapz(Bzdot/probe_area,time_s)
    #Bz = 3.162*Bz/1.297485014039849059e-03
    #meanBx = np.mean(Bx)
    # Filtering
    B1filt = butter_highpass_filter(B1, 5e4, 125e6, order=3)
    B2filt = butter_highpass_filter(B2, 5e4, 125e6, order=3)
    B3filt = butter_highpass_filter(B3, 5e4, 125e6, order=3)
    #B4filt = butter_highpass_filter(B4, 5e4, 125e6, order = 3)
    #Btot = np.sqrt(Bxfilt**2+Byfilt**2+Bzfilt**2)
    #Btotave=Btotave+Btot

    #if plot:
    #    plt.figure(1)
    #    plt.plot(time,data[1,:])
    #    plt.figure(2)
    #    plt.plot(time[1:],Btot)

    return time_ms, time_s, timeB_s, timeB_ms, Bdot1, Bdot2, Bdot3, B1, B2, B3, B1filt, B2filt, B3filt, Bdotraw1, Bdotraw2, Bdotraw3, timeraw
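A small sketch (synthetic B-dot trace, probe area taken from the values above) of the magnetic-probe integration: the measured dB/dt signal is divided by the probe area, cumulatively integrated over time and converted from Tesla to Gauss.

import numpy as np
import scipy.integrate as sp

time_s = np.linspace(0., 200e-6, 10000)             # [s]
probe_dia = 0.00158755                              # m (1/16'' probe)
r_probe_area = np.pi * (probe_dia / 2)**2           # m^2

Bdot = 1e-4 * np.sin(2 * np.pi * 5e4 * time_s)      # toy probe signal
B = sp.cumtrapz(Bdot / r_probe_area, time_s) * 1e4  # Gauss
# note: B has one sample fewer than time_s because initial=0 is not used here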
def tr_rate_equation_simul_ext(c_Tm, f_ab_1, f_ab_2, f_s3, f_q22, f_q23,
                               f_w2_nr, f_w3nr, dt_coeff, time_target,
                               num_pump, p_start, p_end):

    P_pump_array_power = np.linspace(
        p_start, p_end,
        num=num_pump)  # The base-10 logarithm of pumping power density

    P_pump_array = 10**(P_pump_array_power)  # Pumping power density in [W/m2]

    w2_r = 162.60  # Radiative relaxation rate constant at the 3F4 state in [1/s]
    w2_nr = 162.60 * f_w2_nr + 0.00239  # Non-radiative relaxation rate constant at the 3F4 state in [1/s]
    w3_r = 636.01  # Radiative relaxation rate constant at the 3H4 state in [1/s]
    w3_nr = 6.02 + 162.60 * f_w3nr  # Non-radiative relaxation rate constant at the 3H4 state in [1/s]

    w2 = w2_r + w2_nr  # Total relaxation rate constant at the 3F4 state in [1/s]

    print("c_Tm: ", c_Tm)

    b32 = 0.144  # branching ratio from the 3H4 state to the 3F4 state

    v_pump = np.array([9398])  # excitation wavenumber in [1/cm]
    dt_coeff = 0.4  # time-step constant for variable step-size method
    hp = 6.626e-34  # Planck constant in [J*s]
    c = 2.998e8  # the speed of light in [m/s]
    Eph = v_pump * 1e2 * c * hp  # photon energy in [J]
    Fai_pump_1 = P_pump_array / Eph[
        0]  # photon incident flux at Gaussian peak in [#/m^2/s]

    ab_cs_12_1 = f_ab_1 * 0.12e-25  # the ground-state absorption cross section for 3H6-3H4 transition in [cm^2]
    ab_cs_23_1 = f_ab_2 * 3.2e-25  # the excited-state absorption cross section for 3F4-3H4 transition in [cm^2]

    s3 = f_s3 * (
        1.6 * c_Tm**2) * 1000.0  # the cross relaxation rate parameter in [1/s]

    a_uc = f_q22 * 0.32 * 1000  # A coefficient for energy transfer rate parameter (3F4 + 3F4 -> 3H6 + 3F5) in [1/s]
    a_inv = f_q23 * 0.09 * 1000  # energy transfer upconversion rate parameter (3F4 + 3F4 -> 3H6 + 3H4) in [1/s]

    Q22 = (a_uc * c_Tm**3) / (
        c_Tm**2 + 4.3**2
    )  # energy transfer rate parameter (3F4 + 3F4 -> 3H6 + 3F5) in [1/s]
    Q23 = (a_inv * c_Tm**3) / (
        c_Tm**2 + 4.3**2
    )  # energy transfer upconversion rate parameter (3F4 + 3F4 -> 3H6 + 3H4) in [1/s]

    index = 0

    A_fl_array = np.array(
        [[0, w2, (1.0 - b32) * w3_r], [0, -w2, b32 * w3_r + w3_nr],
         [0, 0,
          -w3_r - w3_nr]])  # Total relaxation rate parameter tensor in [1/s]

    time_array = []  # time data in [s]
    population_array = [
    ]  # populations with time; the sum of the populations is set to 1
    population_steady_array = []  # populations at steady state

    for Fai_1 in Fai_pump_1:
        index += 1
        n_array = np.array(
            [1.0, 0.0, 0.0]
        )  # initial populations when time is zero, all the ions are at the ground state 3H6
        pump_coef = np.array([[-ab_cs_12_1,  0.0          , 0.0 ],\
                              [ ab_cs_12_1, -ab_cs_23_1   , 0.0 ],\
                              [0.0        ,  ab_cs_23_1   , 0.0 ]]) * Fai_1 # absorption rate tensor in [1/s]

        time_now = 0.0  # time at the initial state in [s]
        data_now = n_array  # population at time 0

        time_array_i = []  # time data under photon influx Fai_1 in [s]
        population_array_i = [
        ]  # population with time under photon influx Fai_1

        time_array_i.append(time_now)
        population_array_i.append(data_now)
        print(index, time_target)

        ##### Calculation of time-resolved population until time_target with laser pumping
        while time_now < time_target:

            A_max_i = rhs_calc_fordt(
                n_array, A_fl_array, pump_coef, Q22, Q23,
                s3)  # Maximum rate parameter in rate parameter tensor in [1/s]

            dt = dt_coeff / A_max_i  # time step in [s]

            y_dt = runge_kutta_4(rhs_calc, time_now, data_now, dt, A_fl_array,
                                 pump_coef, Q22, Q23,
                                 s3)  # populations after the time step t + dt

            data_last = data_now
            data_now = y_dt
            time_last = time_now
            time_now = time_last + dt

            time_array_i.append(time_now)
            population_array_i.append(data_now)

        population_steady_array.append(data_now)

        pump_coef = np.array([[-ab_cs_12_1,  0.0          , 0.0 ],\
                      [ ab_cs_12_1, -ab_cs_23_1   , 0.0 ],\
                      [0.0        ,  ab_cs_23_1   , 0.0 ]]) * 0.0                   # After time_target, pump_power is assumed to be zero

        ##### Calculation of time-resolved population until time_target_2 without laser pumping
        while time_now < time_target_2:

            A_max_i = rhs_calc_fordt(
                n_array, A_fl_array, pump_coef, Q22, Q23,
                s3)  # Maximum rate parameter in rate parameter tensor in [1/s]

            dt = dt_coeff / A_max_i  # time step in [s]

            y_dt = runge_kutta_4(rhs_calc, time_now, data_now, dt, A_fl_array,
                                 pump_coef, Q22, Q23,
                                 s3)  # populations after the time step t + dt

            data_last = data_now
            data_now = y_dt
            time_last = time_now
            time_now = time_last + dt

            time_array_i.append(time_now)
            population_array_i.append(data_now)

        time_array.append(np.array(time_array_i))
        population_array.append(np.array(population_array_i))

    time_array = np.array(time_array)
    population_array = np.array(population_array)
    P_pump_array = np.array(P_pump_array)
    population_steady_array = np.array(population_steady_array)

    population_3H4_array = population_steady_array[:, 2]

    ##### Integrate over pump power to account for the Gaussian intensity profile of the laser beam
    f_I = population_3H4_array / P_pump_array

    population_3H4_gaussian_array = cumtrapz(f_I,
                                             P_pump_array,
                                             initial=min(P_pump_array))

    population_3H4_gaussian_array = np.array(population_3H4_gaussian_array)
    return time_array, population_array, P_pump_array, population_steady_array, population_3H4_gaussian_array
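The time stepping above relies on a runge_kutta_4 helper that is not shown in this excerpt. A minimal sketch of a classical fourth-order Runge-Kutta step with the call pattern used above (right-hand side, current time, current state, step size, extra rate parameters) could look as follows, under the assumption that rhs_calc takes the time, the populations and the rate parameters in that order.

def runge_kutta_4(rhs, t, y, dt, *rate_params):
    # classical RK4 step: four slope evaluations combined with weights 1, 2, 2, 1
    k1 = rhs(t, y, *rate_params)
    k2 = rhs(t + 0.5 * dt, y + 0.5 * dt * k1, *rate_params)
    k3 = rhs(t + 0.5 * dt, y + 0.5 * dt * k2, *rate_params)
    k4 = rhs(t + dt, y + dt * k3, *rate_params)
    return y + dt * (k1 + 2.0 * k2 + 2.0 * k3 + k4) / 6.0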
Exemple #56
0
def identify_kco2_wltp_correction_factor(
        drive_battery_electric_powers, service_battery_electric_powers,
        co2_emissions, times, force_on_engine, after_treatment_warm_up_phases,
        velocities, is_hybrid=True):
    """
    Identifies the kco2 correction factor [g/Wh].

    :param drive_battery_electric_powers:
        Drive battery electric power [kW].
    :type drive_battery_electric_powers: numpy.array

    :param service_battery_electric_powers:
        Service battery electric power [kW].
    :type service_battery_electric_powers: numpy.array

    :param force_on_engine:
        Phases when engine is on because parallel mode is forced [-].
    :type force_on_engine: numpy.array

    :param after_treatment_warm_up_phases:
        Phases when engine speed is affected by the after treatment warm up [-].
    :type after_treatment_warm_up_phases: numpy.array

    :param velocities:
        Vehicle velocity [km/h].
    :type velocities: numpy.array

    :param co2_emissions:
        CO2 instantaneous emissions vector [CO2g/s].
    :type co2_emissions: numpy.array

    :param times:
        Time vector.
    :type times: numpy.array

    :param is_hybrid:
        Is the vehicle hybrid?
    :type is_hybrid: bool

    :return:
        kco2 correction factor [g/Wh].
    :rtype: float
    """
    if not is_hybrid:
        return sh.NONE
    from scipy.integrate import cumtrapz
    from sklearn.linear_model import RANSACRegressor, Lasso
    b = ~(force_on_engine | after_treatment_warm_up_phases)
    e = np.where(
        b, drive_battery_electric_powers + service_battery_electric_powers, 0
    )
    e = cumtrapz(e, times, initial=0) / 3.6
    co2 = cumtrapz(np.where(b, co2_emissions, 0), times, initial=0)
    km = cumtrapz(np.where(b, velocities / 3.6, 0), times, initial=0) / 1000
    # noinspection PyTypeChecker
    it = co2_utl.sliding_window(list(zip(km, zip(km, e, co2))), 5)
    d = np.diff(np.array([(v[0][1], v[-1][1]) for v in it]), axis=1)[:, 0, :].T
    e, co2 = d[1:] / d[0]
    d0 = t0 = -float('inf')
    b, dkm = [], .5
    dt = dkm / np.mean(velocities) * 3600
    for i, (d, t) in enumerate(zip(km, times)):
        if d > d0 and t > t0:
            d0, t0 = d + dkm, t + dt
            b.append(i)
    b = np.array(b)
    m = RANSACRegressor(
        random_state=0,
        base_estimator=Lasso(random_state=0, positive=True)
    ).fit(e[b, None], co2[b])
    return float(m.estimator_.coef_)
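In the function above, passing initial=0 to cumtrapz keeps each cumulative integral the same length as times, so the energy, CO2 and distance vectors stay aligned sample by sample. A minimal, self-contained illustration with made-up constant-power data (note that newer SciPy releases expose the same routine as cumulative_trapezoid):

import numpy as np
from scipy.integrate import cumtrapz

times = np.linspace(0.0, 10.0, 11)      # [s], hypothetical sampling grid
powers = np.full_like(times, 3.6)       # [kW], constant power for simplicity

energy_wh = cumtrapz(powers, times, initial=0) / 3.6  # kW*s -> Wh
print(len(energy_wh) == len(times))     # True: initial=0 preserves the length
print(energy_wh[-1])                    # 10.0 Wh (3.6 kW for 10 s)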
ay = df['y-axis acceleration'].tolist()
az = df['z-axis acceleration'].tolist()

gx = df['x-axis gyroscope'].tolist()
gy = df['y-axis gyroscope'].tolist()
gz = df['z-axis gyroscope'].tolist()

t = df['Time (ms)'].tolist()

startPoint = 1
windowBefore = 0
windowAfter = 999

df = df[startPoint - windowBefore - 1:startPoint + windowAfter]

gx_int = integrate.cumtrapz(df['x-axis gyroscope'], dx=0.01)
gy_int = integrate.cumtrapz(df['y-axis gyroscope'], dx=0.01)
gz_int = integrate.cumtrapz(df['z-axis gyroscope'], dx=0.01)

gx_int = gx_int.tolist()
gy_int = gy_int.tolist()
gz_int = gz_int.tolist()

gx_int.insert(0, 0)
gy_int.insert(0, 0)
gz_int.insert(0, 0)

ax_trimmed = df['x-axis acceleration'].tolist()
ay_trimmed = df['y-axis acceleration'].tolist()
az_trimmed = df['z-axis acceleration'].tolist()
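The three insert(0, 0) calls above compensate for cumtrapz returning one sample fewer than its input. An equivalent option, mirroring the gyroscope integration above on a small made-up array, is to pass initial=0, which prepends the starting value and keeps the output length equal to the input length:

import numpy as np
from scipy import integrate

rates = np.array([0.0, 1.0, 2.0, 3.0])                 # hypothetical gyroscope samples
angle = integrate.cumtrapz(rates, dx=0.01, initial=0)  # same length as the input
print(angle)                                           # [0.    0.005 0.02  0.045]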
        def variableResistorCircuitModelFit(omegaDimIn, Cox, Cstern, c0NomIn,
                                            k):
            k = k * 1e-6
            run = runModels.implementationClass()
            NFingers = combDriveParams.NFingers
            NCombs = combDriveParams.NCombs
            b = combDriveParams.b
            g = combDriveParams.d
            epsilonBulk = params.epsilonR
            epsilon0 = params.epsilon0
            epsilonOx = params.epsilonOx
            c0NomIn = c0NomIn * 100e-6
            #print(c0NomIn)
            c0NomIn = (c0NomIn * 1000.0) * params.NA
            lambda_d = np.sqrt(
                (params.epsilonR * params.epsilon0 * params.kB * params.T) /
                (2 * (params.eCharge**2) * c0NomIn))
            c0NomIn = c0NomIn / (params.NA * 1000)
            eps = lambda_d / params.L
            #print("eps",eps,c0NomIn)
            oxideLayer = 1.0
            sternLayer = 1.0
            doubleLayer = 1.0

            #Define Resistance(s) and Capacitance(s)
            C0 = 2 * eps  #Dimensionless linear component electric double layer capacitor
            c0 = 1  #Dimensionless bulk concentration of single ion
            R = 1. / (2 * c0)  #Dimensionless initial resistance of bulk
            #            Cox=alphaOx*C0*oxideLayer  #Dimensionless capacitance of oxide
            #            Cstern=alphaStern*C0*sternLayer #Dimensionless capacitance of Stern Layer
            C0 = C0 * doubleLayer

            #            #CBulk=(params.epsilonR/params.epsilonR)*(lambda_d/params.L)*C0*bulkCapacitance
            #            Cox=(params.epsilonOx/params.epsilonR)*(lambda_d/params.lambda_Ox)*C0*oxideLayer  #Dimensionless capacitance of oxide
            #            Cstern=(params.epsilonStern/params.epsilonR)*(lambda_d/params.lambda_Stern)*C0*sternLayer #Dimensionless capacitance of Stern Layer
            #            C0=0;
            ##            Cox = (params.epsilonOx/params.epsilonR)*(lambda_d/params.L)*C0
            #            Cstern = (params.epsilonStern/params.epsilonR)*(lambda_d/params.lambda_Stern)*C0

            V_T = (params.kB * params.T) / params.eCharge  #Thermal Voltage
            #print(model.circuitModel)
            #Define parameters relevant to circuit model
            circuitParams = params
            circuitParams.C0 = C0
            circuitParams.Cox = Cox
            circuitParams.Cstern = Cstern
            #circuitParams.CB=CBulk
            circuitParams.R = R

            delta_phi0 = Vpp / V_T  #Make applied voltage dimensionless

            #print(delta_phi0)
            #            nFreq = len(omegaDimIn);
            #            class initConditions:
            #                c0Init=np.reshape(np.array([c0*1.0]*nFreq),(nFreq,1));
            #                R0Init=np.reshape(np.array([R*1.0]*nFreq),(nFreq,1));
            #                q0Init=np.reshape(np.array([0.0]*nFreq),(nFreq,1)); #0;
            #                v0Init=np.reshape(np.array([0.0]*nFreq),(nFreq,1)); #0;
            #                vOxInit=np.reshape(np.array([0.0]*nFreq),(nFreq,1)); #0;
            #                vSternInit=np.reshape(np.array([0.0]*nFreq),(nFreq,1)); #0;
            #                vBInit=np.reshape(np.array([0.0]*nFreq),(nFreq,1));#0;#delta_phi0

            class initConditions:
                c0Init = np.array([c0 * 1.0])
                R0Init = np.array([R * 1.0])
                q0Init = np.array([0.0])
                v0Init = np.array([0.0])
                vOxInit = np.array([0.0])
                vSternInit = np.array([0.0])
                vBInit = np.array([0.0])  # alternatively delta_phi0

            displ = np.zeros((len(omegaDimIn), 1), dtype=float)
            #frequency = omegaDimIn/(params.Di/(params.L**2))
            uniqueOmega = np.unique(omegaDimIn)

            t0 = 0
            nPeaks = 20
            for chooseFrequency in range(len(uniqueOmega)):
                idx = np.where(omegaDimIn == uniqueOmega[chooseFrequency])[0]
                frequency = uniqueOmega[chooseFrequency] / (params.Di /
                                                            (params.L**2))
                tEnd = (nPeaks * 2 * np.pi) / frequency
                dt0 = 0.01 / frequency
                dt = dt0 / (2**0)
                dx0 = 1. / 50
                dx = dx0 / (2**0)
                nTime = int((tEnd - t0) / dt)  # number of samples passed to np.linspace must be an integer

                time = np.linspace(0, tEnd, nTime)
                m = int(1. / dx)
                x = np.linspace(0, 1, m)
                initConditions.cInit = (2 * c0 * np.ones((m, 1)))

                [
                    chargeOut, currentOut, vEdlOut, vOxOut, vSternOut, cOut,
                    cTotOut, ROut, vBulkOut, timeBulk
                ] = run.implementCircuitModel(initConditions, circuitParams,
                                              model, time, dx, eps, frequency,
                                              delta_phi0)
                zero_crossings = np.where(np.diff(np.signbit(vBulkOut)))[0]
                zero_crossings[0] = zero_crossings[0] - 1
                zero_crossings[-1] = zero_crossings[-1] + 1
                vBulkRMS = np.sqrt(
                    integrate.cumtrapz(
                        vBulkOut[zero_crossings[-3]:zero_crossings[-1]]**2,
                        time[zero_crossings[-3]:zero_crossings[-1]]) /
                    (time[zero_crossings[-3] + 1:zero_crossings[-1]] -
                     time[zero_crossings[-3]])) * V_T
                #displ[chooseFrequency]= params.epsilonR*(alpha)*(vBulkRMS[-1]**2)
                displ[idx] = (1.0 / k) * (
                    (NFingers * NCombs * epsilon0 * epsilonBulk * b) /
                    (g)) * (vBulkRMS[-1]**2)

            return displ.ravel()
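For context, the vBulkRMS expression above is a running trapezoidal estimate of the root-mean-square value of vBulkOut between zero crossings. The same idea can be checked in isolation on a pure sine wave, whose RMS should come out close to amplitude/sqrt(2):

import numpy as np
from scipy import integrate

t = np.linspace(0.0, 2.0 * np.pi, 2001)   # one full period
v = 3.0 * np.sin(t)                       # amplitude 3 -> exact RMS = 3/sqrt(2)

# running mean of v**2 over elapsed time, then square root: trapezoidal RMS estimate
mean_square = integrate.cumtrapz(v**2, t) / (t[1:] - t[0])
rms = np.sqrt(mean_square[-1])
print(rms, 3.0 / np.sqrt(2.0))            # both approximately 2.1213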
Exemple #59
0
plt.grid()
plt.xlabel(r'$1/T$ in 1/K')
plt.ylabel(r'$ln(I)$ ')
plt.legend(loc="best")
plt.tight_layout()
plt.savefig('Dipol2Anlauf.png')
plt.show()

W1 = params2[0]*const.k/const.e  # calculate the activation energy W = c*k and convert to eV
print('Activation energy from the initial-rise fit (eV) =', W1)


##### Polarization approach / integral
Tint = T[10:42]  # values used for the integral
Iint = Iohne[10:42]  # values used for the integral
I_int = integrate.cumtrapz(Iint, Tint, initial=Iint[0])  # compute the integral
IInt = np.log(I_int/Iint)

def Integral(Tint,Iint,H):
    array = np.array([])
    for b in Tint:
        array = np.append(array, np.trapz(Iint[Tint >= b], Tint[Tint >= b]))
    return array

Integ = Integral(Tint, Iint, H)
print('Integ =', Integ)
Integ = Integ/Iint
Integ = Integ[Integ > 0]  # throw out negative values
Integ = np.log(Integ)
print('Integ =', Integ)
#print('Iint', Iint)
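The loop inside Integral evaluates, for every temperature in Tint, the tail integral from that temperature to the end of the range. Assuming Tint is monotonically increasing, the same vector can be obtained from a single cumtrapz call over the Tint and Iint arrays defined above, since the tail integral is the total integral minus the cumulative one:

cum = integrate.cumtrapz(Iint, Tint, initial=0)  # integral from Tint[0] up to each T
tail = cum[-1] - cum                             # integral from each T up to Tint[-1]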
Exemple #60
0
#plt.xlim()
#plt.ylim()
plt.legend(loc='upper left')
plt.show()
print("Updated Offset first 200 samples:", zsMean)
########################
#      INTEGRATOR      #
########################
# Variable extraction
xAcc = DataSet["Denoised + Offset Removed X Accel. (g)"]
yAcc = DataSet["Denoised + Offset Removed Y Accel. (g)"]
zAcc = DataSet["Denoised + Offset Removed Z Accel. (g)"]
time = DataSet["Time (all)"]

# Integrating acceleration vectors, producing velocities
xVel = it.cumtrapz(xAcc, time)
yVel = it.cumtrapz(yAcc, time)
zVel = it.cumtrapz(zAcc, time)

# Plotting Velocities in X, Y, and Z
time = np.resize(time, time.size - 1)
plt.figure(figsize=(10, 6))
plt.plot(time, xVel, linewidth=2.25, alpha=0.6, label='x', color='r')
plt.plot(time, yVel, linewidth=2.25, alpha=0.6, label='y', color='g')
plt.plot(time, zVel, linewidth=2.25, alpha=0.6, label='z', color='b')
plt.title("Velocities in X, Y, and Z")
plt.xlabel("Time (s)")
plt.ylabel("Velocity (m/s)")
plt.grid()
plt.xlim()
plt.ylim()