Example #1
def SF_SD(m, wavelength, dp, ndp, minAngle=0, maxAngle=180, angularResolution=0.5, space='theta', angleMeasure='radians', normalization=None):
#  http://pymiescatt.readthedocs.io/en/latest/forward.html#SF_SD
  _steps = int(1+(maxAngle-minAngle)/angularResolution)
  ndp = coerceDType(ndp)
  dp = coerceDType(dp)
  SL = np.zeros(_steps)
  SR = np.zeros(_steps)
  SU = np.zeros(_steps)
  kwargs = {'minAngle':minAngle,
            'maxAngle':maxAngle,
            'angularResolution':angularResolution,
            'space':space,
            'normalization':None}
  for n,d in zip(ndp,dp):
    measure,l,r,u = ScatteringFunction(m,wavelength,d,**kwargs)
    SL += l*n
    SR += r*n
    SU += u*n
  if normalization in ['n','N','number','particles']:
    _n = trapz(ndp,dp)
    SL /= _n
    SR /= _n
    SU /= _n
  elif normalization in ['m','M','max','MAX']:
    SL /= np.max(SL)
    SR /= np.max(SR)
    SU /= np.max(SU)
  elif normalization in ['t','T','total','TOTAL']:
    SL /= trapz(SL,measure)
    SR /= trapz(SR,measure)
    SU /= trapz(SU,measure)
  return measure,SL,SR,SU
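A minimal usage sketch for the function above (assuming PyMieScatt is installed; the refractive index and log-normal size distribution below are illustrative):

import numpy as np
from PyMieScatt import SF_SD

dp = np.linspace(50, 1000, 100)                        # diameters, nm
ndp = np.exp(-np.log(dp / 200.0)**2 / (2 * 0.5**2))    # unnormalized log-normal counts
theta, SL, SR, SU = SF_SD(1.59 + 0.0j, 532, dp, ndp, normalization='t')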
Example #2
    def update_area_pattern(self, new_pos=True):
        with self.visuals_changed.hold_and_emit():
            if new_pos:
                # y value at the start point: take the minimum in a ±0.1 window around area_startx
                condition = (self.data_x >= self.area_startx - 0.1) & (self.data_x <= self.area_startx + 0.1)
                section = np.extract(condition, self.data_y[:, 0])
                self.avg_area_starty = np.min(section)

                # y value at the end point: take the minimum in a ±0.1 window around area_endx
                condition = (self.data_x >= self.area_endx - 0.1) & (self.data_x <= self.area_endx + 0.1)
                section = np.extract(condition, self.data_y[:, 0])
                self.avg_area_endy = np.min(section)

                # Calculate new bg slope
                self.area_slope = (self.avg_area_starty - self.avg_area_endy) / (self.area_startx - self.area_endx)

            # Get the x-values in between start and end point:
            condition = (self.data_x >= self.area_startx) & (self.data_x <= self.area_endx)
            section_x = np.extract(condition, self.data_x)
            section_y = np.extract(condition, self.data_y)
            bg_curve = (self.area_slope * (section_x - self.area_startx) + self.avg_area_starty)

            #Calculate the peak area:
            self.area_result = abs(trapz(section_y, x=section_x) - trapz(bg_curve, x=section_x))


            # Calculate the new y-values:
            self.area_pattern = (section_x, bg_curve, section_y)
Example #3
from numpy import zeros, array, linspace
from numpy.linalg import norm
from scipy.integrate import trapz


def find_basis_normal(q):
    """
    Finds the basis normal to the srvf

    :param q: numpy ndarray of shape (2,M) of M samples

    :rtype: list of numpy ndarray
    :return basis: list containing basis vectors

    """
    n, T = q.shape

    f1 = zeros((n, T))
    f2 = zeros((n, T))
    for i in range(0, T):
        f1[:, i] = q[0, i] * q[:, i] / norm(q[:, i]) + array([norm(q[:, i]),
                                                              0])
        f2[:, i] = q[1, i] * q[:, i] / norm(q[:, i]) + array([0,
                                                              norm(q[:, i])])

    h3 = f1
    h4 = f2
    integrandb3 = zeros(T)
    integrandb4 = zeros(T)
    for i in range(0, T):
        a = q[:, i].T
        integrandb3[i] = a.dot(h3[:, i])
        integrandb4[i] = a.dot(h4[:, i])

    b3 = h3 - q * trapz(integrandb3, linspace(0, 1, T))
    b4 = h4 - q * trapz(integrandb4, linspace(0, 1, T))

    basis = [b3, b4]

    return (basis)
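A quick smoke test of the function above (any (2, M) array of SRVF samples works; the circle is illustrative):

import numpy as np

t = np.linspace(0, 2 * np.pi, 100)
q = np.vstack((np.cos(t), np.sin(t)))   # (2, M) SRVF samples
b3, b4 = find_basis_normal(q)
print(b3.shape, b4.shape)               # (2, 100) (2, 100)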
Example #4
    def correl_swsw(self, angle, nz1, nz2, h1, h2, method='quad'):
        """Correlation coefficients between two spherical waves."""
        alpha = angle * 4.85 * 1.e-6  # arcseconds to radians
        R = self.pupil_diameter / 2.
        R1 = (h1 - self.h_profile) / h1 * R
        R2 = (h2 - self.h_profile) / h2 * R
        zeta = alpha * self.h_profile / R1
        w = R2 / R1
        Lam = 2. * np.pi * R1 / self.large_scale
        n1, m1 = zernike.noll2zern(nz1)
        n2, m2 = zernike.noll2zern(nz2)
        k1, k2 = _kvalues(n1, n2, m1, m2, nz1, nz2)
        results = np.zeros(len(self.h_profile))
        for idx in np.arange(len(self.h_profile)):
            if method == 'quad':
                result_quad = quad(_chassat_integral, 0, np.inf,
                                   args=(zeta[idx], Lam[idx], w[idx], k1, k2, n1, n2, m1, m2))
                results[idx] = result_quad[0] * self.cn2[idx] * R1[idx]**(5./3.)
            elif method == 'romberg':
                result_quad = romberg(_modified_chassat, 1.e-26, np.pi/2.,
                                      args=(zeta[idx], Lam[idx], w[idx], k1, k2, n1, n2, m1, m2),
                                      vec_func=False)
                results[idx] = result_quad * self.cn2[idx] * R1[idx]**(5./3.)
        if len(results) < 2:
            final_integral = results / (self.cn2 * R1**(5./3.))
        else:
            final_integral = trapz(results, x=self.h_profile) / trapz(self.cn2 * R1**(5./3.), x=self.h_profile)
        final_integral = 3.895 * (-1.)**((n1+n2-m1-m2)/2.) * np.sqrt((n1+1.)*(n2+1.)) * self.dr0**(5./3.) * final_integral
        return final_integral
Example #5
from numpy import zeros, linspace
from scipy.integrate import trapz


def calc_j(basis):
    """
    Calculates Jacobian matrix from normal basis

    :param basis: list of numpy ndarray of shape (2,M) of M samples basis

    :rtype: numpy ndarray
    :return j: Jacobian

    """
    b1 = basis[0]
    b2 = basis[1]
    T = b1.shape[1]
    integrand11 = zeros(T)
    integrand12 = zeros(T)
    integrand22 = zeros(T)

    for i in range(0, T):
        a = b1[:, i].T
        b = b2[:, i].T
        integrand11[i] = a.dot(b1[:, i])
        integrand12[i] = a.dot(b2[:, i])
        integrand22[i] = b.dot(b2[:, i])

    j = zeros((2, 2))
    j[0, 0] = trapz(integrand11, linspace(0, 1, T))
    j[0, 1] = trapz(integrand12, linspace(0, 1, T))
    j[1, 1] = trapz(integrand22, linspace(0, 1, T))
    j[1, 0] = j[0, 1]

    return (j)
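The loops above are trapezoidal inner products, so a vectorized formulation gives the same Jacobian; a cross-check with an illustrative random basis:

import numpy as np

b1, b2 = np.random.randn(2, 100), np.random.randn(2, 100)
t = np.linspace(0, 1, 100)
j_vec = np.array([[np.trapz(np.sum(b1 * b1, axis=0), t),
                   np.trapz(np.sum(b1 * b2, axis=0), t)],
                  [0.0,
                   np.trapz(np.sum(b2 * b2, axis=0), t)]])
j_vec[1, 0] = j_vec[0, 1]
assert np.allclose(calc_j([b1, b2]), j_vec)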
Example #6
    def _ZZlk(self):
        hist = np.zeros(self.result_bins)
        histLB = np.zeros(self.result_bins)
        self.ZZ_phist = [ np.zeros(self.prob_pts) for i in range(4) ]

        for i in range(self.blocks):
            # integer division keeps the mgrid slice bounds as ints
            pp00,pp01,pp10 = \
                    np.mgrid[i*self.prob_pts//self.blocks:(i+1)*self.prob_pts//self.blocks,\
                    0:self.prob_pts,0:self.prob_pts]/float(self.prob_pts)
            plk = self.diag_likelihood(self.N_ZZ, pp00,pp01,pp10)
            hist += np.histogram(pp01+pp10, bins=self.result_bins,
                    range=(0,1), density=False, weights=plk)[0]
            histLB += np.histogram(pp01+pp10-2*np.sqrt(pp00*(1.-pp00-pp01-pp10)),
                    bins=self.result_bins, range=(0,1), density=False, weights=plk)[0]

            # separate loop variable: reusing `i` here would clobber the block index
            for j,pp in enumerate([pp00,pp01,pp10,1.-pp00-pp01-pp10]):
                self.ZZ_phist[j] += np.histogram(pp, bins=self.prob_pts,
                        range=(0,1), density=False, weights=plk)[0]

        I = integrate.trapz(hist, dx=self.ZZ_dx)
        hist /= I
        ILB = integrate.trapz(histLB, dx=self.ZZ_dx)
        histLB /= ILB
        
        for h in self.ZZ_phist:
            Ip = integrate.trapz(h, dx=1./self.prob_pts)
            h /= Ip        
        
        return hist, histLB
Example #7
    def get_angular_integrated(self, psd, geometry, property_name):
        if self._angular_table is None:
            raise AttributeError(
                "Initialize or load the table of angular-integrated " + 
                "quantities first."
            )

        psd_w = psd(self._psd_D)

        def sca_xsect(geom):
            return trapz(
                self._angular_table["sca_xsect"][geom] * psd_w, 
                self._psd_D
            )
    
        if property_name == "sca_xsect":
            sca_prop = sca_xsect(geometry)
        elif property_name == "ext_xsect":
            sca_prop = trapz(
                self._angular_table["ext_xsect"][geometry] * psd_w, 
                self._psd_D
            )
        elif property_name == "asym":
            sca_xsect_int = sca_xsect(geometry)
            if sca_xsect_int > 0:
                sca_prop = trapz(
                    self._angular_table["asym"][geometry] * \
                    self._angular_table["sca_xsect"][geometry] * psd_w,  
                    self._psd_D
                )
                sca_prop /= sca_xsect_int
            else:
                sca_prop = 0.0
        else:
            raise ValueError("Unknown property_name: " + property_name)

        return sca_prop
Example #8
    def read_intensity(self, freq=[0.0], t_start=0.0, t_end=100.0):
        '''
        Calculates the intensity of a certain frequency in the FID. It
        calculates the Fourier coefficients for that specific frequency.

        Parameters
        ----------
        freq (list, float): List of frequencies for which the intensity should
        be calculated (frequencies in MHz)
        t_start, t_end (float): start and end time of the FID slice that is used

        Returns
        -------
        intensity (1D-array, float): Array of the intensities
        '''
        if len(self.fid) > 0:
            fid = slice_fid(self.fid, t_start, t_end)
            intensity = np.array([])
            for x in freq:
                cos2 = np.cos(fid[:,0]*x*1.E6*2.*np.pi)
                sin2 = np.sin(fid[:,0]*x*1.E6*2.*np.pi)
        
                a = inte.trapz(cos2*fid[:,1], fid[:,0]) / abs(fid[-1,0]) * 2.
                b = inte.trapz(sin2*fid[:,1], fid[:,0]) / abs(fid[-1,0]) * 2.
       
                intensity = np.hstack((intensity, np.sqrt(a**2 + b**2)))
            
            return intensity
                
        else:
            return np.array([])
Example #9
    def computeColor(self, filtX, filtY, z=0):
        """Compute color (flux in filter X - filter Y) of SED at redshift z, return color in magnitudes

           @param filtX   lower wavelength filter
           @param filtY   higher wavelength filter
           @param z       redshift of SED
        """
        if filtX not in self.filterDict:
            emsg = "Filter " + filtX + " is not in the filter dictionary"
            raise LookupError(emsg)
        if filtY not in self.filterDict:
            emsg = "Filter " + filtY + " is not in the filter dictionary"
            raise LookupError(emsg)
        if filtX == filtY:
            raise ValueError("ERROR! cannot have color as difference between same filter")

        # define integral limits by filter extents
        aX, bX = self.filterDict[filtX].returnFilterRange()
        aY, bY = self.filterDict[filtY].returnFilterRange()

        # int S(lam_obs)*X(lam)*lam dlam
        lam = self.filterDict[filtX].wavelengths
        res = self.sed.getFlux(lam, z)*self.filterDict[filtX].getTrans(lam)*lam
        integrand = interp.InterpolatedUnivariateSpline(lam, res, k=1)
        if self.FAST_INTEG:
            x = np.linspace(aX, bX, self.INTEG_PREC)
            y = integrand(x)
            int1 = integ.trapz(y, x)
        else:
            int1 = integ.quad(integrand, aX, bX)[0]

        # int S(lam_obs)*Y(lam)*lam dlam
        lam = self.filterDict[filtY].wavelengths
        res = self.sed.getFlux(lam, z)*self.filterDict[filtY].getTrans(lam)*lam
        integrand = interp.InterpolatedUnivariateSpline(lam, res, k=1)
        if self.FAST_INTEG:
            x = np.linspace(aY, bY, self.INTEG_PREC)
            y = integrand(x)
            int2 = integ.trapz(y, x)
        else:
            int2 = integ.quad(integrand, aY, bY)[0]

        # Not sure about this zero-point term? But colors are totally off without it
        # .integral2 = int filter(lam)/lam dlam
        int3 = self.filterDict[filtX].integral2
        int4 = self.filterDict[filtY].integral2

        zp = -2.5*math.log10(int4/int3)

        # zero observed flux in either filter so color should be infinite
        if (int1 == 0. or int2 == 0.):
            return float("inf")

        return -2.5*math.log10(int1/int2) + zp
Example #10
    def readDustInfo(self):

        """
        Read all column densities, min/max temperatures and min/max radii for 
        the species involved in the MCMax model.
        
        Note that the self.coldens dictionary does not give real column 
        densities! This dict merely gives column densities in a prescribed 
        shell with given min and max radius, in order to compare with the H2 
        col density. 
        
        """

        dens = self.star.getDustDensity()
        temp = self.star.getDustTemperature()
        compf = os.path.join(
            cc.path.mcmax, self.star.path_mcmax, "models", self.star["LAST_MCMAX_MODEL"], "composition.dat"
        )
        comp = DataIO.readCols(compf)
        self.rad = comp.pop(0) * self.au
        self.r_outer = self.rad[-1]

        for species in self.star.getDustList():
            # - Save the actual density profile for this dust species, as well
            # - as calculating the full column density of a dust species.
            self.dustfractions[species] = comp.pop(0)
            self.compd[species] = self.dustfractions[species] * dens
            self.fullcoldens[species] = trapz(x=self.rad, y=self.compd[species])
            # - Determine the column density from 90% of the dust species formed
            # - onward, based on the mass fractions!
            # - Not before, because the comparison with H2 must be made,
            # - and this will skew the result if not solely looking at where the
            # - dust has (almost) all been formed.
            # - We also save min and max radii, for use with the H2 calculation
            a_species = self.star["A_%s" % species]
            maxdens = max(self.compd[species])
            mindens = maxdens * 10 ** (-10)
            radsel = self.rad[(self.dustfractions[species] > 0.9 * a_species) * (self.compd[species] > mindens)]
            denssel = self.compd[species][
                (self.dustfractions[species] > 0.9 * a_species) * (self.compd[species] > mindens)
            ]
            self.coldens[species] = trapz(x=radsel, y=denssel)
            if radsel.size:
                self.r_min_cd[species] = radsel[0]
                self.r_max_cd[species] = radsel[-1]
            else:
                print "Threshold dust mass fraction not reached for %s." % species
                self.r_min_cd[species] = 0
                self.r_max_cd[species] = 0
            # - Determine the actual destruction radius and temperature.
            # - Taken where the density reaches 1% of the maximum density
            # - (not mass fraction).
            self.r_des[species] = self.rad[self.compd[species] > (maxdens * 0.01)][0]
            self.t_des[species] = temp[self.compd[species] > (maxdens * 0.01)][0]

            # - e-10 as limit for minimum is ok, because if shell is 100000 R*
            # - the mass conservation dictates ~ (10^5)^2 = 10^10 (r^2 law)
            # - decrease in density. Shells this big don't occur anyway.
            self.r_max[species] = self.rad[self.compd[species] > mindens][-1]
            self.t_min[species] = temp[self.compd[species] > mindens][-1]
Example #11
import scipy.integrate as si


def compute_integral4D(grid, x0, x1, x2, x3):
    """
    Computes norm of a 4D grid

    :param grid: 4D grid to integrate
    :param x0: coordinates of all points along axis=0
    :param x1: coordinates of all points along axis=1
    :param x2: coordinates of all points along axis=2
    :param x3: coordinates of all points along axis=3

    :type grid: numpy array
    :type x0: numpy array
    :type x1: numpy array
    :type x2: numpy array
    :type x3: numpy array

    :rtype: float
    :returns: grid norm

    """

    grid_norm = si.trapz(si.trapz(si.trapz(si.trapz(grid, x=x0, axis=0),
                                           x=x1, axis=0), x=x2, axis=0),
                         x=x3, axis=0)
    return grid_norm
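A sanity check for the nested integration: a separable integrand whose 1D factors each integrate to one should have unit norm (the factor f(x) = 2x on [0, 1] is illustrative, and exact under the trapezoid rule):

import numpy as np

x = np.linspace(0, 1, 21)
f1d = 2 * x
grid = (f1d[:, None, None, None] * f1d[None, :, None, None] *
        f1d[None, None, :, None] * f1d[None, None, None, :])
print(compute_integral4D(grid, x, x, x, x))   # 1.0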
Example #12
def regression_warp(beta, time, q, y, alpha):
    """
    calculates optimal warping for function linear regression

    :param beta: numpy ndarray of shape (M,N) of M functions with N samples
    :param time: vector of size N describing the sample points
    :param q: numpy ndarray of shape (M,N) of M functions with N samples
    :param y: scalar response for the function
    :param alpha: numpy scalar (intercept)

    :rtype: numpy array
    :return gamma_new: warping function

    """
    gam_M = uf.optimum_reparam(beta, time, q)
    qM = uf.warp_q_gamma(time, q, gam_M)
    y_M = trapz(qM * beta, time)

    gam_m = uf.optimum_reparam(-1 * beta, time, q)
    qm = uf.warp_q_gamma(time, q, gam_m)
    y_m = trapz(qm * beta, time)

    if y > alpha + y_M:
        gamma_new = gam_M
    elif y < alpha + y_m:
        gamma_new = gam_m
    else:
        gamma_new = uf.zero_crossing(y - alpha, q, beta, time, y_M, y_m,
                                     gam_M, gam_m)

    return gamma_new
Example #13
from numpy import zeros, linspace
from scipy.integrate import trapz


def calculate_energy(alphadot, T=100, k=5):
    """
    calculates energy along path

    :param alphadot: numpy ndarray of shape (n,T,k) of path derivative samples
    :param T: Number of samples of curve (Default = 100)
    :param k: number of samples along path (Default = 5)

    :rtype: numpy scalar
    :return E: energy

    """
    integrand1 = zeros((k, T))
    integrand2 = zeros(k)

    for i in range(0, k):
        for j in range(1, T):
            tmp = alphadot[:, j, i].T
            integrand1[i, j] = tmp.dot(alphadot[:, j, i])

        integrand2[i] = trapz(integrand1[i, :], linspace(0, 1, T))

    E = 0.5 * trapz(integrand2, linspace(0, 1, k))

    return E
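A minimal usage sketch (a random path derivative shaped (n, T, k), matching how the loops index alphadot):

import numpy as np

T, k = 100, 5
alphadot = np.random.randn(2, T, k)
print(calculate_energy(alphadot, T=T, k=k))   # non-negative scalar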
Example #14
    def draw_reabsorption(self, columns, reabsorption=1):
        try:
            f = plt.figure(self.graphnumber_reab)
            plt.xlabel('Wavelengths, nm')
            plt.ylabel('Intensity, a.u.')

            Xe = self.data['Xe']
            Ec = self.data['Ec']
            Ec = Ec / integrate.trapz(Ec, dx=self.step_E)
            Ed = self.data['Ed']
            Ed = Ed / integrate.trapz(Ed, dx=self.step_E) * reabsorption

            if len(Xe) > len(Ec):
                Ec = np.append(Ec, np.zeros(len(Xe)-len(Ec)))
            if len(Xe) > len(Ed):
                Ed = np.append(Ed, np.zeros(len(Xe)-len(Ed)))

            plt.plot(Xe, Ec)
            plt.plot(Xe, Ed)

            f.canvas.set_window_title('Reabsorption (' + str(reabsorption) + '; Xe: Ec, Ed)')
            plt.title('Xe: Ec, Ed')

            f.show()
            self.graphnumber_reab += 1
            self.logger.info('Reabsorption calculation finished')
        except Exception:
            self.logger.info('No reabsorption graph could be drawn')
Example #15
def kde_KL_divergence_2d(x, y, h_x, h_y, nb_bins=100, fft=True):
    """Uses Kernel Density Estimator with Gaussian kernel on two
    dimensional samples x and y and returns estimated Kullback-
    Leibler divergence.

    @param x, y: samples, each given as an (n, 2) shaped numpy array,
    @param h_x, h_y: widths of the Gaussian kernel along each axis,
    @param nb_bins: number of grid points to use,
    @param fft: whether to use FFT to compute convolution.
    """
    min_ = np.min(np.vstack([np.min(x, axis=0), np.min(y, axis=0)]), axis=0)
    max_ = np.max(np.vstack([np.max(x, axis=0), np.max(y, axis=0)]), axis=0)
    bounds_ = np.vstack((min_, max_))
    (x_grid, y_grid, kde_x) = gaussian_kde_2d(x, h_x, h_y,
            nb_bins=nb_bins,
            fft=fft,
            bounds=bounds_
            )
    (x_grid2, y_grid2, kde_y) = gaussian_kde_2d(y, h_x, h_y,
            nb_bins=nb_bins,
            fft=fft,
            bounds=bounds_
            )
    delta_x = x_grid[1] - x_grid[0]
    delta_y = y_grid[1] - y_grid[0]
    plogp = - kde_x * np.log((kde_x + EPSILON) / (kde_y + EPSILON))
    # Integrate
    div = trapz(trapz(plogp, dx=delta_x, axis=1), dx=delta_y, axis=0)
    return div
Example #16
    def integration(self,is_quadrant=0,use_flux_per_mrad2_or_mm2=1):
        if (self.X is None or self.Y is None):
            raise Exception(" X and Y must be array for integration")
        if self.X.shape != self.Y.shape:
            raise Exception(" X and Y must have the same shape")

        if len(self.X.shape)==2 :
            X = np.linspace(self.X.min(), self.X.max(), self.X.shape[0])
            Y = np.linspace(self.Y.min(), self.Y.max(), self.Y.shape[1])

            res1=integrate.trapz(self.intensity, X)
            res = integrate.trapz(res1, Y)

            # res = integrate.simps(integrate.simps(self.intensity, X), Y)

            # res = self.intensity.sum() * (self.X[1,0] - self.X[0,0]) * (self.Y[0,1] - self.X[0,0]) # regular grid only
        else : # X and Y are 1d array
            if len(self.X) == 1:
                res = self.intensity[0]
            else: # arbitrary choice: integrate along the arc length of the (X, Y) path
                XY = np.zeros_like(self.X)
                for i in range(1, len(self.X)):
                    # cumulative arc length between successive points
                    XY[i] = XY[i-1] + np.sqrt((self.X[i] - self.X[i-1]) ** 2 + (self.Y[i] - self.Y[i-1]) ** 2)
                res = np.trapz(self.intensity, XY)

        # Note that the value of flux is in phot/s/0.1%bw/mrad2 (or .../mm2) and
        # our grid is in rad (or m), therefore we must account for this:
        if use_flux_per_mrad2_or_mm2:
            res *= 1e6

        # in case the calculation is for a quadrant, the integral is four times the calculated value
        if is_quadrant:
            res *= 4
        return res
Example #17
from numpy import zeros, gradient, linspace, trapz
from numpy.linalg import norm


def calculate_variance(beta):
    """
    This function calculates variance of curve beta

    :param beta: numpy ndarray of shape (2,M) of M samples

    :rtype: numpy ndarray
    :return variance: variance

    """
    n, T = beta.shape
    betadot = gradient(beta, 1. / (T - 1))
    betadot = betadot[1]
    normbetadot = zeros(T)
    centroid = calculatecentroid(beta)  # helper defined in the same module
    integrand = zeros((n, n, T))
    t = linspace(0, 1, T)
    for i in range(0, T):
        normbetadot[i] = norm(betadot[:, i])
        a1 = (beta[:, i] - centroid)
        a1 = a1.reshape((n, 1))
        integrand[:, :, i] = a1.dot(a1.T) * normbetadot[i]

    l = trapz(normbetadot, t)
    variance = trapz(integrand, t, axis=2)
    variance /= l

    return (variance)
Example #18
import scipy.integrate as si


def compute_expected_coordinates1D(grid, x0):
    """
    Computes expected value and variance of a 1D grid along its (only) axis.

    :param grid: 1D grid to integrate
    :param x0: coordinates of all points along the integration axis

    :type grid: numpy array
    :type x0: numpy array

    :rtype: float
    :returns:
        * exp_x0 : expected value
        * var_x0 : variance

    """

    # normalize 1D grid
    grid_integral = compute_integral1D(grid, x0)
    prob_x0 = grid / grid_integral

    # get 1D marginals, expected values and variances
    exp_x0 = si.trapz(x0*prob_x0, x=x0)
    var_x0 = si.trapz((x0-exp_x0)*(x0-exp_x0)*prob_x0, x=x0)

    return exp_x0, var_x0
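A quick check with a Gaussian grid, which should return its mean and variance. compute_integral1D is the same module's helper; here it is assumed to simply wrap si.trapz:

import numpy as np
import scipy.integrate as si

def compute_integral1D(grid, x0):     # assumed stand-in for the module helper
    return si.trapz(grid, x=x0)

x0 = np.linspace(-10, 10, 2001)
grid = np.exp(-0.5 * ((x0 - 1.5) / 0.7)**2)       # unnormalized N(1.5, 0.7**2)
print(compute_expected_coordinates1D(grid, x0))   # ~(1.5, 0.49)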
Example #19
from numpy import genfromtxt, pi, linspace, trapz, cumsum, clip
from scipy.interpolate import interp1d


def calculate_MOC_initial_condition(number_of_classes, U=0.177, D=0.03):
    data = genfromtxt("paper_data/re6400/upstream.csv")

    # Data provided are diameters in mm. We need to convert to volume
    mm2m = 0.001
    v_data = pi / 6 * (data[:, 0] * mm2m)**3
    p_data = data[:, 1]

    p_interpolated = interp1d(
        v_data, p_data, kind='nearest', fill_value=0, bounds_error=False)

    # From Galinat we know that phase volumetric fraction is 1.7%-3%
    alpha = (0.03 + 0.017) / 2
    A = pi / 4 * D**2
    Vc = 1.0 * A  # Assume unit height column

    # Estimate the total number from total concentration
    v = linspace(0, 1.2 * max(v_data), number_of_classes + 1)
    dv = v[1]
    p_empirical = p_interpolated(v) / trapz(p_interpolated(v), x=v)
    cdf_empirical = cumsum(p_empirical) * dv  # Cumulative distribution

    meanVd = trapz(v * p_empirical, x=v)  # Dispersed phase mean volume
    N0 = alpha * Vc / meanVd  # Number of drops in 1m column

    # print("Total number of drops in unit column {0:0.0f}".format(N0))
    # print("Particle influx will be: {0:0.2f} drops/s".format(U * N0))
    # print(U * N0 * meanVd / (U * A))

    return dv, clip(
        N0 * (cdf_empirical[1:] - cdf_empirical[:-1]),
        a_min=1e-4, a_max=None
    )
Example #20
    def integrate(self, t0, tf, weights=None):
        """Integrates the data contained in the integrator from t0 to
        tf, inclusive. Applies weights to the data according to function
        weights(times - t0)
        """
        if t0 < self.times[0] or tf > self.times[-1]:
            return None

        interp = spi.interp1d(x=np.squeeze(self.times),
                              y=np.squeeze(self.vals), axis=0)

        # TODO Clean up using bisect
        istart = next(i for i, x in enumerate(self.times) if x > t0)
        ifinal = next((i for i, x in enumerate(self.times) if x > tf), -1)

        times = [t0] + self.times[istart:ifinal] + [tf]
        ref = [interp(t0)] + self.vals[istart:ifinal] + [interp(tf)]
        times = np.array(times)
        ref = np.asarray(ref)

        if weights is not None:
            w = weights(times - t0)
            ref = ref * w
            z = spt.trapz(y=w, x=times)
        else:
            z = 1.0

        return spt.trapz(y=ref, x=times) / z
Example #21
    def __init__(self, waveLengths, transmission):
        """Initialise the filter transmission function

        @param waveLengths  
        @param transmission

        """        
 
        # keep track of original range and resolution of transmission function 
        self.lamMin = waveLengths[0]
        self.lamMax = waveLengths[-1]
        self.nLam = len(waveLengths)
        # this could come in handy if we need to defer the construction of the interpolator
        self.wavelengths = waveLengths
        self.transmission = transmission
        # cache these values for later reuse. Note that this will fail if the filter
        # support is disconnected... need to build a safer interface?
        self.integral1 = integ.trapz(transmission*waveLengths,waveLengths)
        self.integral2 = integ.trapz(transmission/waveLengths,waveLengths)
        self.effLambda = np.sqrt(self.integral1/self.integral2)
        
        # check wavelength and transmission domains are valid
        if (self.lamMin<0.):
            raise ValueError("ERROR! wavelengths cannot be less than zero!")
            
        if np.any(transmission<0.):
            raise ValueError("ERROR! cannot have negative transmission")
           
        if np.any(transmission>1.000001): #why not just 1.?
            raise ValueError("ERROR! cannot have transmission > 1") 

        # filter is now represented as an interpolation object (linear)
        self.filt = interp.InterpolatedUnivariateSpline(waveLengths, transmission, k=1)
Example #22
def diff_integrate(x_values, y_values):
    positive_res = 0
    negative_res = 0

    filter_function = lambda xy: INTERVAL_START <= xy[0] <= INTERVAL_END
    if INTERVAL_START > INTERVAL_END:
        print('NOTICE: The interval start %s is bigger than the interval end %s, using all the data.' %
              (INTERVAL_START, INTERVAL_END))
        interval_values = list(zip(x_values, y_values))
    else:
        print('NOTICE: Calculating integrals for every value in the closed interval [%s, %s].' %
              (INTERVAL_START, INTERVAL_END))
        interval_values = list(filter(filter_function, zip(x_values, y_values)))

    positive_x_values = []
    positive_y_values = []
    negative_x_values = []
    negative_y_values = []
    # split positive and negative y-values
    for x, y in interval_values:
        if y < 0:
            negative_x_values.append(x)
            negative_y_values.append(y)
        else:
            positive_x_values.append(x)
            positive_y_values.append(y)

    positive_res = integrate.trapz(positive_y_values, positive_x_values)
    negative_res = integrate.trapz(negative_y_values, negative_x_values)

    return positive_res, negative_res
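A usage sketch; INTERVAL_START and INTERVAL_END are module-level constants in the original script, so they are set here with illustrative values:

import numpy as np

INTERVAL_START, INTERVAL_END = 0.0, 2 * np.pi
x = np.linspace(0, 2 * np.pi, 1000)
y = np.sin(x)
pos, neg = diff_integrate(x, y)
print(pos, neg)   # ~2.0, ~-2.0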
Example #23
    def convolve(self, filt, returnMag=True):
        if not filt.type == 'filter':
            raise TypeError('The "filter" you specified is invalid.')
        # Find the part of the spectrum where the FTC is defined
        startN, stopN = np.searchsorted(self.wavelengths.value,
                                        [filt.wavelengths[0].value, filt.wavelengths[-1].value])
        if startN != 0:
            startN -= 1
        if stopN != (len(self.wavelengths) - 1):
            stopN += 1
        ls = self.wavelengths[startN:stopN].value
        # Resample filter
        newFtc = np.interp(ls, filt.wavelengths.value, filt.ftc)
        num = interp1d(ls, newFtc * self.spec[startN:stopN])
        denom = interp1d(ls, newFtc)
        # Integrate
        numerator = trapz(num(filt.wavelengths[0:-1].value), x=filt.wavelengths[0:-1].value)
        denominator = trapz(denom(filt.wavelengths[0:-1].value), x=filt.wavelengths[0:-1].value)

        if returnMag:
            try:
                ABmag = -2.5 * np.log10(numerator / denominator) - 48.60
            except Exception:
                # stash the offending filter/spectrum for post-mortem debugging
                global badFilt
                badFilt = filt

                global badspec
                badspec = self

                ABmag = -2.5 * np.log10(numerator / denominator) - 48.60
            if np.isnan(ABmag):
                print(self.params)
            return (ABmag * u.Unit('mag'))
        else:
            return (u.Unit('erg') * u.Unit('s')**-1 * u.Unit('cm')**-2 * u.Unit('Hz')**-1
                    * numerator / denominator)  # erg s^-1 cm^-2 Hz^-1
Example #24
    def get_SZ(self, psd, geometry):
        """
        Compute the scattering matrices for the given PSD and geometries.

        Returns:
            The new amplitude (S) and phase (Z) matrices.
        """
        if (self._S_table is None) or (self._Z_table is None):
            raise AttributeError(
                "Initialize or load the scattering table first.")

        if (not isinstance(psd, PSD)) or self._previous_psd != psd:
            self._S_dict = {}
            self._Z_dict = {}
            psd_w = psd(self._psd_D)

            for geom in self.geometries:
                self._S_dict[geom] = \
                    trapz(self._S_table[geom] * psd_w, self._psd_D)
                self._Z_dict[geom] = \
                    trapz(self._Z_table[geom] * psd_w, self._psd_D)

            self._previous_psd = psd

        return (self._S_dict[geometry], self._Z_dict[geometry])
Example #25
    def cPofZ(self, arr, zx):
        ## hardcoding zmax for the time being, should fix it
        zmax = 1.5
        Ng = len(arr)
        dz = 0.001
        if not hasattr(self, "cnorm"):
            Nx = int(zmax / dz)
            xar = np.linspace(0, zmax, Nx)
            rect = np.zeros((Ng, Nx))
            for i, z in enumerate(xar):
                rect[:, i] = self.PofZ(arr, float(z), dz) / dz
            self.cnorm = trapz(rect, xar, axis=1)
        if isinstance(zx, float):
            # scalar upper limit
            Nx = int(zx / dz)
            xar = np.linspace(0, zx, Nx)
            rect = np.zeros((Ng, Nx))
            for i, z in enumerate(xar):
                rect[:, i] = self.PofZ(arr, float(z), dz) / dz
            unnormC = trapz(rect, xar, axis=1)
        else:
            # array of upper limits
            zxm = zx.max()
            Nx = int(zxm / dz)
            xar = np.linspace(0, zxm, Nx)
            rect = np.zeros((Ng, Nx))
            for i, z in enumerate(xar):
                rect[:, i] = self.PofZ(arr, float(z), dz) / dz
                rect[np.where(zx > z), i] = 0.0
            unnormC = trapz(rect, xar, axis=1)
        return unnormC / self.cnorm
Example #26
def ps_to_xi(k,ps,r,precision='mid'):
    xi = 0 * r
    if precision=='low':
        from scipy.integrate import trapz
        from numpy import sin
        
        for i in range(len(r)):
            xi[i] = trapz((ps/k) * sin(k*r[i]) / (k*r[i]),k)
    
    elif precision=='high':
        from numpy import sin
        from scipy.integrate import quad,quadrature
        from scipy.interpolate import interp1d
        for i in range(len(r)):
            psi = interp1d(k,ps)
            core = lambda k: (psi(k)/k) * sin(k*r[i]) / (k*r[i])
            A = quadrature(core,min(k),2.0/r[i],tol=1e-3)
            xi[i] = A[0]
        
    else:
        from scipy.integrate import trapz
        from numpy import sin
        import numpy as np
        for i in range(len(r)):
            # first index where k exceeds 2/r[i]
            cutoff = np.min(np.where(k > 2.0/r[i])[0])
            kl = k[1:cutoff]
            psl = ps[1:cutoff]
            i1 = trapz((psl/kl) * sin(kl*r[i]) / (kl*r[i]),kl)
            psh = ps[cutoff+1:len(k)]
            kh = k[cutoff+1:len(k)]
            i2 = trapz((psh/kh) * sin(kh*r[i]) / (kh*r[i]),kh)
            xi[i] = i1+i2
                         
    return xi
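A usage sketch for the low-precision branch (the power-law spectrum and scales are illustrative):

import numpy as np

k = np.logspace(-3, 1, 500)
ps = k**-1.5                      # illustrative power spectrum
r = np.linspace(1.0, 50.0, 10)
xi = ps_to_xi(k, ps, r, precision='low')
print(xi)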
Example #27
def dbltrapz(f, x, y):
    """ Perform a double trapezon integration of a vectorized function

        f(x,y) where x is an array of x values and y is an array of y 
        values to perform the integral over.
    
        Implementation taken from: 
            http://mail.scipy.org/pipermail/scipy-user/2011-February/028592.html

        I don't really understand it, but it seems to work...
        
        For example, if we wanted to integrate some really random function 

            f(x,y) = e^(-x^2*y)

        from 1<x<5 and 1<y<10.

        Using Mathematica, we find that the integral is ~0.09:
        
            N[Integrate[Integrate[Exp[-(x^2*y)], {x, 1, 5}], {y, 1, 10}], 20]

        Using our new function.

        >>> import numpy as np
        >>> f=lambda x,y: np.exp(-(x**2*y))
        >>> int=dbltrapz(f, np.linspace(1,5,1000), np.linspace(1,10,1000))
        >>> print np.allclose(int,0.089071862226039234609, rtol=1e-5, atol=1e-5)
        True
    """
    yy = y[:,np.newaxis]
    xx = x[np.newaxis,:]
    integrand=f(xx,yy)

    return integrate.trapz(integrate.trapz(integrand, yy, axis=0), x, axis=0)
Example #28
from numpy import trapz


def trapez_moms(x_arg, y_arg, mom):
    """ Returns numerically integrated moments """
    if mom == 0:
        ret = trapz(y_arg, x_arg)
    else:
        ret = trapz(y_arg * x_arg**mom, x_arg)
    return ret
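A small check with the standard normal density, whose moments are known:

import numpy as np

x = np.linspace(-5, 5, 1001)
y = np.exp(-0.5 * x**2) / np.sqrt(2 * np.pi)
print(trapez_moms(x, y, 0))   # ~1.0 (normalization)
print(trapez_moms(x, y, 1))   # ~0.0 (mean)
print(trapez_moms(x, y, 2))   # ~1.0 (second moment)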
Example #29
def alexpand(tau, noise_psd, freq, one2tennoise):
    # NB: `kern` is unused below and refers to a module-level `carrier` frequency
    kern = lambda t:[ s*np.sin(np.pi*f*t)**4/(np.pi*carrier*t)**2 for s,f in zip(noise_psd,freq)]
    kernFreq = lambda t:[ s*np.sin(np.pi*f*t)**4./(np.pi*f*t)**2. for s,f in zip(noise_psd,freq)]
    fone2ten = np.logspace(-9,1,num=50)
    alfromone2ten = 2*trapz([one2tennoise*100+.01*one2tennoise/f+.1*one2tennoise/f**2.*np.sin(np.pi*f*tau)**4./(np.pi*f*tau)**2 for f in fone2ten],fone2ten)
    out = 2. *trapz(kernFreq(tau),freq)+alfromone2ten
    #out = alfromone2ten
    return out
Example #30
def icwt(wavelet):
    """Compute the inverse continuous wavelet transform.

    Parameters
    ----------
    wavelet : Instance of the MotherWavelet class
        instance of the MotherWavelet class for a particular wavelet family

    Examples
    --------
    Use the Morlet mother wavelet to perform wavelet transform on 'data', then
    use icwt to compute the inverse wavelet transform to come up with an estimate
    of data ('data2').  Note that data2 is not exactly equal data.

    # import matplotlib.pyplot as plt
    # from scipy.signal import SDG, Morlet, cwt, icwt, fft, ifft
    # import numpy as np
    #
    # x = np.arange(0,2*np.pi,np.pi/64)
    # data = np.sin(8*x)
    # scales=np.arange(0.5,17)
    #
    # mother_wavelet = Morlet(len_signal = len(data), scales = scales)
    # wave_coefs=cwt(data, mother_wavelet)
    # data2 = icwt(wave_coefs)
    #
    # plt.plot(data)
    # plt.plot(data2)
    # plt.show()

    References
    ----------
    Addison, P. S., 2002: The Illustrated Wavelet Transform Handbook.  Taylor
      and Francis Group, New York/London. 353 pp.

    """
    import numpy as np
    from numpy.fft import fft, ifft, fftshift
    from scipy.integrate import trapz

    # if original wavelet was created using padding, make sure to include
    #   information that is missing after truncation (see self.coefs under __init__
    #   in class Wavelet).
    if wavelet.motherwavelet.len_signal !=  wavelet.motherwavelet.len_wavelet:
        full_wc = np.c_[wavelet.coefs,wavelet._pad_coefs]
    else:
        full_wc = wavelet.coefs

    # get wavelet coefficients and take fft
    wcf = fft(full_wc,axis=1)

    # get mother wavelet coefficients and take fft
    mwf = fft(wavelet.motherwavelet.coefs,axis=1)

    # perform inverse continuous wavelet transform and make sure the result is the same type
    #  (real or complex) as the original data used in the transform
    x = ((1. / wavelet.motherwavelet.cg) *
         trapz(fftshift(ifft(wcf * mwf, axis=1), axes=[1]) /
               (wavelet.motherwavelet.scales[:, np.newaxis]**2),
               dx=1. / wavelet.motherwavelet.sampf, axis=0))

    return x
Example #31
y = np.zeros(shape=(len(ns), len(ls)))
y_2 = np.zeros(shape=(len(ns), len(ls)))
A = np.zeros(shape=(len(ns), len(ls)))
A_2 = np.zeros(shape=(len(ns), len(ls)))
EL = np.zeros(len(ls))
G_l_t_dt = np.zeros(shape=(len(ls), len(thetas)))
A2_theta = np.zeros(shape=(len(ls), len(thetas)))

aiz = []
aiz = Aiz(eiz_x, eiz_z, eiz_w)  # of length = len(ns)

for k, length in enumerate(ls):
    sum_A = np.empty(len(ls))
    sum_A_2 = np.empty(len(ls))
    for j, n in enumerate(ns):
        # Integral:
        y[j, k] = trapz(ys(aiz[j], ts, eiz_w[j], length, n), ts, dt)
        y_2[j, k] = trapz(y_2s(aiz[j], ts, eiz_w[j], length, n), ts, dt)
        #print 'dt Integral   y = ',i,k,j, y
        #print 'dt Integral y_2 = ',i,k,j, y_2
        #print '----'
        #print 'N terms for A0 = '  , As(eiz_z[j],eiz_w[j],length,n,y)
        #print 'N terms for A2 = ', A_2s(eiz_z[j],eiz_w[j],length,n,y_2)
        #print '----'
        A[j, k] = As(eiz_z[j], eiz_w[j], length, n, y[j, k])
        A_2[j, k] = A_2s(eiz_z[j], eiz_w[j], length, n,
                         y_2[j, k])  # * np.cos(2.0*theta)
    # weight the n=0 Matsubara term by 1/2, once per length
    A[0, k] = (1. / 2) * A[0, k]
    A_2[0, k] = (1. / 2) * A_2[0, k]
    sum_A = np.sum(A, axis=0)
    #print 'sum of A0 = ', k,j,sum_A
    sum_A_2 = np.sum(A_2, axis=0)
Example #32
    def mass_MOI(self, t):
        # =======================================
        # this method returns mass properties of rocket
        #
        # INPUT:  t = time
        # OUTPUT: mass = total mass of rocket
        #         MOI = total moment of inertia wrt CG
        #         d_dt_MOI = MOI time rate
        #         CG = center of gravity location from the nose tip
        # =======================================

        if t >= self.Params.t_MECO:
            # ---------------------------
            # mass for coasting phase (m, I = const.)
            # ---------------------------
            # mass
            mass = self.Params.m_dry
            # moment of inertia
            MOI = self.Params.MOI_dry
            # total CG location
            CG = self.Params.CG_dry

            return mass, MOI, np.zeros(3), CG

        else:
            # ---------------------------
            # mass for powered phase (m = m(t), I = I(t))
            # ---------------------------
            # propellant consumption rate = (impulse consumed so far) / (total impulse)
            time_so_far = np.linspace(0., t)
            Impulse_so_far = integrate.trapz(
                self.Params.thrust_function(time_so_far), time_so_far)
            r = (1 - Impulse_so_far / self.Params.Impulse_total
                 )  # impulse ratio

            # total mass
            mass = self.Params.m_dry + r * self.Params.m_prop
            # total CG location
            CG = (self.Params.CG_dry * self.Params.m_dry +
                  self.Params.CG_prop * r * self.Params.m_prop) / mass
            # total MOI using parallel axis theorem (mass * offset^2)
            tmp = np.array([0., 1., 1.])
            MOI = self.Params.MOI_dry + tmp * self.Params.m_dry * (
                CG - self.Params.CG_dry)**2. + r * self.Params.MOI_prop + \
                tmp * (r * self.Params.m_prop) * (CG - self.Params.CG_prop)**2.

            # ---------------------------------
            # finite differencing for d(MOI)/dt, dm/dt
            # ---------------------------------
            h = 1.E-3
            Impulse_so_far = integrate.trapz(
                self.Params.thrust_function(np.linspace(0., t + h)),
                np.linspace(0., t + h))
            r2 = (1 - Impulse_so_far / self.Params.Impulse_total
                  )  # impulse ratio
            # total mass
            # mass2 = self.m_dry + r2 * self.m_prop
            # total CG location
            CG2 = (self.Params.CG_dry * self.Params.m_dry +
                   self.Params.CG_prop * r2 * self.Params.m_prop) / (
                       self.Params.m_dry + r2 * self.Params.m_prop)
            # total MOI using parallel axis theorem
            MOI2 = self.Params.MOI_dry + tmp * self.Params.m_dry * (
                CG2 - self.Params.CG_dry)**2. + r2 * self.Params.MOI_prop + \
                tmp * (r2 * self.Params.m_prop) * (CG2 - self.Params.CG_prop)**2.

            Impulse_so_far = integrate.trapz(
                self.Params.thrust_function(np.linspace(0., t - h)),
                np.linspace(0., t - h))
            r3 = (1 - Impulse_so_far / self.Params.Impulse_total
                  )  # impulse ratio
            # mass3 = self.m_dry + r3 * self.m_prop
            CG3 = (self.Params.CG_dry * self.Params.m_dry +
                   self.Params.CG_prop * r3 * self.Params.m_prop) / (
                       self.Params.m_dry + r3 * self.Params.m_prop)
            MOI3 = self.Params.MOI_dry + tmp * self.Params.m_dry * (
                CG3 - self.Params.CG_dry)**2. + r3 * self.Params.MOI_prop + \
                tmp * (r3 * self.Params.m_prop) * (CG3 - self.Params.CG_prop)**2.

            # dm/dt and d(MOI)/dt
            # d_dt_m = (mass2 - mass3) / (2*h)
            d_dt_MOI = (MOI2 - MOI3) / (2 * h)

            return mass, MOI, d_dt_MOI, CG
Example #33
        np.arange(thermo_nc.variables[theta_key][0, 0, 0, :].shape[0]) *
        grid_spacing,
        np.arange(thermo_nc.variables[theta_key][0, 0, :, 0].shape[0]) *
        grid_spacing)

    # Compute and store the horizontal mean LCL
    lcl[key] = zi_nc.variables[lcl_key][t_idx, :, :].mean()
    zi[key] = zi_nc.variables[zi_new_key][t_idx, :, :].mean()
    # Get the mean wind speed and direction below LCL
    iz_LCL = np.where(
        np.abs(z_theta - lcl[key]) == np.min(np.abs(z_theta - lcl[key])))[0][0]
    iz_zi = np.where(
        np.abs(z_theta - zi[key]) == np.min(np.abs(z_theta - zi[key])))[0][0]
    u_mean = u_nc.variables[u_key][t_idx, :iz_zi, :, :].mean(axis=(0, 2, 3))
    v_mean = v_nc.variables[v_key][t_idx, :iz_zi, :, :].mean(axis=(0, 2, 3))
    U_zi = integrate.trapz(x=z_theta[:iz_zi], y=u_mean) / zi[key]
    V_zi = integrate.trapz(x=z_theta[:iz_zi], y=v_mean) / zi[key]
    speed, wind_dir = fromComponents(u=U_zi, v=V_zi)

    # Get the distance away from the island-edge of the surface warm plume.
    mass_flux_xs[key]['theta_anom'] = np.nanmean(np.array([
        thermo_nc.variables[theta_key][idx, 1, :, :] -
        thermo_nc.variables[theta_key][idx, 1, :, :].mean() for idx in t_idx
    ]),
                                                 axis=0)
    R_wp = 4 * R_i  #np.nanmax(np.where(mass_flux_xs[key]['theta_anom'] > 0.1, np.sqrt((X-x_c)**2 + (Y-y_c)**2) - R_i, np.nan))

    # Compute the rectangle and the new cartesian coordinate system
    # distance travelled during time averaging period
    mass_flux_xs[key]['mask'], mass_flux_xs[key]['y_prime'], mass_flux_xs[key][
        'x_prime'] = downwind_rectangle(
Example #34
total_i = []

findClosest = lambda a,l:min(l,key=lambda x:abs(x-a))

for scan in scan_nums:
    print('Loading scan {}'.format(scan))
    flist = glob.glob(os.path.join(datadir, fprefix+'_{:04d}*.dat'.format(scan)))

    flist = sorted(flist, key=lambda f: int(f.split('_')[-1].strip('.dat')))

    scan_intensity = 0

    for f in flist[20:]:
        # print f
        q, i, err, parameters = loadPrimusDatFile(f)

        closest_qmin = findClosest(0.02, q)
        closest_qmax = findClosest(0.1, q)

        nmin = np.where(q == closest_qmin)[0][0]
        nmax = np.where(q == closest_qmax)[0][0]

        scan_intensity = scan_intensity + integrate.trapz(i[nmin:nmax+1], q[nmin:nmax+1])

    total_i.append(scan_intensity)


plt.bar(range(1, len(total_i)+1), total_i)
plt.show()
Example #35
        senal[k * p:(k + 1) * p] = -sinus
# Plot the signal corresponding to the first 5 bits
plt.figure()
plt.plot(senal[0:5 * p])
plt.ylabel('Carrier signal amplitude')
plt.xlabel('Time (ms)')
plt.title('BPSK modulation for the first 5 bits of the file "bits10k.csv"')
plt.savefig('modulacionBSPK.png')
plt.show()
""" 2) Compute the average power of the generated modulated signal.
"""
# The instantaneous power is given by
Pinst = senal**2

# Average power from the instantaneous power
Ps = integrate.trapz(Pinst, t) / (N * T)  # W
print('The average power is: ', Ps, 'W')
# Result: P = 0.49 W
""" 3) Simulate a noisy AWGN (additive white Gaussian noise) channel with a
signal-to-noise ratio (SNR) from -2 to 3 dB.
 """
# The same procedure is repeated for each SNR
# Noise power for the given SNR and signal power
SNR0 = -2
SNR1 = -1
SNR2 = 0
SNR3 = 1
SNR4 = 2
SNR5 = 3

####### SNR = -2
Example #36
energy_array = np.linspace(
    energy_initial_float.value, energy_final_float.value,
    energy_steps_number_float.value) * energy_step_float.unit

# ---------------------------------------------------------------------------------------------------------------------
# Calculate the fermi diff. integral:
# ---------------------------------------------------------------------------------------------------------------------
fermiDiracLeft_array = fermi_dirac_fn(energy_array, temperature_float,
                                      chemical_potential_float)
fermiDiracRight_array = fermi_dirac_fn(energy_array + energy_bias_float,
                                       temperature_float,
                                       chemical_potential_float)
fermiDiracDiff_array = fermiDiracLeft_array - fermiDiracRight_array

# Use trapezoidal method to integrate:
fermi_diff_integral_value_float = integrate.trapz(fermiDiracDiff_array,
                                                  energy_array)

# ---------------------------------------------------------------------------------------------------------------------
# Plot the Fermi-Dirac curves:
# ---------------------------------------------------------------------------------------------------------------------
plt.figure(2)
plt.plot(energy_array,
         fermiDiracDiff_array,
         '-',
         color='orange',
         label='$n_{FD}^L(E) - n_{FD}^R(E)$')
plt.plot(energy_array,
         fermiDiracLeft_array,
         '--',
         color='r',
         label='$n_{FD}^L(E)$')
Example #37
def CmpRealAxisBubble(filemesh, filegkr, fbubbleReal, Qlist, k_index):

    def Cmp_ChiQ_real(iOm,Q,gkr,k_m_q,nkp,norb,dx_,idxl,Nd,zero_ind,k_index):
        level = (iOm-1)//Nd                     # which linear mesh should we use? (integer division)
        om_idx=idxl[level]                      # index for the linear mesh on this level
        izero= (len(om_idx)-1)//2               # zero_ind on this level
        dOm = om_idx.index(zero_ind+iOm)-izero  # om-Om in integer notation is i-dOm
        om_idx=array(om_idx)                    
        
        Qi = k_index(Q)
        codeBub="""
            #line 239 "Suscept.py"
            using namespace std;
            for (int iorb=0; iorb<norb; iorb++){
               for (int jorb=0; jorb<norb; jorb++){
                  complex<double> csum=0.0;
                  for (int iom=izero; iom<izero+dOm+1; iom++){
                     double dom =  (iom==izero || iom==izero+dOm) ? dx/2.0 : dx;  // trapezoid rule has 1/2 at the last interval
                     complex<double> csum2=0.0;
                     for (int ik=0; ik<nkp; ik++){
                        int ikq  = k_m_q(ik,Qi);
                        int iom1 = om_idx(iom);
                        int iom2 = om_idx(iom-dOm);
                        complex<double> irhok=(gkr(iorb,jorb,iom1,ik)-conj(gkr(jorb,iorb,iom1,ik)))/2.0;             // (G_k-G_k^+)/2
                        complex<double> irhoq=(gkr(jorb,iorb,iom2,ikq)-conj(gkr(iorb,jorb,iom2,ikq)))/2.0;
                        csum2 -= irhok*irhoq; //  rho_k * rho_{k+q}
                     }
                     csum += csum2*dom;
                  }
                  ImBub(iorb,jorb) = csum/(nkp*M_PI);
               }
            }
            """
        ImBub=zeros((norb,norb),dtype=complex)
        dx=float(dx_)
        weave.inline(codeBub, ['ImBub','gkr','norb','dx','nkp','Qi','k_m_q','dOm','izero','om_idx'],type_converters=weave.converters.blitz, compiler='gcc')
        return ImBub


    #print 'Qlist=', Qlist
    ##########################
    # Reading real axis mesh #
    ##########################
    fm = open(filemesh, 'r')
    lined = next(fm).split()
    (mdelta, mmax) = map(float,lined[:2])
    Nd = int(lined[2])
    (oml,idxl) = LinLogMesh.LinLogMeshGen(mdelta,mmax,Nd)

    #print 'Qlist=', Qlist
    #######################################
    # Reading real axis Green's function  #
    #######################################
    # Reading some basic information from G_k
    fg = open(filegkr,'r')
    first_line = next(fg)
    nkp,nsymop,nom,cixdm,norbitals = map(int,first_line.split()[1:6])
    fg.close()
    (gkr,omr) = ReadGk(filegkr, nkp, nsymop, nom, cixdm)
    
    print('shape(gkr)=', shape(gkr))
    if sum(abs(omr-oml))>1e-5: print('Mesh in '+filegkr+' and in '+filemesh+' are not compatible.')
    
    zero_ind = oml.tolist().index(0.0)
    Oml=oml[zero_ind:]
    
    norb=cixdm

    ###############################
    # Computing real axis Bubble  #
    ###############################
    # This is the zero frequency value
    chi0r0 = zeros((len(Qlist),norb,norb), dtype=float)
    
    #print 'Qlist=', Qlist

    for iq,Q in enumerate(Qlist):
        print('Q=', Q)
        ImBub = zeros((norb,norb,len(Oml)),dtype=complex)
        for iOm in range(1,len(Oml)):
            ImBub[:,:,iOm] = Cmp_ChiQ_real(iOm,Q,gkr,k_m_q,nkp,norb,Oml[iOm]-Oml[iOm-1],idxl,Nd,zero_ind,k_index)
            
        ImBubr = real(ImBub)
        Bub = zeros((norb,norb,len(Oml)), dtype=complex)
        for iorb in range(norb):
            for jorb in range(norb):
                tOm = hstack( (-Oml[::-1][:-1], Oml[1:]) )
                ImB = hstack( (-ImBubr[iorb,jorb,::-1][:-1], ImBubr[iorb,jorb,1:]) )
                
                Bub[iorb,jorb,0] = integrate.trapz(ImB/tOm, x=tOm)/pi
                izero=len(tOm)//2
                for i in range(izero,len(tOm)):
                    Bub[iorb,jorb,i-izero+1] = krams.kramarskronig(ImB, tOm, i) + ImB[i]*1j
        
        chi0r0[iq,:,:] = Bub[:,:,0].real

        fq = open(fbubbleReal+str(iq), 'w')
        for iOm,Omx in enumerate(Oml):
            print >> fq, Omx,
            for iorb in range(norb):
                for jorb in range(norb):
                    print >> fq, Bub[iorb,jorb,iOm].real, Bub[iorb,jorb,iOm].imag,
            print >> fq
        fq.close()
        print 'Chi_q-real axis: Q point', iq, 'finished'
        
    return chi0r0
Example #38
from numpy import pi, trapz


def eiz(eiz_arg, X):
    return 1. + (2. / pi) * trapz(eiz_arg, X)
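Shape-wise this matches the Kramers-Kronig form eps(i*xi) = 1 + (2/pi) * integral of x*eps''(x)/(x**2 + xi**2) dx, with the integrand passed in precomputed; a sketch with an illustrative loss spectrum:

import numpy as np

x = np.linspace(1e-3, 100.0, 5000)          # real frequencies
eps2 = x / ((x**2 - 25.0)**2 + x**2)        # illustrative eps''(x)
xi = 2.0                                    # imaginary frequency
print(eiz(x * eps2 / (x**2 + xi**2), x))    # slightly above 1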
Example #39
from numpy import arange, trapz
import matplotlib.pyplot as plt


def f(x):          # the integrand; assumed here for illustration
    return x ** 2


a = 0
b = 1
n = 100
h = (b - a) / n

x = arange(a, b + h, h)
#print(x)

y = []
for xi in x:
    y.append(f(xi))

#print(y)

I = trapz(y, x=x)
print(I)

# setting the axes at the centre
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.spines['left'].set_position('center')
ax.spines['bottom'].set_position('zero')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')

plt.plot(x, y, 'r')
plt.show()
Example #40
import numpy as np
from scipy import integrate


def residual_area(sample_probabilities, predicted_pos_percents):
    """Compute the total area under the curve of |predicted prob - expected prob|
    """
    abs_deviations = np.abs(predicted_pos_percents - sample_probabilities)
    return integrate.trapz(abs_deviations, sample_probabilities)
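A small usage sketch (synthetic calibration data; a perfectly calibrated model would give zero area):

import numpy as np

sample_probabilities = np.linspace(0, 1, 101)
predicted = sample_probabilities + 0.05 * np.sin(2 * np.pi * sample_probabilities)
print(residual_area(sample_probabilities, predicted))   # ~0.032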
Example #41
                 dtype=float))  # Wavelength from dictionary appended to array
    I.append(
        np.array(DATA[i]['Intensity'],
                 dtype=float))  # Intensity from dictionary appended to array

    DATA[i]['diff'] = DATA[i]['Intensity'].diff().abs(
    )  # Calculates the absolute value of the difference between neighboring intensity values
    DATA[i] = DATA[i][
        DATA[i]['diff'] <
        0.002]  # Discriminates large jumps in intensity out of the DATA dictionary

    datacut[i] = DATA[i][(
        DATA[i]['Wavelength'] > Filter
    )]  #& (DATA[i]['Wavelength']<600)] # Limits the wavelength between two values
    intensity[i] = integrate.trapz(
        datacut[i]['Intensity'],
        datacut[i]['Wavelength'])  # Calculates the integral of datacut
    INT.append(intensity[i])  # Appends the integral to the INT array

if verbose == True:  # When True
    print("Length of DATA dictionary=",
          len(DATA))  # Prints the number of pairs in the DATA dictionary
    print(
        "Length of INT dictionary=",
        len(INT))  # Prints the length of the INT array. Must match DATA length

# <codecell>
### Dispose of data preceding max ###
Cut = INT.index(
    max(INT))  # Identifies the file number with the maximum intensity
Redundant = np.arange(
Example #42
    def J(u,y,yT,T):
        t = np.linspace(0,T,len(u))

        I = trapz(u**2,t)

        return 0.5*(I + (y-yT)**2)
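A usage sketch (assuming J, numpy as np, and scipy's trapz are in scope, as in the snippet):

import numpy as np

u = np.sin(np.linspace(0.0, 1.0, 101))   # sampled control on [0, T]
print(J(u, y=2.0, yT=1.0, T=1.0))        # 0.5*(integral of u**2 + (y - yT)**2)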
Example #43
def bringData(folder,
              dataFolder,
              scorePosition,
              lossPosition,
              pickle=False,
              save=False):
    dir = joinPath([basePath, dataFolder, folder[0]])

    f = ''
    areaCompare = float('-inf')
    fileName = ''

    paper_fig_settings()
    plt.figure(figsize=(20, 15))

    for file in returnList(dir):

        selectFolder = joinPath([dir, file])
        selectFile = returnList(selectFolder)[[scorePosition]]
        f = joinPath([selectFolder, selectFile[0]])

        selectLossFile = returnList(selectFolder)[[lossPosition]]
        l = joinPath([selectFolder, selectLossFile[0]])

        data[file] = np.load(f, allow_pickle=pickle)
        loss[file] = np.load(l, allow_pickle=pickle)

        d = np.sort(data[file])

        area = trapz(d)
        mean = np.mean(d)
        std = np.std(d)

        # if areaCompare < area:
        # 	areaCompare = area
        fileName = 'focused'
        try:
            selectedDenseScore = data['dense']
            selectedFocusedScore = data[fileName]
            selectedDenseLoss = loss['dense']
            selectedFocusedLoss = loss[fileName]
        except KeyError:
            pass

        selectedFolder = folder[0]

        print(
            f'{folder[0]} -> {file} -> Curve Area: {area}, Mean: {mean}, std: {std}',
        )

        plot(d, mean, std, file)
    plt.legend(data.keys())
    if save:
        plt.savefig('./plots/histograms/' + selectedFolder + '_histogram.png',
                    bbox_inches="tight")

    print('selected sigma: ', fileName)

    # This part will run after selecting the right focused values against dense values
    # trend comparison
    plt.figure(figsize=(20, 15))
    paper_fig_settings()
    showTrend(scores=selectedDenseScore, name=selectedFolder + ' dense')
    showTrend(scores=selectedFocusedScore, name=selectedFolder + ' focused')
    if save:
        plt.savefig('./plots/histograms/' + selectedFolder + '-' + fileName +
                    '_trend.png',
                    bbox_inches="tight")

    # loss comparison
    plt.figure(figsize=(20, 15))
    paper_fig_settings()
    lossComparison(name=selectedFolder + ' dense', results=selectedDenseLoss)
    lossComparison(name=selectedFolder + ' focused',
                   results=selectedFocusedLoss)
    print('Area under dense curve:', trapz(selectedDenseLoss))
    print('Area under focused curve:', trapz(selectedFocusedLoss))
    if save:
        plt.savefig('./plots/histograms/' + selectedFolder + '-' + fileName +
                    '_loss.png',
                    bbox_inches="tight")

    # A t-test is applied according to our hypothesis:
    # based on the area under the curve, we claim that the focused NN
    # learns to solve the problem faster, more accurately and more efficiently
    statistic, pvalue = ttest_ind(selectedDenseScore, selectedFocusedScore)
    print('statistic: ', float(statistic))
    print('pvalue: ', float(pvalue))

    plt.show()
Example #44
                             (1 - xi * xi) / (4 * np.cosh(gamma)) +
                             P_1 * kappa_p * kappa_p * np.sinh(kappa) *
                             (np.cosh(gamma * xi) / np.cosh(gamma) -
                              xi * np.sinh(kappa_p * xi) / np.sinh(kappa_p)) /
                             (2 * gamma * gamma))
                uy1 = 0.5 * (kappa * P_1 * np.sinh(kappa) /
                             (gamma * gamma)) * (
                                 np.sinh(kappa_p * xi) / np.sinh(kappa_p) -
                                 np.sinh(kappa * xi) / np.sinh(kappa))
                ux1 = 0.5 * ((P_1 * kappa * np.cosh(kappa) / (gamma * gamma)) *
                             (np.cosh(kappa * xi) / np.cosh(kappa) -
                              np.cosh(kappa_p * xi) / np.cosh(kappa_p)) +
                             (F0 * np.tanh(gamma) / (gamma)) *
                             (np.cosh(kappa_p * xi) / np.cosh(kappa_p) -
                              xi * np.sinh(gamma * xi) / np.sinh(gamma)))
                ux2 -= scpi.trapz(ux2, xi) / 2 + scpi.trapz(ux1, xi) / 4

                B0 = (Pe * F0 * np.tanh(gamma) /
                      (2 * gamma * (rho * rho - gamma * gamma))) * (
                          np.cosh(rho * xi) /
                          (rho * np.sinh(rho)) - np.cosh(gamma * xi) /
                          (gamma * np.sinh(gamma))) + Pe * F0 * np.tanh(
                              gamma) / (2 * gamma * gamma * gamma * rho * rho)
                B0_deriv = (Pe * F0 * np.tanh(gamma) /
                            (2 * gamma * (rho * rho - gamma * gamma))) * (
                                np.sinh(rho * xi) / np.sinh(rho) -
                                np.sinh(gamma * xi) / np.sinh(gamma))
                B0_deriv_deriv = (
                    Pe * F0 * np.tanh(gamma) /
                    (2 * gamma * (rho * rho - gamma * gamma))) * (
                        rho * np.cosh(rho * xi) / np.sinh(rho) -
Ejemplo n.º 45
0
def MaximumEntropy(p, tau, Gt):

    beta = tau[-1]

    random.seed(1)  # seed for random numbers

    if 'x0' in p:
        omega = GiveTanMesh(p['x0'], p['L'], p['Nw'])
    else:
        omega = linspace(-p['L'], p['L'], 2 * p['Nw'] + 1)
    dom = array([0.5 * (omega[1] - omega[0])] + [
        0.5 * (omega[i + 1] - omega[i - 1]) for i in range(1,
                                                           len(omega) - 1)
    ] + [0.5 * (omega[-1] - omega[-2])])

    fsg = 1
    if p['statistics'] == 'fermi':
        Gt = -Gt
        fsg = -1
        normalization = Gt[0] + Gt[-1]
        Ker = me.initker_fermion(omega, dom, beta, tau)
    elif p['statistics'] == 'bose':
        normalization = integrate.trapz(Gt, x=tau)
        Ker = me.initker_boson(omega, dom, beta, tau)

    print('beta=', beta)
    print('normalization=', normalization)

    # Set error
    if p['idg']:
        sxt = ones(len(tau)) / (p['deltag']**2)
    else:
        sxt = Gt * p['deltag']
        for i in range(len(sxt)):
            if sxt[i] < 1e-5: sxt[i] = 1e-5
        sxt = 1. / sxt**2

    # Set model
    if p['iflat'] == 0:
        model = normalization * ones(len(omega)) / sum(dom)
    elif p['iflat'] == 1:
        model = exp(-omega**2 / p['gwidth'])
        model *= normalization / dot(model, dom)
    else:
        dat = loadtxt('model.dat').transpose()
        fm = interpolate.interp1d(dat[0], dat[1])
        model = fm(omega)
        model *= normalization / dot(model, dom)
        #savetxt('brisi_test', vstack((tau, fsg*dot(model,Ker))).transpose())

    print('Model normalization=', dot(model, dom))

    # Set starting Aw(omega)
    Aw = random.rand(len(omega))
    Aw = Aw * (normalization / dot(Aw, dom))
    print('Aw normalization=', dot(Aw, dom))

    dlda = me.initdlda(omega, dom, Ker, sxt)

    temp = 10.
    rfac = 1.
    alpha = p['alpha0']

    for itt in range(p['Nitt']):
        print(itt, 'Restarting maxent with rfac=', rfac, 'alpha=', alpha)
        iseed = random.randint(0, maxint)

        me.maxent(Aw, rfac, alpha, temp, Ker, sxt, Gt, model, dom, p['Asteps'],
                  iseed)
        S = me.entropy(Aw, model, dom)
        Trc = me.lambdac(alpha, Aw, omega, dom, dlda)

        ratio = -2 * S * alpha / Trc
        print('Finished maxent with alpha=', alpha, '-2*alpha*S=', -2 * alpha * S, 'Trace=', Trc)
        print('   ratio=', ratio)

        savetxt('dos_' + str(itt), vstack((omega, Aw)).transpose())
        temp = 0.001
        rfac = 0.05

        if abs(ratio - 1) < p['min_ratio']: break

        if (abs(ratio) < 0.05):
            alpha *= 0.5
        else:
            alpha *= (1. + 0.001 * (random.rand() - 0.5)) / ratio

    for itt in range(p['Nr']):
        print('Smoothing itt ', itt)
        Aw = Broad(p['bwdth'], omega, Aw)
        Aw *= (normalization / dot(Aw, dom))  # Normalizing Aw

        savetxt('dos_' + str(p['Nitt']), vstack((omega, Aw)).transpose())

        temp = 0.005
        rfac = 0.005
        iseed = random.randint(0, maxint)
        me.maxent(Aw, rfac, alpha, temp, Ker, sxt, Gt, model, dom, p['Asteps'],
                  iseed)

        S = me.entropy(Aw, model, dom)
        Trc = me.lambdac(alpha, Aw, omega, dom, dlda)
        ratio = -2 * S * alpha / Trc
        print('Finished smoothing run with alpha=', alpha, '-2*alpha*S=', -2 * alpha * S, 'Trace=', Trc)
        print('   ratio=', ratio)

    savetxt('gtn', vstack((tau, fsg * dot(Aw, Ker))).transpose())
    Aw = Broad(p['bwdth'], omega, Aw)
    savetxt('dos.out', vstack((omega, Aw)).transpose())
    return (Aw, omega)
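
The dom array built at the top of MaximumEntropy holds trapezoidal quadrature weights, so that dot(f, dom) approximates the integral of f over the omega mesh; a small standalone check against trapz (assuming only numpy/scipy):

import numpy as np
from scipy.integrate import trapz

omega = np.linspace(-10.0, 10.0, 201)
# Half-spacing weights at the edges, mean of the neighbouring spacings
# in the interior, exactly as constructed in MaximumEntropy above
dom = np.array([0.5 * (omega[1] - omega[0])]
               + [0.5 * (omega[i + 1] - omega[i - 1])
                  for i in range(1, len(omega) - 1)]
               + [0.5 * (omega[-1] - omega[-2])])

f = np.exp(-omega ** 2)
print(np.dot(f, dom), trapz(f, omega))  # both give the same quadrature
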
Ejemplo n.º 46
0
def gen_fine_clic(p_angles_fine,xi_r,xi_t,dt_t,rs,dr_n,s_model,model,depth,inv_f,par,tcut):
    clic_theta_grid = 200
    clic_theta = np.linspace(0,180,clic_theta_grid)
    global clic_xi_r,clic_xi_t,clic_dt_t
    
    clic_xi_r = np.zeros((depth,clic_theta_grid))
    clic_xi_t = np.zeros((depth,clic_theta_grid))
    clic_dt_t = np.zeros((depth,clic_theta_grid))
    clic_rs = np.zeros((depth,clic_theta_grid))
    if tcut:
        cut=True
        print("--- CUT ---")
    else:
        cut=False
    for d in range(depth):
        if cut:
            xi_r[d,0:12] = 1.*xi_r[d,12]
            xi_t[d,0:12] = 1.*xi_t[d,12]
            dt_t[d,0:12] = 1.*dt_t[d,12]
            rs[d,0:12] = 1.*rs[d,12]
            if par=="EVEN":
                xi_r[d,-9::] = 1.*xi_r[d,-10]
                xi_t[d,-9::] = 1.*xi_t[d,-10]
                dt_t[d,-9::] = 1.*dt_t[d,-10]
                rs[d,-9::] = 1.*rs[d,-10]
        clic_xi_r[d,0:100] = np.interp(clic_theta[0:100],np.linspace(0,90,100),xi_r[d,:])
        clic_xi_t[d,0:100] = np.interp(clic_theta[0:100],np.linspace(0,90,100),xi_t[d,:])
        clic_dt_t[d,0:100] = np.interp(clic_theta[0:100],np.linspace(0,90,100),dt_t[d,:])
        clic_rs[d,0:100] = np.interp(clic_theta[0:100],np.linspace(0,90,100),rs[d,:])
    
    
    for i in range(depth):
        if par=="EVEN":
            clic_xi_r[i,100::] = clic_xi_r[i,::-1][100::]
            clic_xi_t[i,100::] = clic_xi_t[i,::-1][100::]
            clic_dt_t[i,100::] = clic_dt_t[i,::-1][100::]
        else:
            clic_xi_r[i,100::] = -1.*clic_xi_r[i,::-1][100::]
            clic_xi_t[i,100::] = -1.*clic_xi_t[i,::-1][100::]
            clic_dt_t[i,100::] = -1.*clic_dt_t[i,::-1][100::]
            
        clic_rs[i,100::] = clic_rs[i,::-1][100::]
            

    #plt.plot(clic_theta[0:200],clic_rs[-2,0:200])

    ############ find the perturbed models:
    # Take static ROTORC model to CLIC grid:
    global s_model_c
    s_model_c = np.zeros((clic_theta_grid,len(s_model[0,:])))
    s_model_c[:,0] = clic_theta
    for i in range(1,len(s_model[0,:])):
        s_model_c[0:100,i] = np.interp(clic_theta[0:100],s_model[:,0],s_model[:,i])
        s_model_c[100::,i] = s_model_c[::-1,i][100::]

    G = 6.67259e-8
    mass = (float((model[0]).replace("p",".")))*1.99e33
    theta = np.deg2rad(s_model_c[:,0])
    radius = s_model_c[:,1]*6.96e10
    vrot = s_model_c[:,4]*1e5
    omega = vrot/(radius*np.sin(theta))
    omega = np.average(omega[50:-50])
    

    a_r = scint.trapz(clic_dt_t[-1,:])
    
#    inv_f = 1.
#    if a_r<0:
#        print "fine -T area"
#        inv_f = -1.    
        
    global pmodels_fine
    pert_r = np.empty((len(s_model_c[:,0]),depth))
    pert_t = np.empty((len(s_model_c[:,0]),depth))
    pmodels_fine = []
    for i in range(depth):
        #pert_r[:,i] = s_model_c[:,1]*(1.+inv_f*clic_xi_r[-i,:])
        pert_r[:,i] = s_model_c[:,1]+inv_f*clic_rs[-i,:]*clic_xi_r[-i,:]
        pert_t[:,i] = s_model_c[:,2]*(1.+inv_f*clic_dt_t[-i,:])
        for j in range(len(pert_t[:,i])):
            if pert_t[j,i]<7500.: pert_t[j,i]=7500.
        g_pert = (G*mass/((pert_r[:,i]*6.96e10)**2))-((omega**2)*((pert_r[:,i]*6.96e10)*np.sin(theta)))*np.sin(theta)
        for j in range(len(g_pert)):
            if np.log10(g_pert[j])>4.33: g_pert[j]=10**(4.33)
        tmodel = 1.*s_model_c
        """
        pert_r[0:11,:] = pert_r[11,:]
        pert_r[190:200,:] = pert_r[189,:]
        
        pert_t[0:11,:] = pert_t[11,:]
        pert_t[190:200,:] = pert_t[189,:]
        """
        tmodel[:,1] = pert_r[:,i]
        tmodel[:,2] = pert_t[:,i]
        tmodel[:,3] = np.log10(g_pert)
        pmodels_fine.append(tmodel)
        
    #plt.plot(clic_dt_t[0,:])
    return pmodels_fine
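
gen_fine_clic extends quantities known on colatitudes 0-90 deg to the full 0-180 deg grid by mirroring about the equator, with a sign flip for odd parity; a minimal sketch of that reflection trick on a toy profile:

import numpy as np

theta = np.linspace(0, 180, 200)
half = np.sin(np.deg2rad(theta[:100]))    # values known on 0-90 deg only

full_even = np.empty(200)
full_even[:100] = half
full_even[100:] = full_even[::-1][100:]   # mirror about 90 deg (EVEN parity)

full_odd = np.empty(200)
full_odd[:100] = half
full_odd[100:] = -full_odd[::-1][100:]    # mirror with sign flip (ODD parity)
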
Ejemplo n.º 47
0
def MaximumEntropyTest(p, tau, Gt):
    def MEStep(alpha,
               rfac,
               Aw,
               temp,
               Ker,
               sxt,
               Gt,
               model,
               f0,
               Asteps,
               itt,
               reset=True):
        if (reset):
            temp = 0.001
            rfac = 0.05
        print('Restarting maxent with rfac=', rfac, 'alpha=', alpha)
        iseed = random.randint(0, maxint)
        me.maxent(Aw, rfac, alpha, temp, Ker, sxt, Gt, model, f0, Asteps,
                  iseed)
        S = me.entropy(Aw, model, f0)
        Trc = me.lambdac(alpha, Aw, omega, dlda)
        ratio = -2 * S * alpha / Trc
        print('Finished maxent with alpha=', alpha, '-2*alpha*S=', -2 * alpha * S, 'Trace=', Trc)
        print('   ratio=', ratio)
        savetxt('dos_' + str(itt), vstack((omega, Aw)).transpose())
        return ratio

    beta = tau[-1]

    random.seed(1)  # seed for random numbers

    omega = linspace(-p['L'], p['L'], 2 * p['Nw'] + 1)
    f0, f1, f2 = me.initf0(omega)
    fsg = 1
    if p['statistics'] == 'fermi':
        Gt = -Gt
        fsg = -1
        normalization = Gt[0] + Gt[-1]
        Ker = me.initker_fermion(omega, beta, tau)
    elif p['statistics'] == 'bose':
        normalization = integrate.trapz(Gt, x=tau)
        Ker = me.initker_boson(omega, beta, tau)

    print('beta=', beta)
    print('normalization=', normalization)

    # Set error
    if p['idg']:
        sxt = ones(len(tau)) / (p['deltag']**2)
    else:
        sxt = Gt * p['deltag']
        for i in range(len(sxt)):
            if sxt[i] < 1e-5: sxt[i] = 1e-5
        sxt = 1. / sxt**2

    # Set model
    if p['iflat'] == 0:
        model = normalization * ones(len(omega)) / sum(f0)
    elif p['iflat'] == 1:
        model = exp(-omega**2 / p['gwidth'])
        model *= normalization / dot(model, f0)
    else:
        dat = loadtxt('model.dat').transpose()
        fm = interpolate.interp1d(dat[0], dat[1])
        model = fm(omega)
        model *= normalization / dot(model, f0)
        #savetxt('brisi_test', vstack((tau, fsg*dot(model,Ker))).transpose())

    print('Model normalization=', dot(model, f0))

    # Set starting Aw(omega)
    Aw = random.rand(len(omega))
    Aw = Aw * (normalization / dot(Aw, f0))
    print('Aw normalization=', dot(Aw, f0))

    dlda = me.initdlda(omega, Ker, sxt)

    temp = 10.
    rfac = 1.
    alpha = p['alpha0']

    for itt in range(10):
        ratio = MEStep(alpha, rfac, Aw, temp, Ker, sxt, Gt, model, f0,
                       p['Asteps'], itt, itt != 0)
        if abs(ratio - 1) < p['min_ratio']: break
        if (ratio < 0.05):
            if ratio > 0:
                alpha *= 1.1
            else:
                alpha /= 10.
        else:
            alpha *= (1. + 0.001 * (random.rand() - 0.5)) / ratio

    if abs(ratio - 1) > 2 * p['min_ratio']:
        alpha = 1.
        p['Asteps'] *= 1.5
        for itt in range(3, 6):
            ratio = MEStep(alpha, rfac, Aw, temp, Ker, sxt, Gt, model, f0,
                           p['Asteps'], itt)

    for itt in range(p['Nr']):
        print('Smoothing itt ', itt)
        Aw = Broad(p['bwdth'], omega, Aw)
        Aw *= (normalization / dot(Aw, f0))  # Normalizing Aw
        ratio = MEStep(alpha, rfac, Aw, temp, Ker, sxt, Gt, model, f0,
                       p['Asteps'], p['Nitt'] + itt)

    savetxt('gtn', vstack((tau, fsg * dot(Aw, Ker))).transpose())
    Aw = Broad(p['bwdth'], omega, Aw)
    savetxt('dos.out', vstack((omega, Aw)).transpose())
    return (Aw, omega)
Ejemplo n.º 48
0
def run_visc_pert(model,vel,mode,par,sigma,reese,force_f,minL,phase=0.,ampl=1.,tlmax=0.,tcut=False):
    # Info for del dot xi calculation:------------------
    # NRO Mode filename:
    #modefname="MODE1"
    global kind
    
    norm_f = True
    ssc = 0.02
    if model[0]=="2p5": ssc = 0.01
    if kind=="g":
        scale = ssc
    else:
        scale = ssc/(sigma**2)
    scale = 0.001 * drtempmax
    depth = 10 #radial zones down from the surface
    forcenro = False
    #---------------------------------------------------
    
    #clic_run = True
    
    
    v = find_vel(model,vel)
    folder = homedir+"ROTORCmodels/"+par+"/M"+model[0]+"_V"+v+"/"
    
    rotorc_f = homedir+"From_Bob/Delta_Scuti_2010/"+model[0]+"Msun/"+model[0]+"Msun_V"+v+"/"
    
    bob_bin = homedir+"From_Bob/clotho_disc10_bin/"
    
    static_m = homedir+"ROTORCmodels/visibilities/"
    
    
    
    
    #check if visibility file from pulset exists for the selected model:
    
    #v_file = glob.glob(rotorc_f+"visibility_file")
    
    
    ###### FULL MODE FILE GENERATION:
    
    temp_freqs = np.genfromtxt(folder+"temp_freqs")
    tfreq = temp_freqs[mode-1]
    if force_f==True:
        tfreq = sigma
        print(sigma)
    #tfreq = 1.59692
    #print pyNRO.run_nro(tfreq,folder,model[0],v,par,mode)
    
    
    
    where = static_m+model[0]+'Msun/V'+v+"/MODE_"+par+"_"+str(mode)
    
    if not os.path.exists(static_m+model[0]+'Msun/'+'V'+v):
        os.makedirs(static_m+model[0]+'Msun/'+'V'+v)
        
    if not os.path.exists(static_m+model[0]+'Msun/'+'V'+v+"/MODE_"+par+"_"+str(mode)):
        os.makedirs(where)
    
    if os.path.isfile(where+'/MODE_'+par+'_'+str(mode))==False or forcenro==True:
        #check if visibility file from pulset exists for the selected model:
        
        #v_file = glob.glob(rotorc_f+"visibility_file")
        if True:
            os.chdir(rotorc_f)
            r_mod_name = glob.glob("*_ZAMS")
            subprocess.call(["cp",r_mod_name[0],"orcmod_pulset"])
            subprocess.call([bob_bin+"pulsetnonadb.exe"])
            print("Generated visibility_file in " + rotorc_f)
            subprocess.call([bob_bin+"pulset_gammas.exe"]) #Generates Cmod with gammas
            subprocess.call(["cp","Cmod",folder+"Dmod_"+model[0]+"M_V"+v])
            print("Generated new Dmod")
            #print(glob.glob("*_ZAMS"))
            os.chdir(vis_path)
        
        print("Dmod with GAMMAS generated!")
        
        nro_stat = 1
        idx_iter = 0
        while nro_stat==1:
        #RUN NRO!
            nro_stat = pyNRO.run_nro(tfreq-idx_iter*1e-4,folder,model[0],v,par,mode,homedir)
            idx_iter += 1

            
        subprocess.call(['mv',folder+'MODE_'+par+'_'+str(mode),where+'/MODE_'+par+'_'+str(mode)])
        print("Mode file generation complete!")
    else:
        print("Mode file found! Not running NRO...")


    
    
    ###### del_DOT_xi>
    #modeloc = static_m+model[0]+'Msun/V'+vels[vel]+"/"
    modeloc = where+"/"
    modefname = 'MODE_'+par+'_'+str(mode)
    #modefname = 'MODE13'
    global s_model
    s_model = np.genfromtxt(glob.glob(static_m+model[0]+'Msun/'+model[0]+'Msun_V'+v+"*")[0])
    
    global xi_r_rot,xi_t_rot,dt_t_rot,zg_rot    
    global xi_r,xi_t,dt_t,zg,r,zp,cs,xi_dot_g
    global xi_r_n,xi_t_n,dt_t_n,zg_n
    
    xi_r,xi_t,dt_t,zg,r,zp,sig,cs,xi_dot_g = ddxi.calcdeldotxi(par,model,vel,modeloc,modefname)
            
    xi_r_n,xi_t_n,dt_t_n,zg_n,dr_n = ddxi.norm_and_scale(xi_r,xi_t,dt_t,r,zg,norm_f,scale,depth,reese,sig,par)

    if phase!=0:
        #dt_t_n = phaser(dt_t_n,phase,1.,tlmax)
        #xi_r_n = phaser(xi_r_n,0.,1.,tlmax)
        dt_t_n,psi_T,psi_L,mx = phaser2(dt_t_n,phase,np.max(np.abs(dt_t[-depth])),np.max(np.abs(xi_r[-depth])))
        xi_r_n = phaser(xi_r_n,0.,1.,mx+np.pi)
        print("Theta L_max-----------> ", np.rad2deg(mx), "deg")
        np.savetxt(modeloc+"phases.txt",[psi_T,psi_L,np.rad2deg(mx+np.pi)],header="psi_T[deg],psi_L[deg],theta_max_L[deg]")
        
        

    a_r = scint.trapz(dt_t_n[-1,:])
    
    inv_f = 1.
    if a_r<0:
        print("-T area")
        #xi_r_n *= -1.
        #xi_t_n *= -1.
        #dt_t_n *= -1.
        #zg_n *= -1.
        inv_f = -1.
        
    if minL==True:
        inv_f *= -1.

    #xi_r_rot,xi_t_rot,dt_t_rot,zg_rot = ddxi.to_rotorc(xi_r_n,xi_t_n,dt_t_n,zg_n)
    
    if par == "EVEN":
        xi_r_fine = lint.leg_interp(xi_r_n[:,:],8,"EVEN")
        xi_t_fine = lint.leg_interp(xi_t_n[:,:],8,"EVEN")
        dt_t_fine = lint.leg_interp(dt_t_n[:,:],8,"EVEN")
    else:
        xi_r_fine = lint.leg_interp(xi_r_n[:,:],8,"OE")
        xi_t_fine = lint.leg_interp(xi_t_n[:,:],8,"OE")
        dt_t_fine = lint.leg_interp(dt_t_n[:,:],8,"OE")
        
    global rs_n,rs_rot
    rs_n = np.empty(xi_r_fine.shape)
    for d in range(depth):
        rs_n[d,:] = np.interp(np.linspace(0,90,100),np.linspace(10,80,8),r[-d-1,:])
    
#    dr_n = xi_r_fine*rs_n
#    for i in range(len(dr_n[:,0])):
#        imax = np.argmax(np.abs(dr_n[i,:]))
#        dr_n[i,:] = dr_n[i,:]/(dr_n[i,imax]/2.28534026)
        
    
    
    p_angles = np.empty(np.shape(xi_r_n))
    p_angles_fine = np.empty(np.shape(xi_r_fine))
    rot_ang = np.arange(4.5,90.,9)
    xi_r_rot = np.empty((depth,len(rot_ang)))
    xi_t_rot = np.empty((depth,len(rot_ang)))
    dt_t_rot = np.empty((depth,len(rot_ang)))
    rs_rot = np.empty((depth,len(rot_ang)))
    for d in range(depth):
        p_angles[d,:] = (np.linspace(10,80,8))*(1.+xi_t_n[d,:])
        p_angles_fine[d,:] = (np.linspace(0,90,100))*(1.+xi_t_fine[d,:])
        xi_r_rot[d,:] = np.interp(rot_ang,p_angles_fine[d,:],xi_r_fine[d,:]) 
        xi_t_rot[d,:] = np.interp(rot_ang,p_angles_fine[d,:],xi_t_fine[d,:])
        dt_t_rot[d,:] = np.interp(rot_ang,p_angles_fine[d,:],dt_t_fine[d,:])
        rs_rot[d,:] = np.interp(rot_ang,p_angles_fine[d,:],rs_n[d,:])
        
    
    
#    
#    #plt.plot(p_angles_fine[2,:],xi_t_fine[2,:])
#    if dr_n[0,-1]>0:
#        ivrt = 1.
#    else:
#        ivrt = -1.
#    vr = ivrt*dr_n[0,:]
#    #vr = ivrt*dt_t_fine[0,:]
#    #lblb = find_vel(model,vel)+" km s$^{-1}$ "+find_name(model,vel,par,mode)
#    lblb = find_vel(model,vel)+" km s$^{-1}$ "
#    #lblb = r"$\ell$ = "+find_name(model,vel,par,mode).split()[0]
#    etsy = "-"
#    if find_name(model,vel,par,mode).split()[0] == "2": etsy = "-"
#    if par == "EVEN":
#        #plt.plot(p_angles_fine[2,:],ivrt*lint.leg_interp(vr[:,:],8,"EVEN")[0,:],label=lblb)
#        plt.plot(p_angles_fine[2,:],vr,ls=etsy,label=lblb)
#        
#    else:
#        #plt.plot(p_angles_fine[2,:],ivrt*lint.leg_interp(vr[:,:],8,"OE")[0,:],label=lblb)
#        plt.plot(p_angles_fine[2,:],vr,ls=etsy,label=lblb)
#    #plt.plot(p_angles_fine[2,:],lint.leg_interp(xi_r[-depth::,:],8,"OE")[2,:],label=find_name(model,vel,par,mode))
#    #plt.plot(p_angles[2,:],dt_t[-depth+2,:],"o",mfc="white",mec="k",mew=1)
#    #plt.grid()
#    plt.xlim(0,90)
#    plt.yticks()
#    plt.xlabel("Colatitude [deg]")
#    plt.ylabel(r"$\delta$R [R$\odot$]")
#    #plt.ylabel(r"$\delta$T/T")
#    plt.legend(loc="best")
#    #plt.title("Mode:" + find_name(model,vel,par,mode))
#    #ax.yaxis.set_major_formatter(majorFormatter) 
#    plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
#    #plt.title(r"M="+model[0]+"M$_{\odot}$, V="+v+"km s$^{-1}$")
#    plt.title(r"M="+model[0]+"M$_{\odot}$")
    
       
    
    print("Generating fine clic model...")
    pmodels_fine = gen_fine_clic(p_angles_fine,xi_r_fine,xi_t_fine,dt_t_fine,rs_n,dr_n,s_model,model,depth,inv_f,par,tcut)

        
    ############ find the perturbed models:
    global omega
    G = 6.67259e-8
    mass = (float((model[0]).replace("p",".")))*1.99e33
    theta = np.deg2rad(s_model[:,0])
    radius = s_model[:,1]*6.96e10
    vrot = s_model[:,4]*1e5
    omega = vrot/(radius*np.sin(theta))
    omega = np.average(omega[1:-1])
    

    
    #g_r = (G*mass/(radius**2))-((omega**2)*(radius*np.sin(theta)))*np.sin(theta)
    #g_theta = ((omega**2)*(radius*np.sin(theta)))*np.cos(theta)

    #geff = (g_r**2 + g_theta**2)**0.5        
        
    
    pert_r = np.empty((len(s_model[:,0]),depth))
    pert_t = np.empty((len(s_model[:,0]),depth))
    pmodels = []
    for i in range(depth):
        pert_r[:,i] = s_model[:,1]*(1.+inv_f*xi_r_rot[-i,:])
        pert_t[:,i] = s_model[:,2]*(1.+inv_f*dt_t_rot[-i,:])
        g_pert = (G*mass/((pert_r[:,i]*6.96e10)**2))-((omega**2)*((pert_r[:,i]*6.96e10)*np.sin(theta)))*np.sin(theta)
        for j in range(len(g_pert)):
            if np.log10(g_pert[j])>4.33: g_pert[j]=10**(4.33)
        tmodel = 1.*s_model
        tmodel[:,1] = pert_r[:,i]
        tmodel[:,2] = pert_t[:,i]
        tmodel[:,3] = np.log10(g_pert)
        pmodels.append(tmodel)

    if par=="ODD":
        pert_r = np.empty((2*len(s_model[:,0]),depth))
        pert_t = np.empty((2*len(s_model[:,0]),depth))
        
        pmodels = []
        odd_angles = np.arange(4.5,180.,9)
        for i in range(depth):
            pert_r[0:10,i] = s_model[:,1]+inv_f*xi_r_rot[-i,:]*rs_rot[-i,:]
            pert_t[0:10,i] = s_model[:,2]*(1.+inv_f*dt_t_rot[-i,:])
            pert_r[10:20,i] = s_model[::-1,1]*(1.-inv_f*xi_r_rot[-i,::-1])
            pert_t[10:20,i] = s_model[::-1,2]*(1.-inv_f*dt_t_rot[-i,::-1])
            g_pert = (G*mass/((pert_r[:,i]*6.96e10)**2))-((omega**2)*((pert_r[:,i]*6.96e10)*np.sin(odd_angles)))*np.sin(odd_angles)
            for j in range(len(g_pert)):
                if np.log10(g_pert[j])>4.33: g_pert[j]=10**(4.33)
            tmodel = np.empty((20,5))
            tmodel[:,0] = odd_angles[:]
            tmodel[:,1] = pert_r[:,i]
            tmodel[:,2] = pert_t[:,i]
            #tmodel[0:10,3] = s_model[:,3]
            #tmodel[10:20,3] = s_model[::-1,3]
            tmodel[:,3] = np.log10(g_pert)
            tmodel[0:10,4] = s_model[:,4]
            tmodel[10:20,4] = s_model[::-1,4]
                
            pmodels.append(tmodel)
    
#    plt.plot(pmodels[-1][:,0],s_model[:,1],"--",color="0.5")
#    
#    global old_modes
#    import seaborn as sns
#    sns.set(style="white",rc={"figure.figsize": (8, 8),'axes.labelsize': 16,
#                              'ytick.labelsize': 12,'xtick.labelsize': 12,
#                              'legend.fontsize': 16,'axes.titlesize':18,'font.size':14})
#    #plt.plot(pmodels[2][0:-1,0],np.diff(pmodels[2][:,1])/(pmodels[2][1,0]-pmodels[2][0,0]),label=r"$\ell=$"+find_name(model,vel,par,mode).strip().split()[0])
#    #plt.plot(pmodels[2][:,0],pmodels[2][:,1],label="old way")
#    #plt.plot(pmodels_fine[2][0:-1,0],np.diff(pmodels_fine[2][:,1])/(pmodels_fine[2][1,0]-pmodels_fine[2][0,0]))
#    plt.plot(pmodels_fine[2][:,0],pmodels_fine[2][:,2],"-",lw=1.5,label=r"$\ell=$"+find_name(model,vel,par,mode).strip().split()[0])
#    #plt.plot(s_model[:,0],s_model[:,1],label="Static")
#    #plt.vlines(90,9100,9300)
#    o_lims = plt.ylim()
#    #plt.vlines(90,min(pmodels_fine[2][:,2])-100,max(pmodels_fine[2][:,2])+100)
#    plt.grid()
#    plt.xlim(0,180)
#    #plt.ylim(o_lims[0],o_lims[1])
#    plt.xlabel("Colatitude [deg]")
#    plt.ylabel("Radius [M$_{\odot}$]")
#    #plt.ylabel("Temperature [K]")
#    plt.legend(loc="best")
#    #plt.title("Mode: " + find_name(model,vel,par,mode))
    #plt.title(r"Perturbed T$_{\mathrm{eff}}$ - M="+model[0]+"M$_{\odot}$, V="+v+"km s$^{-1}$")
    
    
    return modeloc,pmodels,pmodels_fine
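
run_visc_pert decides the inversion factor inv_f from the sign of the integrated temperature perturbation: if the trapezoidal area of the surface dT/T profile is negative, the whole mode is flipped. A stripped-down sketch of that sign convention with a toy profile (all values hypothetical):

import numpy as np
from scipy import integrate as scint

dt_t_surface = -np.sin(np.linspace(0.0, np.pi, 100))  # toy surface dT/T profile

inv_f = 1.0
if scint.trapz(dt_t_surface) < 0:  # negative net area -> flip the mode's sign
    inv_f = -1.0

dt_t_surface = inv_f * dt_t_surface
print(scint.trapz(dt_t_surface))   # now non-negative
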
Ejemplo n.º 49
0
def write_and_plot(sample_dict, path):
    CSV_PATH, PLOT_PATH, RAW_CSV_DATA = get_output(path)
    # Merge all the sample data
    sample_families = list(sample_dict.keys())
    composite_data = vivdict()
    integral_data = []
    VOLUME = 1.2
    VOLUME_CALCULATED = VOLUME / (60 * 1000)

    for family in sample_families:
        for method, _ in sample_dict[family].items():
            log.info(f"Working on {method}")
            composite_data[family][method] = cd = pd.concat(
                sample_dict[family][method], axis=1
            ).apply(lambda g: pd.Series.interpolate(g, method="cubic"))
            ppm_calculated_once = cd.mul(10000 * 1.9378).droplevel(axis=1, level=1)
            # Integral
            ppm = pd.DataFrame(ppm_calculated_once[0:150])
            ppm.reset_index(inplace=True)
            ppm["Volume"] = ppm["Time"].apply(lambda x: x * VOLUME_CALCULATED)
            ppm.set_index("Volume", inplace=True)
            ppm = ppm.drop(columns=["Time"], errors="ignore")
            row_integral = (
                ppm.ewm(span=5)
                .mean()
                .apply(lambda g: integrate.trapz(x=g.index, y=g.values))
            )
            row_integral_std = row_integral.std()
            integral = pd.DataFrame(
                {"Integral": row_integral.mean(), "STD": row_integral_std},
                index=[f"{family}_{method}"],
            )
            integral["lower"] = row_integral.mean() - row_integral_std
            integral["upper"] = row_integral.mean() + row_integral_std
            integral_data.append(integral)

            # Mean Data
            cd_mean = cd.mean(axis=1).reset_index()
            cd_mean.columns = ["Time", "CO2"]
            cd_mean.to_csv(
                os.path.join(CSV_PATH, f"{family}_{method}.csv"), sep=",", index=False
            )

            # Mean/STD Data
            source_data = pd.DataFrame()
            source_data["mean"] = ppm_calculated_once.mean(axis=1)
            source_data["std"] = ppm_calculated_once.std(axis=1)
            source_data["lower"] = source_data["mean"] - source_data["std"]
            source_data["upper"] = source_data["mean"] + source_data["std"]

            # Graphing our data
            output_file(os.path.join(PLOT_PATH, f"{family}-{method}.html"))
            source_data = source_data.reset_index().rename(columns={"index": "Time"})
            source = ColumnDataSource(source_data)
            p = figure(
                title=f"{family} {method}",
                x_axis_label="Time",
                y_axis_label="CO2 Release",
                background_fill_color="#efefef",
                toolbar_location=None,
            )
            p.line(
                source=source, x="Time", y="mean",
            )
            band = Band(
                base="Time",
                lower="lower",
                upper="upper",
                source=source,
                level="underlay",
                fill_alpha=1.0,
                line_width=1,
                line_color="black",
            )
            p.add_layout(band)
            p.title.text = f"{family} {method}"
            p.xgrid[0].grid_line_color = None
            p.ygrid[0].grid_line_alpha = 0.5
            p.xaxis.axis_label = "Time"
            p.yaxis.axis_label = "CO2 (PPM)"
            p.y_range.start = source_data["mean"].min() - source_data["std"].max() / 6
            p.y_range.end = source_data["mean"].max() + source_data["std"].max()
            p.ygrid.band_fill_alpha = 0.1
            p.ygrid.band_fill_color = "#C0C0C0"
            p.add_tools(
                HoverTool(tooltips=[("Value", "@mean"), ("STD", "@std")], mode="vline")
            )

            show(p)

    integral_df = pd.concat(integral_data).reset_index()
    groups = integral_df["index"]
    output_file(os.path.join(PLOT_PATH, "Integral Data.html"))
    source = ColumnDataSource(integral_df)
    p = figure(
        x_range=groups,
        toolbar_location=None,
        title="CO2 Integral",
        background_fill_color="#efefef",
        y_axis_label="CO2 (mg)",
        tools="tap",
    )
    p.circle(
        x="index",
        y="Integral",
        color="red",
        fill_alpha=0.4,
        line_color="firebrick",
        line_alpha=1.0,
        size=10,
        source=source,
        selection_color="firebrick",
        nonselection_fill_alpha=0.2,
        nonselection_fill_color="firebrick",
        nonselection_line_color="blue",
        nonselection_line_alpha=1.0,
    )

    p.add_layout(
        Whisker(
            source=source, base="index", upper="upper", lower="lower", level="overlay"
        )
    )
    p.xaxis.major_label_orientation = "vertical"
    p.y_range.start = integral_df["Integral"].min() - integral_df["STD"].max() * 1.2
    p.y_range.end = integral_df["Integral"].max() + integral_df["STD"].max() * 1.2
    hover = HoverTool()
    hover.tooltips = [("Sample", "@index"), ("Value", "@Integral"), ("STD", "@STD")]
    hover.mode = "vline"
    p.add_tools(hover)
    show(p)
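
The per-sample integral above re-indexes the ppm trace from time onto cumulative sampled volume (1.2 appears to be a flow in L/min, so 1.2 / (60 * 1000) would be m^3 per second; that reading is an assumption) and then integrates concentration over volume; a minimal sketch with a synthetic trace:

import numpy as np
import pandas as pd
from scipy import integrate

# Synthetic CO2 trace (ppm), one sample per second for 150 s
ppm = pd.Series(400 + 50 * np.exp(-np.arange(150) / 30.0))

# Re-index time onto cumulative volume at an assumed constant flow
volume = ppm.index * 1.2 / (60 * 1000)
ppm.index = volume

# Smooth, then integrate concentration over volume (trapezoidal rule)
smoothed = ppm.ewm(span=5).mean()
print(integrate.trapz(y=smoothed.values, x=smoothed.index))
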
Ejemplo n.º 50
0
def _get_y(t, rho, gamma_eval):
    if t < 0:
        return 0
    else:
        return integrate.trapz(rho * np.exp(-t * gamma_eval), gamma_eval)
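
_get_y evaluates a numerical Laplace transform of the distribution rho over the rates gamma_eval; a usage sketch with a toy normalized rate distribution (all values hypothetical):

import numpy as np
from scipy import integrate

gamma_eval = np.linspace(0.01, 10.0, 500)   # decay-rate grid
rho = np.exp(-gamma_eval)                    # toy rate distribution
rho /= integrate.trapz(rho, gamma_eval)      # normalize to unit area

# y(t) = integral of rho(gamma) * exp(-t * gamma) dgamma, as in _get_y
for t in (0.0, 0.5, 2.0):
    print(t, integrate.trapz(rho * np.exp(-t * gamma_eval), gamma_eval))
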
Ejemplo n.º 51
0
def eval_fakefeat_GZSL(it, netG, dataset, param, result):
    gen_feat = np.zeros([0, param.X_dim])
    for i in range(dataset.train_cls_num):
        text_feat = np.tile(dataset.train_text_feature[i].astype('float32'),
                            (opt.nSample, 1))
        text_feat = Variable(torch.from_numpy(text_feat)).cuda()
        z = Variable(torch.randn(opt.nSample, param.z_dim)).cuda()
        G_sample, _ = netG(z, text_feat)
        gen_feat = np.vstack((gen_feat, G_sample.data.cpu().numpy()))

    for i in range(dataset.test_cls_num):
        text_feat = np.tile(dataset.test_text_feature[i].astype('float32'),
                            (opt.nSample, 1))
        text_feat = Variable(torch.from_numpy(text_feat)).cuda()
        z = Variable(torch.randn(opt.nSample, param.z_dim)).cuda()
        G_sample, _ = netG(z, text_feat)
        gen_feat = np.vstack((gen_feat, G_sample.data.cpu().numpy()))

    visual_pivots = [gen_feat[i * opt.nSample:(i + 1) * opt.nSample].mean(0) \
                     for i in range(dataset.train_cls_num + dataset.test_cls_num)]
    visual_pivots = np.vstack(visual_pivots)
    """collect points for gzsl curve"""

    acc_S_T_list, acc_U_T_list = list(), list()
    seen_sim = cosine_similarity(dataset.pfc_feat_data_train, visual_pivots)
    unseen_sim = cosine_similarity(dataset.pfc_feat_data_test, visual_pivots)
    for GZSL_lambda in np.arange(-2, 2, 0.01):
        tmp_seen_sim = copy.deepcopy(seen_sim)
        tmp_seen_sim[:, dataset.train_cls_num:] += GZSL_lambda
        pred_lbl = np.argmax(tmp_seen_sim, axis=1)
        acc_S_T_list.append(
            (pred_lbl == np.asarray(dataset.labels_train)).mean())

        tmp_unseen_sim = copy.deepcopy(unseen_sim)
        tmp_unseen_sim[:, dataset.train_cls_num:] += GZSL_lambda
        pred_lbl = np.argmax(tmp_unseen_sim, axis=1)
        acc_U_T_list.append((pred_lbl == (np.asarray(dataset.labels_test) +
                                          dataset.train_cls_num)).mean())

    auc_score = integrate.trapz(y=acc_S_T_list, x=acc_U_T_list)

    result.acc_list += [auc_score]
    result.iter_list += [it]
    result.save_model = False
    if auc_score > result.best_acc:
        result.best_acc = auc_score
        result.best_iter = it
        result.save_model = True

    log_text = "AUC Score is {:.4}".format(auc_score)
    print(log_text)

    exp_info = 'NAB_EASY' if opt.splitmode == 'easy' else 'NAB_HARD'
    exp_params = 'Eu{}_Rls{}_RWz{}'.format(opt.CENT_LAMBDA, opt.REG_W_LAMBDA,
                                           opt.REG_Wz_LAMBDA)

    out_subdir = 'out_' + str(opt.epsilon) + '/{:s}/{:s}'.format(
        exp_info, exp_params)
    log_dir = out_subdir + '/log_{:s}.txt'.format(exp_info)

    with open(log_dir, 'a') as f:
        f.write(log_text + '\n')
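
The GZSL AUC above is the area swept out by (unseen accuracy, seen accuracy) pairs as the calibration bias lambda varies; a minimal standalone sketch of that trapz-based score with toy accuracy curves:

import numpy as np
from scipy import integrate

# Toy seen/unseen accuracy trade-off as the bias lambda is swept
lam = np.linspace(-2.0, 2.0, 401)
acc_unseen = 1.0 / (1.0 + np.exp(-lam))  # rises as lambda favours unseen classes
acc_seen = 1.0 / (1.0 + np.exp(lam))     # falls correspondingly

# Area under the seen-vs-unseen trade-off curve
auc_score = integrate.trapz(y=acc_seen, x=acc_unseen)
print(auc_score)
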
Ejemplo n.º 52
0
def bremsstrahlung_thin_target(photon_energies, ele_dist_type='bkpl', ele_dist_params=None, efd=True):
    """
    Computes the thin-target bremsstrahlung x-ray/gamma-ray spectrum from an isotropic electron
    distribution function provided in `broken_powerlaw`. The units of the computed flux are photons
    per second per keV per square centimeter.

    The electron flux distribution function is a double power law in electron energy with a
    low-energy cutoff and a high-energy cutoff.

    Parameters
    ----------
    photon_energies : `numpy.array`
        Array of photon energies to evaluate flux at
    ele_dist_type: str
        name of the electron distribution function
        - "bkpl": broken powerlaw
        - "kappa": kappa distribution
        - "discrete": discrete distribution given by an array of electron energy "E_ele"
         and differential electron distribution "dn/dE"
    ele_dist_params: ele_dist_params of the specific distribution
        - for powerlaw 'pl', params = (p, eelow, eehigh) (see below)
        - for broken powerlaw 'bkpl', params = (p, q, eelow, eebrk, eehigh)
            p   : float
                Slope below the break energy
            q   : float
                Slope above the break energy
            eelow : float
                Low energy electron cut off
            eebrk : float
                Break energy
            eehigh : float
                High energy electron cut off
        - for kappa, params (not implemented yet)
        - for discrete, params  = (electron_energy, electron_dist)
            electron_energy: np.array of electron energies. Unit: keV
            electron_dist: np.array of differential electron distribution at the given electron energy.
    efd : `bool`
        True (default) - input electron distribution is electron flux density distribution
        (unit electrons cm^-2 s^-1 keV^-1),
        False - input electron distribution is electron density distribution.
        (unit electrons cm^-3 keV^-1),
        This input is not used in the main routine, but is passed to brm2_dmlin and Brm2_Fthin

    Returns
    -------
    flux: `numpy.array`
        Multiplying the output of Brm2_ThinTarget by a0 gives an array of
        photon fluxes in photons s^-1 keV^-1 cm^-2, corresponding to the photon energies in the
        input array eph. The detector is assumed to be 1 AU from the source. The coefficient a0 is
        calculated as a0 = nth * V * nnth, where nth: plasma density (cm^-3); V:
        volume of source (cm^3); nnth: Integrated nonthermal electron flux density (cm^-2 s^-1), if
        efd = True, or Integrated electron number density (cm^-3), if efd = False

    Notes
    -----
    If you want to plot the derivative of the flux, or the spectral index of the photon spectrum as
    a function of photon energy, you should set RERR to 1.d-6, because it is more sensitive to RERR
    than the flux.

    Adapted from SSW `Brm2_ThinTarget
    <https://hesperia.gsfc.nasa.gov/ssw/packages/xray/idl/brm2/brm2_thintarget.pro>`_
    """
    mc2 = const.get_constant('mc2')
    clight = const.get_constant('clight')
    au = const.get_constant('au')
    # Numerical coefficient for photon flux
    fcoeff = (clight / (4 * np.pi * au ** 2)) / mc2 ** 2.

    if ele_dist_type == 'bkpl':
        p = ele_dist_params[0]
        q = ele_dist_params[1]
        eelow = ele_dist_params[2]
        eebrk = ele_dist_params[3]
        eehigh = ele_dist_params[4]

        # Max number of points
        maxfcn = 2048
        # Average atomic number
        z = 1.2
        # Relative error
        rerr = 1e-4

        # Create arrays for the photon flux and error flags.
        flux = np.zeros_like(photon_energies, dtype=np.float64)
        iergq = np.zeros_like(photon_energies, dtype=np.float64)

        if eelow >= eehigh:
            raise ValueError('eehigh must be larger than eelow!')

        l, = np.where((photon_energies < eehigh) & (photon_energies > 0))
        if l.size > 0:
            flux[l], iergq[l] = split_and_integrate(model='thin-target',
                                                    photon_energies=photon_energies[l], maxfcn=maxfcn,
                                                    rerr=rerr, eelow=eelow, eebrk=eebrk, eehigh=eehigh,
                                                    p=p, q=q, z=z, efd=efd)

            flux *= fcoeff

            return flux
        else:
            raise Warning('The photon energies are higher than the highest electron energy or not '
                          'greater than zero')

    if ele_dist_type == 'discrete':
        electron_energy = ele_dist_params[0]
        electron_dist = ele_dist_params[1]
        flux = np.full_like(photon_energies, 0., dtype=np.float64)
        for i, eph in enumerate(photon_energies):
            # only electrons with energy above E_photon can contribute to the photon flux
            l, = np.where((electron_energy > eph))
            if l.size > 0:
                # calculate integrand
                gamma = (electron_energy[l] / mc2) + 1.0
                pc = np.sqrt(electron_energy[l] * (electron_energy[l] + 2.0 * mc2))
                brem_cross = bremsstrahlung_cross_section(electron_energy[l], eph)
                # calculate photon flux per electron energy bin
                if efd:
                    # if electron flux distribution is assumed (default)
                    flux_diff = electron_dist[l] * brem_cross * (mc2 / clight)
                else:
                    # if electron density distribution is assumed
                    flux_diff = electron_dist[l] * brem_cross * pc / gamma  # that is n_e * sigma * mc2 * (v / c)
                # now integrate the differential photon_flux with electron energy
                flux[i] = fcoeff * integrate.trapz(flux_diff, electron_energy[l])
        return flux
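
The 'discrete' branch integrates only over electrons with energy above each photon energy; a stripped-down sketch of that masking-plus-trapz pattern, with a toy stand-in for the module's bremsstrahlung_cross_section:

import numpy as np
from scipy import integrate

electron_energy = np.logspace(1, 3, 200)         # keV
electron_dist = electron_energy ** -3.0          # toy power-law distribution
photon_energies = np.array([15.0, 50.0, 150.0])  # keV

flux = np.zeros_like(photon_energies)
for i, eph in enumerate(photon_energies):
    l, = np.where(electron_energy > eph)         # only E_e > E_photon contributes
    if l.size > 0:
        toy_cross = 1.0 / (electron_energy[l] * eph)  # stand-in cross-section
        flux[i] = integrate.trapz(electron_dist[l] * toy_cross,
                                  electron_energy[l])
print(flux)
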
Ejemplo n.º 53
0
def psd(x,
        fs=1.0,
        window='hanning',
        nperseg=None,
        noverlap=None,
        nfft=None,
        detrend='constant',
        show=True,
        ax=None,
        scales='linear',
        xlim=None,
        units='V'):
    """Estimate power spectral density characteristics using Welch's method.

    This function is just a wrap of the scipy.signal.welch function with
    estimation of some frequency characteristcs and a plot. For completeness,
    most of the help from scipy.signal.welch function is pasted here.

    Welch's method [1]_ computes an estimate of the power spectral density
    by dividing the data into overlapping segments, computing a modified
    periodogram for each segment and averaging the periodograms.

    Parameters
    ----------
    x : array_like
        Time series of measurement values
    fs : float, optional
        Sampling frequency of the `x` time series in units of Hz. Defaults
        to 1.0.
    window : str or tuple or array_like, optional
        Desired window to use. See `get_window` for a list of windows and
        required parameters. If `window` is array_like it will be used
        directly as the window and its length will be used for nperseg.
        Defaults to 'hanning'.
    nperseg : int, optional
        Length of each segment.  Defaults to half of `x` length.
    noverlap: int, optional
        Number of points to overlap between segments. If None,
        ``noverlap = nperseg / 2``.  Defaults to None.
    nfft : int, optional
        Length of the FFT used, if a zero padded FFT is desired.  If None,
        the FFT length is `nperseg`. Defaults to None.
    detrend : str or function, optional
        Specifies how to detrend each segment. If `detrend` is a string,
        it is passed as the ``type`` argument to `detrend`. If it is a
        function, it takes a segment and returns a detrended segment.
        Defaults to 'constant'.
    show : bool, optional (default = False)
        True (1) plots data in a matplotlib figure.
        False (0) to not plot.
    ax : a matplotlib.axes.Axes instance (default = None)
    scales : str, optional
        Specifies the type of scale for the plot; default is 'linear' which
        makes a plot with linear scaling on both the x and y axis.
        Use 'semilogy' to plot with log scaling only on the y axis, 'semilogx'
        to plot with log scaling only on the x axis, and 'loglog' to plot with
        log scaling on both the x and y axis.
    xlim : float, optional
        Specifies the limit for the `x` axis; use as [xmin, xmax].
        The default is `None`, which sets xlim to [0, Nyquist frequency].
    units : str, optional
        Specifies the units of `x`; default is 'V'.

    Returns
    -------
    Fpcntile : 1D array
        frequency percentiles of the power spectral density
        For example, Fpcntile[50] gives the median power frequency in Hz.
    mpf : float
        Mean power frequency in Hz.
    fmax : float
        Maximum power frequency in Hz.
    Ptotal : float
        Total power in `units` squared.
    f : 1D array
        Array of sample frequencies in Hz.
    P : 1D array
        Power spectral density or power spectrum of x.

    See Also
    --------
    scipy.signal.welch

    Notes
    -----
    An appropriate amount of overlap will depend on the choice of window
    and on your requirements.  For the default 'hanning' window an
    overlap of 50% is a reasonable trade off between accurately estimating
    the signal power, while not over counting any of the data.  Narrower
    windows may require a larger overlap.
    If `noverlap` is 0, this method is equivalent to Bartlett's method [2]_.

    References
    ----------
    .. [1] P. Welch, "The use of the fast Fourier transform for the
           estimation of power spectra: A method based on time averaging
           over short, modified periodograms", IEEE Trans. Audio
           Electroacoust. vol. 15, pp. 70-73, 1967.
    .. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
           Biometrika, vol. 37, pp. 1-16, 1950.

    Examples (also from scipy.signal.welch)
    --------
    >>> import numpy as np
    >>> from psd import psd
    #Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
    # 0.001 V**2/Hz of white noise sampled at 10 kHz and calculate the PSD:
    >>> fs = 10e3
    >>> N = 1e5
    >>> amp = 2*np.sqrt(2)
    >>> freq = 1234.0
    >>> noise_power = 0.001 * fs / 2
    >>> time = np.arange(N) / fs
    >>> x = amp*np.sin(2*np.pi*freq*time)
    >>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
    >>> psd(x, fs=fs);
    """

    from scipy import signal, integrate

    if not nperseg:
        nperseg = int(np.ceil(len(x) / 2))
    f, P = signal.welch(x, fs, window, nperseg, noverlap, nfft, detrend)
    Area = integrate.cumtrapz(P, f, initial=0)
    Ptotal = Area[-1]
    mpf = integrate.trapz(f * P, f) / Ptotal  # mean power frequency
    fmax = f[np.argmax(P)]
    # frequency percentiles
    inds = [0]
    Area = 100 * Area / Ptotal  # + 10 * np.finfo(np.float).eps
    for i in range(1, 101):
        inds.append(np.argmax(Area[inds[-1]:] >= i) + inds[-1])
    fpcntile = f[inds]

    if show:
        _plot(x, fs, f, P, mpf, fmax, fpcntile, scales, xlim, units, ax)

    return fpcntile, mpf, fmax, Ptotal, f, P
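
The mean power frequency computed above is the PSD-weighted average frequency; a quick standalone check on a synthetic two-tone signal (assuming only numpy/scipy):

import numpy as np
from scipy import signal, integrate

fs = 1000.0
t = np.arange(0, 10, 1 / fs)
x = np.sin(2 * np.pi * 50 * t) + 0.5 * np.sin(2 * np.pi * 200 * t)

f, P = signal.welch(x, fs, nperseg=1024)
Ptotal = integrate.trapz(P, f)
mpf = integrate.trapz(f * P, f) / Ptotal  # power-weighted mean frequency
print(mpf)  # about 80 Hz: (50*0.5 + 200*0.125) / 0.625 for these amplitudes
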
Ejemplo n.º 54
0
def spike_train_timescale(binned_spiketrain, max_tau):
    r"""
    Calculates the auto-correlation time of a binned spike train; uses the
    definition of the auto-correlation time proposed in
    :cite:`correlation-Wieland2015_040901` (Eq. 6):

    .. math::
        \tau_\mathrm{corr} = \int_{-\tau_\mathrm{max}}^{\tau_\mathrm{max}}\
            \left[ \frac{\hat{C}(\tau)}{\hat{C}(0)} \right]^2 d\tau

    where :math:`\hat{C}(\tau) = C(\tau)-\nu\delta(\tau)` denotes
    the auto-correlation function excluding the Dirac delta at zero timelag.

    Parameters
    ----------
    binned_spiketrain : elephant.conversion.BinnedSpikeTrain
        A binned spike train containing the spike train to be evaluated.
    max_tau : pq.Quantity
        Maximal integration time :math:`\tau_{max}` of the auto-correlation
        function. It needs to be a multiple of the `bin_size` of
        `binned_spiketrain`.

    Returns
    -------
    timescale : pq.Quantity
        The auto-correlation time of the binned spiketrain with the same units
        as in the input. If `binned_spiketrain` has less than 2 spikes, a
        warning is raised and `np.nan` is returned.

    Notes
    -----
    * :math:`\tau_\mathrm{max}` is a critical parameter: numerical estimates
      of the auto-correlation functions are inherently noisy. Due to the
      square in the definition above, this noise is integrated. Thus, it is
      necessary to introduce a cutoff for the numerical integration - this
      cutoff should be neither smaller than the true auto-correlation time
      nor much bigger.
    * The bin size of `binned_spiketrain` is another critical parameter as it
      defines the discretization of the integral :math:`d\tau`. If it is too
      big, the numerical approximation of the integral is inaccurate.

    Examples
    --------
    >>> import neo
    >>> import numpy as np
    >>> import quantities as pq
    >>> from elephant.spike_train_correlation import spike_train_timescale
    >>> from elephant.conversion import BinnedSpikeTrain
    >>> spiketrain = neo.SpikeTrain([1, 5, 7, 8], units='ms', t_stop=10*pq.ms)
    >>> bst = BinnedSpikeTrain(spiketrain, bin_size=1 * pq.ms)
    >>> spike_train_timescale(bst, max_tau=5 * pq.ms)
    array(14.11111111) * ms

    """
    if binned_spiketrain.get_num_of_spikes() < 2:
        warnings.warn("Spike train contains less than 2 spikes! "
                      "np.nan will be returned.")
        return np.nan

    bin_size = binned_spiketrain._bin_size
    try:
        max_tau = max_tau.rescale(binned_spiketrain.units).item()
    except (AttributeError, ValueError):
        raise ValueError("max_tau needs units of time")

    # safe casting of max_tau/bin_size to integer
    max_tau_bins = int(round(max_tau / bin_size))
    if not np.isclose(max_tau, max_tau_bins * bin_size):
        raise ValueError("max_tau has to be a multiple of the bin_size")

    cch_window = [-max_tau_bins, max_tau_bins]
    corrfct, bin_ids = cross_correlation_histogram(
        binned_spiketrain,
        binned_spiketrain,
        window=cch_window,
        cross_correlation_coefficient=True)
    # Take only t > 0 values, in particular neglecting the delta peak.
    start_id = corrfct.time_index((bin_size / 2) * binned_spiketrain.units)
    corrfct = corrfct.magnitude.squeeze()[start_id:]

    # Calculate the timescale using trapezoidal integration
    integr = (corrfct / corrfct[0])**2
    timescale = 2 * integrate.trapz(integr, dx=bin_size)
    return pq.Quantity(timescale, units=binned_spiketrain.units, copy=False)
Ejemplo n.º 55
0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 20 13:40:00 2017

@author: ewen
"""

import numpy as np
import scipy.integrate as sci

def fct(x):
    return (np.cos(x)-np.sin(x))*np.exp(-x)

x = np.arange(0,3*np.pi/2,1e-3)

valeur = -np.exp(-3*np.pi/2)

print("\r\n       Integral calculations, mes 2")
print("                SCIPY.trapz")
print("_"*48+'\r\n')
print("Theoretical value:", valeur)
print("_"*48+'\r\n')
nValue = sci.trapz(fct(x),x)
print("Absolute error:", abs(nValue - valeur))
print("_"*48+'\r\n')
Ejemplo n.º 56
0
    os.remove(sepFileName)
    os.remove(sortedFileName)

# Evaluating AUC
statsList = [
]  # For each method: [AUC,AUC_10%,SENS_10%,SPEC_10%,AUC_M1,SENS_M1,SPEC_M1,AUC_M2,SENS_M2,SPEC_M2,...]
statsListPr = []  # For each method: [AUPR]
for i in range(0, len(dataXList)):

    # Initialization
    vec = []
    vecPr = []

    # Evaluating regular AUC
    vec.append(auc(dataXList[i], dataYList[i]))
    vecPr.append(abs(trapz(dataRcList[i], dataPrList[i])))

    # Evaluating 10% FPR AUC
    fprVecX = []
    fprVecY = []
    for j in range(0, len(dataXList[i])):
        if (dataXList[i][j] > fpr_auc): break
        fprVecX.append(dataXList[i][j])
        fprVecY.append(dataYList[i][j])
    fprVecX.append(fpr_auc)
    fprVecY.append(fprVecY[-1])
    vec.append(auc(standardize(fprVecX), fprVecY))
    vec.append(fprVecY[-1])
    vec.append(1.0 - fprVecX[-1])

    # Evaluating 1% FPR AUC
Ejemplo n.º 57
0
from __future__ import division, print_function  # python 2 to 3 compatibility
from numpy import linspace, copy
import matplotlib.pyplot as plt
from scipy.integrate import trapz

if __name__ == "__main__":

    # Create some test data to integrate over
    x = linspace(0, 10, 10)
    y = copy(x)

    fig, ax = plt.subplots()
    ax.plot(x, y)

    print(trapz(y, x))

    plt.show()
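
For piecewise-linear data the trapezoidal rule is exact, so the script prints exactly 50 (the area of the triangle under y = x on [0, 10]); a one-line check against the closed form:

from numpy import linspace, copy
from scipy.integrate import trapz

x = linspace(0, 10, 10)
assert abs(trapz(copy(x), x) - 10 ** 2 / 2) < 1e-12  # exact for linear y
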
Ejemplo n.º 58
0
def doConvolution(x_in, y_in, x_out, widths, factor=5, oversampling=1):
    '''
    Perform convolution on lists with a Gaussian filter.
    
    Reduce the input grid to the target grid by integration.
   
    @param x_in: The input x-values
    @type x_in: array
    @param y_in: The input y-values
    @type y_in: array
    @param x_out: The target x-grid
    @type x_out: array
    @param widths: The full width/half maximum spectral resolution as a 
                   function of wavelength, i.e. the fwhm of the gaussian
    @type widths: array
    
    @keyword factor: the sigma factor for determining the window pushed through
                     the gaussian filter. This avoids having to convolve the 
                     whole input grid, which takes a lot of time. Beyond 
                     sigma*factor the contribution of the y values is assumed 
                     to be negligible.
                     
                     (default: 5)
    @type factor: int
    @keyword oversampling: oversampling factor of the target x-grid with
                           respect to the given spectral resolution.
                           
                           (default: 1)
    @type oversampling: int
   
    @return: The resulting y-values
    @rtype: list
    
    '''

    x_in, y_in, x_out, widths = array(x_in), array(y_in), array(x_out), array(
        widths)
    y_out = []
    print('Convolving for x_out between %.2f micron and %.2f micron with oversampling %i.'
          % (x_out[0], x_out[-1], int(oversampling)))
    #- Convert FWHM's to sigma for the gaussians
    sigma = [fwhm / (2. * sqrt(2. * log(2.))) for fwhm in widths]
    #- Define the binsizes of the bins that will be integrated, i.e. the
    #- apparent resolution of x_out
    binsize = [w / oversampling for w in widths]
    for delta_bin, sigi, xi_out in zip(binsize, sigma, x_out):
        yi_in = y_in[abs(x_in - xi_out) <= factor * sigi]
        #- if not empty: continue, else add 0
        if list(yi_in) and set(yi_in) != set([0.0]):
            #- all relevant xi's for the bin around xi_out, ie in this bin the
            #- y-values will be integrated
            xi_in = x_in[abs(x_in - xi_out) <= delta_bin]
            #- The window for the convolution itself, outside this window the
            #- data are assumed to be negligible, ie for a gaussian
            window = x_in[abs(x_in - xi_out) <= factor * sigi]
            convolution = convolveArray(window, yi_in, sigi)
            #- if one value in the bin, out of the window selection: add value
            if len(list(convolution[abs(window - xi_out) <= delta_bin])) == 1:
                y_out.append(convolution[abs(window - xi_out) <= delta_bin][0])
                print('Convolution has a window of only one element at xi_out %f.' % xi_out)
            #- If more than one value: integrate
            elif list(convolution[abs(window - xi_out) <= delta_bin]):
                y_out.append(
                    trapz(y=convolution[abs(window - xi_out) <= delta_bin],
                          x=xi_in) / (xi_in[-1] - xi_in[0]))
            #- If no values in the bin from the window: add average of the window
            #- This should not occur ideally!
            else:
                print('Convolution has a window of no elements at x_out ' +
                      '%f. Careful! Average is taken of ' % (xi_out) +
                      'sigma*factor window! This should not be happening...')
                y_out.append(sum(convolution) / float(len(convolution)))
        else:
            y_out.append(0.0)
    return y_out
Ejemplo n.º 59
0
    def compute_relief_force_x57(inputs, y_vector, chord_vector, wing_mass, fuel_mass, point_mass=True):

        # Recuperating the data necessary for the computation
        if point_mass:
            eng_mass_vec = [6.8, 6.8, 6.8, 6.8, 6.8, 6.8, 53.1]
            tot_lg_mass = inputs["data:weight:airframe:landing_gear:main:mass"]
        else:
            eng_mass_vec = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
            tot_lg_mass = 0.0

        z_cg = inputs["data:weight:aircraft_empty:CG:z"]

        lg_height = inputs["data:geometry:landing_gear:height"]
        lg_type = inputs["data:geometry:landing_gear:type"]
        engine_config = inputs["data:geometry:propulsion:layout"]
        engine_count = inputs["data:geometry:propulsion:count"]
        nacelle_width = inputs["data:geometry:propulsion:nacelle:width"]
        semi_span = inputs["data:geometry:wing:span"] / 2.0
        y_ratio_vec = [0.1863354, 0.30310559, 0.42236025, 0.54161491, 0.66086957, 0.79710145, 0.99171843]

        g = 9.81

        single_lg_mass = tot_lg_mass / 2.0  # We assume 2 MLG

        # Before computing the continuous weight distribution we first take care of the point masses and modify the
        # y_vector accordingly

        # We create the array that will store the "point mass" which we chose to represent as distributed mass over a
        # small finite interval
        point_mass_array = np.zeros(len(y_vector))

        # Adding the motor weight
        if engine_config == 1.0:
            for i in range(len(y_ratio_vec)):
                y_ratio = y_ratio_vec[i]
                eng_mass = eng_mass_vec[i]
                y_eng = y_ratio * semi_span
                y_vector, chord_vector, point_mass_array = AerostructuralLoadX57.add_point_mass(
                    y_vector, chord_vector, point_mass_array, y_eng, eng_mass, inputs)

        # Computing and adding the lg weight
        # Overturn angle set as a fixed value, it is recommended to take over 25° and check that we can fit both LG in
        # the fuselage
        phi_ot = 35. * np.pi / 180.
        y_lg_1 = math.tan(phi_ot) * z_cg
        y_lg = max(y_lg_1, lg_height)

        y_vector, chord_vector, point_mass_array = AerostructuralLoadX57.add_point_mass(
            y_vector, chord_vector, point_mass_array, y_lg, single_lg_mass, inputs)

        # We can now choose what type of mass distribution we want for the mass and the fuel
        distribution_type = 0.0
        if distribution_type == 1.0:
            Y = y_vector / semi_span
            struct_weight_distribution = 4. / np.pi * np.sqrt(1. - Y ** 2.0)
        else:
            Y = y_vector / semi_span
            struct_weight_distribution = chord_vector / max(chord_vector)

        reajust_struct = trapz(struct_weight_distribution, y_vector)

        if distribution_type == 1.0:
            Y = y_vector / semi_span
            fuel_weight_distribution = 4. / np.pi * np.sqrt(1. - Y ** 2.0)
        else:
            Y = y_vector / semi_span
            fuel_weight_distribution = chord_vector / max(chord_vector)
            if lg_type == 1.0:
                for i in np.where(y_vector < y_lg):
                    # For now 80% size reduction in the fuel tank capacity due to the landing gear
                    fuel_weight_distribution[i] = fuel_weight_distribution[i] * 0.2
            if engine_config == 1.0:
                for i in np.where(abs(y_vector - y_eng) <= nacelle_width / 2.):
                    # For now 50% size reduction in the fuel tank capacity due to the engine
                    fuel_weight_distribution[i] = fuel_weight_distribution[i] * 0.5

        reajust_fuel = trapz(fuel_weight_distribution, y_vector)

        wing_mass_array = wing_mass * struct_weight_distribution / (2. * reajust_struct)
        fuel_mass_array = fuel_mass * fuel_weight_distribution / (2. * reajust_fuel)

        mass_array = wing_mass_array + fuel_mass_array + point_mass_array
        weight_array = - mass_array * g

        return y_vector, weight_array
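
compute_relief_force_x57 rescales a chord-proportional shape so that its integral over the half-span carries half of the total mass; a minimal standalone sketch of that trapz-based normalization (all values hypothetical):

import numpy as np
from scipy.integrate import trapz

semi_span = 5.0
y_vector = np.linspace(0.0, semi_span, 50)
chord_vector = 1.5 - 0.1 * y_vector        # toy tapered chord law
wing_mass = 120.0                          # kg, both half-wings together

# Shape of the distribution, then rescale so the half-span integral
# carries half of the total wing mass (as in compute_relief_force_x57)
struct_weight_distribution = chord_vector / max(chord_vector)
reajust_struct = trapz(struct_weight_distribution, y_vector)
wing_mass_array = wing_mass * struct_weight_distribution / (2.0 * reajust_struct)

print(trapz(wing_mass_array, y_vector))    # ~ wing_mass / 2
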
Ejemplo n.º 60
0
import numpy as np
from netCDF4 import Dataset
from scipy import integrate, interpolate

hours = ["{0:02d}".format(h) for h in range(0, 24, 3)]
base_path = '/nerc/n02/n02/xb899100/CloudTrail/Control/'
mv_key = u'STASH_m01s00i391'
rho_key = u'STASH_m01s00i389'
for hour in hours:
    mr_nc = Dataset(base_path + 'mr_' + hour + '.nc', 'r')
    fluxes_nc = Dataset(base_path + 'fluxes_' + hour + '.nc', 'r')
    if hour == '00':
        z_theta = mr_nc.variables['thlev_zsea_theta'][:]
        z_rho = fluxes_nc.variables['rholev_zsea_rho'][:]
        CIWV = integrate.trapz(
            y=mr_nc.variables[mv_key][:] *
            interpolate.interp1d(x=z_rho,
                                 y=fluxes_nc.variables[rho_key][:],
                                 fill_value='extrapolate',
                                 axis=1)(z_theta),
            x=z_theta,
            axis=1)
        times = mr_nc.variables['min10_0'][:]
    else:
        CIWV = np.concatenate(
            (CIWV,
             integrate.trapz(
                 y=mr_nc.variables[mv_key][:] *
                 interpolate.interp1d(x=z_rho,
                                      y=fluxes_nc.variables[rho_key][:],
                                      fill_value='extrapolate',
                                      axis=1)(z_theta),
                 x=z_theta,
                 axis=1)),