Example #1
 def __init__(self, rv, pdf, w=None, bbox=[None, None], k=1):
     """Constructor.
     """
     self.__rv = rv
     self.__pdf = pdf
     InterpolatedUnivariateSpline.__init__(self, rv, pdf, w, bbox, k)
     self.ppf = self.build_ppf()
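This wrapper builds a ppf from a sampled pdf; build_ppf itself is not shown, so the cdf inversion below is only an assumption about what it does. A minimal standalone sketch with hypothetical data:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

x = np.linspace(0.0, 10.0, 201)
pdf = np.exp(-x)                                  # hypothetical (unnormalized) pdf samples
spl = InterpolatedUnivariateSpline(x, pdf)
cdf_spline = spl.antiderivative()                 # cumulative integral of the pdf
F = cdf_spline(x) - cdf_spline(x[0])              # shift so the cdf starts at 0
cdf = F / F[-1]                                   # normalize so the cdf ends at 1
ppf = InterpolatedUnivariateSpline(cdf, x, k=1)   # invert: x as a function of probability
print(ppf(0.5))                                   # median; about ln(2) ~ 0.693 here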
Example #2
    def ReIter2(self, FT):
        [a, b] = ipVar.poptDir(self.RePath)
        z = np.where(np.diff(np.sign(a)))[0]  # indices where the growth rate changes sign
        r1 = b[z[0]]
        r2 = b[z[0] + 1]
        while abs(r1 - r2) > 11:
            [a, b] = ipVar.poptDir(self.RePath)
            ReCr = funcs.calcReC(a, b)
            self.ReList.append(ReCr + 5)
            self.ReList.append(ReCr - 5)
            self.CritRe(FT)
            [a, b] = ipVar.poptDir(self.RePath)
            ReCr = funcs.calcReC(a, b)
            z = np.where(np.diff(np.sign(a)))[0]
            r1 = b[z[0]]
            r2 = b[z[0] + 1]

        if abs(r1 - r2) < 11:
            [a, b] = ipVar.poptDir(self.RePath)
            ReCr = funcs.calcReC(a, b)
            self.ReList.append(ReCr)
            self.CritRe(FT)

        [a, b] = ipVar.poptDir(self.RePath)
        if len(a) <= 3:
            raise RuntimeError('need more than 3 points for a cubic spline fit')
        func = InterpolatedUnivariateSpline(b, a, k=3)
        ReC = func.roots()
        return ReC
Example #3
def findInflection(x, y, threshold=0.9):
    """
    Fits y = m*x to Zimm data before onset of Taylor instability.
    Returns m.
    Last updated by Kazem Edmond on Feb. 26, 2013.
    
    Inflection is found by identifying sudden change in slope.  Slope
    between each interval is calculated using a spline fit.  Data is 
    cleaned by removing outliers using a simple "threshold", where difference
    in between adjacent x is within 5 times of first interval.
    The threshold parameter defines size of inflection to look for.
    
    """
    # Clean data by removing outliers to help spline fit
    wout = np.where( np.abs(x[0:-1] - x[1:]) > 5*np.abs(x[0]-x[1]) )
    wout2 = wout[0][range(1, np.size(wout), 2)]    # Note: only use odd indices from np.where()
    
    # If necessary, new arrays for cleaned data:
    if np.size(wout2) > 0:
        xcln = np.delete(x, wout2)
        ycln = np.delete(y, wout2)
    else:
        xcln = x
        ycln = y
    
    # Fit a spline to cleaned data, giving slopes over intervals:
    spl = InterpolatedUnivariateSpline(xcln, ycln)
    
    # Store slopes in new array:
    nvals = int(np.floor(max(xcln)))
    spVals = np.zeros([nvals, 4])
    
    for i in range(1, nvals, 1): spVals[i] = spl.derivatives(i)
    
    # Find range over which it's Newtonian.
    # Only consider data that is within threshold of initial slope.
    wlin = np.where(spVals[1:, 1] > threshold * spVals[1, 1])
    
    wid = np.where(np.subtract(wlin, range(0, np.size(wlin)))[0] > 0)

    # Check if all of the data is Newtonian:    
    if np.size(wid) > 0:
        idd = int(wid[0])
        xlin = xcln[0:idd]
        ylin = ycln[0:idd]
    else:
        xlin = xcln[wlin]
        ylin = ycln[wlin]
    
    # 2.27.2013: Alternative to spline fitting
    # Remove overall trend of data, resulting peak is inflection point.
    # Requires that inflection peak actually exists
    # m, _, _, _ = np.linalg.lstsq(x[:, np.newaxis], y)
    # xp = x[yp == np.max(y - m * (x - np.mean(x)))]
    # mf, _, _, _ = np.linalg.lstsq(x[x<xp][:, np.newaxis], y[x<xp])
    
    # Least squares linear fit to get the slope:
    m, _, _, _ = np.linalg.lstsq(xlin[:, np.newaxis], ylin)
    
    return m
Example #4
 def build_splines(self, scale_factor):
     lM_min, lM_max = self.lM_bounds
     M_space = np.logspace(lM_min - 1, lM_max + 1, 500, base=10)
     sigmaM = np.array([cc.sigmaMtophat_exact(M, scale_factor) for M in M_space])
     ln_sig_inv_spline = IUS(M_space, -np.log(sigmaM))
     deriv_spline = ln_sig_inv_spline.derivative()
     self.deriv_spline = deriv_spline
     self.splines_built = True
     return
Example #5
 def __init__(self, x, y, kind, bounds_error=False, fill_value=numpy.nan, copy=True):
   if copy:
     self.x = x.copy()
     self.y = y.copy()
   else:
     self.x = x
     self.y = y
   InterpolatedUnivariateSpline.__init__(self, self.x, self.y, k=kind)
   self.xmin = self.x[0]
   self.xmax = self.x[-1]
   self.fill_value = fill_value
   self.bounds_error = bounds_error
 def build_splines(self):
     """Build the splines needed for integrals over mass bins.
     """
     lM_min,lM_max = self.l10M_bounds
     M_domain = np.logspace(lM_min-1, lM_max+1, num=1000)
     sigmaM = np.array([cc.sigmaMtophat(M, self.scale_factor) 
                        for M in M_domain])
     self.sigmaM_spline = IUS(M_domain, sigmaM)
     ln_sig_inv_spline = IUS(M_domain, -np.log(sigmaM))
     deriv_spline = ln_sig_inv_spline.derivative()
     self.deriv_spline = deriv_spline
     return
Example #7
def crossings(series, value):
    """Find the labels where the series passes through value.

    The labels in series must be increasing numerical values.

    series: Series
    value: number

    returns: sequence of labels
    """
    interp = InterpolatedUnivariateSpline(series.index, series-value)
    return interp.roots()
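crossings assumes a pandas-style Series whose index holds the x values; a quick usage sketch (pandas is an assumption here, it is not imported in this snippet):

import numpy as np
import pandas as pd

t = np.linspace(0.0, 10.0, 101)
wave = pd.Series(np.sin(t), index=t)
print(crossings(wave, 0.5))   # the t values where sin(t) passes through 0.5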
Example #8
def get_Vmixfn(tarr,th,Dt,getVmixdot=False,E51=1.,Vmax=None):
    sigEfn = get_sigEfn(tarr,th,E51=E51)
    Vmixarr = Vmix_base(tarr,Dt,sigEfn)
    if Vmax is not None: Vmixarr[Vmixarr > Vmax] = Vmax
    Vmixfn = interp1d(tarr,Vmixarr)
    if getVmixdot:
        Vmixfnspline = InterpolatedUnivariateSpline(tarr,Vmixarr)
        Vmixdotspline = Vmixfnspline.derivative()
        Vmixdotarr = Vmixdotspline(tarr)
        Vmixdotfn = interp1d(tarr,Vmixdotarr)
        return Vmixfn,Vmixdotfn
    else:
        return Vmixfn
Example #9
def get_Vmixarr(tarr,th,Dt,getVmixdot=False,E51=1.,Vmax=None):
    sigEfn = get_sigEfn(tarr,th,E51=E51)
    Vmixarr = Vmix_base(tarr,Dt,sigEfn)
    if Vmax is not None: Vmixarr[Vmixarr > Vmax] = Vmax
    if getVmixdot:
        Vmixfnspline = InterpolatedUnivariateSpline(tarr,Vmixarr)
        Vmixdotspline = Vmixfnspline.derivative()
        Vmixdotarr = Vmixdotspline(tarr)
        #dt = tarr[1]-tarr[0]
        #Vmixdotarr = (Vmixarr[1:]-Vmixarr[:-1])/dt
        #Vmixdotarr = np.concatenate((Vmixdotarr,[0]))
        return Vmixarr,Vmixdotarr
    else:
        return Vmixarr
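The spline-derivative trick above (the commented-out lines show the finite-difference alternative it replaced) is easy to verify in isolation; a minimal check against a known derivative:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

t = np.linspace(0.0, 5.0, 100)
v = t ** 2
vdot = InterpolatedUnivariateSpline(t, v).derivative()(t)
print(np.max(np.abs(vdot - 2.0 * t)))   # near zero: the spline derivative recovers d(t^2)/dt = 2t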
Example #10
 def __init__(self, csv_file, vin='Vin', vout='Vout'):
     self.x_values = []
     self.y_values = []
     self.traces = {}
     self.annotations = []
     
     self.csvfile = csv_file
     try:
         f = open(csv_file, 'r')
         self.spamreader = csv.DictReader(f, delimiter=' ')
     except TypeError:
         print("THERE WAS AN ERROR!")
         raise
     for row in self.spamreader:
         self.x_values.append(float(row[vin]))
         self.y_values.append(float(row[vout]))
     f.close()
     
     # getting a spline's derivative is much more accurate for finding
     # the VTC's characteristic parameters
     self.spl = InterpolatedUnivariateSpline(self.x_values, self.y_values)
     self.spline_1drv = self.spl.derivative()
     self.get_characteristic_parameters()
     
     self.traces['VTC'] = {
             'x': self.x_values, 
             'y': self.y_values, 
             'label': 'Voltage Transfer Characteristic',
             'class': 'main',
     }
Example #11
def CalculateHWHM(GF_A):
	''' calculates the half-width at half-maximum of the Kondo resonance 
	and the maximum of the spectral function '''
	N = len(En_A)
	IntMin = int((N+1)/2-int(0.5/dE))
	IntMax = int((N+1)/2+int(0.5/dE))
	DOSmaxPos = sp.argmax(-sp.imag(GF_A[IntMin:IntMax])/sp.pi)
	DOSmax    = -sp.imag(GF_A[IntMin+DOSmaxPos])/sp.pi # maximum of DoS
	wmax      = En_A[IntMin+DOSmaxPos]                 # position of the maximum at energy axis
	DOS = InterpolatedUnivariateSpline(En_A-1e-12,-sp.imag(GF_A)/sp.pi-DOSmax/2.0) 
	## 1e-12 breaks symmetry for half-filling, otherway DOS.roots() loses one solution.
	DOSroots_A = sp.sort(sp.fabs(DOS.roots()))
	try:
		HWHM = (DOSroots_A[0] + DOSroots_A[1])/2.0
	except IndexError:
		HWHM = 0.0
	return [HWHM,DOSmax,wmax]
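The HWHM search reduces to finding the roots of DOS - DOSmax/2. The pattern can be tested on a Lorentzian, whose half-width is known analytically (made-up grid, not the module's En_A):

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

w = np.linspace(-5.0, 5.0, 2001)
dos = 1.0 / (1.0 + w ** 2)                          # Lorentzian with HWHM = 1
half = InterpolatedUnivariateSpline(w, dos - dos.max() / 2.0)
print(np.sort(np.fabs(half.roots())))               # both roots close to 1.0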
Example #12
 def make_dndlM_spline(self):
     """Creates a spline for dndlM so that the integrals
     over mass bins are faster
     """
     bounds = np.log(10**self.l10M_bounds)
     lM = np.linspace(bounds[0], bounds[1], num=100)
     dndlM = np.array([self.dndlM(lMi) for lMi in lM])
     self.dndlM_spline = IUS(lM, dndlM)
     return lM, dndlM
Example #13
 def __call__(self, x):
   x = numpy.asarray(x)
   shape = x.shape
   x = x.ravel()
   bad = (x > self.xmax) | (x < self.xmin)
   if self.bounds_error and numpy.any(bad):
     raise ValueError("some values are out of bounds")
   y = InterpolatedUnivariateSpline.__call__(self, x)
   y[bad] = self.fill_value
   return y.reshape(shape)
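InterpolatedUnivariateSpline extrapolates silently outside the data range, which is exactly why this __call__ masks out-of-bounds points; a standalone demonstration of the same masking:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

x = np.linspace(0.0, 10.0, 11)
spl = InterpolatedUnivariateSpline(x, x ** 2)
q = np.array([-1.0, 5.0, 12.0])
y = spl(q)                              # the spline happily extrapolates at -1 and 12
y[(q < x[0]) | (q > x[-1])] = np.nan    # replicate the fill_value masking above
print(y)                                # [nan, 25., nan]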
Example #14
 def __init__(
         self,   # Spline_eos instance
         P,      # Pressure function (usually the nominal eos)
         N=magic.spline_N,
         v_min=magic.spline_min,
         v_max=magic.spline_max,
         uncertainty=magic.spline_uncertainty,
         precondition=False,
         comment='',
         ):
     v = np.logspace(np.log10(v_min), np.log10(v_max), N)
     IU_Spline.__init__(self,v,P(v))
     self.prior_mean = self.get_c().copy()
     dev = self.prior_mean*uncertainty
     self.prior_var_inv = np.diag(1.0/(dev*dev))
     self.precondition = precondition
     if precondition:
         self.U_inv = np.diag(dev)
     return
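get_c() belongs to the author's IU_Spline subclass and is not shown; it presumably wraps scipy's get_coeffs(), which returns the B-spline coefficients that the prior above is placed on. A sketch of that assumption:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

v = np.logspace(0.0, 1.0, 50)
spl = InterpolatedUnivariateSpline(v, 1.0 / v ** 3)   # a stand-in pressure curve
c = spl.get_coeffs()                                  # coefficients a Gaussian prior could constrain
print(c.shape)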
Example #15
    def ReIter(self, FT):
        [a, b] = ipVar.poptDir(self.RePath)
        while all(i > 0 for i in a):
            self.ReList = []
            ReNew = float(b[0]) - 0.2 * float(b[0])
            self.ReList.append(ReNew)
            self.CritRe(FT)
            [a, b] = ipVar.poptDir(self.RePath)

        while all(i < 0 for i in a):
            self.ReList = []
            ReNew = float(b[-1]) + 0.3 * float(b[-1])
            self.ReList.append(ReNew)
            self.CritRe(FT)
            [a, b] = ipVar.poptDir(self.RePath)
        [a, b] = ipVar.poptDir(self.RePath)
        z = np.where(np.diff(np.sign(a)))[0]
        i = z[0]
        r1 = b[i]
        r2 = b[i + 1]
        print("bracketing values:", r1, r2)
        temp = [0]
        ReCr = funcs.calcReC(a, b)
        temp.append(ReCr)
        while abs(temp[-1] - temp[-2]) > 1:
            self.ReList = []
            ReCr = funcs.calcReC(a, b)
            self.ReList.append(ReCr + 0.1 * abs(r1 - r2))
            self.ReList.append(ReCr - 0.1 * abs(r1 - r2))
            self.CritRe(FT)
            print("bracketing values:", r1, r2)
            ReCr = funcs.calcReC(a, b)
            temp.append(ReCr)
            [a, b] = ipVar.poptDir(self.RePath)
            z = np.where(np.diff(np.sign(a)))[0]
            r1 = b[z[0]]
            r2 = b[z[0] + 1]

        [a, b] = ipVar.poptDir(self.RePath)
        if len(a) <= 3:
            raise RuntimeError('need more than 3 points for a cubic spline fit')
        func = InterpolatedUnivariateSpline(b, a, k=3)
        ReC = func.roots()
        return ReC
Example #16
 def ReIter(self):
     [a, b] = ipVar.poptDir(self.RePath)
     z = np.where(np.diff(np.sign(a)))[0]
     i = z[0]
     r1 = b[i]
     r2 = b[i + 1]
     print("bracketing values:", r1, r2)
     while abs(r1 - r2) > 20:
         self.ReList = []
         ReCr = funcs.calcReC(a, b)
         self.ReList.append(ReCr + 0.3 * abs(r1 - r2))
         self.ReList.append(ReCr - 0.3 * abs(r1 - r2))
         self.CritRe()
         print("bracketing values:", r1, r2)
         [a, b] = ipVar.poptDir(self.RePath)
         z = np.where(np.diff(np.sign(a)))[0]
         r1 = b[z[0]]
         r2 = b[z[0] + 1]
     [a, b] = ipVar.poptDir(self.RePath)
     if len(a) <= 3:
         raise RuntimeError('need more than 3 points for a cubic spline fit')
     func = InterpolatedUnivariateSpline(b, a, k=3)
     ReC = func.roots()
     return ReC
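Stripped of the bracketing loop, the last step is the reusable piece: fit the growth rate a against Reynolds number b with a cubic spline and read the critical Re off its zero crossing. A self-contained sketch with invented numbers:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

b = np.array([100.0, 150.0, 200.0, 250.0, 300.0])   # hypothetical Re samples
a = np.array([-2.0, -0.8, 0.3, 1.5, 2.9])           # growth rates changing sign
func = InterpolatedUnivariateSpline(b, a, k=3)
print(func.roots())                                  # the critical Re, between 150 and 200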
Example #17
    def _initialize_splines(self):
        self._nu_array = numpy.zeros_like(self._ln_mass_array)

        for idx in range(self._ln_mass_array.size):
            mass = numpy.exp(self._ln_mass_array[idx])
            self._nu_array[idx] = self.cosmo.nu_m(mass)

        self.nu_min = 1.001*self._nu_array[0]
        self.nu_max = 0.999*self._nu_array[-1]

        #print "nu_min:",self.nu_min,"nu_max:",self.nu_max

        self._nu_spline = InterpolatedUnivariateSpline(
            self._ln_mass_array, self._nu_array)
        self._ln_mass_spline = InterpolatedUnivariateSpline(
            self._nu_array, self._ln_mass_array)

        # Set M_star, the mass for which nu == 1
        self.m_star = self.mass(1.0)
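Building a spline and its inverse, as done above for nu(ln M) and ln M(nu), only works because the relation is monotonic: the x array handed to InterpolatedUnivariateSpline must be strictly increasing. A minimal sketch of such a forward/inverse pair:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

ln_m = np.linspace(20.0, 35.0, 100)
nu = np.exp(0.2 * (ln_m - 30.0))                     # monotone stand-in for nu_m(M)
nu_of_lnm = InterpolatedUnivariateSpline(ln_m, nu)
lnm_of_nu = InterpolatedUnivariateSpline(nu, ln_m)   # the inverse mapping
print(lnm_of_nu(1.0))                                # ~30.0, the M_star analogue where nu == 1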
Example #18
 def classic_integration(self):
     k_order = 10
     # Define number of points for integration
     npoints = 2 ** k_order + 1
     self.R = np.zeros(self.bands.shape[0])
     self.Ia = np.zeros_like(self.R)
     self.Im = np.zeros_like(self.R)
     for i, w in enumerate(self.bands):
         if (w[0] - self.dw < self.wave[0]) or \
            (w[-1] + self.dw > self.wave[-1]):
             self.R[i] = np.nan
             continue
         # Defining indices for each section
         idxb = np.where(((self.wave > w[0] - 2 * self.dw) &
                              (self.wave < w[1] + 2 * self.dw)))
         idxr = np.where(((self.wave > w[4] - 2 * self.dw) &
                             (self.wave < w[5] + 2 * self.dw)))
         idxcen = np.where(((self.wave > w[2] - 2 * self.dw) &
                             (self.wave < w[3] + 2 * self.dw)))
         # Defining wavelength samples
         wb = self.wave[idxb]
         wr = self.wave[idxr]
         wcen = self.wave[idxcen]
         # Defining intensity samples
         fb = self.galaxy[idxb]
         fr = self.galaxy[idxr]
         fcen = self.galaxy[idxcen]
         # Making interpolation functions
         sb = InterpolatedUnivariateSpline(wb, fb)
         sr = InterpolatedUnivariateSpline(wr, fr)
         # Make oversampled arrays of wavelengths
         xb = np.linspace(w[0], w[1], npoints)
         xr = np.linspace(w[4], w[5], npoints)
         xcen = np.linspace(w[2], w[3], npoints)
         # Calculating the mean fluxes for the pseudocontinuum
         fp1 = sb.integral(w[0], w[1]) / (w[1] - w[0])
         fp2 = sr.integral(w[4], w[5]) / (w[5] - w[4])
         # Making pseudocontinuum vector
         x0 = (w[2] + w[3])/2.
         x1 = (w[0] + w[1])/2.
         x2 = (w[4] + w[5])/2.
         fc = fp1 + (fp2 - fp1)/ (x2 - x1) * (wcen - x1)
         # Calculating indices
         ffc = InterpolatedUnivariateSpline(wcen, fcen/fc/(w[3]-w[2]))
         self.R[i] =  ffc.integral(w[2], w[3])
         self.Ia[i] = (1 - self.R[i]) * (w[3]-w[2])
         self.Im[i] = -2.5 * np.log10(self.R[i])
     self.classic = np.copy(self.Ia)
     idx = np.array([2,3,14,15,23,24])
     self.classic[idx] = self.Im[idx]
     return
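The pseudocontinuum means above rest on UnivariateSpline.integral, which integrates the fitted piecewise polynomial exactly between two abscissae; a small sketch of that call on synthetic flux:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

wave = np.linspace(4800.0, 4900.0, 200)
flux = 1.0 + 0.01 * (wave - 4850.0)                     # synthetic linear continuum
s = InterpolatedUnivariateSpline(wave, flux)
print(s.integral(4820.0, 4880.0) / (4880.0 - 4820.0))   # 1.0: band-averaged flux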
Example #19
def FindABS(Det_A):
	"""	determines ABS energies as zeroes of GF determinant """
	DetG = InterpolatedUnivariateSpline(En_A[EdgePos1+1:EdgePos2],sp.real(Det_A[:]))
	RootsG_A = DetG.roots()
	NABS = len(RootsG_A)
	ABSpos_A = sp.zeros(2)
	Diff_A = sp.zeros(2)
	if NABS == 0:	
		## assumes ABS states too close to gap edges
		## this also happens when using brentq to calculate densities and
		## it starts from wrong initial guess
		print("# - Warning: FindABS: no ABS found: Probably too close to band edges.")
		ABS_A = sp.array([-Delta+2.0*dE,Delta-2.0*dE])
		ABSpos_A = sp.array([EdgePos1+1,EdgePos2-1])
		Diff_A = sp.array([DetG.derivatives(ABS_A[0])[1],DetG.derivatives(ABS_A[1])[1]])
	elif NABS == 1: 
		## ABS too close to each other?
		print("# - Warning: FindABS: only one ABS found: {0: .6e}".format(RootsG_A[0]))
		print("# -          Assuming they are too close to Fermi energy.")
		print("# -          Using mirroring to get the other ABS, please check the result.")
		ABS_A = [-sp.fabs(RootsG_A[0]),sp.fabs(RootsG_A[0])]
		for i in range(2):
			ABSpos_A[i] = FindInEnergies(ABS_A[i],En_A)
			Diff_A[i] = DetG.derivatives(ABS_A[i])[1]
	elif NABS == 2:	
		## two ABS states, ideal case
		ABS_A = sp.copy(RootsG_A)
		for i in range(2):
			ABSpos_A[i] = FindInEnergies(RootsG_A[i],En_A)
			Diff_A[i] = DetG.derivatives(RootsG_A[i])[1]
	else:
		print("# - Error: FindABS: Too many zeroes of the determinant.")
		exit()
	if sp.fabs(ABS_A[0]+ABS_A[1]) > 1e-6:
		print("# - Warning: FindABS: positive and negative ABS energies don't match, diff = {0: .6e}"\
		.format(sp.fabs(ABS_A[0]+ABS_A[1])))
	if sp.fabs(ABS_A[0])<dE:
		## ABS energy smaller than the energy resolution
		print("# - Warning: FindABS: ABS energies smaller than energy resolution")
		print("# -          We put the poles to lowest possible energies.")
		ABSpos_A = [Nhalf-1,Nhalf+1]
	return [ABS_A,Diff_A,ABSpos_A]
Example #20
print(r_array)

### Use same array values calculated using Hartree-Fock
E_array = [
    -107.141, -110.874, -112.204, -112.622, -112.699, -112.653, -112.570,
    -112.484, -112.410, -112.361, -112.333, -112.315, -112.304, -112.069,
    -112.291, -112.287, -112.001, -111.948, -111.927, -112.280
]

#plt.plot(r_array, E_array, 'red')
plt.show()

### use cubic spline interpolation
order = 3
### form the interpolator object for the data
sE = InterpolatedUnivariateSpline(r_array, E_array, k=order)
### form a much finer grid
r_fine = np.linspace(1.06, 5.0, 200)
### compute the interpolated/extrapolated values for E on this grid
E_fine = sE(r_fine)
### plot the interpolated data
#plt.plot(r_fine, E_fine, 'blue')
plt.show()

### minimum energy
minE = min(E_fine)

### take the derivative of potential
fE = sE.derivative()
### force is the negative of the derivative
F_fine = -1 * fE(r_fine)
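With the derivative spline in hand, the equilibrium separation can be found directly as a zero of the force rather than by scanning E_fine for its minimum. roots() is only implemented for cubic splines, and sE.derivative() has degree k-1 = 2, so a sketch refits the sampled derivative at k=3:

### refit the sampled derivative as a cubic so that roots() is available
fE_spline = InterpolatedUnivariateSpline(r_fine, fE(r_fine), k=3)
print(fE_spline.roots())   ### stationary points of E; the equilibrium bond length is among them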
Example #21
    def create_pdfs(self):
        #creates large samples of MC signal and background events to determine pdfs, assuming an E^-2 spectrum for signal and E^-3.7 for background
        sig_t = gen(100000, 2, 0)
        bkg_t = gen(100000, 3.7, 0)
        sig_c = gen(100000, 2, 1)
        bkg_c = gen(100000, 3.7, 1)
        #for calculating topology ratio pdfs
        tracks_mc = np.load("./mcdata/tracks_mc.npy")
        cascs_mc = np.load("./mcdata/cascade_mc.npy")

        #makes sure any possible energy value falls within the range of interpolation
        E_x = np.linspace(
            min(sig_t['logE'].min(), sig_c['logE'].min(), bkg_t['logE'].min(),
                bkg_c['logE'].min()),
            max(sig_t['logE'].max(), sig_c['logE'].max(), bkg_t['logE'].max(),
                bkg_c['logE'].max()), 1000)
        sindec_x = np.linspace(-1, 1, 1000)
        gamma_x = np.arange(1, 5, .1)

        #Interpolation is MUCH faster to call than the scipy kde function, so we interpolate over all kde fits for efficiency
        Bt = InterpolatedUnivariateSpline(
            sindec_x, (gaussian_kde(bkg_t['sinDec'])(sindec_x)), k=3)
        Bc = InterpolatedUnivariateSpline(
            sindec_x, (gaussian_kde(bkg_c['sinDec'])(sindec_x)), k=3)

        #Without any topology changes, we set the pdfs to all come from track events as you would in a normal PS search
        B = Bt
        print("Background spatial splines created")

        #signal and background energy pdfs
        #Signal track energy pdf
        Est = InterpolatedUnivariateSpline(E_x,
                                           (gaussian_kde(sig_t['logE'])(E_x)),
                                           k=3)
        #Background track energy pdf
        Ebt = InterpolatedUnivariateSpline(E_x,
                                           (gaussian_kde(bkg_t['logE'])(E_x)),
                                           k=3)

        #Signal cascade energy pdf
        Esc = InterpolatedUnivariateSpline(E_x,
                                           (gaussian_kde(sig_c['logE'])(E_x)),
                                           k=3)
        #Background cascade energy pdf
        Ebc = InterpolatedUnivariateSpline(E_x,
                                           (gaussian_kde(bkg_c['logE'])(E_x)),
                                           k=3)

        #Everything treated as a track for LLH
        #Energy signal pdf- E^-2 spectrum fixed for now-- eventually this has to be 3D (gamma, E, declination)
        Es = Est
        #Energy background pdf- E^-3.7 spectrum
        Eb = Ebt
        print("Energy splines created")

        percs = []

        #calculates the source topology ratio for every spectral index between 1 and 5
        for g in gamma_x:
            wc = np.power(cascs_mc['trueE'], -g) * cascs_mc['ow']
            wt = np.power(tracks_mc['trueE'], -g) * tracks_mc['ow']
            perc_casc = np.sum(wc) / (np.sum(wt) + np.sum(wc))
            percs.append(perc_casc)
        #*cascade* source percent contribution
        #(Use 1-Tau for signal track contribution)
        Tau = InterpolatedUnivariateSpline(gamma_x, percs, k=3)

        print("Topology contribution spline created")

        #fills in tester with splines for energy, background spatial term, and topology for both split topology and non split topology searches
        self.args['Bt'] = Bt
        self.args['Bc'] = Bc
        self.args['Est'] = Est
        self.args['Ebt'] = Ebt
        self.args['Esc'] = Esc
        self.args['Ebc'] = Ebc
        self.args['B'] = B
        self.args['Es'] = Es
        self.args['Eb'] = Eb
        self.args['Tau'] = Tau
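The kde-to-spline pattern used throughout (evaluate gaussian_kde once on a fixed grid, then call the much cheaper spline) is easy to verify in isolation; a minimal sketch with synthetic data:

import numpy as np
from scipy.stats import gaussian_kde
from scipy.interpolate import InterpolatedUnivariateSpline

samples = np.random.randn(100000)
grid = np.linspace(-5.0, 5.0, 1000)
pdf_spline = InterpolatedUnivariateSpline(grid, gaussian_kde(samples)(grid), k=3)
print(pdf_spline(0.0))   # close to 1/sqrt(2*pi) ~ 0.3989 for a standard normal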
Example #22
    def getpf(self, verbose):
        """
    Calculate the partition function for a grid of temperatures for VO.
 
    Parameters:
    -----------
    verbose: Integer
      Verbosity threshold.
 
    Returns:
    --------
    Temp: 1D float ndarray
       The array of temperatures where the partition function was evaluated.
    PF: 2D float ndarray
       A 2D array (of shape [Nisotopes,Ntemperatures]) with the partition
       function values for each isotope (columns) as function of temperature.

    Notes:
    ------
    The partition function is valid for the range of temperatures from
    1000K to 7000K.

    Sample Return:
    Temp = np.linspace(1000., 7000., 13):
    array([ 1000.,  1500.,  2000.,  2500.,  3000.,  3500.,  4000.,  4500., 5000.,  5500.,  6000.,  6500.,  7000.])
    PF
    array([[   6696.28281847,    1000.        ],
           [  12463.29391522,    1500.        ],
           [  20096.2494866 ,    2000.        ],
           [  29606.07281774,    2500.        ],
           [  41175.25306071,    3000.        ],
           [  55080.93657951,    3500.        ],
           [  71666.40723555,    4000.        ],
           [  91330.40363125,    4500.        ],
           [ 114524.07531435,    5000.        ],
           [ 141751.43620469,    5500.        ],
           [ 173571.51471102,    6000.        ],
           [ 210601.37829024,    6500.        ],
           [ 253519.63927169,    7000.        ]])

    Modification History:
    ---------------------
    2015-06-14  patricio  Initial implementation.  [email protected]
    2015-06-21  sally     Calculates pf for Vanadium (II) Oxide (VO)
    2015-06-14  Jasmina	  Extrapolated pf values from 0 to 1000 K
    """
        # Temperature array:
        Temp = np.arange(1000.0, 7001.0, 50.0)
        Ntemp = len(Temp)

        # Number of isotopes:
        Niso = 1

        # Initialize PF array:
        PF = np.zeros((Niso, Ntemp), np.double)

        # Calculate log(PF) at each Temp:
        for i in np.arange(Ntemp):
            # Formula from Irwin 1981, ApJS 45, 621 (equation #2):
            PF[0, i] = (self.PFcoeffs[0] + self.PFcoeffs[1] * np.log(Temp[i]) +
                        self.PFcoeffs[2] * (np.log(Temp[i]))**2 +
                        self.PFcoeffs[3] * (np.log(Temp[i]))**3 +
                        self.PFcoeffs[4] * (np.log(Temp[i]))**4 +
                        self.PFcoeffs[5] * (np.log(Temp[i]))**5)
        # Get the exponential of log(PF):
        PF = np.exp(PF)

        # Add start point for temp and PF arrays
        temp_ext = np.insert(Temp, 0, 0.0)
        PF_ext = np.insert(PF[0], 0, 0.0)

        # Interpolate using quadratic spline
        temp_int = np.arange(50.0, 1000.0, 50.0)
        s = InterpolatedUnivariateSpline(temp_ext, PF_ext, k=2)
        PF_int = s(temp_int)

        # Insert interpolated range into Temp and PF arrays
        Temp = np.insert(temp_ext, 1, temp_int)
        PF_insert = np.insert(PF_ext, 1, PF_int)

        # Update PF array
        PF = np.zeros((Niso, len(Temp)), np.double)
        PF[0] = PF_insert

        return Temp, PF
Example #23
def prep_data(met_dset, prof_dset, params):
    """
    This function prepares the forcing and profile data for the model run.
    
    Below, the surface forcing and profile data are interpolated to the user defined time steps
    and vertical resolutions, respectively. Secondary quantities are also computed and packaged 
    into dictionaries. The code also checks that the time and vertical increments meet the 
    necessary stability requirements.
    
    Lastly, this function initializes the numpy arrays to collect the model's output.
    
    INPUT:
    met_data: dictionary-like object with forcing data. Fields should include: 
            ['time', 'sw', 'lw', 'qlat', 'qsens', 'tx', 'ty', 'precip']. These fields should 
            store 1-D time series of the same length. 
            
            The model expects positive heat flux values to represent ocean warming. The time
            data field should contain a 1-D array representing fraction of day. For example, 
            for 6 hourly data, met_data['time'] should contain a number series that increases
            in steps of 0.25, such as np.array([1.0, 1.25, 1.75, 2.0, 2.25...]).

            See https://github.com/earlew/pwp_python#input-data for more info about the
            expected input data. 
    
            TODO: Modify code to accept met_data['time'] as an array of datetime objects
    
            
    prof_data: dictionary-like object with initial profile data. Fields should include:
            ['z', 't', 's', 'lat']. These represent 1-D vertical profiles of temperature,
            salinity and density. 'lat' is expected to be a length=1 array-like object. e.g. 
            prof_data['lat'] = [25.0]
            
    params: dictionary-like object with fields defined by set_params function
    
    OUTPUT:
    
    forcing: dictionary with interpolated surface forcing data. 
    pwp_out: dictionary with initialized variables to collect model output.
    """

    #create new time vector with time step dt_d
    #time_vec = np.arange(met_dset['time'][0], met_dset['time'][-1]+params['dt_d'], params['dt_d'])
    time_vec = np.arange(met_dset['time'][0], met_dset['time'][-1],
                         params['dt_d'])
    tlen = len(time_vec)

    #debug_here()

    #interpolate surface forcing data to new time vector
    from scipy.interpolate import interp1d
    forcing = {}
    for vname in met_dset:
        p_intp = interp1d(met_dset['time'], met_dset[vname], axis=0)
        forcing[vname] = p_intp(time_vec)

    #interpolate E-P to dt resolution (not sure why this has to be done separately)
    evap_intp = interp1d(met_dset['time'],
                         met_dset['qlat'],
                         axis=0,
                         kind='nearest',
                         bounds_error=False)
    evap = (0.03456 / (86400 * 1000)) * evap_intp(
        np.floor(time_vec))  #(meters per second?)
    emp = np.abs(evap) - np.abs(forcing['precip'])
    emp[np.isnan(emp)] = 0.
    forcing['emp'] = emp
    forcing['evap'] = evap

    if params['emp_ON'] == False:
        print("WARNING: E-P is turned OFF.")
        forcing['emp'][:] = 0.0
        forcing['precip'][:] = 0.0
        forcing['evap'][:] = 0.0

    if params['heat_ON'] == False:
        print("WARNING: Surface heating is turned OFF.")
        forcing['sw'][:] = 0.0
        forcing['lw'][:] = 0.0
        forcing['qlat'][:] = 0.0
        forcing['qsens'][:] = 0.0

    #define q_in and q_out (positive values should mean ocean warming)
    forcing['q_in'] = forcing['sw']  #heat flux into ocean
    forcing['q_out'] = -(forcing['lw'] + forcing['qlat'] + forcing['qsens'])

    #add time_vec to forcing
    forcing['time'] = time_vec

    if params['winds_ON'] == False:
        print("Winds are set to OFF.")
        forcing['tx'][:] = 0.0
        forcing['ty'][:] = 0.0

    #define depth coordinate, but first check to see if profile max depth
    #is greater than user defined max depth
    zmax = max(prof_dset.z)
    if zmax < params['max_depth']:
        params['max_depth'] = zmax
        print('Profile input shorter than depth selected, truncating to %sm' %
              zmax)

    #define new z-coordinates
    init_prof = {}
    init_prof['z'] = np.arange(0, params['max_depth'] + params['dz'],
                               params['dz'])
    zlen = len(init_prof['z'])

    #compute absorption and incoming radiation (function defined in PWP_model.py)
    absrb = PWP.absorb(params['beta1'], params['beta2'], zlen,
                       params['dz'])  #(units unclear)
    dstab = params['dt'] * params['rkz'] / params['dz']**2  #courant number
    if dstab > 0.5:
        print(
            "WARNING: unstable CFL condition for diffusion! dt*rkz/dz**2 > 0.5."
        )
        print(
            "To fix this, try to reduce the time step or increase the depth increment."
        )
        inpt = input("Proceed with simulation? Enter 'y' or 'n'. ")
        if inpt == 'n':
            raise ValueError(
                "Please restart PWP.m with a larger dz and/or smaller dt. Exiting..."
            )

    forcing['absrb'] = absrb
    params['dstab'] = dstab

    #check depth resolution of profile data
    prof_incr = np.diff(prof_dset['z']).mean()
    # if params['dz'] < prof_incr/5.:
    #     message = "Specified depth increment (%s m), is much smaller than mean profile resolution (%s m)." %(params['dz'], prof_incr)
    #     warnings.warn(message)

    #debug_here()
    #interpolate profile data to new z-coordinate
    from scipy.interpolate import InterpolatedUnivariateSpline
    for vname in prof_dset:
        if vname == 'lat' or vname == 'lon':
            continue
        else:
            #first strip nans
            not_nan = np.logical_not(np.isnan(prof_dset[vname]))
            indices = np.arange(len(prof_dset[vname]))
            #p_intp = interp1d(prof_dset['z'], prof_dset[vname], axis=0, kind='linear', bounds_error=False)
            #interp1d doesn't work here because it doesn't extrapolate. Can't have Nans in interpolated profile
            p_intp = InterpolatedUnivariateSpline(prof_dset['z'][not_nan],
                                                  prof_dset[vname][not_nan],
                                                  k=1)
            init_prof[vname] = p_intp(init_prof['z'])

    #get profile variables
    temp0 = init_prof['t']  #initial profile temperature
    sal0 = init_prof['s']  #initial profile salinity
    dens0 = sw.dens0(sal0, temp0)  #initial profile density

    #initialize variables for output
    pwp_out = {}
    pwp_out['time'] = time_vec
    pwp_out['dt'] = params['dt']
    pwp_out['dz'] = params['dz']
    pwp_out['lat'] = params['lat']
    pwp_out['z'] = init_prof['z']

    tlen = int(np.floor(tlen / params['dt_save']))
    arr_sz = (zlen, tlen)
    pwp_out['temp'] = np.zeros(arr_sz)
    pwp_out['sal'] = np.zeros(arr_sz)
    pwp_out['dens'] = np.zeros(arr_sz)
    pwp_out['uvel'] = np.zeros(arr_sz)
    pwp_out['vvel'] = np.zeros(arr_sz)
    pwp_out['mld'] = np.zeros((tlen, ))

    #use temp, sal and dens profile data for the first time step
    pwp_out['sal'][:, 0] = sal0
    pwp_out['temp'][:, 0] = temp0
    pwp_out['dens'][:, 0] = dens0

    return forcing, pwp_out, params
Example #24
try:
    import numpy as np
except ImportError:
    print('This file requires the numpy package to run properly. Please see the readme for instructions on how to install this package.')
try:
    from scipy.interpolate import InterpolatedUnivariateSpline
except ImportError:
    print('This file requires the scipy package to run properly. Please see the readme for instructions on how to install this package.')
import os
import sys

# Creating information from principal components to make w_0, w_a approximation

lok = os.path.dirname(sys.argv[0]) 
zdat = np.genfromtxt(lok+'/Growth/PC_1234.dat','float', usecols = 0, skip_header =1)
adat = np.flipud(1.0/(1.0+zdat))
a1 = np.flipud(np.genfromtxt(lok+'/Growth/PC_1234.dat','float', usecols = 1, skip_header =1))
a2 = np.flipud(np.genfromtxt(lok+'/Growth/PC_1234.dat','float', usecols = 2, skip_header =1))
e1 = InterpolatedUnivariateSpline(adat, a1, k=3)
e2 = InterpolatedUnivariateSpline(adat, a2, k=3)
e12 = InterpolatedUnivariateSpline(adat, a1**2.0, k=3)
e22 = InterpolatedUnivariateSpline(adat, a2**2.0, k=3)
norm1 = e12.integral(0.1, 1.0)
norm2 = e22.integral(0.1, 1.0)
con1 = 1.0/norm1
con2 = 1.0/norm2
beta1 = con1*e1.integral(0.1, 1.0)
beta2 = con2*e2.integral(0.1, 1.0)
gamma1s = InterpolatedUnivariateSpline(adat, adat*a1, k=3)
gamma2s = InterpolatedUnivariateSpline(adat, adat*a2, k=3)
gamma1 = con1*gamma1s.integral(0.1, 1.0)
gamma2 = con2*gamma2s.integral(0.1, 1.0)

Example #25
from solcore.data_analysis_tools.ellipsometry_analysis import EllipsometryData
from solcore.graphing.Custom_Colours import colours
from solcore.absorption_calculator.cppm import Custom_CPPB as cppb
from solcore.absorption_calculator.dielectric_constant_models import Oscillator

E_eV = np.linspace(0.7, 4.2, 1000)

# Load in ellipsometry data from file...
Exp_Data = EllipsometryData("data/ge_ellipsometry_data.dat")
Exp_Angles = Exp_Data.angles

# Load in some experimental Ge n-k to compare fit with this...
Ge_nk_Exp = np.loadtxt("data/Ge_nk.csv", delimiter=",", unpack=False)

# Smooth the data with spline fitting...
n_spline = InterpolatedUnivariateSpline(x=Ge_nk_Exp[::5, 0], y=Ge_nk_Exp[::5, 1], k=3)(E_eV)
k_spline = InterpolatedUnivariateSpline(x=Ge_nk_Exp[::5, 2], y=Ge_nk_Exp[::5, 3], k=3)(E_eV)

## Step 1 :: n and k modelling...
# First model the GeO2 layer with the Sellmeier model

# Define Oscillator Structure
GeO2 = Structure([
    Oscillator(oscillator_type="Sellmeier", material_parameters=None,
               A1=0.80686642, L1=0.68972606E-1,
               A2=0.71815848, L2=0.15396605,
               A3=0.85416831, L3=0.11841931E2)
])

GeO2_nk = cppb().nk_calc(oscillator_structure=GeO2, energy_array=E_eV)
Example #26
def days(length):
    s = InterpolatedUnivariateSpline(
        [0, 5, 30, 50, 200, 300],
        [0, 1 * length, 5 * length, 10 * length, 30 * length, 50 * length],
        k=1)
    return s
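A k=1 spline like this is just a piecewise-linear lookup table; a short usage sketch:

s = days(2.0)   # piecewise-linear spline scaled by length = 2.0
print(s(30))    # 10.0, the value at the knot (5 * length)
print(s(40))    # 15.0, linearly interpolated between the 30 and 50 knots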
Example #27
def get_field_rotation_power_from_PK(params, PK, chi_source, lmax=20000, acc=1, lsamp=None):
    results = camb.get_background(params)
    nz = int(100 * acc)
    if lmax < 3000:
        raise ValueError('field rotation assumed lmax > 3000')
    ls = np.hstack((np.arange(2, 400, 1), np.arange(401, 2600, int(10. / acc)),
                    np.arange(2650, lmax, int(50. / acc)), np.arange(lmax, lmax + 1))).astype(np.float64)

    # get grid of C_L(chi_s,k) for different redshifts
    chimaxs = np.linspace(0, chi_source, nz)
    cls = np.zeros((nz, ls.size))
    for i, chimax in enumerate(chimaxs[1:]):
        cl = cl_kappa_limber(results, PK, ls, nz, chimax)
        cls[i + 1, :] = cl
    cls[0, :] = 0
    cl_chi = RectBivariateSpline(chimaxs, ls, cls)

    # Get M(l,l') matrix
    chis = np.linspace(0, chi_source, nz, dtype=np.float64)
    zs = results.redshift_at_comoving_radial_distance(chis)
    dchis = (chis[2:] - chis[:-2]) / 2
    chis = chis[1:-1]
    zs = zs[1:-1]
    win = (1 / chis - 1 / chi_source) ** 2 / chis ** 2
    w = np.ones(chis.shape)
    cchi = cl_chi(chis, ls, grid=True)
    M = np.zeros((ls.size, ls.size))
    for i, l in enumerate(ls):
        k = (l + 0.5) / chis
        w[:] = 1
        w[k < 1e-4] = 0
        w[k >= PK.kmax] = 0
        cl = np.dot(dchis * w * PK.P(zs, k, grid=False) * win / k ** 4, cchi)
        M[i, :] = cl * l ** 4  # note we don't attempt to be accurate beyond lowest Limber
    Mf = RectBivariateSpline(ls, ls, np.log(M))

    # L sampling for output
    if lsamp is None:
        lsamp = np.hstack((np.arange(2, 20, 2), np.arange(25, 200, 10 // acc), np.arange(220, 1200, 30 // acc),
                           np.arange(1300, min(lmax // 2, 2600), 150 // acc),
                           np.arange(3000, lmax // 2 + 1, 1000 // acc)))

    # Get field rotation (curl) spectrum.
    diagm = np.diag(M)
    diagmsp = InterpolatedUnivariateSpline(ls, diagm)

    def high_curl_integrand(ll, lp):
        lp = lp.astype(int)
        r2 = (np.float64(ll) / lp) ** 2
        return lp * r2 * diagmsp(lp) / np.pi

    clcurl = np.zeros(lsamp.shape)
    lsall = np.arange(2, lmax + 1, dtype=np.float64)

    for i, ll in enumerate(lsamp):

        l = np.float64(ll)
        lmin = lsall[0]
        lpmax = min(lmax, int(max(1000, l * 2)))
        if ll < 500:
            lcalc = lsall[0:lpmax - 2]
        else:
            # sampling in l', with denser around l~l'
            lcalc = np.hstack((lsall[0:20:4],
                               lsall[29:ll - 200:35],
                               lsall[ll - 190:ll + 210:6],
                               lsall[ll + 220:lpmax + 60:60]))

        tmps = np.zeros(lcalc.shape)
        for ix, lp in enumerate(lcalc):
            llp = int(lp)
            lp = np.float64(lp)
            if abs(ll - llp) > 200 and lp > 200:
                nphi = 2 * int(min(lp / 10 * acc, 200)) + 1
            elif ll > 2000:
                nphi = 2 * int(lp / 10 * acc) + 1
            else:
                nphi = 2 * int(lp) + 1
            dphi = 2 * np.pi / nphi
            phi = np.linspace(dphi, (nphi - 1) / 2 * dphi, (nphi - 1) // 2)  # even and don't need zero
            w = 2 * np.ones(phi.size)
            cosphi = np.cos(phi)
            lrat = lp / l
            lfact = np.sqrt(1 + lrat ** 2 - 2 * cosphi * lrat)
            lnorm = l * lfact
            lfact[lfact <= 0] = 1
            w[lnorm < lmin] = 0
            w[lnorm > lmax] = 0

            lnorm = np.maximum(lmin, np.minimum(lmax, lnorm))
            tmps[ix] += lp * np.dot(w, (np.sin(phi) / lfact ** 2 * (cosphi - lrat)) ** 2 *
                                    np.exp(Mf(lnorm, lp, grid=False))) * dphi

        sp = InterpolatedUnivariateSpline(lcalc, tmps)
        clcurl[i] = sp.integral(2, lpmax - 1) * 4 / (2 * np.pi) ** 2

        if lpmax < lmax:
            tail = np.sum(high_curl_integrand(ll, lsall[lpmax - 2:]))
            clcurl[i] += tail

    return lsamp, clcurl
Example #28
disk_UGC03546=data_array_UGC03546[:,4]
disk_UGC06446=data_array_UGC06446[:,4]
disk_UGC06930=data_array_UGC06930[:,4]
disk_UGC06983=data_array_UGC06983[:,4]
disk_UGC07261=data_array_UGC07261[:,4]
disk_UGC07690=data_array_UGC07690[:,4]


bulge_NGC7331=data_array_NGC7331[:,5]
bulge_NGC7814=data_array_NGC7814[:,5]
bulge_UGC03546=data_array_UGC03546[:,5]


#Rotation curves of gas and disk

vdisk_NGC7331=InterpolatedUnivariateSpline(radius_NGC7331,disk_NGC7331)

vgas_NGC7331=InterpolatedUnivariateSpline(radius_NGC7331,gas_NGC7331)

vbulge_NGC7331=InterpolatedUnivariateSpline(radius_NGC7331,bulge_NGC7331)

vdisk_NGC7793=InterpolatedUnivariateSpline(radius_NGC7793,disk_NGC7793)

vgas_NGC7793=InterpolatedUnivariateSpline(radius_NGC7793,gas_NGC7793)

#no bulge

vdisk_NGC7814=InterpolatedUnivariateSpline(radius_NGC7814,disk_NGC7814)

vgas_NGC7814=InterpolatedUnivariateSpline(radius_NGC7814,gas_NGC7814)
Example #29
    def __init__(self):
        super(Stereography_window, self).__init__()
        self.setWindowTitle('Tracker 5')
        self.setMinimumWidth(1600)
        self.setMinimumHeight(1000)
        pg.setConfigOption('background', 'w')
        pg.setConfigOption('foreground', 'k')
        self.cur = QtGui.QCursor

        # Create the QVBoxLayout that lays out the whole form
        w = QtGui.QWidget()
        self.layout = QtGui.QVBoxLayout(w)
        self.setCentralWidget(w)

        ### top row, imageview widgets
        self.images_hbox = QtGui.QHBoxLayout()
        self.iml = pg.ImageView()
        self.imr = pg.ImageView()
        # self.iml.setMouseTracking(True)
        # self.imr.setMouseTracking(True)
        # self.imr_load = QtGui.QPushButton('Load right directory')
        self.iml_load = QtGui.QPushButton('Load left Directory')
        self.lfns, self.rfns = [], []

        self.randim = n.random.randint(256, size=(640, 480))
        self.iml.setImage(self.randim)
        self.iml.setMinimumHeight(400)
        self.iml.getHistogramWidget().setMaximumWidth(100)
        self.iml.scene.sigMouseMoved.connect(self.lmouseMoved)
        self.imr.setImage(self.randim)
        self.imr.setMinimumHeight(400)
        self.imr.getHistogramWidget().setMaximumWidth(100)
        self.imr.scene.sigMouseMoved.connect(self.rmouseMoved)

        self.images_hbox.addWidget(self.iml)
        self.ltools = QtGui.QToolBar(self.iml)
        self.ltools.setStyleSheet('QToolBar{spacing:0px;}')
        self.loadl = self.ltools.addAction(QtGui.QIcon(), 'load', self.load)
        self.ltools.show()

        self.images_hbox.addWidget(self.imr)
        self.rtools = QtGui.QToolBar(self.imr)
        self.rtools.setStyleSheet('QToolBar{spacing:0px;}')
        # self.loadr = self.rtools.addAction(QtGui.QIcon(), 'load', self.load_right)
        self.rtools.show()

        ### middle row
        # frame slider
        self.frame_hbox = QtGui.QHBoxLayout()
        self.frame_slider = QtGui.QSlider(QtCore.Qt.Horizontal)
        self.frame_slider.setMinimum(1)
        self.frame_slider.setMaximum(1)
        self.frame_slider.valueChanged.connect(self.change_frame)
        # self.frame_value = pg.ValueLabel()
        self.frame_value = QtGui.QLabel('1/1')
        self.frame_hbox.addWidget(self.frame_slider)
        self.frame_hbox.addWidget(self.frame_value)

        ### bottom row
        self.bottom_hbox = QtGui.QHBoxLayout()
        # camera layout
        self.camera_view = pg.PlotWidget()
        self.camera_view.setAspectLocked(True)
        self.camera_view.setMaximumWidth(400)
        self.camera_view.setMaximumHeight(400)
        self.tab = pg.TableWidget(editable=True, sortable=False)
        self.tab.setData([[-14.13, 0, 14.13, 0.], [-15, 0, 15, 0.],
                          [53.5, 41.41, 53.5, 41.41]])
        self.tab.setVerticalHeaderLabels(['pos', 'ang', 'fov'])
        self.tab.setHorizontalHeaderLabels(
            ['cam1 x', 'cam1 y', 'cam2 x', 'cam2 y'])
        self.tab.cellChanged.connect(self.update_camera_parameters)
        self.update_camera_parameters(dist=100)
        vbox = QtGui.QVBoxLayout()
        vbox.addWidget(self.camera_view)
        vbox.addWidget(self.tab)
        self.bottom_hbox.addItem(vbox)

        # 3d plot
        self.flight_view = gl.GLViewWidget()
        self.flight_view.setBackgroundColor((0, 0, .5))
        self.flight_view.setWindowTitle('pyqtgraph example: GLLinePlotItem')
        self.flight_view.opts['distance'] = 40
        gx = gl.GLGridItem(color=pg.mkColor([255, 0, 0, 255]))
        gx.rotate(90, 0, 1, 0)
        gx.translate(-10, 0, 0)
        self.flight_view.addItem(gx)
        gy = gl.GLGridItem()
        gy.rotate(90, 1, 0, 0)
        gy.translate(0, -10, 0)
        self.flight_view.addItem(gy)
        gz = gl.GLGridItem()
        gz.translate(0, 0, -10)
        self.flight_view.addItem(gz)
        # self.flight_view.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        # self.flight_view.show()
        self.flight_lines = []
        self.flight_pts = []
        self.bottom_hbox.addWidget(self.flight_view)

        ### set the layout
        self.layout.addLayout(self.images_hbox)
        # self.layout.addWidget(self.frame_slider)
        self.layout.addLayout(self.frame_hbox)
        self.layout.addLayout(self.bottom_hbox)

        ### now the markers
        self.num_cams = 2  #left and right
        self.num_markers = 9  #1-9
        self.num_frames = 1  #until we load a dir
        self.marker_keys = [str(i + 1) for i in range(self.num_markers)
                            ]  # keyboard strings for each marker
        self.shift_marker_keys = ['!', '@', '#', '$', '%', '^', '&', '*',
                                  '(']  # keyboard strings for each marker
        alpha = 200
        width = 4
        self.colors = [
            (255, 100, 100, alpha),  #red
            (100, 255, 100, alpha),  #green
            (60, 60, 255, alpha),  #blue
            (245, 245, 30, alpha),  #yellow
            (30, 245, 245, alpha),  #cyan
            (255, 0, 255, alpha),  #magenta
            (255, 195, 0, alpha),  #orange
            (150, 150, 255, alpha),  #indigo
            (215, 120, 255, alpha)
        ]  #purple
        # first the markers for the image
        self.data_markers = [[], []]
        for cam_ind in range(self.num_cams):
            for marker_ind in range(self.num_markers):
                # left
                data_marker = pg.PolyLineROI([[0., 0.]
                                              ])  #roi with only a single point
                data_marker.getHandles()[0].pen.setColor(
                    QtGui.QColor(*self.colors[marker_ind])
                )  #make each handle a different color
                data_marker.getHandles()[0].pen.setWidth(width)  #thicken lines
                data_marker.sigRegionChanged.connect(self.marker_moved)
                data_marker.hide()  #initially invisible
                self.data_markers[cam_ind].append(data_marker)
                if cam_ind == 0: self.iml.addItem(data_marker)
                if cam_ind == 1: self.imr.addItem(data_marker)
        self.data = n.zeros(
            (self.num_cams + 1, self.num_markers, 3, self.num_frames))
        self.data[:, :, :, :] = n.NaN
        # data positions interpolated
        self.null_interp = InterpolatedUnivariateSpline([0, 1], [n.NaN, n.NaN],
                                                        k=1)  # knots must be strictly increasing
        # self.data_interp = [[[self.null_interp]*2]*self.num_markers]*self.num_sides #[2 sides][num markers][x y] ##no t needed
        self.data_interp = [[[self.null_interp for xy in range(2)]
                             for m in range(self.num_markers)]
                            for s in range(self.num_cams)]
        # now the lines
        for marker_ind in range(self.num_markers):
            line = gl.GLLinePlotItem(pos=n.array([[0, 0, 0.], [1, 1, 1.]]),
                                     color=pg.glColor(self.colors[marker_ind]),
                                     width=2.,
                                     antialias=True)
            line.hide()
            self.flight_lines.append(line)
            self.flight_view.addItem(line)
            pt = gl.GLScatterPlotItem(pos=n.array([[0, 0, 0.]]),
                                      color=pg.glColor(
                                          self.colors[marker_ind]),
                                      size=10.)
            pt.hide()
            self.flight_pts.append(pt)
            self.flight_view.addItem(pt)
Example #30
def _make_splines(dx, y):
    if len(np.asarray(y).shape) > 1:
        return [_make_splines(dx, yy) for yy in y]
    else:
        return InterpolatedUnivariateSpline(np.arange(len(y)) * dx, y)
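_make_splines recurses over leading axes, so a 2-D y yields a list of per-row splines sharing a common dx-spaced grid; a usage sketch (numpy and the InterpolatedUnivariateSpline import are assumed, as in the snippet above):

import numpy as np

y = np.sin(np.linspace(0.0, 6.0, 60)).reshape(3, 20)   # three channels, 20 samples each
splines = _make_splines(0.1, y)                        # nested list mirroring y's leading shape
print(splines[0](0.25))                                # evaluate channel 0 between samples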
Example #31
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import UnivariateSpline, InterpolatedUnivariateSpline, LSQUnivariateSpline
from scipy import interpolate
import ipdb

N = 100 + 1
xx = np.arange(N) + (np.random.random(N) - 0.5)
xxx = np.linspace(-100, N, 30000)
w = np.ones(N) * 2
f = lambda x: 10 * np.sin(0.02 * x + np.random.random(1)) + 6 * np.cos(
    0.03 * x + np.random.random(1)) + 0.001 * (x - 500) * 2
yy = f(xx)

yk1 = InterpolatedUnivariateSpline(xx, yy, w=w, k=1)
yk2 = InterpolatedUnivariateSpline(xx, yy, w=w, k=2)
yk31 = InterpolatedUnivariateSpline(xx, yy, w=w, k=3)
yk3 = LSQUnivariateSpline(xx, yy, xx[2:-2], w=w, k=3)
yk311 = LSQUnivariateSpline(xx, yy, xx[1:-1], w=w, k=3)


def pp(xxx, obj, color="red", s=40):
    print("=======================")
    knots = obj.get_knots()
    xxx = np.sort(np.concatenate((xxx, knots)))
    plt.scatter(knots, obj(knots), color=color, s=s)
    plt.plot(xxx, obj(xxx), color=color)


def p123():
Example #32
def plot_CLASS_output(files,
                      x_axis,
                      y_axis,
                      ratio=False,
                      printing='',
                      output_name='',
                      extension='',
                      x_variable='',
                      scale='lin',
                      xlim=[],
                      ylim=[]):
    """
    Load the data to numpy arrays, write all the commands for plotting to a
    Python script for further refinement, and display them.

    Inspired heavily by the matlab version by Thomas Tram

    Parameters
    ----------
    files : list
        List of files to plot
    x_axis : string
        name of the column to use as the x coordinate
    y_axis : list, str
        List of items to plot, which should match the way they appear in the
        file, for instance: ['TT', 'BB']

    Keyword Arguments
    -----------------
    ratio : bool
        If set to yes, plots the ratio of the files, taking as a reference the
        first one
    output_name : str
        Specify a different name for the produced figure (by default, it takes
        the name of the first file, and replace the .dat by .pdf)
    extension : str

    """
    # Define the python script name, and the pdf path
    python_script_path = files[0] + '.py'
    pdf_path = files[0] + '.pdf'

    # The variable text will contain all the lines to be printed in the end to
    # the python script path, joined with newline characters. Beware of the
    # indentation.
    text = [
        'import matplotlib.pyplot as plt', 'import numpy as np',
        'import itertools', ''
    ]

    # Load all the graphs
    data = []
    for data_file in files:
        data.append(np.loadtxt(data_file))

    # Create the full_path_files list, that contains the absolute path, so that
    # the future python script can import them directly.
    full_path_files = [os.path.abspath(elem) for elem in files]

    text += ['files = %s' % full_path_files]
    text += [
        'data = []', 'for data_file in files:',
        '    data.append(np.loadtxt(data_file))'
    ]

    # Recover the base name of the files, everything before the .
    roots = [elem.split(os.path.sep)[-1].split('.')[0] for elem in files]
    text += ['roots = [%s]' % ', '.join(["'%s'" % root for root in roots])]

    # Create the figure and ax objects
    fig, ax = plt.subplots()
    text += ['', 'fig, ax = plt.subplots()']

    # if ratio is not set, then simply plot them all
    original_y_axis = y_axis
    legend = []
    if not ratio:
        for index, curve in enumerate(data):
            # Recover the number of columns in the first file, as well as their
            # title.
            num_columns, names, tex_names = extract_headers(files[index])

            # Check if everything is in order
            if num_columns == 2:
                y_axis = [names[1]]
            elif num_columns > 2:
                # in case y_axis was only a string, cast it to a list
                if isinstance(original_y_axis, str):
                    y_axis = [original_y_axis]
                else:
                    y_axis = original_y_axis

            # Store the selected text and tex_names to the script
            selected = []
            for elem in y_axis:
                selected.extend([
                    name for name in names
                    if name.find(elem) != -1 and name not in selected
                ])
            y_axis = selected

            text += ['y_axis = %s' % selected]
            text += [
                'tex_names = %s' % [
                    elem for (elem, name) in zip(tex_names, names)
                    if name in selected
                ]
            ]

            # Decide for the x_axis, by default the index will be set to zero
            x_index = 0
            if x_axis:
                for index_name, name in enumerate(names):
                    if name.find(x_axis) != -1:
                        x_index = index_name
                        break
            text += ["x_axis = '%s'" % tex_names[x_index]]

            # Store the limits variable in the text
            text += ["ylim = %s" % ylim]
            text += ["xlim = %s" % xlim]

            for selec in y_axis:
                index_selec = names.index(selec)
                plot_line = '    ax.'
                if scale == 'lin':
                    plot_line += 'plot(curve[:, %i], curve[:, %i])' % (
                        x_index, index_selec)
                    ax.plot(curve[:, x_index], curve[:, index_selec])
                elif scale == 'loglog':
                    plot_line += 'loglog(curve[:, %i], abs(curve[:, %i]))' % (
                        x_index, index_selec)
                    ax.loglog(curve[:, x_index], abs(curve[:, index_selec]))
                elif scale == 'loglin':
                    plot_line += 'semilogx(curve[:, %i], curve[:, %i])' % (
                        x_index, index_selec)
                    ax.semilogx(curve[:, x_index], curve[:, index_selec])
                elif scale == 'george':
                    plot_line += 'plot(curve[:, %i], curve[:, %i])' % (
                        x_index, index_selec)
                    ax.plot(curve[:, x_index], curve[:, index_selec])
                    ax.set_xscale('planck')
                text += [plot_line]

            legend.extend([roots[index] + ': ' + elem for elem in y_axis])

        ax.legend(legend, loc='best')
        text += [
            "", "ax.legend([root+': '+elem for (root, elem) in",
            "    itertools.product(roots, y_axis)], loc='best')", ""
        ]
    else:
        ref = data[0]
        num_columns, ref_curve_names, ref_tex_names = extract_headers(files[0])
        # Check if everything is in order
        if num_columns == 2:
            y_axis_ref = [ref_curve_names[1]]
        elif num_columns > 2:
            # in case y_axis was only a string, cast it to a list
            if isinstance(original_y_axis, str):
                y_axis_ref = [original_y_axis]
            else:
                y_axis_ref = original_y_axis

        # Store the selected text and tex_names to the script
        selected = []
        for elem in y_axis_ref:
            selected.extend([
                name for name in ref_curve_names
                if name.find(elem) != -1 and name not in selected
            ])
        y_axis_ref = selected

        # Decide for the x_axis, by default the index will be set to zero
        x_index_ref = 0
        if x_axis:
            for index_name, name in enumerate(ref_curve_names):
                if name.find(x_axis) != -1:
                    x_index_ref = index_name
                    break

        for idx in range(1, len(data)):
            current = data[idx]
            num_columns, names, tex_names = extract_headers(files[idx])

            # Check if everything is in order
            if num_columns == 2:
                y_axis = [names[1]]
            elif num_columns > 2:
                # in case y_axis was only a string, cast it to a list
                if isinstance(original_y_axis, str):
                    y_axis = [original_y_axis]
                else:
                    y_axis = original_y_axis

            # Store the selected text and tex_names to the script
            selected = []
            for elem in y_axis:
                selected.extend([
                    name for name in names
                    if name.find(elem) != -1 and name not in selected
                ])
            y_axis = selected

            text += ['y_axis = %s' % selected]
            text += [
                'tex_names = %s' % [
                    elem for (elem, name) in zip(tex_names, names)
                    if name in selected
                ]
            ]

            # Decide for the x_axis, by default the index will be set to zero
            x_index = 0
            if x_axis:
                for index_name, name in enumerate(names):
                    if name.find(x_axis) != -1:
                        x_index = index_name
                        break

            text += ["x_axis = '%s'" % tex_names[x_index]]
            for selec in y_axis:
                # Do the interpolation
                axis = ref[:, x_index_ref]
                reference = ref[:, ref_curve_names.index(selec)]
                #plt.loglog(current[:, x_index], current[:, names.index(selec)])
                # plt.show()
                # interpolated = splrep(current[:, x_index],
                # current[:, names.index(selec)])
                interpolated = InterpolatedUnivariateSpline(
                    current[:, x_index], current[:, names.index(selec)])
                if scale == 'lin':
                    # ax.plot(axis, splev(ref[:, x_index_ref],
                    # interpolated)/reference-1)
                    ax.plot(axis,
                            interpolated(ref[:, x_index_ref]) / reference - 1)
                elif scale == 'loglin':
                    # ax.semilogx(axis, splev(ref[:, x_index_ref],
                    # interpolated)/reference-1)
                    ax.semilogx(
                        axis,
                        interpolated(ref[:, x_index_ref]) / reference - 1)
                elif scale == 'loglog':
                    raise InputError("loglog plot is not available for ratios")

    if 'TT' in names:
        ax.set_xlabel(r'$\ell$', fontsize=16)
        text += [r"ax.set_xlabel('$\ell$', fontsize=16)"]
    elif 'P' in names:
        ax.set_xlabel('$k$ [$h$/Mpc]', fontsize=16)
        text += ["ax.set_xlabel('$k$ [$h$/Mpc]', fontsize=16)"]
    else:
        ax.set_xlabel(tex_names[x_index], fontsize=16)
        text += ["ax.set_xlabel('%s', fontsize=16)" % tex_names[x_index]]
    if xlim:
        if len(xlim) > 1:
            ax.set_xlim(xlim)
            text += ["ax.set_xlim(xlim)"]
        else:
            ax.set_xlim(xlim[0])
            text += ["ax.set_xlim(xlim[0])"]
        ax.set_ylim()
        text += ["ax.set_ylim()"]
    if ylim:
        if len(ylim) > 1:
            ax.set_ylim(ylim)
            text += ["ax.set_ylim(ylim)"]
        else:
            ax.set_ylim(ylim[0])
            text += ["ax.set_ylim(ylim[0])"]
    text += ['plt.show()']
    plt.show()

    # If the user wants to print the figure to a file
    if printing:
        fig.savefig(printing)
        text += ["fig.savefig('%s')" % printing]

    # Write to the python file all the issued commands. You can then reproduce
    # the plot by running "python output/something_cl.dat.py"
    with open(python_script_path, 'w') as python_script:
        print('Creating a python script to reproduce the figure')
        print('--> stored in %s' % python_script_path)
        python_script.write('\n'.join(text))

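# A minimal, self-contained sketch of the interpolation/ratio step above, assuming
# two runs of the same curve sampled on different grids; the names and data here
# are illustrative stand-ins, not taken from the script.
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import InterpolatedUnivariateSpline

x_ref = np.linspace(1.0, 10.0, 200)
y_ref = np.sin(x_ref) + 2.0                # reference curve (offset to avoid zeros)
x_new = np.linspace(1.0, 10.0, 137)        # second run, sampled differently
y_new = (np.sin(x_new) + 2.0) * 1.01       # 1% offset makes the ratio visible

interpolated = InterpolatedUnivariateSpline(x_new, y_new)
plt.semilogx(x_ref, interpolated(x_ref) / y_ref - 1)   # fractional difference
plt.show()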
Example #33
0
class VTC(object):
    
    bracket_size = 1000
    stop_error = 0.001
    max_iters = 10
    
    def __init__(self, csv_file, vin='Vin', vout='Vout'):
        self.x_values = []
        self.y_values = []
        self.traces = {}
        self.annotations = []
        
        self.csvfile = csv_file
        try:
            f = open(csv_file, 'r')
            self.spamreader = csv.DictReader(f, delimiter=' ')
        except TypeError:
            print("THERE WAS AN ERROR!")
            raise
        for row in self.spamreader:
            self.x_values.append(float(row[vin]))
            self.y_values.append(float(row[vout]))
        f.close()
        
        # getting a spline's derivative is much more accurate for finding
        # the VTC's characteristic parameters
        self.spl = InterpolatedUnivariateSpline(self.x_values, self.y_values)
        self.spline_1drv = self.spl.derivative()
        self.get_characteristic_parameters()
        
        self.traces['VTC'] = {
                'x': self.x_values, 
                'y': self.y_values, 
                'label': 'Voltage Transfer Characteristic',
                'class': 'main',
        }
    
    def find_vm(self, start=None, stop=None, bracket_size=1000,
                stop_error=0.001, max_iters=10):
        if not start:
            start = self.min_x
        if not stop:
            stop = self.max_x
        
        iter_count = 0
        smallest = numpy.nan_to_num(numpy.inf)  # largest finite float
        smallest_at = 0
        while smallest > stop_error and iter_count < max_iters:
            q = numpy.linspace(start, stop, bracket_size)
            for (x, y) in zip(q, self.spl(q)):
                if abs(y-x) < smallest:
                    smallest = abs(y-x)
                    smallest_at = x
            length = float(stop - start)
            if not float(smallest_at - (length/4)) < start:
                start = float(smallest_at - (length/4))
            if not float(smallest_at + (length/4)) > stop:
                stop = float(smallest_at + (length/4))
            iter_count += 1
        return smallest_at
        
    
    def find_inflection_point(self, **iter_params):
        if not hasattr(self, 'spline_1drv'):
            self.spline_1drv = self.spl.derivative()
        
        p = self.defaults
        p.update(iter_params)
        start, stop = p['start'], p['stop']
                
        iter_count = 0
        biggest_y = 0
        biggest_x = 0
        while iter_count < p['max_iters']:
            q = numpy.linspace(start, stop, p['bracket_size'])
            for (dx, dy) in zip(q, self.spline_1drv(q)):
                if abs(dy) > biggest_y:
                    biggest_y = abs(dy)
                    biggest_x = dx
            length = float(stop - start)
            if not float(biggest_x - (length/4)) < start:
                start = float(biggest_x - (length/4))
            if not float(biggest_x + (length/4)) > stop:
                stop = float(biggest_x + (length/4))            
            iter_count += 1
        self.inflection_point = biggest_x
        return biggest_x
            
    def find_where_derivative_is(self, value, start=None,
                                 stop=None, bracket_size=1000,
                                 stop_error=0.001, max_iters=10):
        if not hasattr(self, 'spline_1drv'):
            self.spline_1drv = self.spl.derivative()
        if not start:
            start = self.min_x
        if not stop:
            stop = self.max_x

        iter_count = 0
        closest_y = numpy.inf
        closest_x = numpy.inf
        while abs(value - closest_y) > stop_error and iter_count < max_iters:
            q = numpy.linspace(start, stop, bracket_size)
            for (dx, dy) in zip(q, self.spline_1drv(q)):
                if abs(value - dy) < abs(value - closest_y):
                    closest_y = dy
                    closest_x = dx
            length = float(stop - start)
            if not float(closest_x - (length/4)) < start:
                start = float(closest_x - (length/4))
            if not float(closest_x + (length/4)) > stop:
                stop = float(closest_x + (length/4))
            iter_count += 1
        return closest_x
    
    def get_tangent_line_at(self, x):
        y = self.spl(x)
        m = self.spl(x, 1)  # first derivative of the spline at x
        b = y - (m * x)
        return y, m, b
    
    def make_tangent_line_at(self, x):
        # extend the tangent line by 1/9 of the plot range on each side
        x_extend = self.range_x / 9
        y_extend = self.range_y / 9
        y, m, b = self.get_tangent_line_at(x)
        x1 = x - x_extend
        x2 = x + x_extend
        y1 = m*x1+b
        y2 = m*x2+b
        return x1, x2, y1, y2
        
    def get_characteristic_parameters(self):
        
        self.min_x = float(min(self.x_values))
        self.max_x = float(max(self.x_values))
        self.min_y = float(min(self.y_values))
        self.max_y = float(max(self.y_values))
        self.range_x = float(self.max_x - self.min_x)
        self.range_y = float(self.max_y - self.min_y)
        
        self.defaults = {
            'bracket_size': self.bracket_size,
            'stop_error': self.stop_error,
            'max_iters': self.max_iters,
            'start': self.min_x,
            'stop': self.max_x,
        }
        
        if not hasattr(self, 'inflection_point'):
            self.find_inflection_point()
        self.voh = self.max_y
        self.vih = self.find_where_derivative_is(-1, start=self.inflection_point)
        self.vm = self.find_vm()
        self.vil = self.find_where_derivative_is(-1, stop=self.inflection_point)
        self.vol = self.min_y
        self.nml = self.vil - self.vol
        self.nmh = self.voh - self.vih
        
#         print(self.vol, self.vil, self.vm, self.vih, self.voh, self.nml, self.nmh)
    
    def plot_ly(self, filename):
        offset = (self.max_y/20)
        line_style = Line(
            color='rgb(44, 160, 44)',
            opacity=0.25,
            dash='dot',
        )
        helper_line_style = {
            'line': line_style,
            'showlegend': False,
            'mode': 'lines',
            'connectgaps': True,
        }
        
        traces = []
        traces.append(Scatter(
                x=self.x_values,
                y=self.y_values,
        ))
        
        for (point, name) in {self.vil: 'V_IL', self.vih: 'V_IH'}.items():
            x1, x2, y1, y2 = self.make_tangent_line_at(point)
            traces.append(
                Scatter(
                    x=[x1, x2], 
                    y=[y1, y2], 
                    mode='lines',
                    name='tangent at dy/dx=-1',
                    showlegend=False,
                    connectgaps=True,
                    opacity=0.5,
                    line=Line(
                        color='#AAAAAA',
                    )
                )
            )
            traces.append(
                Scatter(
                    x=[point, point],
                    y=[self.min_y, self.spl(point)],
                    **helper_line_style
                )
            )
        
        for (point, name) in {self.vol: 'V_{OL}', self.voh: 'V_{OH}'}.items():
            traces.append(Scatter(
                    x=[self.min_x, self.max_x], 
                    y=[point, point],
                    name=name,
                    **helper_line_style
            ))
        
        traces.append(Scatter(
                x=[0, self.vm],
                y=[0, self.vm],
                mode='lines',
                name='V_M',
                line=line_style,
        ))
        
        data = Data(traces)
        
        annotations = []
        annotations.append(Annotation(x=self.max_x, xanchor='left', align='left', yanchor='top', y=self.vol, text='$V_{OL}$', showarrow=False))
        annotations.append(Annotation(x=self.vil, y=self.min_y, yanchor='top', text='$V_{IL}$', showarrow=False))
        annotations.append(Annotation(x=self.vm, y=self.vm, xanchor='left', align='left', text='$V_{M}$', showarrow=False))
        annotations.append(Annotation(x=self.vih, y=self.min_y, yanchor='top', text='$V_{IH}$', showarrow=False))
        annotations.append(Annotation(x=self.max_x, xanchor='left', align='left', y=self.voh, text='$V_{OH}$', showarrow=False))
        
        layout = Layout(
            title='Voltage Transfer Characteristic',
            xaxis=XAxis(title='$V_{in} \\left(\\text{V}\\right)$', showgrid=False),
            yaxis=YAxis(title='$V_{out} \\left(\\text{V}\\right)$', showgrid=False),
            annotations=Annotations(annotations),
            showlegend=False,
            autosize=False,
            width=500,
            height=500,
            margin=Margin(
                l=50,
                r=50,
                b=50,
                t=50,
            ),
        )
        fig = Figure(data=data, layout=layout)
        plot_url = py.plot(fig, filename=filename)
        
    def matplotlib(self, filename):
        import seaborn as sns
        sns.set_style('white')

        offset = (self.voh - self.vol)/50
        self.figure = plt.figure(facecolor='white', figsize=(3.5, 3.6))
        ax = plt.gca()
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.clip_on=False
        plt.tick_params(top='off', right='off')
        plt.locator_params('both', tight=True, nbins=4)
        # set text and line formatting stuff
        # plt.rc('text', usetex=True)
        plt.rc('lines', linewidth=1)
        plt.rc('font', size=12)
        main_plot = dict(linewidth=3, zorder=20)
        tangent_lines = dict(color='grey', linewidth=1)
        marker_lines = dict(color='grey', linewidth=1, linestyle='--')
        # 
        plt.xlabel(r"$V_{in}$")
        plt.ylabel(r"$V_{out}$")
        # plot the main VTC
        plt.plot(self.x_values, self.y_values, label='VTC', **main_plot)
        plt.plot([0, 1, self.vm], [0, 1, self.vm], **marker_lines)
        ax.annotate('$V_{M}$', xy=(self.vm, self.vm), xytext=(0, -8), textcoords='offset points',
                     horizontalalignment='left', verticalalignment='middle')


        for label, point in {r'$V_{OL}$': self.vol, r'$V_{IL}$': self.vil, r'$V_{IH}$': self.vih,
                      r'$V_{OH}$': self.voh}.items():
            x1, x2, y1, y2 = self.make_tangent_line_at(point)
            ax.plot([x1, x2], [y1, y2], **tangent_lines)
            ax.axvline(x=point, **marker_lines)
            # ax.annotate(label, xy=(point, 0), xytext=(0, -8), textcoords='offset points',
            #              horizontalalignment='center', verticalalignment='top')

        # ax.axvline(x=self.vil, **marker_lines)
        # ax.axvline(x=self.vih, **marker_lines)
        ax.annotate('$V_{OL}$', xy=(self.vol, 0), xytext=(0, -8), textcoords='offset points',
                     horizontalalignment='left', verticalalignment='top')
        ax.annotate('$V_{IL}$', xy=(self.vil, 0), xytext=(0, -8), textcoords='offset points',
                     horizontalalignment='center', verticalalignment='top')
        ax.annotate('$V_{IH}$', xy=(self.vih, 0), xytext=(0, -8), textcoords='offset points',
                     horizontalalignment='center', verticalalignment='top')
        ax.annotate('$V_{OH}$', xy=(self.voh, 0), xytext=(0, -8), textcoords='offset points',
                     horizontalalignment='center', verticalalignment='top')
        ax.annotate('', xy=(0, self.voh/1.5), xycoords='data',
                    xytext=(self.vil, self.voh/1.5), textcoords='data',
                    arrowprops=dict(arrowstyle="<->", ec="k",))
        ax.annotate('$NM_L$', xy=(self.vil/2, self.voh/1.5), xytext=(0, -5), textcoords='offset points',
                    horizontalalignment='center', verticalalignment='top')
        ax.annotate('', xy=(self.vih, self.voh/1.5), xycoords='data',
                    xytext=(self.voh, self.voh/1.5), textcoords='data',
                    arrowprops=dict(arrowstyle="<->", ec="k",))
        ax.annotate('$NM_H$', xy=((self.voh-self.vih)/2+self.vih, self.voh/1.5), xytext=(0, -5), textcoords='offset points',
                    horizontalalignment='center', verticalalignment='top')


        plt.tight_layout()

        ax.locator_params(axis='both', tight=True)
        ax.set_ylim(-0.02)
        ax.set_xlim(0)
        self.figure.savefig(filename)
        
    
    def matplotly(self, filename):
        plot_url = py.plot_mpl(self.figure)
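# Hedged usage sketch for the VTC class above: synthesize a space-delimited CSV
# with 'Vin'/'Vout' columns (an idealized high-gain inverter curve), then read
# back the characteristic parameters. The file name and curve are illustrative.
import csv
import numpy

vin = numpy.linspace(0.0, 1.0, 201)
vout = 1.0 / (1.0 + numpy.exp(40.0 * (vin - 0.5)))   # smooth falling transfer curve
with open('inverter_demo.csv', 'w', newline='') as f:
    w = csv.writer(f, delimiter=' ')
    w.writerow(['Vin', 'Vout'])
    w.writerows(zip(vin, vout))

vtc = VTC('inverter_demo.csv')
print(vtc.vol, vtc.vil, vtc.vm, vtc.vih, vtc.voh, vtc.nml, vtc.nmh)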
Example #34
0
import sys

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline, Rbf

# N samples are read from stdin and mirrored, leaving a 12-sample gap between the
# two copies that interpolation then fills. N = 60 is an assumption implied by the
# commented-out hstack below.
N = 60
Data = np.zeros(2 * N)

x1 = np.linspace(1, N, N)
x2 = np.linspace(N + 13, 2 * N + 12, N)
x = np.hstack([x1, x2])

x_p = np.linspace(N + 1, N + 12, 12)


for i in range(N):
    userinput = sys.stdin.readline()
    words = userinput.split()
    print(i, N + i)
    Data[i] = int(words[1])
    Data[N + i] = Data[i]

ius = InterpolatedUnivariateSpline(x, Data)
# print x
# print x_p
rbf = Rbf(x, Data)
y_p = rbf(x_p)
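# note: the spline built above could fill the same gap directly, e.g. y_p = ius(x_p)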
for i in range(12):
    print(int(y_p[i]))
#plt.plot(x, Data)
# y = np.hstack([Data[0:60],y_p])
# y = np.hstack([y, Data[60:]])
# plt.plot(y)
# plt.show()
# z_p = rbf(x_p)
# print(y_p)

Example #35
0
def DataForSpline(LivetimeFile=None,
                    EAFile=None,
                    DispersonFile=None,
                    Mchi=None,
                    WindowEMIN=None,
                    WindowEMAX=None,
                    WhicArray='Front',
                    ):
                    

    #log of the DM mass
    y=np.log10(Mchi)

    
    # Returns 1) an array of the weighted exposure (livetime times effective area)
    # for an average Healpix pixel, as a function of instrument angle, given the
    # log of the DM mass y, and 2) the instrument angle itself.
    WeightedEffArea,CosAveLTcube,TotalExposure=WeightedExposure(LivetimeFile=LivetimeFile,EAFile=EAFile,y=y)


    
    #dispersion
    #plotdifferentEsposuresFordifferntenergies(SplineEA=SplineEA,CosAveLTcube=CosAveLTcube,AveHealpix=AveHealpix)
    Norm,LS1,LS2,RS1,RS2,BIAS=SplinesforDispersionEq(DispersonFile)

    Dic={
        'Mchi':Mchi,
        'Norm':Norm,
        'LS1':LS1,
        'LS2':LS2,
        'RS1':RS1,
        'RS2':RS2,
        'Bias':BIAS,
        'WhicArray':WhicArray,
        }

    #returns an array of the energy dispersion given a range of observed energies (DeltaE) and an instrument inclination angle cth;
    #passes a dictionary of the elements of the dispersion function (Dic) and the log of the DM mass (y)
    def fsum(x=None,cth=None):
        return ED_array(DeltaE=x,CTheta=cth,Dic=Dic,y=y)
    
    x=np.linspace(WindowEMIN/3.,WindowEMAX*3,1600)
    fsumTheta=np.zeros(len(x))

    #average over the instrument angle weighted by the Effective Exposure as a function of instrument angle
    for i,cth in enumerate(CosAveLTcube):
        fsumTheta=fsumTheta+fsum(x=x,cth=cth)*WeightedEffArea[i]

    #normalize the averaged dispersion function.
    fsumTheta=fsumTheta/np.trapz(y=fsumTheta,x=x)
    print('integration over the dispersion function', np.trapz(y=fsumTheta,x=x))
        
        
    #generate a spline of the averaged dispersion function.
    Spline_fsumTheta=InterpolatedUnivariateSpline(x,fsumTheta)
    print('integral of Spline', Spline_fsumTheta.integral(WindowEMIN/2.,WindowEMAX*2))

    return fsum,fsumTheta,Spline_fsumTheta,x,TotalExposure
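# Minimal sketch of the normalize-then-spline step above: scale a tabulated curve
# to unit area with np.trapz, then wrap it in a spline so sub-range integrals are
# cheap. The curve here is a synthetic stand-in for the averaged dispersion.
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

x = np.linspace(0.1, 10.0, 400)
f = np.exp(-np.log(x) ** 2)                 # stand-in dispersion shape
f = f / np.trapz(y=f, x=x)                  # normalize to unit integral
spline_f = InterpolatedUnivariateSpline(x, f)
print('partial integral', spline_f.integral(0.5, 5.0))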
Example #36
0
from csv import writer
from random import uniform
from scipy.interpolate import InterpolatedUnivariateSpline

from commonpvs import *

# Reading from the graph at ./docs/PV%20Simulator%20Challenge.pdf
graph_points = [(0, 0), (1, 0), (2, 0), (3, 0), (4, 0), (6, 0.1), (8, 0.4),
                (12, 2.7), (14, 3.2), (16, 3), (20, 0.1), (21, 0), (22, 0),
                (23, 0), (24, 0)]

x = [i[0] * resolution
     for i in graph_points]  # time converted from hour to timestamp format
y = [i[1] * 1000 for i in graph_points]  # power converted from kW to W
interpolation_function = InterpolatedUnivariateSpline(x, y)


def simulator(timestamp, noise_multiplier=0.05):
    reading = interpolation_function(int(timestamp))
    noise = uniform(-1, 1) * (reading * noise_multiplier)
    return max(0, round(reading + noise, 2))  # cannot generate negative power
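# Hedged demo of simulator(): sample one simulated day at hourly steps. Assumes
# 'resolution' (imported from commonpvs) is the number of timestamp units per hour.
for demo_hour in range(25):
    print(demo_hour, 'h ->', simulator(demo_hour * resolution), 'W')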


def process_data(chan, method, properties, body):
    timestamp, formatted_time, meter_value = str(body)[2:-1].split(
        ',')  # [2:-1] strips the b'...' wrapper from the bytes repr
    meter_value = float(meter_value)
    pvs_value = simulator(timestamp)
    if int(timestamp) % resolution == 0:
        print(timestamp, formatted_time, meter_value, pvs_value)
    with open('output.csv', 'a') as file:
        # assumed completion: append the combined reading as one CSV row
        writer(file).writerow([timestamp, formatted_time, meter_value, pvs_value])
Example #37
0
class MassFunction(object):
    """Object representing a mass function for a given input cosmology.

    A MassFunction object can return a properly normalized halo abundance or
    halo bias as a function of halo mass or as a function of nu, as well as
    translate between mass and nu. Current definition is from Sheth & Torman

    Attributes:
        redshift: float redshift at which to compute the mass function
        cosmo_single_epoch: SingleEpoch cosmology object from cosmology.py
        halo_dict: dictionary of floats defining halo and mass function
            parameters (see defaults.py for details)
    """
    def __init__(self, redshift=0.0, cosmo_single_epoch=None, 
                 halo_dict=None, **kws):
        self._redshift = redshift
        #self.cosmo = cosmology.SingleEpoch(self._redshift, cosmo_dict)
        if cosmo_single_epoch is None:
            cosmo_single_epoch = cosmology.SingleEpoch(self._redshift)
        self.cosmo = cosmo_single_epoch
        self.cosmo.set_redshift(self._redshift)
        self.delta_c = self.cosmo.delta_c()

        if halo_dict is None:
            halo_dict = defaults.default_halo_dict
        self.halo_dict = halo_dict

        self.delta_v = self.halo_dict['delta_v']
        if self.delta_v == -1:
            self.delta_v = self.cosmo.delta_v()
        self.stq = halo_dict["stq"]
        self.st_little_a = halo_dict["st_little_a"]
        self.c0 = halo_dict["c0"]/(1.0 + redshift)

        self._set_mass_limits()
        self._initialize_splines()
        self._normalize()
        
    def get_redshift(self):
        """
        Return the internal redshift variable.
        """
        return self._redshift

    def set_redshift(self, redshift):
        """
        Reset mass function parameters at redshift.

        Args:
            redshift: float value of redshift
        """
        self._redshift = redshift

        self.cosmo.set_redshift(redshift)

        self.delta_c = self.cosmo.delta_c()
        self.c0 = self.halo_dict["c0"]/(1.0 + redshift)
        self.delta_v = self.halo_dict['delta_v']
        if self.delta_v == -1:
            self.delta_v = self.cosmo.delta_v()

        self._set_mass_limits()
        self._initialize_splines()
        self._normalize()
        
    def get_cosmology(self):
        """
        Return the internal cosmology dictionary.
        """
        return self.cosmo.get_cosmology()

    def set_cosmology(self, cosmo_dict, redshift=None):
        """
        Reset mass function parameters for cosmology cosmo_dict.

        Args:
            cosmo_dict: dictionary of floats defining a cosmology (see
                defaults.py for details)
            redshift: float value of redshift
        """
        if redshift is None:
            redshift = self._redshift
        self.cosmo.set_cosmology(cosmo_dict, redshift)

        self.delta_c = self.cosmo.delta_c()
        self.delta_v = self.halo_dict['delta_v']
        if self.delta_v == -1:
            self.delta_v = self.cosmo.delta_v()
        self.c0 = self.halo_dict["c0"]/(1.0 + redshift)

        self._set_mass_limits()
        self._initialize_splines()
        self._normalize()

    def set_cosmology_object(self, cosmo_single_epoch):
        self._redshift = cosmo_single_epoch.redshift()
        self.cosmo = cosmo_single_epoch

        self.delta_c = self.cosmo.delta_c()
        self.delta_v = self.halo_dict['delta_v']
        if self.delta_v == -1:
            self.delta_v = self.cosmo.delta_v()
        self.c0 = self.halo_dict["c0"]/(1.0 + self._redshift)

        self._set_mass_limits()
        self._initialize_splines()
        self._normalize()
        
    def get_halo(self):
        """
        Return the internal dictionary defining a halo.
        """
        return self.halo_dict

    def set_halo(self, halo_dict):
        """
        Reset mass function parameters for halo_dict.

        Args:
            halo_dict: dictionary of floats defining halos (see
                defaults.py for details)
        """
        self.halo_dict = halo_dict

        self.stq = self.halo_dict["stq"]
        self.st_little_a = self.halo_dict["st_little_a"]
        self.c0 = self.halo_dict["c0"]/(1.0 + self._redshift)
        self.delta_v = self.halo_dict['delta_v']
        if self.delta_v == -1:
            self.delta_v = self.cosmo.delta_v()

        self._normalize()

    def _set_mass_limits(self):
        mass_min = 1.0e9
        mass_max = 1.0e16
        if (defaults.default_limits["mass_min"] > 0 and 
            defaults.default_limits["mass_max"] > 0):
            self.ln_mass_min = numpy.log(defaults.default_limits["mass_min"])
            self.ln_mass_max = numpy.log(defaults.default_limits["mass_max"])
            self._ln_mass_array = numpy.linspace(
                self.ln_mass_min, self.ln_mass_max,
                defaults.default_precision["mass_npoints"])
            return None

        mass_limit_not_set = True
        while mass_limit_not_set:
            if 0.1*(1.0+0.05) < self.cosmo.nu_m(mass_min):
                #print "Min mass", mass_min,"too high..."
                mass_min = mass_min/1.05
                #print "\tSetting to",mass_min,"..."
                continue
            elif 0.1*(1.0-0.05) > self.cosmo.nu_m(mass_min):
                #print "Min mass", mass_min,"too low..."
                mass_min = mass_min*1.05
                #print "\tSetting to",mass_min,"..."
                continue
            if  50.0*(1.0-0.05) > self.cosmo.nu_m(mass_max):
                #print "Max mass", mass_max,"too low..."
                mass_max = mass_max*1.05
                #print "\tSetting to",mass_max,"..."
                continue
            elif 50.0*(1.0+0.05) < self.cosmo.nu_m(mass_max):
                #print "Max mass", mass_max,"too high..."
                mass_max = mass_max/1.05
                #print "\tSetting to",mass_max,"..."
                continue
            mass_limit_not_set = False

        #print "Mass Limits:",mass_min*(0.95),"-",mass_max*(1.05)

        self.ln_mass_min = numpy.log(mass_min)
        self.ln_mass_max = numpy.log(mass_max)

        self._ln_mass_array = numpy.linspace(
            self.ln_mass_min, self.ln_mass_max,
            defaults.default_precision["mass_npoints"])

    def _initialize_splines(self):
        self._nu_array = numpy.zeros_like(self._ln_mass_array)

        for idx in range(self._ln_mass_array.size):
            mass = numpy.exp(self._ln_mass_array[idx])
            self._nu_array[idx] = self.cosmo.nu_m(mass)

        self.nu_min = 1.001*self._nu_array[0]
        self.nu_max = 0.999*self._nu_array[-1]

        #print "nu_min:",self.nu_min,"nu_max:",self.nu_max

        self._nu_spline = InterpolatedUnivariateSpline(
            self._ln_mass_array, self._nu_array)
        self._ln_mass_spline = InterpolatedUnivariateSpline(
            self._nu_array, self._ln_mass_array)

        # Set M_star, the mass for which nu == 1
        self.m_star = self.mass(1.0)

    def _normalize(self):
        self.f_norm = 1.0
        norm = integrate.romberg(
            self.f_nu, self.nu_min, self.nu_max, vec_func=True,
            tol=defaults.default_precision["global_precision"],
            rtol=defaults.default_precision["mass_precision"],
            divmax=defaults.default_precision["divmax"])
        self.f_norm = 1.0/norm

        self.bias_norm = 1.0
        norm = integrate.romberg(
            lambda x: self.f_nu(x)*self.bias_nu(x),
            self.nu_min, self.nu_max, vec_func=True,
            tol=defaults.default_precision["global_precision"],
            rtol=defaults.default_precision["mass_precision"],
            divmax=defaults.default_precision["divmax"])
        self.bias_norm = 1.0/norm

    def f_nu(self, nu):
        """
        Halo mass function as a function of normalized mass over-density nu

        Args:
            nu: float array normalized mass over-density nu
        Returns:
            float array number of halos
        """
        nu_prime = nu*self.st_little_a
        return (
            self.f_norm*(1.0 + nu_prime**(-1.0*self.stq))*
            numpy.sqrt(nu_prime)*numpy.exp(-0.5*nu_prime)/nu)

    def f_m(self, mass):
        """
        Halo mass function as a function of halo mass

        Args:
            mass: float array halo mass
        Returns:
            float array number of halos
        """
        return self.f_nu(self.nu(mass))
    
    def dndm(self, mass):
        """
        Convenience function for computing the number of halos per mass.
        
        Args:
            mass: float value or array of halo mass in M_solar/h
        Returns:
            float value or array number of halos per mass per (Mpc/h)^3
            
        """
        try:
            _dndm = numpy.empty(len(mass))
            for idx, m in enumerate(mass):
                _dndm[idx] = 0.5*(self.cosmo.rho_bar()/(m*m)*
                                  self.f_m(m)*
                                  self._nu_spline.derivatives(numpy.log(m))[1])
            return _dndm
        except TypeError:
            return 0.5*(self.cosmo.rho_bar()/(mass*mass)*
                        self.f_m(mass)*
                        self._nu_spline.derivatives(numpy.log(mass))[1])

    def bias_nu(self, nu):
        """
        Halo bias as a function of nu.

        Args:
            nu: float array normalized mass over-density nu
        Returns:
            float array halo bias
        """
        nu_prime = nu*self.st_little_a
        return self.bias_norm*(
            1.0 + (nu_prime - 1.0)/self.delta_c +
            2.0*self.stq/(self.delta_c*(1.0 + nu_prime**self.stq)))
        
    def bias_m(self, mass):
        """
        Halo bias as a function of mass.

        Args:
            mass: float array halo mass
        Returns:
            float array halo bias
        """
        return self.bias_nu(self.nu(mass))

    def nu(self, mass):
        """
        nu as a function of halo mass.

        Args:
            mass: float array halo mass M [M_Solar]
        Returns:
            float array normalized mass over-density nu
        """
        return self._nu_spline(numpy.log(mass))

    def ln_mass(self, nu):
        """
        Natural log of halo mass as a function of nu.

        Args:
            nu: float array normalized mass over-density
        Returns:
            float array natural log mass [M_Solar]
        """
        return self._ln_mass_spline(nu)

    def mass(self, nu):
        """
        Halo mass as a function of nu.

        Args:
            nu: float array normalized mass over-density
        Returns:
            float array halo mass [M_Solar]
        """
        return numpy.exp(self.ln_mass(nu))

    def write(self, output_file_name):
        """
        Write current mass function values

        Args:
            output_file_name: string file name to write mass function parameters
        """
        print "M* = 10^%1.4f M_sun" % numpy.log10(self.m_star)
        output_file = open(output_file_name, "w")
        output_file.write("#ttype1 = mass [M_solar/h]\n#ttype2 = nu\n"
                          "#ttype3 = f(nu)\n#ttype4 = bias(nu)\n")
        for ln_mass, nu in zip(self._ln_mass_array, self._nu_array):
            output_file.write("%1.10f %1.10f %1.10f %1.10f\n" % (
                numpy.exp(ln_mass), nu, self.f_nu(nu), self.bias_nu(nu)))
        output_file.close()
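# Self-contained sketch of the trick in _initialize_splines above: tabulate a
# monotonic relation, then build splines in both directions so each lookup (and
# its inverse) is cheap. log10(mass) stands in for the cosmology-dependent nu(M).
import numpy
from scipy.interpolate import InterpolatedUnivariateSpline

ln_mass_array = numpy.linspace(numpy.log(1.0e9), numpy.log(1.0e16), 200)
nu_array = ln_mass_array / numpy.log(10.0)      # stand-in for cosmo.nu_m(mass)

nu_spline = InterpolatedUnivariateSpline(ln_mass_array, nu_array)
ln_mass_spline = InterpolatedUnivariateSpline(nu_array, ln_mass_array)

m_star = numpy.exp(ln_mass_spline(1.0))         # mass where nu == 1, as for m_star
print(m_star, nu_spline(numpy.log(m_star)))     # round-trips to nu ~ 1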
Example #38
0
def main():
    """\
    Script for plotting ci^2 values from EVB simulations along a US
    (umbrella sampling) reaction coordinate.
    """

    # Number of bins for 2D histogram, first US window, last US window
    # *** The spline function seems to do weird things when I have
    # asymmetric binning of r and ci^2.  I'd keep these the same for
    # now.
    spline = False
    # Perform boxcar averaging (or not)
    boxcar = True
    # Decide how to normalize the plot.  Either "Max" (normalize to the
    # maximum of the probability density) or "PDF" (construct the
    # probability density).
    norm = "Max"
    if spline:
        ncbins = 450
        nrbins = 450
    else:
        ncbins = 600
        nrbins = 900
    fbin = 0
    lbin = 36
    # Minimum and maximum distances along the reaction coordinate
    rcmin = 1.0
    rcmax = 10.0  # May need to adjust this!!!

    # Style of plotting errors on the PMF
    error_style = 'dashed'

    # Thickness of the frame
    mpl.rcParams['axes.linewidth'] = 1.25

    # Plot parameters (font, font size, use LaTeX)
    plt.rc('text', usetex=True)
    plt.rc('font', **{'family': 'serif', 'serif': ['Times'], 'size': 45})

    # Make a figure object and two subplots with the same (shared) x-axis
    #fig = plt.figure(figsize=(7, 7))
    fig = plt.figure(figsize=(6, 6))
    sub = fig.add_subplot(111)

    # Adjust graph scale (my widescreen monitor makes graphs that are
    # too large, so I reduce the dimensions)
    #fig.subplots_adjust(left=0.12, right=0.705, bottom=0.1, top=0.8)
    fig.subplots_adjust(left=0.12, right=0.588, bottom=0.1, top=0.8)

    # Axis labels
    sub.set_xlabel(r'RC (\AA)')
    sub.set_ylabel(r'c$_{\mathrm{max}}^2$')

    # Minor tick marks
    xminor = MultipleLocator(0.25)  # Reaction coordinate
    yminor = MultipleLocator(0.05)  # ci^2
    sub.xaxis.set_minor_locator(xminor)
    sub.yaxis.set_minor_locator(yminor)

    # Tick mark thickness
    sub.tick_params('both', length=5, width=1.25, which='major')
    sub.tick_params('both', length=2.5, width=1.25, which='minor')

    # x-axis limits
    sub.set_xlim([0.9, 10])
    sub.set_xticks([i + 1 for i in range(10)])

    # Bin along the RC and ci^2
    bin_r = np.linspace(rcmin, rcmax, nrbins)
    bin_ci2 = np.linspace(0.40, 1.0,
                          ncbins)  # Values are always bounded by 0 and 1

    # Store the 2D histogram
    hist2d = None

    # Collect data from the files storing the RC and ci values
    for i in range(fbin, lbin + 1):
        # Filenames as processed by the automate_ci2.sh script
        f = 'RCEC_CI2_BIN_' + str(i)
        # Collect the data from columns (column 0 is the timestep,
        # column 1 is the value of the RC, and column 2 is the
        # max ci^2 at that timestep).  We use NumPy's loadtxt which
        # is a fast way to collect data from columns.
        r, c = np.loadtxt(f, usecols=(1, 2), unpack=True)

        # Since we "throw away" the first 100 ps when creating the PMF
        # we also throw away the first 100 ps when generating the ci^2
        # distribution.
        if len(r) > 190001:
            # The final 100 ps of my simulations do weird things, so
            # I throw them out as well.
            r = r[10001:190001]
            c = c[10001:190001]
        else:
            r = r[10001:]
            c = c[10001:]

        # Make a 2D histogram (stored in H)
        H, bin_ci2, bin_r = np.histogram2d(c, r, bins=(bin_ci2, bin_r))

        # Since H is a NumPy array, the addition operation acts like
        # adding two matrices.  This allows us to avoid writing loops
        # ourselves.
        if hist2d is None:
            hist2d = H
        else:
            hist2d += H

    # Determine normalization factors
    normfac = [0] * len(hist2d[0])
    for i in range(len(hist2d)):
        for j in range(len(hist2d[i])):
            if norm == "Max":
                if hist2d[i][j] > normfac[j]:
                    normfac[j] = hist2d[i][j]
            elif norm == "PDF":
                normfac[j] += hist2d[i][j]

    # Remove the island pixels
    for i in range(len(hist2d)):
        # Zero the "island" pixels (those that are surrounded by white)
        for j in range(len(hist2d[i])):
            if i != 0 and i != len(hist2d) - 1:
                if j != 0 and j != len(hist2d[i]) - 1:
                    test = []
                    if hist2d[i - 1][j] == 0: test.append(True)
                    if hist2d[i + 1][j] == 0: test.append(True)
                    if hist2d[i][j - 1] == 0: test.append(True)
                    if hist2d[i][j + 1] == 0: test.append(True)
                    if len(test) == 4: hist2d[i][j] = 0
                    if len(test) == 3: hist2d[i][j] = 0
            elif i == 0:
                hist2d[i][j] = 0
            elif i == len(hist2d) - 1:
                hist2d[i][j] = 0
        # Do another pass to verify that the "island" pixels are zeroed,
        # since the above algorithm does not catch all pixels
        for j in range(len(hist2d[i])):
            # Zero the "island" pixels (those that are surrounded by white)
            if i != 0 and i != len(hist2d) - 1:
                if j != 0 and j != len(hist2d[i]) - 1:
                    test = []
                    if hist2d[i - 1][j] == 0: test.append(True)
                    if hist2d[i + 1][j] == 0: test.append(True)
                    if hist2d[i][j - 1] == 0: test.append(True)
                    if hist2d[i][j + 1] == 0: test.append(True)
                    if len(test) == 4: hist2d[i][j] = 0

    # Boxcar average the data
    if boxcar:
        hist2d_boxcar = smooth_data(hist2d)

    # Normalize the histogram
    for i in range(len(hist2d)):
        # Zero the "island" pixels (those that are surrounded by white)
        for j in range(len(hist2d[i])):
            if int(normfac[j]) != 0:
                hist2d[i][j] = hist2d[i][j] / normfac[j]

    # Mask where hist2d is zero
    if boxcar:
        histmasked = np.ma.masked_where(hist2d_boxcar == 0, hist2d)
    else:
        histmasked = np.ma.masked_where(hist2d == 0, hist2d)
    # 3D plot
    # The 'cmap' option controls the color of the 3D plot.  Other
    # options are given at the website (http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps)
    # or we could make our own.

    cdict = {
        'red': ((0.0, 0.0, 0.0), (0.02, 0.0, 0.0), (0.05, 0.0, 0.0),
                (0.2, 0.75, 0.75), (1.0, 1.0, 1.0)),
        'green': ((0.0, 0.0, 0.0), (0.02, 0.75, 0.75), (0.05, 1.0, 1.0),
                  (0.2, 0.75, 0.75), (1.0, 0.0, 0.0)),
        'blue': ((0.0, 1.0, 1.0), (0.02, 0.75, 0.75), (0.05, 0.0, 0.0),
                 (0.2, 0.0, 0.0), (1.0, 0.0, 0.0))
    }
    my_cmap = mpl.colors.LinearSegmentedColormap('my_colormap', cdict, 256)

    # Decide whether or not to spline the data
    if not spline:
        plt.pcolormesh(bin_r, bin_ci2, histmasked, cmap=my_cmap)
    else:
        # In order for the spline function to be happy, the dimensions of
        # the histogram (nrbins x ncbins) must match the dimensions of
        # the x- and y-variables.  The bin variables, bin_r and bin_ci2
        # have an extra value corresponding to one of the endpoints.
        # To get the right lengths of all arrays and not shift the
        # location of the features of ci^2, the midpoint of each bin is
        # taken.
        avg_r = [(bin_r[i] + bin_r[i - 1]) / 2 for i in range(1, nrbins)]
        avg_ci2 = [(bin_ci2[i] + bin_ci2[i - 1]) / 2 for i in range(1, ncbins)]
        # Use SciPy's function to spline the data
        #spl = RectBivariateSpline(avg_r, avg_ci2, histmasked, kx=3, ky=3)
        spl = RectBivariateSpline(avg_r, avg_ci2, hist2d, kx=3, ky=3)
        # For the splined data, we use more points than we did for
        # making the 2D histogram.
        nr = np.linspace(rcmin, rcmax, 600)
        nci2 = np.linspace(0.4, 1.0, 600)
        nh = spl(nr, nci2)
        # This part doesn't really work as well as we'd like.
        nhmasked = np.ma.masked_where(nh <= 8.0E-3, nh)
        nhmasked /= np.amax(nhmasked)
        plt.pcolormesh(nr, nci2, nhmasked, cmap=my_cmap)

    # This allows us to put a colorbar scale for the z-dimension
    # of the 2D plot.
    cbar = plt.colorbar(orientation='horizontal')

    # Make a second subplot
    sub2 = sub.twinx()
    sub2.set_xlim([0.9, 10])
    sub2.set_ylabel(r'Free Energy (kcal/mol)', rotation=270)
    y2minor = MultipleLocator(0.5)  # PMF (Free energy)
    sub2.yaxis.set_minor_locator(y2minor)
    sub2.set_ylim([-11, 1.0])
    sub2.set_yticks([-10, -8, -6, -4, -2, 0])

    # Tick mark thickness
    sub2.tick_params('both', length=5, width=1.25, which='major')
    sub2.tick_params('both', length=2.5, width=1.25, which='minor')

    # Collect and plot the PMF.  We need to do this second or the
    # line will be behind the 3D plot.
    fh = 'all.pmf.crt'
    r, pmf = np.loadtxt(fh, usecols=(0, 2), unpack=True)

    # Error bars
    fh = 'lst.pmf.crt'
    rl, pmf_l = np.loadtxt(fh, usecols=(0, 2), unpack=True)

    fh = 'frst.pmf.crt'
    rf, pmf_f = np.loadtxt(fh, usecols=(0, 2), unpack=True)

    # Spline data
    s = InterpolatedUnivariateSpline(r, pmf)
    rs = np.linspace(r[0], r[-1], 1000)
    pmfs = s(rs)

    sl = InterpolatedUnivariateSpline(rl, pmf_l)
    rls = np.linspace(rl[0], rl[-1], 1000)
    pmfls = sl(rls)

    sf = InterpolatedUnivariateSpline(rf, pmf_f)
    rfs = np.linspace(rf[0], rf[-1], 1000)
    pmffs = sf(rfs)

    if error_style == 'error_bar':
        # Use fewer points than used in the splines to do error bars
        reb = np.linspace(r[0], r[-1], 125)
        pmfeb = s(reb)
        pmfleb = sl(reb)
        pmffeb = sf(reb)

        # Error bars
        ebar = []
        ebar.append(pmfeb - pmfleb)
        ebar.append(pmffeb - pmfeb)

        sub2.errorbar(reb,
                      pmfeb,
                      yerr=ebar,
                      ecolor='k',
                      fmt=None,
                      elinewidth=3,
                      markeredgewidth=3)
    if error_style == 'shade':
        sub2.fill_between(
            rs,
            pmffs,
            pmfls,
            lw=0,
            facecolor='k',
            alpha=0.5,
        )
    sub2.plot(rs, pmfs, 'k', linewidth=3)
    if error_style == 'dashed':
        sub2.plot(rls[2:],
                  pmfls[2:],
                  'k',
                  linewidth=2,
                  linestyle='--',
                  dashes=(4, 3))
        sub2.plot(rfs[2:],
                  pmffs[2:],
                  'k',
                  linewidth=2,
                  linestyle='--',
                  dashes=(4, 3))

    #fig.tight_layout()
    plt.show()

    # Save the figure (remove this call if no file output is wanted)
    fig.savefig('pmf_2d_ci2histogram.png',
                dpi=300,
                transparent=True,
                format='png',
                bbox_inches='tight')
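# Hedged sketch of the dashed-error technique above: spline the central PMF and
# its two bounding estimates onto a common fine grid, then draw the centre solid
# and the bounds dashed. Synthetic stand-ins replace the all/frst/lst PMF files.
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import InterpolatedUnivariateSpline

r = np.linspace(1.0, 10.0, 40)
pmf = -10.0 * np.exp(-0.5 * (r - 2.5) ** 2)     # central estimate
rs = np.linspace(r[0], r[-1], 1000)

plt.plot(rs, InterpolatedUnivariateSpline(r, pmf)(rs), 'k', linewidth=3)
for bound in (pmf - 0.3, pmf + 0.3):            # first-half/last-half estimates
    plt.plot(rs, InterpolatedUnivariateSpline(r, bound)(rs),
             'k', linewidth=2, linestyle='--', dashes=(4, 3))
plt.show()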
Example #39
0
    def evaluate_color_term(self, sources, solution_num=0):
        """
        Evaluate color term for a given astrometric solution, using the
        source data and reference catalog.

        Parameters
        ----------
        sources: SourceTable object
            Source catalog with plate magnitudes and external catalog
            (Gaia EDR3) magnitudes
        solution_num: int
            Astrometric solution number

        """

        cat_mag1 = sources['gaiaedr3_bpmag'].data
        cat_mag2 = sources['gaiaedr3_rpmag'].data
        plate_mag = sources['mag_auto'].data
        mag_corr = sources['natmag_correction'].data
        mag_err = sources['magerr_auto'].data
        # Replace nans with numerical values
        mag_corr[np.isnan(mag_corr)] = 0.
        mag_err[np.isnan(mag_err)] = 1.
        num_calstars = len(sources)

        # Evaluate color term in 3 iterations

        self.log.write('Determining color term: {:d} stars'
                       ''.format(num_calstars),
                       double_newline=False,
                       level=4,
                       event=72,
                       solution_num=solution_num)

        if num_calstars < 10:
            self.log.write('Determining color term: too few stars!',
                           level=2,
                           event=72,
                           solution_num=solution_num)
            return None

        _, uind1 = np.unique(cat_mag1, return_index=True)
        plate_mag_u, uind2 = np.unique(plate_mag[uind1], return_index=True)
        cat_mag1_u = cat_mag1[uind1[uind2]]
        cat_mag2_u = cat_mag2[uind1[uind2]]
        mag_corr_u = mag_corr[uind1[uind2]]
        mag_err_u = mag_err[uind1[uind2]]

        # Discard faint sources (within 1 mag from the plate limit),
        # if the number of sources is larger than 100
        if len(plate_mag_u) > 100:
            diff_from_limit = 1.
        else:
            diff_from_limit = 0.

        kde = sm.nonparametric.KDEUnivariate(plate_mag_u.astype(np.double))
        kde.fit()
        ind_dense = np.where(kde.density > 0.2 * kde.density.max())[0]
        plate_mag_lim = kde.support[ind_dense[-1]]
        ind_nofaint = np.where(
            plate_mag_u < plate_mag_lim - diff_from_limit)[0]
        num_nofaint = len(ind_nofaint)

        self.log.write('Determining color term: {:d} stars after discarding '
                       'faint sources ({:.1f} mag from faint limit)'.format(
                           num_nofaint, diff_from_limit),
                       double_newline=False,
                       level=4,
                       event=72,
                       solution_num=solution_num)

        if num_nofaint < 10:
            self.log.write(
                'Determining color term: too few stars after '
                'discarding faint sources!',
                level=2,
                event=72,
                solution_num=solution_num)
            return None

        frac = 0.2

        if num_nofaint < 500:
            frac = 0.2 + 0.3 * (500 - num_nofaint) / 500.

        plate_mag_u = plate_mag_u[ind_nofaint]
        cat_mag1_u = cat_mag1_u[ind_nofaint]
        cat_mag2_u = cat_mag2_u[ind_nofaint]
        mag_corr_u = mag_corr_u[ind_nofaint]
        mag_err_u = mag_err_u[ind_nofaint]

        # Iteration 1
        cterm_list = np.arange(45) * 0.25 - 4.
        stdev_list = []

        for cterm in cterm_list:
            cat_mag = cat_mag2_u + cterm * (cat_mag1_u - cat_mag2_u)
            z = sm.nonparametric.lowess(cat_mag,
                                        plate_mag_u,
                                        frac=frac,
                                        it=0,
                                        delta=0.2,
                                        return_sorted=True)
            s = InterpolatedUnivariateSpline(z[:, 0], z[:, 1], k=1)
            mag_diff = cat_mag - s(plate_mag_u) - mag_corr_u
            stdev_val = (
                np.sqrt(np.sum((mag_diff / mag_err_u)**2) / len(mag_diff)) *
                np.sqrt(np.sum(mag_err_u**2) / len(mag_diff)))
            stdev_list.append(stdev_val)

            # Store cterm data
            self.phot_cterm_list.append(
                OrderedDict([('solution_num', solution_num), ('iteration', 1),
                             ('cterm', cterm), ('stdev', stdev_val),
                             ('num_stars', len(mag_diff))]))

        if max(stdev_list) < 0.01:
            self.log.write('Color term fit failed! '
                           '(iteration 1, num_stars = {:d}, '
                           'max_stdev = {:.3f})'.format(
                               len(mag_diff), max(stdev_list)),
                           level=2,
                           event=72,
                           solution_num=solution_num)
            return None

        # Fit curve to stdev_list and get the cterm_min value
        params, pcov = curve_fit(_abscurve, cterm_list, stdev_list)
        perr = np.sqrt(np.diag(pcov))
        cterm_min = params[0]

        self.log.write('Color term fit (iteration 1, num_stars = {:d}, '
                       'min_stdev = {:.3f}, max_stdev = {:.3f}): '
                       'parameters {:.4f} {:.4f} {:.4f} {:.4f}, '
                       'errors {:.4f} {:.4f} {:.4f} {:.4f}'.format(
                           len(mag_diff), min(stdev_list), max(stdev_list),
                           *params, *perr),
                       double_newline=False,
                       level=4,
                       event=72,
                       solution_num=solution_num)

        if cterm_min < -3 or cterm_min > 5:
            self.log.write('Color term outside of allowed range!',
                           level=2,
                           event=72,
                           solution_num=solution_num)
            return None

        # Eliminate outliers (over 1.5 mag + sigma clip)
        cat_mag = cat_mag2_u + cterm_min * (cat_mag1_u - cat_mag2_u)
        z = sm.nonparametric.lowess(cat_mag,
                                    plate_mag_u,
                                    frac=frac,
                                    it=3,
                                    delta=0.2,
                                    return_sorted=True)
        s = InterpolatedUnivariateSpline(z[:, 0], z[:, 1], k=1)
        mag_diff = cat_mag - s(plate_mag_u) - mag_corr_u
        ind1 = np.where(np.absolute(mag_diff) <= 1.5)[0]
        flt = sigma_clip(mag_diff[ind1], maxiters=None)
        ind_good1 = ~flt.mask
        ind_good = ind1[ind_good1]

        # Iteration 2
        cterm_list = np.arange(45) * 0.25 - 4.
        stdev_list = []

        frac = 0.2

        if len(ind_good) < 500:
            frac = 0.2 + 0.3 * (500 - len(ind_good)) / 500.

        for cterm in cterm_list:
            cat_mag = cat_mag2_u + cterm * (cat_mag1_u - cat_mag2_u)
            z = sm.nonparametric.lowess(cat_mag[ind_good],
                                        plate_mag_u[ind_good],
                                        frac=frac,
                                        it=0,
                                        delta=0.2,
                                        return_sorted=True)
            s = InterpolatedUnivariateSpline(z[:, 0], z[:, 1], k=1)
            mag_diff = (cat_mag[ind_good] - s(plate_mag_u[ind_good]) -
                        mag_corr_u[ind_good])
            stdev_val = (
                np.sqrt(
                    np.sum(
                        (mag_diff / mag_err_u[ind_good])**2) / len(mag_diff)) *
                np.sqrt(np.sum(mag_err_u[ind_good]**2) / len(mag_diff)))
            stdev_list.append(stdev_val)

            # Store cterm data
            self.phot_cterm_list.append(
                OrderedDict([('solution_num', solution_num), ('iteration', 2),
                             ('cterm', cterm), ('stdev', stdev_val),
                             ('num_stars', len(mag_diff))]))

        stdev_list = np.array(stdev_list)

        if max(stdev_list) < 0.01:
            self.log.write('Color term fit failed! '
                           '(iteration 2, num_stars = {:d}, '
                           'max_stdev = {:.3f})'.format(
                               len(mag_diff), max(stdev_list)),
                           level=2,
                           event=72,
                           solution_num=solution_num)
            return None

        # Fit curve to stdev_list and get the cterm_min value
        params, pcov = curve_fit(_abscurve, cterm_list, stdev_list)
        perr = np.sqrt(np.diag(pcov))
        cterm_min = params[0]
        cterm_min_err = perr[0]

        self.log.write('Color term fit (iteration 2, num_stars = {:d}, '
                       'min_stdev = {:.3f}, max_stdev = {:.3f}): '
                       'parameters {:.4f} {:.4f} {:.4f} {:.4f}, '
                       'errors {:.4f} {:.4f} {:.4f} {:.4f}'.format(
                           len(mag_diff), min(stdev_list), max(stdev_list),
                           *params, *perr),
                       double_newline=False,
                       level=4,
                       event=72,
                       solution_num=solution_num)

        if cterm_min < -3 or cterm_min > 5:
            self.log.write('Color term outside of allowed range!',
                           level=2,
                           event=72,
                           solution_num=solution_num)
            return None

        stdev_fit_iter2 = np.nan
        stdev_min_iter2 = np.min(stdev_list)
        cterm_minval_iter2 = np.min(cterm_list)
        cterm_maxval_iter2 = np.max(cterm_list)
        num_stars_iter2 = len(mag_diff)

        if params[1] < 0 or min(stdev_list) < 0.01:
            self.log.write('Color term fit failed! '
                           '(iteration 2, num_stars = {:d}, params[1] = {:f}, '
                           'min_stdev = {:.3f})'.format(
                               len(mag_diff), params[1], min(stdev_list)),
                           level=2,
                           event=72,
                           solution_num=solution_num)
            return None

        # Iteration 3
        cterm_list = (np.arange(41) * 0.02 + round(cterm_min * 50.) / 50. -
                      0.4)
        stdev_list = []

        for cterm in cterm_list:
            cat_mag = cat_mag2_u + cterm * (cat_mag1_u - cat_mag2_u)
            z = sm.nonparametric.lowess(cat_mag[ind_good],
                                        plate_mag_u[ind_good],
                                        frac=frac,
                                        it=0,
                                        delta=0.2,
                                        return_sorted=True)
            s = InterpolatedUnivariateSpline(z[:, 0], z[:, 1], k=1)
            mag_diff = (cat_mag[ind_good] - s(plate_mag_u[ind_good]) -
                        mag_corr_u[ind_good])
            stdev_val = (
                np.sqrt(
                    np.sum(
                        (mag_diff / mag_err_u[ind_good])**2) / len(mag_diff)) *
                np.sqrt(np.sum(mag_err_u[ind_good]**2) / len(mag_diff)))
            stdev_list.append(stdev_val)

            # Store cterm data
            self.phot_cterm_list.append(
                OrderedDict([('solution_num', solution_num), ('iteration', 3),
                             ('cterm', cterm), ('stdev', stdev_val),
                             ('num_stars', len(mag_diff))]))

        stdev_list = np.array(stdev_list)

        cf, cov = np.polyfit(cterm_list,
                             stdev_list,
                             2,
                             w=1. / stdev_list**2,
                             cov=True)
        cterm = -0.5 * cf[1] / cf[0]
        cf_err = np.sqrt(np.diag(cov))
        cterm_err = np.sqrt((-0.5 * cf_err[1] / cf[0])**2 +
                            (0.5 * cf[1] * cf_err[0] / cf[0]**2)**2)
        p2 = np.poly1d(cf)
        stdev_fit = p2(cterm)
        stdev_min = np.min(stdev_list)
        cterm_minval = np.min(cterm_list)
        cterm_maxval = np.max(cterm_list)
        num_stars = len(mag_diff)
        iteration = 3

        self.log.write('Color term fit (iteration 3, num_stars = {:d}, '
                       'min_stdev = {:.3f}, max_stdev = {:.3f}, '
                       'min_cterm = {:.3f}, max_cterm = {:.3f}): '
                       'parameters {:.4f} {:.4f} {:.4f}, '
                       'errors {:.4f} {:.4f} {:.4f}'.format(
                           num_stars, min(stdev_list), max(stdev_list),
                           cterm_minval, cterm_maxval, *cf, *cf_err),
                       double_newline=False,
                       level=4,
                       event=72,
                       solution_num=solution_num)

        if cf[0] < 0 or cterm < -3 or cterm > 5:
            if cf[0] < 0:
                self.log.write('Color term fit not reliable!',
                               level=2,
                               event=72,
                               solution_num=solution_num)
            else:
                self.log.write('Color term outside of allowed range '
                               '({:.3f})!'.format(cterm),
                               level=2,
                               event=72,
                               solution_num=solution_num)

            if cterm_min < -3 or cterm_min > 5:
                self.log.write('Color term from previous iteration '
                               'outside of allowed range ({:.3f})!'
                               ''.format(cterm_min),
                               level=2,
                               event=72,
                               solution_num=solution_num)
                return None
            else:
                cterm = cterm_min
                cterm_err = cterm_min_err
                stdev_fit = stdev_fit_iter2
                stdev_min = stdev_min_iter2
                cterm_minval = cterm_minval_iter2
                cterm_maxval = cterm_maxval_iter2
                num_stars = num_stars_iter2
                iteration = 2

            self.log.write('Taking color term from previous iteration',
                           level=4,
                           event=72,
                           solution_num=solution_num)

        # Create dictionary for calibration results, if not exists
        if self.phot_calib is None:
            self.phot_calib = OrderedDict()
            self.phot_calib['solution_num'] = solution_num
            self.phot_calib['iteration'] = 0

        # Store color term result
        self.phot_calib['color_term'] = cterm
        self.phot_calib['color_term_error'] = cterm_err
        self.phot_calib['cterm_stdev_fit'] = stdev_fit
        self.phot_calib['cterm_stdev_min'] = stdev_min
        self.phot_calib['cterm_range_min'] = cterm_minval
        self.phot_calib['cterm_range_max'] = cterm_maxval
        self.phot_calib['cterm_iterations'] = iteration
        self.phot_calib['cterm_num_stars'] = num_stars

        self.log.write(
            'Plate color term (solution {:d}): {:.3f} ({:.3f})'.format(
                solution_num, cterm, cterm_err),
            level=4,
            event=72,
            solution_num=solution_num)
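# Condensed sketch of the scan-and-fit pattern in evaluate_color_term above:
# compute a misfit statistic on a grid of trial color terms, then take the
# minimum from a weighted quadratic fit (as in iteration 3). The stdev curve
# here is a synthetic stand-in for the lowess/spline residual statistic.
import numpy as np

cterm_list = np.arange(45) * 0.25 - 4.0
stdev_list = 0.05 + 0.01 * (cterm_list - 0.7) ** 2   # synthetic stdev curve
cf = np.polyfit(cterm_list, stdev_list, 2, w=1. / stdev_list ** 2)
cterm = -0.5 * cf[1] / cf[0]                         # vertex of the fitted parabola
print(cterm)                                         # ~0.7 for this synthetic curve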
Example #40
0
def get_emission_delay_BB(params,
                          kmax=100,
                          lmax=3000,
                          non_linear=True,
                          CMB_unit='muK',
                          raw_cl=False,
                          acc=1,
                          lsamp=None,
                          return_terms=False,
                          include_reionization=True):
    r"""
    Get B modes from emission angle and time delay effects.
    Uses full-sky result from appendix of `arXiv:1706.02673 <https://arxiv.org/abs/1706.02673>`_

    :param params: :class:`.model.CAMBparams` instance with cosmological parameters etc.
    :param kmax: maximum k (in :math:`{\rm Mpc}^{-1}` units)
    :param lmax: maximum :math:`\ell`
    :param non_linear: include non-linear corrections
    :param CMB_unit: normalization for the result
    :param raw_cl: if true return :math:`C_\ell`, else :math:`\ell(\ell+1)C_\ell/2\pi`
    :param acc: accuracy setting, increase to test stability
    :param lsamp: array of :math:`\ell` values to compute output at. If not set, set to sampling good for interpolation
    :param return_terms: return the three sub-terms separately rather than the total
    :param include_reionization: approximately include reionization terms by second scattering surface
    :return: InterpolatedUnivariateSpline for :math:`C_\ell^{BB}`
    """

    from scipy.interpolate import InterpolatedUnivariateSpline

    assert (np.isclose(params.omk, 0))
    camb_background = isitgr.get_background(params)
    chi_source = camb_background.tau0 - camb_background.tau_maxvis
    z_source = camb_background.redshift_at_comoving_radial_distance(chi_source)

    PK = isitgr.get_matter_power_interpolator(params,
                                              nonlinear=non_linear,
                                              hubble_units=False,
                                              k_hunit=False,
                                              kmax=kmax,
                                              var1=model.Transfer_Weyl,
                                              var2=model.Transfer_Weyl,
                                              zmax=z_source)

    assert (lmax > 250)
    lsampvelcl = np.hstack((np.arange(2, 20, 2), np.arange(25, 200, 20 // acc),
                            np.arange(220, lmax, 40 // acc)))

    lmax_e = max(1500, lmax * 2)
    pars = params.copy()
    pars.set_for_lmax(lmax_e, lens_potential_accuracy=1)
    cmb = get_source_cmb_cl(pars, CMB_unit=CMB_unit)

    totautoB = np.zeros(lsampvelcl.shape)
    totBEterm = np.zeros(lsampvelcl.shape)
    totBxterm = np.zeros(lsampvelcl.shape)

    for reion in [False, True]:
        if reion:
            if not include_reionization:
                break
            zreion = params.get_zre()
            chi_source = camb_background.tau0 - camb_background.conformal_time(
                zreion)
            lmax_e = 300
            tag_E = 'E2'
            tag_zeta = 'emit2'
            lstep = 1
        else:
            tag_E = 'E1'
            tag_zeta = 'emit1'
            lstep = 5

        cl_psi_d_sp, cl_psi_d_x_lens_sp = get_emission_angle_powers(
            camb_background, PK, chi_source, lmax_e, acc, lsamp)

        lsarr = np.arange(2, lmax_e + 1, dtype=np.float64)
        llp1 = lsarr * (lsarr + 1.)
        cdd = cl_psi_d_sp(lsarr) * (lsarr + 2) * (lsarr - 1) * (2 * lsarr + 1)
        cd = cl_psi_d_sp(lsarr) / llp1 * (2 * lsarr + 1)
        cxdd = cl_psi_d_x_lens_sp(lsarr) * (2 * lsarr + 1)
        cxd = cxdd / llp1
        cEE = cmb['%sx%s' % (tag_E, tag_E)][2:lmax_e + 1] / llp1 * (
            2 * lsarr + 1)  # raw CL
        cEx = cmb['%sx%s' % (tag_E, tag_zeta)][2:lmax_e + 1] * (2 * lsarr + 1)
        cExx = cEx / llp1
        czeta = cmb['%sx%s' % (tag_zeta, tag_zeta)][2:lmax_e +
                                                    1] / llp1 * (2 * lsarr + 1)

        for i, ll in enumerate(lsampvelcl):
            if reion and ll > lmax_e:
                break
            for llp in range(2, lmax_e, lstep):
                lp = np.float64(llp)
                wig = threej(llp, ll, 2, -2)
                minl = np.abs(llp - ll)
                if minl < 2:
                    wig = wig[2 - minl:]

                wigx = threej(llp, ll, 0, -2)
                minl = max(2, np.abs(llp - ll))
                maxl = min(lmax_e, np.abs(llp + ll))
                off = 0
                if (minl + llp + ll) % 2 == 0:
                    off = 1
                wig2 = wig[off:maxl - minl + 1:2]**2
                totautoB[i] += lstep * np.dot(
                    wig2, czeta[minl + off - 2:maxl + 1 - 2:2]) * cdd[llp - 2]
                totBEterm[i] += lstep * np.dot(
                    wig2, cd[minl + off - 2:maxl + 1 - 2:2] -
                    cxdd[minl + off - 2:maxl + 1 - 2:2] - (lp * (lp + 1) - ll *
                                                           (ll + 1)) *
                    cxd[minl + off - 2:maxl + 1 - 2:2]) * cEE[llp - 2]
                wigx2 = wigx[off:maxl - minl + 1:2] * wig[off:maxl - minl +
                                                          1:2]
                totBxterm[i] += lstep * (
                    np.dot(wigx2, cExx[minl + off - 2:maxl + 1 - 2:2]) *
                    (2 * cd[llp - 2] - ((lp * (lp + 1) - ll *
                                         (ll + 1)) * cxd[llp - 2])) -
                    np.dot(wigx2, cEx[minl + off - 2:maxl + 1 - 2:2]) *
                    cxd[llp - 2]) * np.sqrt(lp * (lp + 1) * (lp + 2) *
                                            (lp - 1))

    fac = 1 / 2.  # (4 * np.pi)  [CMB Cls already have the 1/(2 pi) factored in]
    if not raw_cl:
        fac *= lsampvelcl * (lsampvelcl + 1) / (2 * np.pi)
    totautoB *= fac
    totBEterm *= fac
    totBxterm *= fac
    if return_terms:
        return InterpolatedUnivariateSpline(lsampvelcl, totautoB), \
               InterpolatedUnivariateSpline(lsampvelcl, totBEterm), \
               InterpolatedUnivariateSpline(lsampvelcl, totBxterm)
    else:
        return InterpolatedUnivariateSpline(lsampvelcl,
                                            totBxterm + totBEterm + totautoB)
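
A minimal, self-contained sketch of the pattern the function above relies on: compute an expensive quantity only on a coarse ell sampling, then return an InterpolatedUnivariateSpline so callers can evaluate it at any ell. The placeholder spectrum below is an assumption standing in for the Wigner-3j sums; it is not CAMB/ISiTGR output.

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

lsamp = np.hstack((np.arange(2, 20, 2), np.arange(25, 200, 20),
                   np.arange(220, 3000, 40)))
cl_coarse = 1e-6 * (lsamp / 100.0)**2 * np.exp(-(lsamp / 1500.0)**2)  # placeholder

cl_bb = InterpolatedUnivariateSpline(lsamp, cl_coarse)
ells = np.arange(2, 2001)
print(cl_bb(ells)[:5])  # smooth C_ell^BB at every integer ell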
Example #41
 def __init__(self, x, y, comment=''):
     IU_Spline.__init__(self, x, y)
     Component.__init__(self, self.get_c(), comment)
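
A runnable sketch of the mixin pattern in Example #41: subclass InterpolatedUnivariateSpline and hand its fitted coefficients (via the real get_coeffs method) to a second base class. IU_Spline, Component and get_c come from the host package; the stand-in Component below is an assumption for illustration only.

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

class Component:
    def __init__(self, coeffs, comment=''):
        self.coeffs = np.asarray(coeffs)
        self.comment = comment

class SplineComponent(InterpolatedUnivariateSpline, Component):
    def __init__(self, x, y, comment=''):
        InterpolatedUnivariateSpline.__init__(self, x, y)
        Component.__init__(self, self.get_coeffs(), comment)

x = np.linspace(0, 1, 20)
sc = SplineComponent(x, np.sin(x), comment='demo')
print(sc(0.5), len(sc.coeffs))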
Example #42
    def __init__(self,
                 xi_r_filename,
                 delta_r_filename,
                 xi_smu_filename,
                 covmat_filename=None,
                 sv_filename=None,
                 vr_filename=None,
                 full_fit=1,
                 smin=0,
                 smax=150,
                 model=1,
                 const_sv=0,
                 model_as_truth=0,
                 Omega_m0=0.285,
                 s8=0.828,
                 eff_z=0.57,
                 vr_coupling='spherical'):

        self.xi_r_filename = xi_r_filename
        self.delta_r_filename = delta_r_filename
        self.sv_filename = sv_filename
        self.vr_filename = vr_filename
        self.xi_smu_filename = xi_smu_filename
        self.covmat_filename = covmat_filename
        self.smin = smin
        self.smax = smax
        self.const_sv = const_sv
        self.model = model
        self.model_as_truth = model_as_truth
        self.vr_coupling = vr_coupling

        # full fit (monopole + quadrupole)
        self.full_fit = bool(full_fit)

        print("Setting up redshift-space distortions model.")

        # cosmology for Minerva
        self.Omega_m0 = Omega_m0
        self.s8 = s8
        self.cosmo = Cosmology(om_m=self.Omega_m0)
        self.nmocks = 299  # hardcoded for Minerva

        self.eff_z = eff_z
        self.dA = self.cosmo.get_angular_diameter_distance(self.eff_z)

        self.growth = self.cosmo.get_growth(self.eff_z)
        self.f = self.cosmo.get_f(self.eff_z)
        self.b = 2.01
        self.beta = self.f / self.b
        self.s8norm = self.s8 * self.growth

        eofz = np.sqrt(
            (self.Omega_m0 * (1 + self.eff_z)**3 + 1 - self.Omega_m0))
        self.iaH = (1 + self.eff_z) / (100. * eofz)

        # set this to true if you want to test the
        # performance of the model using the
        # measured radial velocities
        if self.vr_coupling == 'true':
            print('Using true radial velocity profile.')
        elif self.vr_coupling == 'spherical':
            print('Calculating peculiar velocities from spherical collapse.')
        else:
            sys.exit('Velocity-to-density coupling not recognized.')

        # read real-space galaxy monopole
        data = np.genfromtxt(self.xi_r_filename)
        self.r_for_xi = data[:, 0]
        xi_r = data[:, 1]
        self.xi_r = InterpolatedUnivariateSpline(self.r_for_xi,
                                                 xi_r,
                                                 k=3,
                                                 ext=0)

        int_xi_r = np.zeros_like(self.r_for_xi)
        dr = np.diff(self.r_for_xi)[0]
        for i in range(len(int_xi_r)):
            int_xi_r[i] = 1. / (self.r_for_xi[i] + dr / 2)**3 * (np.sum(
                xi_r[:i + 1] * ((self.r_for_xi[:i + 1] + dr / 2)**3 -
                                (self.r_for_xi[:i + 1] - dr / 2)**3)))
        self.int_xi_r = InterpolatedUnivariateSpline(self.r_for_xi,
                                                     int_xi_r,
                                                     k=3,
                                                     ext=0)

        # read void-matter correlation function
        data = np.genfromtxt(self.delta_r_filename)
        self.r_for_delta = data[:, 0]
        delta_r = data[:, -2]

        Delta_r = np.zeros_like(self.r_for_delta)
        dr = np.diff(self.r_for_delta)[0]
        for i in range(len(Delta_r)):
            Delta_r[i] = 1. / (self.r_for_delta[i] + dr / 2)**3 * (np.sum(
                delta_r[:i + 1] * ((self.r_for_delta[:i + 1] + dr / 2)**3 -
                                   (self.r_for_delta[:i + 1] - dr / 2)**3)))
        self.Delta_r = InterpolatedUnivariateSpline(self.r_for_delta,
                                                    Delta_r,
                                                    k=3,
                                                    ext=3)
        self.delta_r = InterpolatedUnivariateSpline(self.r_for_delta,
                                                    delta_r,
                                                    k=3,
                                                    ext=3)

        # read los velocity dispersion profile
        self.r_for_v, self.mu_for_v, sv = Utilities.ReadData_TwoDims(
            self.sv_filename)
        self.sv_converge = sv[-1, -1]
        sv /= self.sv_converge  # normalize velocity dispersion
        self.sv = RectBivariateSpline(self.r_for_v, self.mu_for_v, sv)

        if self.vr_coupling == 'true':
            # read radial velocity profile
            data = np.genfromtxt(self.vr_filename)
            self.r_for_v = data[:, 0]
            vr = data[:, 1]
            self.vr = InterpolatedUnivariateSpline(self.r_for_v,
                                                   vr,
                                                   k=3,
                                                   ext=0)

        # read redshift-space correlation function
        self.s_for_xi, self.mu_for_xi, self.xi_smu = Utilities.ReadData_TwoDims(
            self.xi_smu_filename)

        if self.model_as_truth:
            print('Using the model prediction as the measurement.')
            sigma_v = self.sv_converge
            alpha = 1.0
            epsilon = 1.0
            alpha_para = alpha * epsilon**(-2 / 3)
            alpha_perp = epsilon * alpha_para

            self.xi0_s, self.xi2_s, self.xi4_s = self.multipoles_theory(
                self.Omega_m0, sigma_v, alpha_perp, alpha_para, self.s_for_xi,
                self.mu_for_xi)
        else:
            s, self.xi0_s = Utilities.getMultipole(0, self.s_for_xi,
                                                   self.mu_for_xi, self.xi_smu)
            s, self.xi2_s = Utilities.getMultipole(2, self.s_for_xi,
                                                   self.mu_for_xi, self.xi_smu)
            s, self.xi4_s = Utilities.getMultipole(4, self.s_for_xi,
                                                   self.mu_for_xi, self.xi_smu)

        # read covariance matrix
        if self.covmat_filename and os.path.isfile(self.covmat_filename):
            print('Reading covariance matrix: ' + self.covmat_filename)
            self.cov = np.load(self.covmat_filename)
            self.icov = np.linalg.inv(self.cov)
        else:
            sys.exit('Covariance matrix not found.')

        # restrict measured vectors to the desired fitting scales
        if (self.smax < self.s_for_xi.max()) or (self.smin >
                                                 self.s_for_xi.min()):

            scales = (self.s_for_xi >= self.smin) & (self.s_for_xi <=
                                                     self.smax)

            # truncate redshift-space data vectors
            self.s_for_xi = self.s_for_xi[scales]
            self.xi0_s = self.xi0_s[scales]
            self.xi2_s = self.xi2_s[scales]
            self.xi4_s = self.xi4_s[scales]

        # build data vector
        if self.full_fit:
            self.datavec = np.concatenate((self.xi0_s, self.xi2_s))
        else:
            self.datavec = self.xi2_s
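
The cumulative integrals above (int_xi_r, Delta_r) are volume-weighted averages built from shell sums and then splined. A self-contained sketch with a toy void profile (an assumption in place of the data files):

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

r = np.linspace(5.0, 150.0, 30)
delta_r = -0.8 * np.exp(-(r / 40.0)**2)    # toy void density profile (assumption)

dr = np.diff(r)[0]
Delta_r = np.array([
    np.sum(delta_r[:i + 1] * ((r[:i + 1] + dr / 2)**3 - (r[:i + 1] - dr / 2)**3))
    / (r[i] + dr / 2)**3
    for i in range(len(r))
])
Delta_spline = InterpolatedUnivariateSpline(r, Delta_r, k=3, ext=3)
print(Delta_spline(50.0))   # cumulative (volume-averaged) density contrast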
Example #43
def combine_templates(temps,
                      As=None,
                      noise=None,
                      scaling=None,
                      vel=0,
                      unit='wl'):
    """
    make a Spectrum from a linear combination of template Spectrum objects
    
    temps should be a sequence of Spectrum objects, and As either None 
    (random weights) or a sequence equal to the length of temps
    
    if scaling is not None, it sets the maximum value of the resultant spectrum
    
    noise can be:
    * None/False/0: no noise
    * 'poisson##.##': poisson noise where ##.## is a factor to multiply the flux 
      by before scaling
    * a float:random noise of given amplitude
    
    returns newspec,As
    """
    from astropysics.spec import Spectrum
    from scipy.interpolate import InterpolatedUnivariateSpline

    if As is None:
        As = np.random.rand(len(temps))

    As = np.array(As, copy=False)
    if len(As) != len(temps):
        raise ValueError('As does not match templates size')

    xs = []
    fs = []
    for temp in temps:
        oldunit = temp.unit
        try:
            temp.unit = unit
            xs.append(temp.x)
            fs.append(temp.flux)
        finally:
            temp.unit = oldunit

    x0 = xs[0]
    for i, x in enumerate(xs):
        if np.any(x != x0):
            raise ValueError('Spectrum %i x does not match' % i)

    fx = (np.array(fs) * As.reshape((As.size, 1))).sum(axis=0)

    err = None
    if noise:
        if isinstance(noise, str):
            if 'poisson' in noise:
                pscale = noise.replace('poisson', '')
                if pscale.strip() == '':
                    pscale = 1
                else:
                    pscale = float(pscale)
                err = np.sqrt(pscale * fx) / pscale
                fx = np.random.poisson(pscale * fx) / pscale
            else:
                raise ValueError('noise parameter is an invalid string')
        else:
            noisescale = float(noise)
            fx += noisescale * np.random.randn(*fx.shape)
            err = noisescale

    if vel != 0:
        xnew = x0 * (1 + vel / 3e5)
        bad = (x0 > xnew.max()) | (x0 < xnew.min())

        s = InterpolatedUnivariateSpline(xnew, fx)
        fx = s(x0)
        fx[bad] = np.mean(fx[~bad])

        if err is not None:
            se = InterpolatedUnivariateSpline(xnew, err)
            err = se(x0)
            err[bad] = np.mean(err[~bad])

    return Spectrum(x=x0, flux=fx, unit=unit, err=err), As
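
The vel != 0 branch above Doppler-shifts a spectrum by splining the flux on the shifted wavelength grid and resampling onto the original one. A minimal sketch with toy data (the wavelength grid and flux are assumptions):

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

x0 = np.linspace(4000.0, 7000.0, 500)         # toy wavelength grid (assumption)
fx = 1 + 0.1 * np.sin(x0 / 50.0)              # toy flux (assumption)
vel = 300.0                                   # km/s

xnew = x0 * (1 + vel / 3e5)                   # shifted wavelengths
bad = (x0 > xnew.max()) | (x0 < xnew.min())   # pixels the shift leaves uncovered
fx_shifted = InterpolatedUnivariateSpline(xnew, fx)(x0)
fx_shifted[bad] = np.mean(fx_shifted[~bad])   # pad the edge with the mean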
Example #44
    def multipoles_theory(self, Omega_m0, sigma_v, alpha_perp, alpha_para, s,
                          mu):
        '''
        RSD model that relies on spherical collapse model 
        to map from densities to velocities.
        '''

        monopole = np.zeros(len(s))
        quadrupole = np.zeros(len(s))
        hexadecapole = np.zeros(len(s))
        true_mu = np.zeros(len(mu))
        xi_model = np.zeros(len(mu))

        if self.vr_coupling == 'spherical':
            # set up parameters for spherical collapse
            Omega_L0 = 1 - Omega_m0
            zi = 999
            zf = self.eff_z
            z = np.linspace(zi, zf, 100)
            a = 1 / (1 + z)
            t = CosmologicalTime(zi, zf)

            # array of initial linear densities
            Delta_i = np.linspace(-0.01, 0.0025, 1000)

            # find solutions to spherical collapse ODE
            sol1 = []
            sol2 = []
            for dl in Delta_i:
                g0 = [1 - dl / 3, -dl / 3]
                sol = odeint(SphericalCollapse,
                             g0,
                             t,
                             args=(Omega_m0, Omega_L0))
                y = sol[:, 0]
                yprime = sol[:, 1]
                sol1.append(y[-1]**-3 - 1)
                sol2.append(yprime[-1])

            # find the initial Deltas that match the late ones
            interp_Delta = InterpolatedUnivariateSpline(sol1,
                                                        Delta_i,
                                                        k=3,
                                                        ext=0)
            matched_Delta_i = interp_Delta(self.Delta_r(self.r_for_delta))

            # find the peculiar velocities associated with the late-time Deltas
            interp_vpec = InterpolatedUnivariateSpline(Delta_i,
                                                       sol2,
                                                       k=3,
                                                       ext=0)
            matched_vpec = interp_vpec(matched_Delta_i)

            # transform peculiar velocities to desired units
            H = Hubble(a=a[-1], Omega_m0=Omega_m0, Omega_L0=Omega_L0)
            q = self.r_for_delta * a[-1] * (
                1 + self.Delta_r(self.r_for_delta))**(1 / 3)
            vpec = matched_vpec * H * q
            dvpec = np.gradient(vpec, self.r_for_delta)
            self.vr = InterpolatedUnivariateSpline(self.r_for_delta,
                                                   vpec,
                                                   k=3,
                                                   ext=0)
            self.dvr = InterpolatedUnivariateSpline(self.r_for_delta,
                                                    dvpec,
                                                    k=3,
                                                    ext=0)

        # rescale input monopole functions to account for alpha values
        mus = np.linspace(0, 1., 80)
        r = self.r_for_delta
        rescaled_r = np.zeros_like(r)
        for i in range(len(r)):
            rescaled_r[i] = np.trapz(
                (r[i] * alpha_para) *
                np.sqrt(1. + (1. - mus**2) *
                        (alpha_perp**2 / alpha_para**2 - 1)), mus)

        x = rescaled_r
        y1 = self.xi_r(r)
        y2 = self.vr(r)
        y3 = self.dvr(r)
        y4 = self.sv(r)

        # build rescaled interpolating functions using the relabelled separation vectors
        rescaled_xi_r = InterpolatedUnivariateSpline(x, y1, k=3, ext=0)
        rescaled_vr = InterpolatedUnivariateSpline(x, y2, k=3, ext=0)
        rescaled_dvr = InterpolatedUnivariateSpline(x, y3, k=3, ext=0)
        rescaled_sv = InterpolatedUnivariateSpline(x, y4, k=3, ext=0)
        sigma_v = alpha_para * sigma_v

        for i in range(len(s)):
            for j in range(len(mu)):
                true_sperp = s[i] * np.sqrt(1 - mu[j]**2) * alpha_perp
                true_spar = s[i] * mu[j] * alpha_para
                true_s = np.sqrt(true_spar**2. + true_sperp**2.)
                true_mu[j] = true_spar / true_s

                # solve Eq. 7 from arXiv 1712.07575
                def residual(rpar):
                    rperp = true_sperp
                    r = np.sqrt(rpar**2 + rperp**2)
                    mu = rpar / r
                    res = rpar - true_spar + rescaled_vr(r) * mu * self.iaH
                    return res

                rpar = fsolve(func=residual, x0=true_spar)[0]

                sy_central = sigma_v * rescaled_sv(
                    np.sqrt(true_sperp**2 + rpar**2)) * self.iaH
                y = np.linspace(-5 * sy_central, 5 * sy_central, 200)

                rpary = rpar - y
                rr = np.sqrt(true_sperp**2 + rpary**2)
                sy = sigma_v * rescaled_sv(rr) * self.iaH

                integrand = (1 + rescaled_xi_r(rr)) * (1 + rescaled_vr(rr)/(rr/self.iaH) +\
                                                (rescaled_dvr(rr) - rescaled_vr(rr)/rr)*self.iaH * true_mu[j]**2)**(-1)

                integrand = integrand * np.exp(
                    -(y**2) / (2 * sy**2)) / (np.sqrt(2 * np.pi) * sy)

                xi_model[j] = np.trapz(integrand, y) - 1

            # build interpolating function for xi_smu at true_mu
            mufunc = InterpolatedUnivariateSpline(
                true_mu[np.argsort(true_mu)],
                xi_model[np.argsort(true_mu)],
                k=3,
                ext=0)

            if true_mu.min() < 0:
                mumin = -1
                factor = 2
            else:
                mumin = 0
                factor = 1

            # get multipoles
            xaxis = np.linspace(mumin, 1, 1000)

            ell = 0
            lmu = eval_legendre(ell, xaxis)
            yaxis = mufunc(xaxis) * (2 * ell + 1) / factor * lmu
            monopole[i] = simps(yaxis, xaxis)

            ell = 2
            lmu = eval_legendre(ell, xaxis)
            yaxis = mufunc(xaxis) * (2 * ell + 1) / factor * lmu
            quadrupole[i] = simps(yaxis, xaxis)

            ell = 4
            lmu = eval_legendre(ell, xaxis)
            yaxis = mufunc(xaxis) * (2 * ell + 1) / factor * lmu
            hexadecapole[i] = simps(yaxis, xaxis)

        return monopole, quadrupole, hexadecapole
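
The multipole extraction at the end of multipoles_theory splines xi(s, mu) in mu and projects onto Legendre polynomials. A self-contained sketch with a toy xi(mu) at a single fixed s (np.trapz is used here instead of simps purely to keep the sketch dependency-light):

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.special import eval_legendre

mu = np.linspace(-1, 1, 40)
xi_mu = 0.5 + 0.3 * mu**2            # toy xi(s, mu) at one fixed s (assumption)

mufunc = InterpolatedUnivariateSpline(mu, xi_mu, k=3)
xaxis = np.linspace(-1, 1, 1000)
for ell in (0, 2, 4):
    yaxis = mufunc(xaxis) * (2 * ell + 1) / 2 * eval_legendre(ell, xaxis)
    print(ell, np.trapz(yaxis, xaxis))   # monopole, quadrupole, hexadecapole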
Example #45
 def __init__(self, x, y, w=None, bbox=[None, None], k=3,
              xname=None, xunits=None, yname=None, yunits=None):
     """Constructor.
     """
     xUnivariateSplineBase.__init__(self, x, y, xname, xunits, yname, yunits)
     InterpolatedUnivariateSpline.__init__(self, self.x, self.y, w, bbox, k)
Example #46
############################################################
####################### EOS data ###########################
############################################################

# Load EOS from MUSIC (generated with https://github.com/j-f-paquet/eos_maker - SMASH branch )
#eos_location="./music/EOS/hotQCD/hrg_hotqcd_eos_binary.dat"
#eos_location="/home/jp401/Dropbox/work/my_papers/effective_visc_bjorken/results/bjorken_relax/music/EOS/hotQCD/hrg_hotqcd_eos_binary.dat"
eos_location = "./hrg_hotqcd_eos_binary.dat"

raw = np.fromfile(eos_location, dtype=(float, 4))

e = raw[:, 0]  # GeV/fm^3
p = raw[:, 1]  # GeV/fm^3
s = raw[:, 2]  # fm^-3
T = raw[:, 3]  # GeV

#cs2=CubicSpline(e, p)(e, nu=1)
cs2 = InterpolatedUnivariateSpline(e, p, k=1).derivative(n=1)(e)

#print(cs2)
#print(InterpolatedUnivariateSpline(e,p,k=1).derivative(n=1)(e))

# cs2_qcd_fct takes the temperature in fm^-1
cs2_qcd_fct = InterpolatedUnivariateSpline(T / hbarc, cs2)

#hotQCD=np.loadtxt("hotQCD-EOS.dat")
#
#for line in hotQCD:
#    print(line[0],line[16],cs2_fct(line[0]/1000))
Example #47
def lsd_multiorder(tmpl_wave,
                   tmpl_flux,
                   tmpl_e_flux,
                   tmpl_msk,
                   wave,
                   flux,
                   e_flux,
                   msk,
                   orders,
                   vl,
                   vh,
                   nv,
                   kreg,
                   emchop=True):
    zl = vl * 1000.0 / lfa.LIGHT
    zh = vh * 1000.0 / lfa.LIGHT

    zvals = numpy.linspace(zl, zh, nv)
    vels = zvals * lfa.LIGHT / 1000.0

    AA = numpy.zeros([nv, nv])
    bb = numpy.zeros([nv])

    for order in orders:
        # Extract order and clean.
        thistmpl_wave, thistmpl_flux, thistmpl_e_flux = prepord(
            order, tmpl_wave, tmpl_flux, tmpl_e_flux, tmpl_msk)
        thiswave, thisflux, thise_flux = prepord(order, wave, flux, e_flux,
                                                 msk)

        # Take off sky.
        ss = makesky(thistmpl_wave, thistmpl_flux, 4)

        thistmpl_flux /= ss
        thistmpl_e_flux /= ss

        ss = makesky(thiswave, thisflux, 4)

        thisflux /= ss
        thise_flux /= ss

        tmpl_ww = numpy.isfinite(thistmpl_flux)
        ww = numpy.isfinite(thisflux)

        if emchop:
            # Clip emission lines.
            medflux, sigflux = medsig(thistmpl_flux[tmpl_ww])
            tmpl_ww = numpy.logical_and(tmpl_ww,
                                        thistmpl_flux < medflux + 5 * sigflux)

            medflux, sigflux = medsig(thisflux[ww])
            ww = numpy.logical_and(ww, thisflux < medflux + 5 * sigflux)

        thistmpl_wave = thistmpl_wave[tmpl_ww]
        thistmpl_flux = thistmpl_flux[tmpl_ww]
        thistmpl_e_flux = thistmpl_e_flux[tmpl_ww]

        thiswave = thiswave[ww]
        thisflux = thisflux[ww]
        thise_flux = thise_flux[ww]

        # Figure out which pixels in are always in range.
        wanttmpl = thiswave - zl * thiswave

        inrangel = numpy.logical_and(wanttmpl >= thistmpl_wave[0],
                                     wanttmpl <= thistmpl_wave[-1])

        wanttmpl = thiswave - zh * thiswave

        inrangeh = numpy.logical_and(wanttmpl >= thistmpl_wave[0],
                                     wanttmpl <= thistmpl_wave[-1])

        inrange = numpy.logical_and(inrangel, inrangeh)

        # Restrict to that...
        thiswave = thiswave[inrange]
        thisflux = thisflux[inrange]
        thise_flux = thise_flux[inrange]

        #    plt.plot(thistmpl_wave, thistmpl_flux)
        #    plt.plot(thiswave, thisflux)
        #    plt.show()

        nwave = len(thiswave)

        # Form design matrix.
        A = numpy.empty([nwave, nv])

        # Interpolating spline.
        spl = InterpolatedUnivariateSpline(thistmpl_wave, thistmpl_flux, k=3)

        for iz, z in enumerate(zvals):
            wanttmpl = thiswave - z * thiswave

            interp_flux = spl(wanttmpl)

            A[:, iz] = interp_flux

        # Accumulate.
        AA += numpy.dot(A.transpose(), A)
        bb += numpy.dot(A.transpose(), thisflux)

    # Regularization.
    AA += kreg * numpy.identity(nv)  # need to calculate this constant properly

    prof, chisq, rank, s = numpy.linalg.lstsq(AA, bb, rcond=-1)

    return vels, prof
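
The core of lsd_multiorder is a design matrix whose columns are the template spline evaluated at trial Doppler shifts, accumulated into regularized normal equations. A minimal sketch with a toy absorption line (the grid, line shape, and regularization constant are assumptions):

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

tmpl_wave = np.linspace(5000.0, 5100.0, 400)
tmpl_flux = 1 - 0.5 * np.exp(-(tmpl_wave - 5050.0)**2 / 0.5)  # toy line (assumption)
obs_wave = tmpl_wave[50:-50]
obs_flux = tmpl_flux[50:-50]          # pretend the observation is unshifted

zvals = np.linspace(-1e-4, 1e-4, 21)  # trial Doppler shifts z = v/c
spl = InterpolatedUnivariateSpline(tmpl_wave, tmpl_flux, k=3)

A = np.empty((obs_wave.size, zvals.size))
for iz, z in enumerate(zvals):
    A[:, iz] = spl(obs_wave - z * obs_wave)   # template shifted by z

AA = A.T @ A + 1e-3 * np.identity(zvals.size)  # assumed ridge regularization
prof = np.linalg.lstsq(AA, A.T @ obs_flux, rcond=None)[0]
print(prof.argmax())  # peak should fall near the zero-shift column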
Example #48
def sigma_scale(cube, scaleX=False, scaleY=False, scaleZ=True, edgeX=0, edgeY=0, edgeZ=0, statistic="mad", fluxRange="all", method="global", windowSpatial=20, windowSpectral=20, gridSpatial=0, gridSpectral=0, interpolation="none"):
	# Print some informational messages
	err.message("    Generating noise-scaled data cube:")
	err.message("      Selecting " + str(method) + " noise measurement method.")
	
	if statistic == "mad": err.message("      Applying median absolute deviation to " + str(fluxRange) + " pixels.")
	if statistic == "std": err.message("      Applying standard deviation to " + str(fluxRange) + " pixels.")
	if statistic == "gauss": err.message("      Applying Gaussian fit to " + str(fluxRange) + " pixels.")
	if statistic == "negative": err.message("      Applying Gaussian fit to negative pixels.")
	
	# Check the dimensions of the cube (could be obtained from header information)
	dimensions = np.shape(cube)
	
	# LOCAL noise measurement within running window (slower and less memory-friendly)
	if method == "local":
		# Make window sizes integers >= 1
		windowSpatial = max(int(windowSpatial), 1)
		windowSpectral = max(int(windowSpectral), 1)
		
		# Ensure that window sizes are odd
		windowSpatial += (1 - windowSpatial % 2)
		windowSpectral += (1 - windowSpectral % 2)
		
		# Set grid sizes to half the window sizes if undefined
		if not gridSpatial: gridSpatial = windowSpatial // 2
		if not gridSpectral: gridSpectral = windowSpectral // 2
		
		# Make grid sizes integers >= 1
		gridSpatial = max(int(gridSpatial), 1)
		gridSpectral = max(int(gridSpectral), 1)
		
		# Ensure that grid sizes are odd
		gridSpatial += (1 - gridSpatial % 2)
		gridSpectral += (1 - gridSpectral % 2)
		
		# Print grid and window sizes adopted
		err.message("      Using grid size of [" + str(gridSpatial) + " pix , " + str(gridSpectral) + " chan ]")
		err.message("      and window size of [" + str(windowSpatial) + " pix , " + str(windowSpectral) + " chan ].")
		
		# Generate grid points to be used
		gridPointsZ = np.arange((dimensions[0] - gridSpectral * (int(math.ceil(float(dimensions[0]) / float(gridSpectral))) - 1)) // 2, dimensions[0], gridSpectral)
		gridPointsY = np.arange((dimensions[1] - gridSpatial  * (int(math.ceil(float(dimensions[1]) / float(gridSpatial)))  - 1)) // 2, dimensions[1], gridSpatial)
		gridPointsX = np.arange((dimensions[2] - gridSpatial  * (int(math.ceil(float(dimensions[2]) / float(gridSpatial)))  - 1)) // 2, dimensions[2], gridSpatial)
		
		# Divide grid and window sizes by 2 to get radii
		radiusGridSpatial = gridSpatial // 2
		radiusGridSpectral = gridSpectral // 2
		radiusWindowSpatial = windowSpatial // 2
		radiusWindowSpectral = windowSpectral // 2
		
		# Create empty cube (filled with NaN) to hold noise values
		rms_cube = np.full(cube.shape, np.nan, dtype=cube.dtype)
		
		# Determine RMS across window centred on grid cell
		for z in gridPointsZ:
			for y in gridPointsY:
				for x in gridPointsX:
					grid = (max(0, z - radiusGridSpectral), min(dimensions[0], z + radiusGridSpectral + 1), max(0, y - radiusGridSpatial), min(dimensions[1], y + radiusGridSpatial + 1), max(0, x - radiusGridSpatial), min(dimensions[2], x + radiusGridSpatial + 1))
					
					window = (max(0, z - radiusWindowSpectral), min(dimensions[0], z + radiusWindowSpectral + 1), max(0, y - radiusWindowSpatial), min(dimensions[1], y + radiusWindowSpatial + 1), max(0, x - radiusWindowSpatial), min(dimensions[2], x + radiusWindowSpatial + 1))
					
					if not np.all(np.isnan(cube[window[0]:window[1], window[2]:window[3], window[4]:window[5]])):
						if interpolation == "linear" or interpolation == "cubic":
							# Write value into grid point for later interpolation
							rms_cube[z, y, x] = GetRMS(cube[window[0]:window[1], window[2]:window[3], window[4]:window[5]], rmsMode=statistic, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=0)
						else:
							# Fill entire grid cell
							rms_cube[grid[0]:grid[1], grid[2]:grid[3], grid[4]:grid[5]] = GetRMS(cube[window[0]:window[1], window[2]:window[3], window[4]:window[5]], rmsMode=statistic, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=0)
					del grid, window
		
		# Carry out interpolation if requested, taking NaNs into account
		if interpolation == "linear" or interpolation == "cubic":
			err.message("      Interpolating in between grid points (" + str(interpolation) + ").")
			
			# First across each spatial plane
			if gridSpatial > 1:
				for z in gridPointsZ:
					for y in gridPointsY:
						data_values   = rms_cube[z, y, gridPointsX]
						not_nan = np.logical_not(np.isnan(data_values))
						if any(not_nan):
							interp_coords = np.arange(0, dimensions[2])
							if interpolation == "cubic":
								spline = InterpolatedUnivariateSpline(gridPointsX[not_nan], data_values[not_nan])
								rms_cube[z, y, 0:dimensions[2]] = spline(interp_coords)
								del spline
							else:
								interp_values = np.interp(interp_coords, gridPointsX[not_nan], data_values[not_nan])
								rms_cube[z, y, 0:dimensions[2]] = interp_values
								del interp_values
							del interp_coords
						del data_values, not_nan
					for x in range(dimensions[2]):
						data_values   = rms_cube[z, gridPointsY, x]
						not_nan = np.logical_not(np.isnan(data_values))
						if any(not_nan):
							interp_coords = np.arange(0, dimensions[1])
							if interpolation == "cubic":
								spline = InterpolatedUnivariateSpline(gridPointsY[not_nan], data_values[not_nan])
								rms_cube[z, 0:dimensions[1], x] = spline(interp_coords)
								del spline
							else:
								interp_values = np.interp(interp_coords, gridPointsY[not_nan], data_values[not_nan])
								rms_cube[z, 0:dimensions[1], x] = interp_values
								del interp_values
							del interp_coords
						del data_values, not_nan
					# Alternative option: 2-D spatial interpolation using SciPy's interp2d
					#from scipy.interpolate import interp2d
					#xx, yy = np.meshgrid(gridPointsX, gridPointsY)
					#data_values = rms_cube[z, yy, xx]
					#f = interp2d(gridPointsX, gridPointsY, data_values, kind="cubic")
					#interp_coords_x = np.arange(0, dimensions[2])
					#interp_coords_y = np.arange(0, dimensions[1])
					#rms_cube[z, :, :] = f(interp_coords_x, interp_coords_y)
			
			# Then along the spectral axis
			if gridSpectral > 1:
				for y in range(dimensions[1]):
					for x in range(dimensions[2]):
						data_values   = rms_cube[gridPointsZ, y, x]
						not_nan = np.logical_not(np.isnan(data_values))
						if any(not_nan):
							interp_coords = np.arange(0, dimensions[0])
							if interpolation == "cubic":
								spline = InterpolatedUnivariateSpline(gridPointsZ[not_nan], data_values[not_nan])
								rms_cube[0:dimensions[0], y, x] = spline(interp_coords)
								del spline
							else:
								interp_values = np.interp(interp_coords, gridPointsZ[not_nan], data_values[not_nan])
								rms_cube[0:dimensions[0], y, x] = interp_values
								del interp_values
							del interp_coords
						del data_values, not_nan
		
		# Replace any invalid RMS values with NaN
		with np.errstate(invalid="ignore"):
			rms_cube[rms_cube <= 0] = np.nan
		
		# Divide data cube by RMS cube
		cube /= rms_cube
		
		# Delete the RMS cube again to release its memory
		#del rms_cube
	
	# GLOBAL noise measurement on entire 2D plane (faster and more memory-friendly)
	elif method=='global':
		# Define the range over which statistics are calculated
		z1 = int(edgeZ)
		z2 = int(dimensions[0] - edgeZ)
		y1 = int(edgeY)
		y2 = int(dimensions[1] - edgeY)
		x1 = int(edgeX)
		x2 = int(dimensions[2] - edgeX)
		
		# Make sure edges don't exceed cube size
		err.ensure(z1 < z2 and y1 < y2 and x1 < x2, "Edge size exceeds cube size for at least one axis.")
		
		# Create empty cube (filled with 1) to hold noise values
		rms_cube = np.ones(cube.shape, dtype=cube.dtype)
		
		# Measure noise across 2D planes and scale cube accordingly
		if scaleZ:
			for i in range(dimensions[0]):
				if not np.all(np.isnan(cube[i, y1:y2, x1:x2])):
					rms = GetRMS(cube[i, y1:y2, x1:x2], rmsMode=statistic, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=0)
					if rms > 0:
						rms_cube[i, :, :] *= rms
						cube[i, :, :] /= rms
		
		if scaleY:
			for i in range(dimensions[1]):
				if not np.all(np.isnan(cube[z1:z2, i, x1:x2])):
					rms = GetRMS(cube[z1:z2, i, x1:x2], rmsMode=statistic, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=0)
					if rms > 0:
						rms_cube[:, i, :] *= rms
						cube[:, i, :] /= rms
		
		if scaleX:
			for i in range(dimensions[2]):
				if not np.all(np.isnan(cube[z1:z2, y1:y2, i])):
					rms = GetRMS(cube[z1:z2, y1:y2, i], rmsMode=statistic, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=0)
					if rms > 0:
						rms_cube[:, :, i] *= rms
						cube[:, :, i] /= rms

	# 1D2D noise measurement: first channel-by-channel, then map noise variations on the sky independent of frequency (so using all channels together)
	elif method=='1d2d':

		# Define the range over which statistics are calculated
		y1 = int(edgeY)
		y2 = int(dimensions[1] - edgeY)
		x1 = int(edgeX)
		x2 = int(dimensions[2] - edgeX)
		
		# Make sure edges don't exceed cube size
		err.ensure(y1 < y2 and x1 < x2, "Edge size exceeds cube size for at least one axis.")
		
		# Create empty all-channels/single-pixel cube (filled with 1) to hold noise values along Z axis
		rms_cube_z = np.ones((cube.shape[0],1,1), dtype=cube.dtype)
		
		# Measure noise across 2D z-planes
		err.message("      Mapping Z noise variation channel by channel")
		for i in range(dimensions[0]):
			if not np.all(np.isnan(cube[i, y1:y2, x1:x2])):
				rms = GetRMS(cube[i, y1:y2, x1:x2], rmsMode=statistic, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=0)
				if rms > 0:
					rms_cube_z[i, :, :] = rms

		# Divide data cube by Z RMS cube
		cube /= rms_cube_z

		# Measure noise across 3D XYZ subcubes each consisting of a 2D XY window and all Z channels

		# Make window sizes integers >= 1
		windowSpatial = max(int(windowSpatial), 1)
		windowSpectral = 2*cube.shape[0]
		
		# Ensure that window sizes are odd
		windowSpatial += (1 - windowSpatial % 2)
		windowSpectral += (1 - windowSpectral % 2)
		
		# Set grid sizes to half the window sizes if undefined
		if not gridSpatial: gridSpatial = windowSpatial // 2
		gridSpectral = windowSpectral // 2
		
		# Make grid sizes integers >= 1
		gridSpatial = max(int(gridSpatial), 1)
		gridSpectral = max(int(gridSpectral), 1)
		
		# Ensure that grid sizes are odd
		gridSpatial += (1 - gridSpatial % 2)
		gridSpectral += (1 - gridSpectral % 2)
		
		# Print grid and window sizes adopted
		err.message("      Mapping XY noise variation")
		err.message("        using grid size of " + str(gridSpatial) + "pix")
		err.message("        and window size of " + str(windowSpatial) + "pix")

		# Generate grid points to be used
		gridPointsZ = np.arange((dimensions[0] - gridSpectral * (int(math.ceil(float(dimensions[0]) / float(gridSpectral))) - 1)) // 2, dimensions[0], gridSpectral)
		gridPointsY = np.arange((dimensions[1] - gridSpatial  * (int(math.ceil(float(dimensions[1]) / float(gridSpatial)))  - 1)) // 2, dimensions[1], gridSpatial)
		gridPointsX = np.arange((dimensions[2] - gridSpatial  * (int(math.ceil(float(dimensions[2]) / float(gridSpatial)))  - 1)) // 2, dimensions[2], gridSpatial)

		# Divide grid and window sizes by 2 to get radii
		radiusGridSpatial = gridSpatial // 2
		radiusGridSpectral = gridSpectral // 2
		radiusWindowSpatial = windowSpatial // 2
		radiusWindowSpectral = windowSpectral // 2
		
		# Create empty cube (filled with NaN) to hold noise values
		rms_cube = np.full(cube.shape, np.nan, dtype=cube.dtype)
		
		# Determine RMS across window centred on grid cell
		for z in gridPointsZ:
			for y in gridPointsY:
				for x in gridPointsX:
					grid = (max(0, z - radiusGridSpectral), min(dimensions[0], z + radiusGridSpectral + 1), max(0, y - radiusGridSpatial), min(dimensions[1], y + radiusGridSpatial + 1), max(0, x - radiusGridSpatial), min(dimensions[2], x + radiusGridSpatial + 1))
					window = (max(0, z - radiusWindowSpectral), min(dimensions[0], z + radiusWindowSpectral + 1), max(0, y - radiusWindowSpatial), min(dimensions[1], y + radiusWindowSpatial + 1), max(0, x - radiusWindowSpatial), min(dimensions[2], x + radiusWindowSpatial + 1))
					if not np.all(np.isnan(cube[window[0]:window[1], window[2]:window[3], window[4]:window[5]])): # +1 needed?
						if interpolation == "linear" or interpolation == "cubic":
							# Write value into grid point for later interpolation
							rms_cube[z, y, x] = GetRMS(cube[window[0]:window[1], window[2]:window[3], window[4]:window[5]], rmsMode=statistic, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=0) # +1 needed?
						else:
							# Fill entire grid cell
							rms_cube[grid[0]:grid[1], grid[2]:grid[3], grid[4]:grid[5]] = GetRMS(cube[window[0]:window[1], window[2]:window[3], window[4]:window[5]], rmsMode=statistic, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=0) # +1 needed?
					del grid, window
		
		# Carry out interpolation if requested, taking NaNs into account
		if interpolation == "linear" or interpolation == "cubic":
			err.message("      Interpolating in between grid points (" + str(interpolation) + ").")
			
			# First across each spatial plane
			if gridSpatial > 1:
				for z in gridPointsZ:
					for y in gridPointsY:
						data_values   = rms_cube[z, y, gridPointsX]
						not_nan = np.logical_not(np.isnan(data_values))
						if any(not_nan):
							interp_coords = np.arange(0, dimensions[2])
							if interpolation == "cubic":
								spline = InterpolatedUnivariateSpline(gridPointsX[not_nan], data_values[not_nan])
								rms_cube[z, y, 0:dimensions[2]] = spline(interp_coords)
								del spline
							else:
								interp_values = np.interp(interp_coords, gridPointsX[not_nan], data_values[not_nan])
								rms_cube[z, y, 0:dimensions[2]] = interp_values
								del interp_values
							del interp_coords
						del data_values, not_nan
					for x in range(dimensions[2]):
						data_values   = rms_cube[z, gridPointsY, x]
						not_nan = np.logical_not(np.isnan(data_values))
						if any(not_nan):
							interp_coords = np.arange(0, dimensions[1])
							if interpolation == "cubic":
								spline = InterpolatedUnivariateSpline(gridPointsY[not_nan], data_values[not_nan])
								rms_cube[z, 0:dimensions[1], x] = spline(interp_coords)
								del spline
							else:
								interp_values = np.interp(interp_coords, gridPointsY[not_nan], data_values[not_nan])
								rms_cube[z, 0:dimensions[1], x] = interp_values
								del interp_values
							del interp_coords
						del data_values, not_nan
		
			# Then along the spectral axis
			if gridSpectral > 1:
				for y in range(dimensions[1]):
					for x in range(dimensions[2]):
						data_values   = rms_cube[gridPointsZ, y, x]
						not_nan = np.logical_not(np.isnan(data_values))
						if any(not_nan):
							interp_coords = np.arange(0, dimensions[0])
							if interpolation == "cubic":
								spline = InterpolatedUnivariateSpline(gridPointsZ[not_nan], data_values[not_nan])
								rms_cube[0:dimensions[0], y, x] = spline(interp_coords)
								del spline
							else:
								interp_values = np.interp(interp_coords, gridPointsZ[not_nan], data_values[not_nan])
								rms_cube[0:dimensions[0], y, x] = interp_values
								del interp_values
							del interp_coords
						del data_values, not_nan
						
		# Replace any invalid RMS values with NaN
		with np.errstate(invalid="ignore"):
			rms_cube[rms_cube <= 0] = np.nan
		
		# Divide data cube by XY RMS cube
		cube /= rms_cube

		# Multiply rms_cube by rms_cube_z to get the Z variation and absolute scale right in the final 3D noise cube
		rms_cube *= rms_cube_z
		
	err.message("    Noise-scaled data cube generated.\n")

	return cube, rms_cube
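
The grid interpolation inside sigma_scale measures noise only at grid points, masks NaNs, and spline-interpolates along each axis. A one-axis sketch with toy RMS values (the grid and noise model are assumptions; values outside the grid are extrapolated, as with the default ext=0):

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

gridPoints = np.arange(5, 100, 10)                  # coarse grid along one axis
noise_at_grid = 1.0 + 0.01 * gridPoints             # toy RMS values (assumption)
noise_at_grid[3] = np.nan                           # one cell failed / masked

not_nan = ~np.isnan(noise_at_grid)
coords = np.arange(100)                             # full axis
noise_full = InterpolatedUnivariateSpline(
    gridPoints[not_nan], noise_at_grid[not_nan])(coords)
print(noise_full[:3])                               # extrapolated below the grid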
Example #49
import s4gutils
import datautils

# Read in composite data table for Parent Disk Sample from S4G
columnHeaderRow = 25
s4gdata = datautils.ReadCompositeTable('data/s4gbars_table.dat',
                                       columnRow=columnHeaderRow,
                                       dataFrame=True)

# Generate spline-interpolation objects for cosmology calculations (*much* faster
# than repeated calls to original astropy.cosmology.FlatLambdaCDM methods)
print("Generating comology interpolation functions...")
zz = np.arange(0.001, 1.1, 0.001)
lumDistances = np.array([cosmo.luminosity_distance(z).value for z in zz])
arcsecScales = np.array([cosmo.arcsec_per_kpc_proper(z).value for z in zz])
luminosityDistFn = InterpolatedUnivariateSpline(zz, lumDistances)
arcsecPerKpcFn = InterpolatedUnivariateSpline(zz, arcsecScales)

maxInclination = 60.0
minCosValue = math.cos(math.radians(maxInclination))

PI_OVER_TWO = math.pi / 2.0

random.seed()

# construct dataset vectors, using min(logMstar) = 9.0 and max(distance) = 25 Mpc
nDisksTot = len(s4gdata.name)
ii_gmr = [i for i in range(nDisksTot) if s4gdata.gmr_tc[i] > -1]
nTot_gmr = len(ii_gmr)

index_MHI = 9
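
A sketch of the lookup-table speedup used above: tabulate an expensive function once on a fine redshift grid and replace it with a spline. slow_distance below is an assumed stand-in for the astropy.cosmology calls:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

def slow_distance(z):
    # assumed stand-in for cosmo.luminosity_distance(z).value
    return 4285.0 * z * (1 + 0.75 * z)

zz = np.arange(0.001, 1.1, 0.001)
luminosityDistFn = InterpolatedUnivariateSpline(zz, [slow_distance(z) for z in zz])
print(luminosityDistFn(0.0423))  # fast evaluation at arbitrary z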
Example #50
def contrast_curve(cube,
                   angle_list,
                   psf_template,
                   fwhm,
                   pxscale,
                   starphot,
                   algo,
                   sigma=5,
                   nbranch=1,
                   theta=0,
                   inner_rad=1,
                   wedge=(0, 360),
                   fc_snr=100,
                   student=True,
                   transmission=None,
                   smooth=True,
                   interp_order=2,
                   plot=True,
                   dpi=100,
                   imlib='opencv',
                   debug=False,
                   verbose=True,
                   full_output=False,
                   save_plot=None,
                   object_name=None,
                   frame_size=None,
                   fix_y_lim=(),
                   figsize=(8, 4),
                   **algo_dict):
    """ Computes the contrast curve at a given SIGMA (``sigma``) level for an
    ADI cube or ADI+IFS cube. The contrast is calculated as
    sigma*noise/throughput. This implementation takes into account the small
    sample statistics correction proposed in Mawet et al. 2014.

    Parameters
    ----------
    cube : numpy ndarray
        The input cube, 3d (ADI data) or 4d array (IFS data), without fake
        companions.
    angle_list : numpy ndarray
        Vector with the parallactic angles.
    psf_template : numpy ndarray
        Frame with the psf template for the fake companion(s).
        PSF must be centered in array. Normalization is done internally.
    fwhm: int or float or 1d array, optional
        The Full Width Half Maximum in pixels. It can handle a different
        FWHM value for different wavelengths (IFS data).
    pxscale : float
        Plate scale or pixel scale of the instrument.
    starphot : int or float or 1d array
        If int or float it corresponds to the aperture photometry of the
        non-coronagraphic PSF which we use to scale the contrast. If a vector
        is given it must contain the photometry correction for each frame.
    algo : callable or function
        The post-processing algorithm, e.g. vip_hci.pca.pca.
    sigma : int
        Sigma level for contrast calculation. Note this is a "Gaussian sigma"
        regardless of whether Student t correction is performed (set by the 
        'student' parameter). E.g. setting sigma to 5 will yield the contrast 
        curve corresponding to a false alarm probability of 3e-7.
    nbranch : int, optional
        Number of branches on which to inject fake companions. Each branch
        is tested individually.
    theta : float, optional
        Angle in degrees for rotating the position of the first branch that by
        default is located at zero degrees. Theta counts counterclockwise from
        the positive x axis. When working on a wedge, make sure that theta is
        located inside of it.
    inner_rad : int, optional
        Innermost radial distance to be considered in terms of FWHM.
    wedge : tuple of floats, optional
        Initial and Final angles for using a wedge. For example (-90,90) only
        considers the right side of an image.
    fc_snr : float, optional
        Signal to noise ratio of injected fake companions (w.r.t a Gaussian 
        distribution).
    student : bool, optional
        If True uses Student t correction to inject fake companion.
    transmission : tuple of 2 1d arrays, optional
        If not None, then the tuple contains a vector with the factors to be
        applied to the sensitivity and a vector of the radial distances [px]
        where it is sampled (in this order).
    smooth : bool, optional
        If True the radial noise curve is smoothed with a Savitzky-Golay filter
        of order 2.
    interp_order : int or None, optional
        If not None, the throughput vector is interpolated with a spline of
        order ``interp_order``. Takes values from 1 to 5. If None, the
        throughput is not interpolated.
    plot : bool, optional
        Whether to plot the final contrast curve or not. True by default.
    dpi : int optional
        Dots per inch for the plots. 100 by default. 300 for printing quality.
    imlib : {'opencv', 'ndimage-fourier', 'ndimage-interp'}, string optional
        Library or method used for image operations (shifts). Opencv is the
        default for being the fastest.
    debug : bool, optional
        Whether to print and plot additional info such as the noise, throughput,
        the contrast curve with different X axis and the delta magnitude instead
        of contrast.
    verbose : {True, False, 0, 1, 2}, optional
        If True or 1 the function prints to stdout intermediate info and timing,
        if set to 2 more output will be shown. 
    full_output : bool, optional
        If True returns intermediate arrays.
    save_plot: string
        If provided, the contrast curve will be saved to this path.
    object_name: string
        Target name, used in the plot title.
    frame_size: int
        Frame size used for generating the contrast curve, used in the plot
        title.
    fix_y_lim: tuple
        If provided, the y axis limits will be fixed, for easier comparison
        between plots.
    **algo_dict
        Any other valid parameter of the post-processing algorithms can be
        passed here.

    Returns
    -------
    datafr : pandas dataframe
        Dataframe containing the sensitivity (Gaussian and Student corrected if
        Student parameter is True), the interpolated throughput, the distance in
        pixels, the noise and the sigma corrected (if Student is True).

    If full_output is True then the function returns: 
        datafr, cube_fc_all, frame_fc_all, frame_nofc and fc_map_all.

    frame_fc_all : numpy ndarray
        3d array with the 3 frames of the 3 (patterns) processed cubes with
        companions.
    frame_nofc : numpy ndarray
        2d array, PCA processed frame without companions.
    fc_map_all : numpy ndarray
        3d array with 3 frames containing the position of the companions in the
        3 patterns.
    """
    if cube.ndim != 3 and cube.ndim != 4:
        raise TypeError('The input array is not a 3d or 4d cube')
    if cube.ndim == 3 and (cube.shape[0] != angle_list.shape[0]):
        raise TypeError('Input parallactic angles vector has wrong length')
    if cube.ndim == 4 and (cube.shape[1] != angle_list.shape[0]):
        raise TypeError('Input parallactic angles vector has wrong length')
    if cube.ndim == 3 and psf_template.ndim != 2:
        raise TypeError('Template PSF is not a frame (for ADI case)')
    if cube.ndim == 4 and psf_template.ndim != 3:
        raise TypeError('Template PSF is not a cube (for ADI+IFS case)')
    if transmission is not None:
        if not isinstance(transmission, tuple) or not len(transmission) == 2:
            raise TypeError('transmission must be a tuple with 2 1d vectors')

    if isinstance(fwhm, (np.ndarray, list)):
        fwhm_med = np.median(fwhm)
    else:
        fwhm_med = fwhm

    if verbose:
        start_time = time_ini()
        if isinstance(starphot, float) or isinstance(starphot, int):
            msg0 = 'ALGO : {}, FWHM = {}, # BRANCHES = {}, SIGMA = {},'
            msg0 += ' STARPHOT = {}'
            print(
                msg0.format(algo.__name__, fwhm_med, nbranch, sigma, starphot))
        else:
            msg0 = 'ALGO : {}, FWHM = {}, # BRANCHES = {}, SIGMA = {}'
            print(msg0.format(algo.__name__, fwhm_med, nbranch, sigma))
        print(sep)

    # throughput
    verbose_thru = False
    if verbose == 2:
        verbose_thru = True
    res_throug = throughput(cube,
                            angle_list,
                            psf_template,
                            fwhm,
                            pxscale,
                            nbranch=nbranch,
                            theta=theta,
                            inner_rad=inner_rad,
                            wedge=wedge,
                            fc_snr=fc_snr,
                            full_output=True,
                            algo=algo,
                            imlib=imlib,
                            verbose=verbose_thru,
                            **algo_dict)
    vector_radd = res_throug[3]
    if res_throug[0].shape[0] > 1:
        thruput_mean = np.mean(res_throug[0], axis=0)
    else:
        thruput_mean = res_throug[0][0]
    frame_fc_all = res_throug[4]
    frame_nofc = res_throug[5]
    fc_map_all = res_throug[6]

    if verbose:
        print('Finished the throughput calculation')
        timing(start_time)

    if interp_order is not None:
        # noise measured in the empty frame with better sampling, every px
        # starting from 1*FWHM
        noise_samp, res_lev_samp, rad_samp = noise_per_annulus(
            frame_nofc,
            separation=1,
            fwhm=fwhm_med,
            init_rad=fwhm_med,
            wedge=wedge)
        radmin = vector_radd.astype(int).min()
        cutin1 = np.where(rad_samp.astype(int) == radmin)[0][0]
        noise_samp = noise_samp[cutin1:]
        res_lev_samp = res_lev_samp[cutin1:]
        rad_samp = rad_samp[cutin1:]
        radmax = vector_radd.astype(int).max()
        cutin2 = np.where(rad_samp.astype(int) == radmax)[0][0]
        noise_samp = noise_samp[:cutin2 + 1]
        res_lev_samp = res_lev_samp[:cutin2 + 1]
        rad_samp = rad_samp[:cutin2 + 1]

        # interpolating the throughput vector, spline order 2
        f = InterpolatedUnivariateSpline(vector_radd,
                                         thruput_mean,
                                         k=interp_order)
        thruput_interp = f(rad_samp)

        # interpolating the transmission vector, spline order 1
        if transmission is not None:
            trans = transmission[0]
            radvec_trans = transmission[1]
            f2 = InterpolatedUnivariateSpline(radvec_trans, trans, k=1)
            trans_interp = f2(rad_samp)
            thruput_interp *= trans_interp
    else:
        rad_samp = vector_radd
        noise_samp = res_throug[1]
        res_lev_samp = res_throug[2]
        thruput_interp = thruput_mean
        if transmission is not None:
            if transmission[0].shape != thruput_interp.shape:
                msg = 'Transmiss. and throughput vectors have different length'
                raise ValueError(msg)
            thruput_interp *= transmission[0]

    rad_samp_arcsec = rad_samp * pxscale

    if smooth:
        # smoothing the noise vector using a Savitzky-Golay filter
        win = min(noise_samp.shape[0] - 2, int(2 * fwhm_med))
        if win % 2 == 0:
            win += 1
        noise_samp_sm = savgol_filter(noise_samp,
                                      polyorder=2,
                                      mode='nearest',
                                      window_length=win)
        res_lev_samp_sm = savgol_filter(res_lev_samp,
                                        polyorder=2,
                                        mode='nearest',
                                        window_length=win)
    else:
        noise_samp_sm = noise_samp
        res_lev_samp_sm = res_lev_samp

    # calculating the contrast
    if isinstance(starphot, float) or isinstance(starphot, int):
        cont_curve_samp = ((sigma * noise_samp_sm + res_lev_samp_sm) /
                           thruput_interp) / starphot
    else:
        cont_curve_samp = ((sigma * noise_samp_sm + res_lev_samp_sm) /
                           thruput_interp) / np.median(starphot)
    cont_curve_samp[np.where(cont_curve_samp < 0)] = 1
    cont_curve_samp[np.where(cont_curve_samp > 1)] = 1

    # calculating the Student corrected contrast
    if student:
        n_res_els = np.floor(rad_samp / fwhm_med * 2 * np.pi)
        ss_corr = np.sqrt(1 + 1 / n_res_els)
        sigma_corr = stats.t.ppf(stats.norm.cdf(sigma),
                                 n_res_els - 1) * ss_corr
        if isinstance(starphot, float) or isinstance(starphot, int):
            cont_curve_samp_corr = (
                (sigma_corr * noise_samp_sm + res_lev_samp_sm) /
                thruput_interp) / starphot
        else:
            cont_curve_samp_corr = (
                (sigma_corr * noise_samp_sm + res_lev_samp_sm) /
                thruput_interp) / np.median(starphot)
        cont_curve_samp_corr[np.where(cont_curve_samp_corr < 0)] = 1
        cont_curve_samp_corr[np.where(cont_curve_samp_corr > 1)] = 1

    if debug:
        plt.rc("savefig", dpi=dpi)
        plt.figure(figsize=figsize, dpi=dpi)
        # throughput
        plt.plot(vector_radd * pxscale,
                 thruput_mean,
                 '.',
                 label='computed',
                 alpha=0.6)
        plt.plot(rad_samp_arcsec,
                 thruput_interp,
                 ',-',
                 label='interpolated',
                 lw=2,
                 alpha=0.5)
        plt.grid('on', which='both', alpha=0.2, linestyle='solid')
        plt.xlabel('Angular separation [arcsec]')
        plt.ylabel('Throughput')
        plt.legend(loc='best')
        plt.xlim(0, np.max(rad_samp * pxscale))
        # noise
        plt.figure(figsize=figsize, dpi=dpi)
        plt.plot(rad_samp_arcsec, noise_samp, '.', label='computed', alpha=0.6)
        if smooth:
            plt.plot(rad_samp_arcsec,
                     noise_samp_sm,
                     ',-',
                     label='noise smoothed',
                     lw=2,
                     alpha=0.5)
        plt.grid('on', alpha=0.2, linestyle='solid')
        plt.xlabel('Angular separation [arcsec]')
        plt.ylabel('Noise')
        plt.legend(loc='best')
        plt.xlim(0, np.max(rad_samp_arcsec))
        # mean residual level
        plt.figure(figsize=figsize, dpi=dpi)
        plt.plot(rad_samp_arcsec,
                 res_lev_samp,
                 '.',
                 label='computed residual level',
                 alpha=0.6)
        if smooth:
            plt.plot(rad_samp_arcsec,
                     res_lev_samp_sm,
                     ',-',
                     label='smoothed residual level',
                     lw=2,
                     alpha=0.5)
        plt.grid('on', alpha=0.2, linestyle='solid')
        plt.xlabel('Angular separation [arcsec]')
        plt.ylabel('Mean residual level')
        plt.legend(loc='best')
        plt.xlim(0, np.max(rad_samp_arcsec))

    # plotting
    if plot or debug:
        if student:
            label = [
                'Sensitivity (Gaussian)', 'Sensitivity (Student-t correction)'
            ]
        else:
            label = ['Sensitivity (Gaussian)']

        plt.rc("savefig", dpi=dpi)
        fig = plt.figure(figsize=figsize, dpi=dpi)
        ax1 = fig.add_subplot(111)
        con1, = ax1.plot(rad_samp_arcsec,
                         cont_curve_samp,
                         '-',
                         alpha=0.2,
                         lw=2,
                         color='green')
        con2, = ax1.plot(rad_samp_arcsec,
                         cont_curve_samp,
                         '.',
                         alpha=0.2,
                         color='green')
        if student:
            con3, = ax1.plot(rad_samp_arcsec,
                             cont_curve_samp_corr,
                             '-',
                             alpha=0.4,
                             lw=2,
                             color='blue')
            con4, = ax1.plot(rad_samp_arcsec,
                             cont_curve_samp_corr,
                             '.',
                             alpha=0.4,
                             color='blue')
            lege = [(con1, con2), (con3, con4)]
        else:
            lege = [(con1, con2)]
        plt.legend(lege, label, fancybox=True, fontsize='medium')
        plt.xlabel('Angular separation [arcsec]')
        plt.ylabel(str(sigma) + ' sigma contrast')
        plt.grid('on', which='both', alpha=0.2, linestyle='solid')
        ax1.set_yscale('log')
        ax1.set_xlim(0, np.max(rad_samp_arcsec))

        # Give a title to the contrast curve plot
        if object_name is not None and frame_size is not None:
            # Retrieve ncomp and pca_type info to use in title
            ncomp = algo_dict['ncomp']
            if algo_dict['cube_ref'] is None:
                pca_type = 'ADI'
            else:
                pca_type = 'RDI'
            title = "{} {} {}pc {} + {}".format(pca_type, object_name, ncomp,
                                                frame_size, inner_rad)
            plt.title(title, fontsize=14)

        # Option to fix the y-limit
        if len(fix_y_lim) == 2:
            min_y_lim = min(fix_y_lim[0], fix_y_lim[1])
            max_y_lim = max(fix_y_lim[0], fix_y_lim[1])
            ax1.set_ylim(min_y_lim, max_y_lim)

        # Optionally, save the figure to a path
        if save_plot is not None:
            fig.savefig(save_plot, dpi=100)

        if debug:
            fig2 = plt.figure(figsize=figsize, dpi=dpi)
            ax3 = fig2.add_subplot(111)
            cc_mags = -2.5 * np.log10(cont_curve_samp)
            con4, = ax3.plot(rad_samp_arcsec,
                             cc_mags,
                             '-',
                             alpha=0.2,
                             lw=2,
                             color='green')
            con5, = ax3.plot(rad_samp_arcsec,
                             cc_mags,
                             '.',
                             alpha=0.2,
                             color='green')
            if student:
                cc_mags_corr = -2.5 * np.log10(cont_curve_samp_corr)
                con6, = ax3.plot(rad_samp_arcsec,
                                 cc_mags_corr,
                                 '-',
                                 alpha=0.4,
                                 lw=2,
                                 color='blue')
                con7, = ax3.plot(rad_samp_arcsec,
                                 cc_mags_corr,
                                 '.',
                                 alpha=0.4,
                                 color='blue')
                lege = [(con4, con5), (con6, con7)]
            else:
                lege = [(con4, con5)]
            plt.legend(lege, label, fancybox=True, fontsize='medium')
            plt.xlabel('Angular separation [arcsec]')
            plt.ylabel('Delta magnitude')
            plt.gca().invert_yaxis()
            plt.grid('on', which='both', alpha=0.2, linestyle='solid')
            ax3.set_xlim(0, np.max(rad_samp * pxscale))
            ax4 = ax3.twiny()
            ax4.set_xlabel('Distance [pixels]')
            ax4.plot(rad_samp, cc_mags, '', alpha=0.)
            ax4.set_xlim(0, np.max(rad_samp))

    if student:
        datafr = pd.DataFrame({
            'sensitivity_gaussian': cont_curve_samp,
            'sensitivity_student': cont_curve_samp_corr,
            'throughput': thruput_interp,
            'distance': rad_samp,
            'distance_arcsec': rad_samp_arcsec,
            'noise': noise_samp_sm,
            'residual_level': res_lev_samp_sm,
            'sigma corr': sigma_corr
        })
    else:
        datafr = pd.DataFrame({
            'sensitivity_gaussian': cont_curve_samp,
            'throughput': thruput_interp,
            'distance': rad_samp,
            'distance_arcsec': rad_samp_arcsec,
            'noise': noise_samp_sm,
            'residual_level': res_lev_samp_sm
        })

    if full_output:
        return datafr, frame_fc_all, frame_nofc, fc_map_all
    else:
        return datafr
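The Student-t branch above converts the Gaussian significance level into an equivalent t quantile, because only a handful of independent resolution elements fit in an annulus at small separations (cf. Mawet et al. 2014). A minimal, self-contained sketch of that correction; the FWHM and radii below are arbitrary placeholder values:

import numpy as np
from scipy import stats

sigma = 5.0                          # requested Gaussian significance
fwhm = 4.0                           # placeholder PSF FWHM in pixels
rad_samp = np.arange(1, 11) * fwhm   # placeholder radial distances

# Number of independent resolution elements per annulus
n_res_els = np.floor(rad_samp / fwhm * 2 * np.pi)

# Equivalent Student-t quantile at the same false-alarm probability,
# inflated by the small-sample penalty factor
ss_corr = np.sqrt(1 + 1 / n_res_els)
sigma_corr = stats.t.ppf(stats.norm.cdf(sigma), n_res_els - 1) * ss_corr

print(np.round(sigma_corr, 2))       # approaches sigma at wide separations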
Example #51
0
def SpecHWHM(GFint_A):
    ''' Half-width at half-maximum of the spectral function '''
    N = len(En_A)
    DOSF = -np.imag(GFint_A[N // 2]) / np.pi    # value at the Fermi energy
    # zero crossings of DOS - DOSF/2 mark the half-maximum points
    DOS = InterpolatedUnivariateSpline(En_A, -np.imag(GFint_A) / np.pi - DOSF / 2.0)
    return np.amin(np.fabs(DOS.roots()))
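Example #51 locates the half-maximum points by shifting the spectral function down by half its Fermi-level value and asking the cubic spline for its zero crossings (roots() is only available for k=3, the default). A standalone sketch of the same trick on a Lorentzian of known half-width; the grid and width are made-up values:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

En_A = np.linspace(-5.0, 5.0, 2001)        # placeholder energy grid
Gamma = 0.5                                # Lorentzian half-width
dos = (Gamma / np.pi) / (En_A**2 + Gamma**2)

# Subtract half the peak value; the zero crossings of the shifted
# spline are the half-maximum points, and the smallest |root| is the HWHM
dos0 = dos[len(En_A) // 2]
spl = InterpolatedUnivariateSpline(En_A, dos - dos0 / 2.0)
print(np.min(np.abs(spl.roots())))         # ~0.5, as expected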
Example #52
0
    def _band_calculations(self, rsr, flux, scale, **options):
        """Derive in band solar flux.

        Derive the in band solar flux or in band solar irradiance for a given
        instrument relative spectral response valid for an earth-sun distance
        of one AU.

        rsr: Relative Spectral Response (one detector only)
        Dictionary with two members 'wavelength' and 'response'
        options:
        detector: Detector number (between 1 and N - N=number of detectors
        for channel)

        """
        from scipy.interpolate import InterpolatedUnivariateSpline

        if 'detector' in options:
            detector = options['detector']
        else:
            detector = 1

        # Resample/Interpolate the response curve:
        if self.wavespace == 'wavelength':
            if 'response' in rsr:
                wvl = rsr['wavelength'] * scale
                resp = rsr['response']
            else:
                wvl = rsr['det-{0:d}'.format(detector)]['wavelength'] * scale
                resp = rsr['det-{0:d}'.format(detector)]['response']
        else:
            if 'response' in rsr:
                wvl = rsr['wavenumber'] * scale
                resp = rsr['response']
            else:
                wvl = rsr['det-{0:d}'.format(detector)]['wavenumber'] * scale
                resp = rsr['det-{0:d}'.format(detector)]['response']

        start = wvl[0]
        end = wvl[-1]
        # print "Start and end: ", start, end
        LOG.debug("Begin and end wavelength/wavenumber: %f %f ", start, end)
        dlambda = self._dlambda
        xspl = np.linspace(start, end, int(round((end - start) / self._dlambda)) + 1)

        ius = InterpolatedUnivariateSpline(wvl, resp)
        resp_ipol = ius(xspl)

        # Interpolate solar spectrum to specified resolution and over specified
        # Spectral interval:
        self.interpolate(dlambda=dlambda, ival_wavelength=(start, end))

        # Mask out outside the response curve:
        maskidx = np.logical_and(np.greater_equal(self.ipol_wavelength, start),
                                 np.less_equal(self.ipol_wavelength, end))
        wvl = np.repeat(self.ipol_wavelength, maskidx)
        irr = np.repeat(self.ipol_irradiance, maskidx)

        # Calculate the solar-flux: (w/m2)
        if flux:
            return np.trapz(irr * resp_ipol, wvl)
        else:
            # Divide by the equivalent band width:
            return np.trapz(irr * resp_ipol, wvl) / np.trapz(resp_ipol, wvl)
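The core of _band_calculations is: resample the response curve onto a fine grid with a spline, then integrate irradiance times response with the trapezoidal rule. A toy sketch of the same pattern; the response points and the linear "solar" spectrum below are invented for illustration:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

wvl = np.array([0.55, 0.60, 0.65, 0.70, 0.75])   # micrometres (toy data)
resp = np.array([0.05, 0.80, 1.00, 0.70, 0.05])

xspl = np.linspace(wvl[0], wvl[-1], 201)
resp_ipol = InterpolatedUnivariateSpline(wvl, resp)(xspl)

irr = 1800.0 - 1000.0 * xspl        # toy irradiance [W m-2 um-1]

in_band_flux = np.trapz(irr * resp_ipol, xspl)        # W m-2
band_irr = in_band_flux / np.trapz(resp_ipol, xspl)   # per um
print(in_band_flux, band_irr)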
Example #53
0
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import InterpolatedUnivariateSpline

data = np.loadtxt('podaci.txt')
tal = data[:, 0]
inte = data[:, 1]

# An interpolating spline passes through every point (s=0 by construction);
# use UnivariateSpline instead if smoothing is actually wanted.
spl = InterpolatedUnivariateSpline(tal, inte, k=3)
xs = tal
ys = spl(xs)
plt.plot(tal, inte, '.-')
plt.plot(xs, ys)
plt.show()

# Integrate over the sampled range; the spline is only defined on its
# data interval, so the limits must be finite
print(spl.integral(tal[0], tal[-1]))
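integral(a, b) evaluates the definite integral of the fitted spline analytically from its polynomial pieces, so it should be called with finite limits inside the data range. A quick sanity check against a known integral:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

x = np.linspace(0.0, np.pi, 100)
y = np.sin(x)

spl = InterpolatedUnivariateSpline(x, y, k=3)
print(spl.integral(x[0], x[-1]))   # ~2.0, the exact integral of sin on [0, pi]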
Example #54
0
    def visualize(self, plotname=None, usecols=None,
                    usez=None,fracdev=False, sharex=True,
                    sharey=True,
                    ref_y=None, ref_x=[None], ref_ye=None,
                    xlim=None, ylim=None,
                    fylim=None, f=None, ax=None, label=None,
                    xlabel=None, ylabel=None,compare=False,
                    logx=False, logy=True, rusecols=None,
                    rusez=None, **kwargs):
        """
        Plot the calculated metric.

        Arguments
        ---------
        plotname : string, optional
            If provided, the plot will be saved to a file by this name.
        usecols : array-like, optional
            The indices of the bands to plot. Default is to use all the
            bands the metric was measured in.
        fracdev : bool, optional
            Whether or not to plot fractional deviation of this metric
            from the reference metric function provided in
            ref_y.
        ref_y : 3-d array-like, optional
            If fracdev is set to True, this is the metric to compare against.
            It must have the same number of bands and z bins, but the
            magnitude bins may differ.
        ref_x : 1-d array-like, optional
            The mean magnitudes of the bins in which ref_y is measured.
            If ref_y is measured in a different number of magnitude bins
            than this metric, interpolation is performed in order to compare
            at the same mean magnitudes.
        xlim : array-like, optional
            A list [xmin, xmax], the range of magnitudes to
            plot the metric for.
        ylim: array-like, optional
            A list [ymin, ymax], the range of y values to plot
            the metric for.
        fylim : array-like, optional
            A list [fymin, fymax]. If fracdev is True, plot the
            fractional deviations over this range.
        f : Figure, optional
           A figure object. If provided, the metric will be plotted using this
           figure.
        ax : array of Axes, optional
           An array of Axes objects. If provided, the metrics
           will be plotted on these axes.
        """
        print('xlabel: {0}'.format(xlabel))
        print('ylabel: {0}'.format(ylabel))

        if usecols is None:
            usecols = range(self.nbands)

        if (rusecols is None) and (ref_y is not None):
            rusecols = range(ref_y.shape[1])

        if usez is None:
            usez = range(self.nzbins)

        if (rusez is None) and (ref_y is not None):
            rusez = range(ref_y.shape[2])

        nzbins = len(usez)

        l1 = None

        #format x-values
        if hasattr(self, 'xmean'):
            
            if len(self.xmean.shape)==1:
                mxs = np.tile(self.xmean, [self.nzbins, self.nbands, 1]).T
            elif len(self.xmean.shape)==2:
                mxs = np.tile(self.xmean.reshape(self.xmean.shape[1],1,self.xmean.shape[0]),
                                [1, self.nbands, 1]).T
            else:
                mxs = self.xmean
        else:
            if len(self.xbins.shape)==1:
                xbins = np.tile(self.xbins, [self.nzbins, self.nbands, 1]).T
            elif len(self.xbins.shape)==2:
                xbins = np.tile(self.xbins.reshape(self.xbins.shape[1],1,self.xbins.shape[0]),
                                [1, self.nbands, 1]).T
            else:
                xbins = self.xbins

            mxs = ( xbins[1:,:,:] + xbins[:-1,:,:] ) / 2

        if fracdev:

            if len(ref_x.shape)==1:
                ref_x = np.tile(ref_x, [self.nzbins, self.nbands, 1]).T
            elif len(ref_x.shape)==2:
                ref_x = np.tile(ref_x.reshape(ref_x.shape[1],1,ref_x.shape[0]),
                                [1, self.nbands, 1]).T



        #If we want to plot fractional deviations and ref_y uses
        #different bins, interpolate ref_y to the magnitudes given
        #at mxs. Don't extrapolate!

        if fracdev:
            lidx = np.zeros((len(usecols), len(usez)), dtype=int)
            hidx = np.zeros((len(usecols), len(usez)), dtype=int)

            rxs = ref_x.shape
            xs  = mxs.shape
            rls = ref_y.shape
            iref_y = np.zeros(rxs)
            if ref_ye is not None:
                iref_ye = np.zeros(rxs)

            for i, c in enumerate(usecols):
                for j in range(len(usez)):
                    xi  = mxs[:,usecols[i],usez[j]]
                    rxi = ref_x[:,rusecols[i],rusez[j]]

                    if fracdev & ((rxs[0]!=xs[0]) | ((rxi[0]!=xi[0])  | (rxi[-1]!=xi[-1]))):
                        lidx[i,j] = xi.searchsorted(rxi[0])
                        hidx[i,j] = xi.searchsorted(rxi[-1])
                        nanidx = np.isnan(ref_y[:,rusecols[i],rusez[j]]) | np.isnan(ref_x[:,rusecols[i],rusez[j]])
                        sply = InterpolatedUnivariateSpline(ref_x[~nanidx,rusecols[i],rusez[j]], ref_y[~nanidx,rusecols[i],rusez[j]])
                        iref_y[lidx[i,j]:hidx[i,j],rusecols[i],rusez[j]] = sply(mxs[lidx[i,j]:hidx[i,j],c,usez[j]])

                        if ref_ye is not None:
                            nanidx = np.isnan(ref_ye[:,rusecols[i],rusez[j]]) | np.isnan(ref_x[:,rusecols[i],rusez[j]])
                            splye = InterpolatedUnivariateSpline(ref_x[~nanidx,rusecols[i],rusez[j]], ref_ye[~nanidx,rusecols[i],rusez[j]])
                            iref_ye[lidx[i,j]:hidx[i,j],rusecols[i],rusez[j]] = splye(mxs[lidx[i,j]:hidx[i,j],c,usez[j]])
                    else:
                        lidx[i,j] = 0
                        hidx[i,j] = len(mxs)
                        iref_y[lidx[i,j]:hidx[i,j],rusecols[i],rusez[j]] = ref_y[:,rusecols[i],rusez[j]]
                        if ref_ye is not None:
                            iref_ye[lidx[i,j]:hidx[i,j],rusecols[i],rusez[j]] = ref_ye[:,rusecols[i],rusez[j]]

            ref_y = iref_y
            if ref_ye is not None:
                ref_ye = iref_ye

        #if no figure provided, set up figure and axes
        if f is None:
            if fracdev==False:
                f, ax = plt.subplots(nzbins, len(usecols),
                                     sharex=True, sharey=True, figsize=(8,8))
                ax = np.array(ax).reshape((nzbins, len(usecols)))
            #if we want fractional deviations, we need twice as many
            #rows of axes; every other row contains the fractional
            #deviations from the row above it.
            else:
                assert(ref_y is not None)
                gs = gridspec.GridSpec(nzbins*2, len(usecols))
                f = plt.figure()
                ax = np.zeros((nzbins*2, len(usecols)), dtype='O')
                for r in range(len(usecols)):
                    for c in range(nzbins):
                        if (r==0) & (c==0):
                            ax[2*c][r] = f.add_subplot(gs[2*c,r])
                            ax[2*c+1][r] = f.add_subplot(gs[2*c+1,r], sharex=ax[0][0])
                        else:
                            if sharex & sharey:
                                ax[2*c][r] = f.add_subplot(gs[2*c,r], sharex=ax[0][0], sharey=ax[0][0])
                            elif sharex:
                                ax[2*c][r] = f.add_subplot(gs[2*c,r], sharex=ax[0][0])
                            elif sharey:
                                ax[2*c][r] = f.add_subplot(gs[2*c,r], sharey=ax[0][0])
                            else:
                                ax[2*c][r]= f.add_subplot(gs[2*c,r])

                            ax[2*c+1][r] = f.add_subplot(gs[2*c+1,r],
                              sharex=ax[2*c][r], sharey=ax[1][0])

            newaxes = True
        else:
            newaxes = False

        if nzbins>1:
            for i, b in enumerate(usecols):
                for j in range(nzbins):
                    if fracdev==False:
                        if (self.y[:,b,usez[j]]==0).all() | (np.isnan(self.y[:,b,usez[j]]).all()): continue
                        l1 = ax[j][i].plot(mxs[:,b,usez[j]], self.y[:,b,usez[j]], **kwargs)
                        if self.ye is not None:
                            ax[j][i].fill_between(mxs[:,b,usez[j]], self.y[:,b,usez[j]]-self.ye[:,b,usez[j]],
                              self.y[:,b,usez[j]]+self.ye[:,b,usez[j]],
                              alpha=0.5, **kwargs)
                        if logx:
                            ax[j][i].set_xscale('log')
                        if logy:
                            ax[j][i].set_yscale('log')
                    else:
                        li = lidx[i,j]
                        hi = hidx[i,j]

                        rb = rusecols[i]
                        #calculate error on fractional
                        #difference
                        if (ref_ye is not None) & (self.ye is not None):
                            vye = self.ye[li:hi,b,usez[j]]**2
                            vrye = ref_ye[li:hi,rb,rusez[j]]**2
                            fye = (self.y[li:hi,b,usez[j]] - ref_y[li:hi,rb,rusez[j]]) / ref_y[li:hi,rb,rusez[j]]
                            dye = fye * np.sqrt( (vye + vrye) / (self.y[li:hi,b,usez[j]] - ref_y[li:hi,rb,rusez[j]]) ** 2 + ref_ye[li:hi,rb,rusez[j]] ** 2 / ref_y[li:hi,rb,rusez[j]]**2 )
                        else:
                            fye = (self.y[li:hi,b,usez[j]] - ref_y[li:hi,rb,rusez[j]]) / ref_y[li:hi,rb,rusez[j]]
                            dye = None

                        if (self.y[:,b,usez[j]]==0).all() | (np.isnan(self.y[:,b,usez[j]]).all()): continue
                        l1 = ax[2*j][i].plot(mxs[:,b,usez[j]], self.y[:,b,usez[j]], **kwargs)
                        ax[2*j+1][i].plot(mxs[li:hi,b,usez[j]], fye, **kwargs)
                        if self.ye is not None:
                            ax[2*j][i].fill_between(mxs[:,b,usez[j]], self.y[:,b,usez[j]]-self.ye[:,b,usez[j]],
                              self.y[:,b,usez[j]]+self.ye[:,b,usez[j]],
                              alpha=0.5, **kwargs)
                        if dye is not None:
                            ax[2*j+1][i].fill_between(mxs[li:hi,b,usez[j]], fye-dye,
                              fye+dye,
                              alpha=0.5,**kwargs)

                        if logx:
                            ax[2*j][i].set_xscale('log')

                        if logy:
                            ax[2*j][i].set_yscale('log')

                        if (i==0) & (j==0):
                            if xlim is not None:
                                ax[0][0].set_xlim(xlim)
                            if ylim is not None:
                                ax[0][0].set_ylim(ylim)
                            if fylim is not None:
                                ax[1][0].set_ylim(fylim)

        else:
            for i, b in enumerate(usecols):
                if fracdev==False:
                    if (self.y[:,b,0]==0).all() | (np.isnan(self.y[:,b,0]).all()): continue
                    l1 = ax[0][i].plot(mxs[:,b,0], self.y[:,b,0], **kwargs)
                    if self.ye is not None:
                        ax[0][i].fill_between(mxs[:,b,0], self.y[:,b,0] - self.ye[:,b,0],
                                                self.y[:,b,0] + self.ye[:,b,0],
                                                alpha=0.5, **kwargs)
                                            
                    if logx:
                        ax[0][i].set_xscale('log')
                    if logy:
                        ax[0][i].set_yscale('log')

                else:
                    li = lidx[i,0]
                    hi = hidx[i,0]

                    rb = rusecols[i]
                    #calculate error on fractional
                    #difference
                    if (ref_ye is not None) & (self.ye is not None):
                        vye = self.ye[li:hi,b,0]**2
                        vrye = ref_ye[li:hi,rb,0]**2
                        fye = (self.y[li:hi,b,0] - ref_y[li:hi,rb,0]) / ref_y[li:hi,rb,0]
                        dye = fye * np.sqrt( (vye + vrye) / (self.y[li:hi,b,0] - ref_y[li:hi,rb,0]) ** 2 + ref_ye[li:hi,rb,0] ** 2 / ref_y[li:hi,rb,0]**2 )
                    else:
                        fye = (self.y[li:hi,b,0] - ref_y[li:hi,rb,0]) / ref_y[li:hi,rb,0]
                        dye = None

                    if (self.y[:,b,0]==0).all() | (np.isnan(self.y[:,b,0]).all()): continue
                    l1 = ax[0][i].plot(mxs[:,b,0], self.y[:,b,0], **kwargs)
                    ax[1][i].plot(mxs[li:hi,b,0], fye, **kwargs)

                    if self.ye is not None:
                        ax[0][i].fill_between(mxs[:,b,0], self.y[:,b,0]-self.ye[:,b,0],
                          self.y[:,b,0]+self.ye[:,b,0],
                          alpha=0.5, **kwargs)
                    if dye is not None:
                        ax[1][i].fill_between(mxs[li:hi,b,0], fye-dye,
                          fye+dye,
                          alpha=0.5, **kwargs)

                    if logx:
                        ax[0][i].set_xscale('log')
                    if logy:
                        ax[0][i].set_yscale('log')

                    if (i==0):
                        if xlim is not None:
                            ax[0][0].set_xlim(xlim)
                        if ylim is not None:
                            ax[0][0].set_ylim(ylim)
                        if fylim is not None:
                            ax[1][0].set_ylim(fylim)

        #if we just created the axes, add labels
        if newaxes:
            sax = f.add_subplot(111)
            plt.setp(sax.get_xticklines(), visible=False)
            plt.setp(sax.get_yticklines(), visible=False)
            plt.setp(sax.get_xticklabels(), visible=False)
            plt.setp(sax.get_yticklabels(), visible=False)
            sax.patch.set_alpha(0.0)
            sax.patch.set_facecolor('none')
            sax.spines['top'].set_color('none')
            sax.spines['top'].set_alpha(0.0)
            sax.spines['bottom'].set_color('none')
            sax.spines['bottom'].set_alpha(0.0)
            sax.spines['left'].set_color('none')
            sax.spines['left'].set_alpha(0.0)
            sax.spines['right'].set_color('none')
            sax.tick_params(labelcolor='w', top=False, bottom=False, left=False, right=False)
            sax.set_xlabel(r'%s' % xlabel, labelpad=40)
            sax.set_ylabel(r'%s' % ylabel, labelpad=40)
            #plt.tight_layout()

        if (plotname is not None) & (not compare):
            plt.savefig(plotname)

        return f, ax, l1
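The fracdev branch of visualize boils down to: spline the reference curve, evaluate it at this metric's own x-values restricted to the overlap (no extrapolation), and take the fractional deviation. A toy sketch of that comparison with invented curves:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

x = np.linspace(18.0, 24.0, 13)            # this metric's bin centres (toy)
y = 10.0 ** (0.3 * (x - 18.0))

ref_x = np.linspace(18.5, 23.5, 26)        # reference on a different binning
ref_y = 10.0 ** (0.3 * (ref_x - 18.0)) * 1.05

# Restrict to the overlap so the spline never extrapolates
lo = x.searchsorted(ref_x[0])
hi = x.searchsorted(ref_x[-1])
iref_y = InterpolatedUnivariateSpline(ref_x, ref_y)(x[lo:hi])

frac_dev = (y[lo:hi] - iref_y) / iref_y
print(np.round(frac_dev, 3))               # ~ -0.048 everywhere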
Example #55
0
def Evolution(state):
    """Evolution acts on a Universe class object and models it from a = 10**-9 to a = 1"""
    # Python 3 no longer allows tuple unpacking in function signatures,
    # so unpack the state tuple explicitly
    (xi, yi, ci, u, Yi, gi, giS, gipr, giprS, gipr2, gipr2S, AYi, AYiS, n,
     Lami, OmDEi, OmegaDE, OmegaRad, OmegaM, wphi, hdot, delta, deltap,
     deltap2, e1, e2, A, WA, Delta, GrZ, w0, wa, wp) = state
    Fins = np.array([xi[0],xi[1],yi[0],yi[1],ci[0],ci[1],u,Lami[0],Lami[1],n+1])
    cons = (6.0**(1.0/2.0))/2.0
    a = 10**(-9)
    dlna = 10**(-2)
    z = (1.0/a)-1.0 
    while a < 1:
        f = 3.0*(((xi[0]**2)*gi[0]) + n*((xi[1]**2)*gi[1])) + (u**2)
        dxi = ((xi/2.0)*(3.0+f-2.0*cons*Lami*xi)+cons*AYi*(Lami*OmDEi-2.0*cons*xi*(gi+Yi*gipr)))*dlna
        dyi = ((yi/2.0)*(3.0+f-2.0*cons*Lami*xi))*dlna
        du = ((u/2.0)*(-1.0+f))*dlna
        xi += dxi
        yi += dyi
        u += du
        Yi = (xi**2)/(yi**2)
        gi = eval(giS)
        gipr = eval(giprS)
        gipr2 = eval(gipr2S)
        AYi = eval(AYiS)
        OmDEi = (xi**2)*(gi+2.0*Yi*gipr)
        OmegaDE = OmDEi[0] + n*OmDEi[1]
        OmegaRad = (u**2)
        OmegaM = 1 - OmegaDE - OmegaRad
        wphi = ((((xi[0]**2)*gi[0])+n*((xi[1]**2)*gi[1]))/(((xi[0]**2)*(gi[0]+2.0*Yi[0]*gipr[0]))+ n*((xi[1]**2)*(gi[1]+2.0*Yi[1]*gipr[1]))))
        hdot = -(3.0/2.0) - (3.0/2.0)*((xi[0]**2)*gi[0]+n*((xi[1]**2)*gi[1])) - .5*(u**2)
        delta += deltap*dlna
        deltap += deltap2*dlna
        deltap2 = ((3.0/2.0)*OmegaM*delta) - ((hdot+2.0)*deltap)

        if z < 25.0:
            Delta = np.append(Delta, delta)
            GrZ = np.append(GrZ, z)
        if a > 0.1:
            WA = np.append(WA, wphi)
            A = np.append(A, a)
        a = a*(1.0+dlna)
        z = (1.0/a)-1.0

    # Exporting linear growth data
    GrZ = np.trim_zeros(GrZ, 'f')
    Delta = np.trim_zeros(Delta, 'f')
    GrZ = np.flipud(GrZ)
    Delta = (np.flipud(Delta))/delta       
    PickZ = np.zeros(20)
    PickD = np.zeros(20)
            
    for p in range(20):
        i = 16*p
        PickZ[p] = GrZ[i]
        PickD[p] = Delta[i]

    GrZ = PickZ
    Delta = PickD/(1.0/(1.0+PickZ))
    
    # Finding w_0, w_a, and w_p
    
    A = np.trim_zeros(A, 'f')
    WA = np.trim_zeros(WA, 'f')  
    e1c = e1(A)
    e2c = e2(A)
    alpha1s = InterpolatedUnivariateSpline(A, (1.0+WA)*e1c, k=3)
    alpha2s = InterpolatedUnivariateSpline(A, (1.0+WA)*e2c, k=3)
    alpha1 = con1*alpha1s.integral(0.1,1.0)
    alpha2 = con2*alpha2s.integral(0.1,1.0)
    w0 = ((alpha1*(gamma2-beta2)+alpha2*(beta1-gamma1))/(beta1*gamma2-beta2*gamma1))-1.0
    wa = (alpha1*beta2-alpha2*beta1)/(beta1*gamma2-beta2*gamma1)
    wp = (alpha1/beta1)-1.0
    if not np.isnan(wp):
        END = np.array([w0,wa,wp,wphi,OmegaM,OmegaDE])
        END = np.append(END,Fins)
        GROWTH = np.array([GrZ,Delta])
        return END, GROWTH
    else:
        return np.array([0,0]),np.array([0,0])
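The w0/wa extraction at the end splines the integrand (1 + w(a)) times a weight function over the late-time scale factors and integrates it on [0.1, 1]. A reduced sketch with a made-up weight; the real e1/e2 splines and the con/beta/gamma coefficients come from elsewhere in the pipeline and are not reproduced here:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

A = np.linspace(0.1, 1.0, 200)           # scale factors
WA = -1.0 + 0.2 * (1.0 - A)              # toy w(a) = -1 + 0.2*(1 - a)

def e1(a):
    return np.ones_like(a)               # placeholder weight function

alpha1s = InterpolatedUnivariateSpline(A, (1.0 + WA) * e1(A), k=3)
print(alpha1s.integral(0.1, 1.0))        # integral of 0.2*(1-a) = 0.081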
Example #56
0
        for h in hs.h_vec:
            e_k_mag = e_k.copy()
            e_k_mag.data[:] += h * Sz[None, ...]

            hsh = HartreeSolver(e_k_mag,
                                beta,
                                H_int=H_int,
                                gf_struct=gf_struct)
            hsh.solve_newton_mu(hs.mu, M0=hs.M)

            hs.Omega0_vec.append(hsh.Omega0)
            hs.Omega_vec.append(hsh.Omega)

        # -- Compute susceptibility from field response

        spl_m_h = InterpolatedUnivariateSpline(hs.h_vec, hs.m_vec)
        hs.chi_dmdh = -spl_m_h(0., nu=1)

        spl_Omega_h = InterpolatedUnivariateSpline(hs.h_vec, hs.Omega_vec)
        hs.chi_d2Omegadh2 = -spl_Omega_h(0., nu=2)

        print('chi            =', hs.chi)
        print('chi_dmdh       =', hs.chi_dmdh)
        print('chi_d2Omegadh2 =', hs.chi_d2Omegadh2)

        #exit()

        # -- Solve by seeding FM symmetry breaking

        hs.solve_newton(N_target=N_tot, M0=M, mu0=mu)
        mu = hs.mu
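The susceptibility step evaluates spline derivatives at a point through the nu keyword of __call__: spl(x, nu=1) for the first derivative, nu=2 for the second. A self-contained sketch with a toy magnetisation curve:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

h = np.linspace(-0.1, 0.1, 21)       # toy applied-field values
m = 0.3 * h - 2.0 * h**3             # toy magnetisation response m(h)

spl = InterpolatedUnivariateSpline(h, m)
print(spl(0.0, nu=1))                # dm/dh at h = 0, ~0.3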
Example #57
0
def mean_focus(expstart, expend, camera='UVIS1', spline_order=3,
               not_found_value=None, with_var=False):
    """
    Gets the mean focus over a given observation period. Exposure start and end
    times can be specified as Modified Julian Date float (like the FITS header
    EXPSTART and EXPEND keywords) or a UTC time string in YYYY-MM-DD HH:MM:SS
    format.
    :param expstart: Start time of exposure.
    :param expend: End time of exposure.
    :param camera: One of UVIS1, UVIS2, WFC1, WFC2, HRC, PC. Default is UVIS1.
    :param spline_order: Degree of the spline used to interpolate the model
     data points (passed as k= to scipy.interpolate.UnivariateSpline). Use 1 for
     linear interpolation. Default is 3.
    :param not_found_value: Value to return if the Focus Model does not have
     data for the given time interval. Default value (None) means raise
     HTTPResponseError.
    :param with_var: Also include the variance in a returned 2-tuple.
    :return: Continuous (integral) mean focus between expstart and expend.
    """
    # Convert date/time strings to MJD
    try:
        startnums = [int(num) for num in re.split(':|-|/| ', expstart)]
        endnums = [int(num) for num in re.split(':|-|/| ', expend)]
        expstart = _date_time_to_mjd(*startnums)
        expend = _date_time_to_mjd(*endnums)
    except TypeError:
        pass
    # Pad input exposure start and end time, to make sure we get at least one
    # data point before and after. Then split up into year, date, times
    ten_mins = 10.0 / (24 * 60)  # ten minutes as a fraction of a day
    expstart_pad = float(expstart) - ten_mins
    expend_pad = float(expend) + ten_mins
    start_yr, start_date, start_time = _mjd_to_year_date_time(expstart_pad)
    stop_yr, stop_date, stop_time = _mjd_to_year_date_time(expend_pad)
    # Chop off seconds
    start_time = start_time.rsplit(':', 1)[0]
    stop_time = stop_time.rsplit(':', 1)[0]

    if start_date != stop_date:
        intervals = [(start_yr, start_date, start_time, '23:59'),
                     (stop_yr, stop_date, '00:00', stop_time)]
    else:
        intervals = [(start_yr, start_date, start_time, stop_time)]

    try:
        txt_focus = ''
        # Get text table of focus data for each interval
        for year, date, start, stop in intervals:
            txt_interval = get_model_data(year, date, start, stop, camera,
                                          format='TXT')
            col_names, txt_interval = txt_interval.split('\n', 1)
            txt_focus += txt_interval
        # convert to numpy array
        focus_data = genfromtxt(StringIO(txt_focus), skip_header=0, dtype=None,
                                names=_model_output_columns,
                                delimiter=_output_field_widths)
        # Create interpolating spline
        spline = InterpolatedUnivariateSpline(
            focus_data['JulianDate'], focus_data['Model'], k=spline_order)
        # Return the continuous (integral) mean
        mean_foc = spline.integral(expstart, expend) / (expend - expstart)
        # Calculate signal variance (see e.g. Wikipedia article for RMS)
        if with_var:
            xvals = linspace(expstart, expend, focus_data.size*2)
            var_foc = trapz(spline(xvals)**2, xvals) / (expend - expstart)
            var_foc -= mean_foc**2

    except HTTPResponseError as err:
        if err.response.status == httplib.NOT_FOUND \
                or not_found_value is not None:
            mean_foc = not_found_value
            var_foc = not_found_value
        else:
            raise

    if with_var:
        return mean_foc, var_foc
    return mean_foc
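The continuous mean and variance in mean_focus follow the usual integral definitions: the mean is the spline integral divided by the interval length, and the variance is the mean of the squared signal minus the squared mean. A toy version with a synthetic focus model:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

t = np.linspace(0.0, 10.0, 50)       # toy sample times
f = 2.0 + np.sin(t)                  # toy focus-model values

spl = InterpolatedUnivariateSpline(t, f, k=3)

t0, t1 = 1.0, 9.0                    # "exposure" window
mean_f = spl.integral(t0, t1) / (t1 - t0)

x = np.linspace(t0, t1, 200)         # mean square minus squared mean
var_f = np.trapz(spl(x)**2, x) / (t1 - t0) - mean_f**2
print(mean_f, var_f)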
Example #58
0
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import InterpolatedUnivariateSpline

IUS = InterpolatedUnivariateSpline   # short alias used below

def interp1d(x, y, xi):
    ius = InterpolatedUnivariateSpline(x, y)
    return ius(xi)
mass_derv = np.vstack((mass_pltbin, avg_shear_mat)).T

mass_s = np.arange(10.3, 13.9, 0.02)

derv_weighted_avg = [0] * shear_bin
derv_avg = [0] * shear_bin
spd = [0] * shear_bin

for j in range(shear_bin):
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
    ### Deg. = 2
    """
    spl2 = IUS(mass_pltbin, avg_shear_mat[j], k = 2)
    spld2 = spl2.derivative()
    """
    spl3 = IUS(mass_pltbin, avg_shear_mat[j], k=3)
    spld3 = spl3.derivative()
    """
    spl4 = IUS(mass_pltbin, avg_shear_mat[j], k = 4)
    spld4 = spl4.derivative()
    spl5 = IUS(mass_pltbin, avg_shear_mat[j], k = 5)
    spld5 = spl5.derivative()
    """
    #ax1.plot(mass_pltbin, avg_shear_mat[j], color = 'r')
    #ax1.plot(mass_s, spl2(mass_s), color = 'b')
    ax1.plot(mass_s, spl3(mass_s), color='r')
    #ax1.plot(mass_s, spl4(mass_s), color = 'c')
    #ax1.plot(mass_s, spl5(mass_s), color = 'r')
    #ax1.legend(["k = 2", "k = 3", "k = 4", "k = 5"], loc = 'upper left')
    ax1.scatter(mass_pltbin, avg_shear_mat[j], marker='x', color='r')
    ax1.set_xlabel(r"$\log(M_h) (M_{\odot})$")
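Example #58 differentiates the fitted spline with derivative(), which returns a new spline object one degree lower that can be evaluated on any grid. A minimal sketch with an invented shear profile:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as IUS

mass = np.linspace(10.3, 13.9, 19)     # toy log-mass bin centres
shear = 0.05 * (mass - 10.0) ** 2      # toy averaged shear signal

spl3 = IUS(mass, shear, k=3)
spld3 = spl3.derivative()              # spline of d(shear)/d(log M)

mass_s = np.arange(10.3, 13.9, 0.02)
print(spld3(mass_s)[:3])               # derivative on the fine grid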
Example #60
0
    def calibrate_photometry_gaia(self, solution_num=None, iteration=1):
        """
        Calibrate extracted magnitudes with Gaia data.

        """

        num_solutions = self.plate_solution.num_solutions

        assert (solution_num is None
                or (solution_num > 0 and solution_num <= num_solutions))

        self.log.write(
            'Photometric calibration: solution {}, iteration {:d}'.format(
                solution_num, iteration),
            level=3,
            event=70,
            solution_num=solution_num)

        # Initialise the flag value
        self.phot_calibrated = False

        if 'METHOD' in self.plate_header:
            pmethod = self.plate_header['METHOD']

            if (pmethod is not None and pmethod != ''
                    and 'direct photograph' not in pmethod
                    and 'focusing' not in pmethod
                    and 'test plate' not in pmethod):
                self.log.write('Cannot calibrate photometry due to unsupported '
                               'observation method ({:s})'.format(pmethod),
                               level=2,
                               event=70,
                               solution_num=solution_num)
                return

        # Create dictionary for calibration results
        self.phot_calib = OrderedDict()

        # Create output directory, if missing
        if self.write_phot_dir and not os.path.isdir(self.write_phot_dir):
            self.log.write('Creating output directory {}'.format(
                self.write_phot_dir),
                           level=4,
                           event=70,
                           solution_num=solution_num)
            os.makedirs(self.write_phot_dir)

        if self.write_phot_dir:
            fn_cterm = os.path.join(self.write_phot_dir,
                                    '{}_cterm.txt'.format(self.basefn))
            fcterm = open(fn_cterm, 'wb')
            fn_caldata = os.path.join(self.write_phot_dir,
                                      '{}_caldata.txt'.format(self.basefn))
            fcaldata = open(fn_caldata, 'wb')

        # Select sources for photometric calibration
        self.log.write('Selecting sources for photometric calibration',
                       level=3,
                       event=71,
                       solution_num=solution_num,
                       double_newline=False)

        if solution_num is None:
            solution_num = 1

        self.phot_calib['solution_num'] = solution_num
        self.phot_calib['iteration'] = iteration

        # Store number of Gaia EDR3 objects matched with the current solution
        bgaia = (self.sources['solution_num'] == solution_num)
        self.phot_calib['num_gaia_edr3'] = bgaia.sum()

        # For single exposures, exclude blended sources.
        # For multiple exposures, include them, because otherwise the bright
        # end will lack calibration stars.
        if num_solutions == 1:
            bflags = ((self.sources['sextractor_flags'] == 0) |
                      (self.sources['sextractor_flags'] == 2))
        else:
            bflags = self.sources['sextractor_flags'] <= 3

        # Create calibration-star mask
        # Discard very red stars (BP-RP > 2)
        cal_mask = ((self.sources['solution_num'] == solution_num) &
                    (self.sources['mag_auto'] > 0) &
                    (self.sources['mag_auto'] < 90) & bflags &
                    (self.sources['flag_clean'] == 1)
                    & ~self.sources['gaiaedr3_bpmag'].mask
                    & ~self.sources['gaiaedr3_rpmag'].mask &
                    (self.sources['gaiaedr3_bp_rp'].filled(99.) <= 2) &
                    (self.sources['gaiaedr3_neighbors'] == 1))

        num_calstars = cal_mask.sum()
        self.phot_calib['num_candidate_stars'] = num_calstars

        if num_calstars == 0:
            self.log.write('No stars for photometric calibration',
                           level=2,
                           event=71,
                           solution_num=solution_num)
            return

        self.log.write('Found {:d} calibration-star candidates with '
                       'Gaia magnitudes on the plate'.format(num_calstars),
                       level=4,
                       event=71,
                       solution_num=solution_num)

        if num_calstars < 10:
            self.log.write('Too few calibration stars on the plate!',
                           level=2,
                           event=71,
                           solution_num=solution_num)
            return

        # Evaluate color term

        if iteration == 1:
            self.log.write('Determining color term using annular bins 1-3',
                           level=3,
                           event=72,
                           solution_num=solution_num)
            cterm_mask = cal_mask & (self.sources['annular_bin'] <= 3)

            if cterm_mask.sum() < 50:
                self.log.write('Found {:d} calibration stars in bins 1-3, '
                               'increasing area'.format(cterm_mask.sum()),
                               level=4,
                               event=72,
                               solution_num=solution_num)
                self.log.write('Determining color term using annular bins 1-6',
                               level=3,
                               event=72,
                               solution_num=solution_num)
                cterm_mask = cal_mask & (self.sources['annular_bin'] <= 6)
        else:
            self.log.write('Determining color term using annular bins 1-8',
                           level=3,
                           event=72,
                           solution_num=solution_num)
            cterm_mask = cal_mask & (self.sources['annular_bin'] <= 8)

        self.evaluate_color_term(self.sources[cterm_mask],
                                 solution_num=solution_num)

        # If color term was not determined, we need to terminate the
        # calibration
        if 'color_term' not in self.phot_calib:
            self.log.write(
                'Cannot continue photometric calibration without '
                'color term',
                level=2,
                event=72,
                solution_num=solution_num)
            return

        cterm = self.phot_calib['color_term']
        cterm_err = self.phot_calib['color_term_error']

        # Use stars in all annular bins
        self.log.write('Photometric calibration using annular bins 1-9',
                       level=3,
                       event=73,
                       solution_num=solution_num)

        # Select stars with unique plate mag values
        plate_mag = self.sources['mag_auto'][cal_mask].data
        plate_mag_u, uind = np.unique(plate_mag, return_index=True)
        ind_calibstar_u = np.where(cal_mask)[0][uind]
        #cal_u_mask = np.zeros_like(cal_mask)
        #cal_u_mask[np.where(cal_mask)[0][uind]] = True
        num_cal_u = len(plate_mag_u)

        self.log.write('{:d} stars with unique magnitude'.format(num_cal_u),
                       double_newline=False,
                       level=4,
                       event=73,
                       solution_num=solution_num)

        if num_cal_u < 10:
            self.log.write('Too few stars with unique magnitude!',
                           double_newline=False,
                           level=2,
                           event=73,
                           solution_num=solution_num)
            return

        plate_mag_u = self.sources['mag_auto'][ind_calibstar_u].data
        cat_bmag_u = self.sources['gaiaedr3_bpmag'][ind_calibstar_u].data
        cat_vmag_u = self.sources['gaiaedr3_rpmag'][ind_calibstar_u].data
        cat_natmag = cat_vmag_u + cterm * (cat_bmag_u - cat_vmag_u)
        self.sources['cat_natmag'][ind_calibstar_u] = cat_natmag

        # Eliminate outliers by constructing calibration curve from
        # the bright end and extrapolate towards faint stars

        # Find initial plate magnitude limit
        kde = sm.nonparametric.KDEUnivariate(plate_mag_u.astype(np.double))
        kde.fit()
        ind_maxden = np.argmax(kde.density)
        plate_mag_maxden = kde.support[ind_maxden]
        ind_dense = np.where(kde.density > 0.2 * kde.density.max())[0]
        brightmag = kde.support[ind_dense[0]]
        plate_mag_lim = kde.support[ind_dense[-1]]
        plate_mag_brt = plate_mag_u.min()
        plate_mag_mid = (plate_mag_brt + 0.5 * (plate_mag_lim - plate_mag_brt))

        if brightmag > plate_mag_mid:
            brightmag = plate_mag_mid

        # Check the number of stars in the bright end
        nb = (plate_mag_u <= plate_mag_mid).sum()

        if nb < 10:
            plate_mag_mid = plate_mag_u[9]

        # Construct magnitude cuts for outlier elimination
        ncuts = int((plate_mag_lim - plate_mag_mid) / 0.5) + 2
        mag_cuts = np.linspace(plate_mag_mid, plate_mag_lim, ncuts)
        ind_cut = np.where(plate_mag_u <= plate_mag_mid)[0]
        ind_good = np.arange(len(ind_cut))
        mag_cut_prev = mag_cuts[0]
        #mag_slope_prev = None

        # Loop over magnitude bins
        for mag_cut in mag_cuts[1:]:
            gpmag = plate_mag_u[ind_cut[ind_good]]
            gcmag = cat_natmag[ind_cut[ind_good]]

            nbright = (gpmag < brightmag).sum()

            if nbright < 20:
                alt_brightmag = (plate_mag_u.min() +
                                 (plate_mag_maxden - plate_mag_u.min()) * 0.5)
                nbright = (gpmag < alt_brightmag).sum()

            if nbright < 10:
                nbright = 10

            # Exclude bright outliers by fitting a line and checking
            # if residuals are larger than 2 mag
            ind_outliers = np.array([], dtype=int)
            xdata = gpmag[:nbright]
            ydata = gcmag[:nbright]
            p1 = np.poly1d(np.polyfit(xdata, ydata, 1))
            res = cat_natmag[ind_cut] - p1(plate_mag_u[ind_cut])
            ind_brightout = np.where((np.absolute(res) > 2.) &
                                     (plate_mag_u[ind_cut] <= xdata.max()))[0]

            if len(ind_brightout) > 0:
                ind_outliers = np.append(ind_outliers, ind_cut[ind_brightout])
                ind_good = np.setdiff1d(ind_good, ind_outliers)
                gpmag = plate_mag_u[ind_cut[ind_good]]
                gcmag = cat_natmag[ind_cut[ind_good]]
                nbright -= len(ind_brightout)

                if nbright < 10:
                    nbright = 10

            # Construct calibration curve
            # Set lowess fraction depending on the number of data points
            frac = 0.2

            if len(ind_good) < 500:
                frac = 0.2 + 0.3 * (500 - len(ind_good)) / 500.

            z = sm.nonparametric.lowess(gcmag,
                                        gpmag,
                                        frac=frac,
                                        it=3,
                                        delta=0.1,
                                        return_sorted=True)

            # In case there are less than 20 good stars, use only
            # polynomial
            if len(ind_good) < 20:
                weights = np.zeros(len(ind_good)) + 1.

                for i in np.arange(len(ind_good)):
                    indw = np.where(np.absolute(gpmag - gpmag[i]) < 1.0)[0]

                    if len(indw) > 2:
                        weights[i] = 1. / gcmag[indw].std()**2

                p2 = np.poly1d(np.polyfit(gpmag, gcmag, 2, w=weights))
                z[:, 1] = p2(z[:, 0])

            # Improve bright-star calibration
            if nbright > len(ind_good):
                nbright = len(ind_good)

            xbright = gpmag[:nbright]
            ybright = gcmag[:nbright]

            if nbright < 50:
                p2 = np.poly1d(np.polyfit(xbright, ybright, 2))
                vals = p2(xbright)
            else:
                z1 = sm.nonparametric.lowess(ybright,
                                             xbright,
                                             frac=0.4,
                                             it=3,
                                             delta=0.1,
                                             return_sorted=True)
                vals = z1[:, 1]

            weight2 = np.arange(nbright, dtype=float) / nbright
            weight1 = 1. - weight2
            z[:nbright, 1] = weight1 * vals + weight2 * z[:nbright, 1]

            # Improve faint-star calibration by fitting a 2nd order
            # polynomial
            # Currently, disable improvement
            improve_faint = False
            if improve_faint:
                ind_faint = np.where(gpmag > mag_cut_prev - 6.)[0]
                nfaint = len(ind_faint)

                if nfaint > 5:
                    xfaint = gpmag[ind_faint]
                    yfaint = gcmag[ind_faint]
                    weights = np.zeros(nfaint) + 1.

                    for i in np.arange(nfaint):
                        indw = np.where(
                            np.absolute(xfaint - xfaint[i]) < 0.5)[0]

                        if len(indw) > 2:
                            weights[i] = 1. / yfaint[indw].std()**2

                    p2 = np.poly1d(np.polyfit(xfaint, yfaint, 2, w=weights))
                    vals = p2(xfaint)

                    weight2 = (np.arange(nfaint, dtype=float) / nfaint)**1
                    weight1 = 1. - weight2
                    z[ind_faint,
                      1] = weight2 * vals + weight1 * z[ind_faint, 1]

            # Interpolate smoothed calibration curve
            s = InterpolatedUnivariateSpline(z[:, 0], z[:, 1], k=1)

            ind_cut = np.where(plate_mag_u <= mag_cut)[0]
            fit_mag = s(plate_mag_u[ind_cut])

            residuals = cat_natmag[ind_cut] - fit_mag
            mag_cut_prev = mag_cut

            ind_outliers = np.array([], dtype=int)

            # Mark as outliers those stars that deviate more than 1 mag
            ind_out = np.where(np.absolute(residuals) > 1.0)[0]

            if len(ind_out) > 0:
                ind_outliers = np.append(ind_outliers, ind_cut[ind_out])
                ind_outliers = np.unique(ind_outliers)

            # Additionally clip outliers in small bins
            for mag_loc in np.linspace(plate_mag_brt, mag_cut, 100):
                mag_low = mag_loc - 0.5
                mag_high = mag_loc + 0.5
                ind_loc = np.where((plate_mag_u[ind_cut] > mag_low)
                                   & (plate_mag_u[ind_cut] < mag_high))[0]
                ind_loc = np.setdiff1d(ind_loc, ind_outliers)

                if len(ind_loc) >= 5:
                    rms_res = np.sqrt((residuals[ind_loc]**2).mean())
                    ind_locout = np.where(
                        np.absolute(residuals[ind_loc]) > 3. * rms_res)[0]

                    if len(ind_locout) > 0:
                        ind_outliers = np.append(ind_outliers,
                                                 ind_cut[ind_loc[ind_locout]])

                    ind_outliers = np.unique(ind_outliers)

            ind_good = np.setdiff1d(np.arange(len(ind_cut)), ind_outliers)

            #flt = sigma_clip(residuals, maxiters=None)
            #ind_good = ~flt.mask
            #ind_good = np.where(np.absolute(residuals) < 3*residuals.std())[0]

            # Stop outlier elimination if there is a gap in magnitudes
            if mag_cut - plate_mag_u[ind_cut[ind_good]].max() > 1.5:
                ind_faintout = np.where(plate_mag_u > mag_cut)[0]

                if len(ind_faintout) > 0:
                    ind_outliers = np.append(ind_outliers, ind_faintout)
                    ind_outliers = np.unique(ind_outliers)
                    ind_good = np.setdiff1d(np.arange(len(plate_mag_u)),
                                            ind_outliers)
                    self.log.write(
                        '{:d} faint stars eliminated as outliers'.format(
                            len(ind_faintout)),
                        double_newline=False,
                        level=4,
                        event=73,
                        solution_num=solution_num)

                self.log.write(
                    'Outlier elimination stopped due to a long gap '
                    'in magnitudes!',
                    double_newline=False,
                    level=2,
                    event=73,
                    solution_num=solution_num)
                break

            if len(ind_good) < 10:
                self.log.write(
                    'Outlier elimination stopped '
                    'due to insufficient number of stars left!',
                    double_newline=False,
                    level=2,
                    event=73,
                    solution_num=solution_num)
                break

        num_outliers = len(ind_outliers)
        self.log.write('{:d} outliers eliminated'.format(num_outliers),
                       double_newline=False,
                       level=4,
                       event=73,
                       solution_num=solution_num)
        ind_good = np.setdiff1d(np.arange(len(plate_mag_u)), ind_outliers)
        self.log.write('{:d} stars after outlier elimination'.format(
            len(ind_good)),
                       double_newline=False,
                       level=4,
                       event=73,
                       solution_num=solution_num)

        if len(ind_good) < 10:
            self.log.write('Too few calibration stars ({:d}) after outlier '
                           'elimination!'.format(len(ind_good)),
                           double_newline=False,
                           level=2,
                           event=73,
                           solution_num=solution_num)
            return

        # Continue with photometric calibration without outliers

        # Study the distribution of magnitudes
        kde = sm.nonparametric.KDEUnivariate(plate_mag_u[ind_good].astype(
            np.double))
        kde.fit()
        ind_maxden = np.argmax(kde.density)
        plate_mag_maxden = kde.support[ind_maxden]
        ind_dense = np.where(kde.density > 0.2 * kde.density.max())[0]
        plate_mag_lim = kde.support[ind_dense[-1]]
        ind_valid = np.where(plate_mag_u[ind_good] <= plate_mag_lim)[0]
        num_valid = len(ind_valid)

        self.log.write(
            '{:d} calibration stars brighter than limiting magnitude'.format(
                num_valid),
            double_newline=False,
            level=4,
            event=73,
            solution_num=solution_num)

        #valid_cal_mask = np.zeros_like(cal_u_mask)
        #valid_cal_mask[np.where(cal_u_mask)[0][ind_good[ind_valid]]] = True
        ind_calibstar_valid = ind_calibstar_u[ind_good[ind_valid]]
        self.sources['phot_calib_flags'][ind_calibstar_valid] = 1

        if num_outliers > 0:
            #outlier_mask = np.zeros_like(cal_u_mask)
            #outlier_mask[np.where(cal_u_mask)[0][ind_outliers]]
            ind_calibstar_outlier = ind_calibstar_u[ind_outliers]
            self.sources['phot_calib_flags'][ind_calibstar_outlier] = 2

        cat_natmag = cat_natmag[ind_good[ind_valid]]
        plate_mag_u = plate_mag_u[ind_good[ind_valid]]
        plate_mag_brightest = plate_mag_u.min()
        frac = 0.2

        if num_valid < 500:
            frac = 0.2 + 0.3 * (500 - num_valid) / 500.

        z = sm.nonparametric.lowess(cat_natmag,
                                    plate_mag_u,
                                    frac=frac,
                                    it=3,
                                    delta=0.1,
                                    return_sorted=True)

        # Improve bright-star calibration

        # Find magnitude at which the frequency of stars becomes
        # larger than 500 mag^(-1)
        #ind_500 = np.where((kde.density*len(ind_good) > 500))[0][0]
        #brightmag = kde.support[ind_500]

        # Find magnitude at which density becomes larger than 0.05 of
        # the max density
        #ind_dense_005 = np.where(kde.density > 0.05*kde.density.max())[0]
        # Index of kde.support at which density becomes 0.05 of max
        #ind0 = ind_dense_005[0]
        #brightmag = kde.support[ind0]
        #nbright = len(plate_mag_u[np.where(plate_mag_u < brightmag)])

        # Find magnitude at which density becomes larger than 0.2 of
        # the max density
        #brightmag = kde.support[ind_dense[0]]
        #nbright = len(plate_mag_u[np.where(plate_mag_u < brightmag)])

        # Find the second percentile of magnitudes
        nbright = round(num_valid * 0.02)

        # Limit bright stars with 2000
        nbright = min([nbright, 2000])

        if nbright < 20:
            brightmag = (plate_mag_brightest +
                         (plate_mag_maxden - plate_mag_brightest) * 0.5)
            nbright = len(plate_mag_u[np.where(plate_mag_u < brightmag)])

        if nbright < 5:
            nbright = 5

        if nbright < 50:
            p2 = np.poly1d(
                np.polyfit(plate_mag_u[:nbright], cat_natmag[:nbright], 2))
            vals = p2(plate_mag_u[:nbright])
        else:
            z1 = sm.nonparametric.lowess(cat_natmag[:nbright],
                                         plate_mag_u[:nbright],
                                         frac=0.4,
                                         it=3,
                                         delta=0.1,
                                         return_sorted=True)
            vals = z1[:, 1]

        t = Table()
        t['plate_mag'] = plate_mag_u[:nbright]
        t['cat_natmag'] = cat_natmag[:nbright]
        t['fit_mag'] = vals
        basefn_solution = '{}-{:02d}'.format(self.basefn, solution_num)
        fn_tab = os.path.join(self.scratch_dir,
                              '{}_bright.fits'.format(basefn_solution))
        t.write(fn_tab, format='fits', overwrite=True)

        # Normalise density to max density of the bright range
        #d_bright = kde.density[:ind0] / kde.density[:ind0].max()
        # Find a smooth density curve and use values as weights
        #s_bright = InterpolatedUnivariateSpline(kde.support[:ind0],
        #                                        d_bright, k=1)
        #weight2 = s_bright(plate_mag_u[:nbright])

        # Linearly increasing weight
        weight2 = np.arange(nbright, dtype=float) / nbright

        weight1 = 1. - weight2

        # Merge two calibration curves with different weights
        z[:nbright, 1] = weight1 * vals + weight2 * z[:nbright, 1]

        # Interpolate the whole calibration curve
        s = InterpolatedUnivariateSpline(z[:, 0], z[:, 1], k=1)

        # Store the calibration curve
        self.calib_curve = s

        # Calculate residuals
        residuals = cat_natmag - s(plate_mag_u)

        # Smooth residuals with spline
        X = self.sources['x_source'][ind_calibstar_valid].data
        Y = self.sources['y_source'][ind_calibstar_valid].data

        if num_valid > 100:
            s_corr = SmoothBivariateSpline(X, Y, residuals, kx=5, ky=5)
        elif num_valid > 50:
            s_corr = SmoothBivariateSpline(X, Y, residuals, kx=3, ky=3)
        else:
            s_corr = None

        # Calculate new residuals and correct for dependence on
        # x, y, mag_auto. Do it only if the number of valid
        # calibration stars is larger than 500.
        s_magcorr = None

        if num_valid > 500:
            residuals2 = np.zeros(num_valid)

            for i in np.arange(num_valid):
                residuals2[i] = residuals[i] - s_corr(X[i], Y[i])

            # Create magnitude bins
            plate_mag_srt = np.sort(plate_mag_u)
            bin_mag = [(plate_mag_srt[99] + plate_mag_srt[0]) / 2.]
            bin_hw = [(plate_mag_srt[99] - plate_mag_srt[0]) / 2.]
            ind_lastmag = 99

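            # Build bins of at least 100 stars each: if the next 100
            # stars span more than 0.5 mag, let the bin cover them;
            # otherwise use a fixed 0.5-mag bin (half-width 0.25)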
            while True:
                if plate_mag_srt[ind_lastmag +
                                 100] - bin_mag[-1] - bin_hw[-1] > 0.5:
                    bin_edge = bin_mag[-1] + bin_hw[-1]
                    bin_mag.append(
                        (plate_mag_srt[ind_lastmag + 100] + bin_edge) / 2.)
                    bin_hw.append(
                        (plate_mag_srt[ind_lastmag + 100] - bin_edge) / 2.)
                    ind_lastmag += 100
                else:
                    bin_mag.append(bin_mag[-1] + bin_hw[-1] + 0.25)
                    bin_hw.append(0.25)
                    ind_lastmag = (plate_mag_srt <
                                   bin_mag[-1] + 0.25).sum() - 1

                # If fewer than 100 sources remain, extend the last
                # bin to the faint end and stop
                if ind_lastmag > num_valid - 101:
                    add_width = plate_mag_srt[-1] - bin_mag[-1] - bin_hw[-1]
                    bin_mag[-1] += add_width / 2.
                    bin_hw[-1] += add_width / 2.
                    break

            # Evaluate natmag correction in magnitude bins
            s_magcorr = []

            for i, (m, hw) in enumerate(zip(bin_mag, bin_hw)):
                binmask = (plate_mag_u > m - hw) & (plate_mag_u <= m + hw)
                #print(m, m-hw, m+hw, binmask.sum())
                smag = SmoothBivariateSpline(X[binmask],
                                             Y[binmask],
                                             residuals2[binmask],
                                             kx=3,
                                             ky=3)
                s_magcorr.append(smag)

        # Evaluate RMS errors from the calibration residuals
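        # generic_filter applies _rmse to a sliding window of 10
        # consecutive residuals along the (sorted) plate_mag_u axis;
        # the running RMSE is then LOWESS-smoothed and interpolated
        # as a function of plate magnitude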
        rmse_list = generic_filter(residuals, _rmse, size=10)
        rmse_lowess = sm.nonparametric.lowess(rmse_list,
                                              plate_mag_u,
                                              frac=0.5,
                                              it=3,
                                              delta=0.1)
        s_rmse = InterpolatedUnivariateSpline(rmse_lowess[:, 0],
                                              rmse_lowess[:, 1],
                                              k=1)
        rmse = s_rmse(plate_mag_u)

        if self.write_phot_dir:
            np.savetxt(
                fcaldata,
                np.column_stack((plate_mag_u, cat_natmag, s(plate_mag_u),
                                 cat_natmag - s(plate_mag_u))))
            fcaldata.write('\n\n')

        # Store calibration statistics
        bright_limit = s(plate_mag_brightest).item()
        faint_limit = s(plate_mag_lim).item()

        self.phot_calib['num_calib_stars'] = num_valid
        self.phot_calib['num_bright_stars'] = nbright
        self.phot_calib['num_outliers'] = num_outliers
        self.phot_calib['bright_limit'] = bright_limit
        self.phot_calib['faint_limit'] = faint_limit
        self.phot_calib['mag_range'] = faint_limit - bright_limit
        self.phot_calib['rmse_min'] = rmse.min()
        self.phot_calib['rmse_median'] = np.median(rmse)
        self.phot_calib['rmse_max'] = rmse.max()
        self.phot_calib['plate_mag_brightest'] = plate_mag_brightest
        self.phot_calib['plate_mag_density02'] = kde.support[ind_dense[0]]
        self.phot_calib['plate_mag_brightcut'] = brightmag
        self.phot_calib['plate_mag_maxden'] = plate_mag_maxden
        self.phot_calib['plate_mag_lim'] = plate_mag_lim

        # Append calibration results to the list
        self.phot_calib_list.append(self.phot_calib)

        # Apply photometric calibration to sources
        sol_mask = ((self.sources['solution_num'] == solution_num) &
                    (self.sources['mag_auto'] < 90.))
        num_solstars = sol_mask.sum()
        mag_auto_sol = self.sources['mag_auto'][sol_mask]

        self.log.write(
            'Applying photometric calibration to sources '
            'in annular bins 1-9',
            level=3,
            event=74,
            solution_num=solution_num)

        # Correct magnitudes for positional effects
        if s_corr is not None:
            natmag_corr = self.sources['natmag_correction'][sol_mask]
            xsrc = self.sources['x_source'][sol_mask]
            ysrc = self.sources['y_source'][sol_mask]

            # Loop over the sources one by one, because
            # SmoothBivariateSpline may crash with large input arrays
            for i in np.arange(num_solstars):
                # Apply first correction (dependent only on coordinates)
                natmag_corr[i] = s_corr(xsrc[i], ysrc[i])[0, 0]

                # Apply second correction (dependent on mag_auto)
                if s_magcorr is not None:
                    corr_list = []

                    for smag in s_magcorr:
                        corr_list.append(smag(xsrc[i], ysrc[i])[0, 0])

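                    # Interpolate the bin corrections linearly in
                    # magnitude and evaluate at this source's mag_auto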
                    smc = InterpolatedUnivariateSpline(bin_mag, corr_list, k=1)
                    natmag_corr[i] += smc(mag_auto_sol[i])

        # Assign magnitudes and errors
        self.sources['natmag'][sol_mask] = s(mag_auto_sol)
        self.sources['natmag_plate'][sol_mask] = s(mag_auto_sol)
        self.sources['natmag_error'][sol_mask] = s_rmse(mag_auto_sol)

        if s_corr is not None:
            self.sources['natmag_correction'][sol_mask] = natmag_corr
            self.sources['natmag'][sol_mask] += natmag_corr

        self.sources['color_term'][sol_mask] = cterm
        self.sources['natmag_residual'][ind_calibstar_u] = (
            self.sources['cat_natmag'][ind_calibstar_u] -
            self.sources['natmag'][ind_calibstar_u])

        # Apply flags and errors to sources outside the magnitude range
        # of calibration stars
        brange = (mag_auto_sol < plate_mag_brightest)
        ind = np.where(sol_mask)[0][brange]

        if brange.sum() > 0:
            self.sources['phot_range_flags'][ind] = 1
            self.sources['natmag_error'][ind] = s_rmse(plate_mag_brightest)

        brange = (mag_auto_sol > plate_mag_lim)
        ind = np.where(sol_mask)[0][brange]

        if brange.sum() > 0:
            self.sources['phot_range_flags'][ind] = 2
            self.sources['natmag_error'][ind] = s_rmse(plate_mag_lim)

        # Select stars with known external photometry
        bgaia = (sol_mask & ~self.sources['gaiaedr3_bpmag'].mask
                 & ~self.sources['gaiaedr3_rpmag'].mask)

        if bgaia.sum() > 0:
            bp_rp = self.sources['gaiaedr3_bp_rp'][bgaia]
            bp_rp_err = 0.
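            # The colour equation natmag = rpmag + cterm * (BP-RP)
            #                            = bpmag + (cterm - 1) * (BP-RP)
            # is solved below for rpmag and bpmag; bp_rp_err is taken
            # to be zero here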

            self.sources['rpmag'][bgaia] = (self.sources['natmag'][bgaia] -
                                            cterm * bp_rp)
            self.sources['bpmag'][bgaia] = (self.sources['natmag'][bgaia] -
                                            (cterm - 1.) * bp_rp)
            rpmagerr = np.sqrt(self.sources['natmag_error'][bgaia]**2 +
                               (cterm_err * bp_rp)**2 + (cterm * bp_rp_err)**2)
            bpmagerr = np.sqrt(self.sources['natmag_error'][bgaia]**2 +
                               (cterm_err * bp_rp)**2 +
                               ((cterm - 1.) * bp_rp_err)**2)
            self.sources['rpmag_error'][bgaia] = rpmagerr
            self.sources['bpmag_error'][bgaia] = bpmagerr

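        # Derive overall bright and faint limits from all calibrations
        # of this solution and iteration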
        try:
            brightlim = min([
                cal['bright_limit'] for cal in self.phot_calib_list
                if cal['solution_num'] == solution_num
                and cal['iteration'] == iteration
            ])
            faintlim = max([
                cal['faint_limit'] for cal in self.phot_calib_list
                if cal['solution_num'] == solution_num
                and cal['iteration'] == iteration
            ])
            mag_range = faintlim - brightlim
        except Exception:
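            # min()/max() raise ValueError when no matching
            # calibration results exist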
            brightlim = None
            faintlim = None
            mag_range = None

        if num_valid > 0:
            self.phot_calibrated = True
            self.bright_limit = brightlim
            self.faint_limit = faintlim

            self.log.write('Photometric calibration results (solution {:d}, '
                           'iteration {:d}): '
                           'bright limit {:.3f}, faint limit {:.3f}'.format(
                               solution_num, iteration, brightlim, faintlim),
                           level=4,
                           event=73,
                           solution_num=solution_num)

        if self.write_phot_dir:
            fcaldata.close()
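
The calibration curve, the RMSE curve and the per-bin magnitude correction above all rely on InterpolatedUnivariateSpline with k=1, i.e. on a piecewise-linear interpolant that passes through every input point. Below is a minimal standalone sketch of that pattern; the magnitude arrays are made-up stand-ins, not data from the example:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

# Hypothetical calibration pairs: plate magnitude vs catalogue magnitude
plate_mag = np.array([8.0, 10.0, 12.0, 14.0, 16.0])
cat_natmag = np.array([8.3, 10.1, 12.4, 14.2, 16.5])

# k=1 gives a piecewise-linear interpolant through every point;
# the x values must be strictly increasing
curve = InterpolatedUnivariateSpline(plate_mag, cat_natmag, k=1)

print(curve(11.0))   # linear interpolation between the 10- and 12-mag points
print(curve(18.0))   # beyond the data, the outermost linear piece is extrapolated

Unlike a smoothing spline, this interpolant reproduces its input points exactly, which is why the example smooths with LOWESS first and only then wraps the smoothed curve in a spline for evaluation.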