def doFUNCTION(self,wl_min=10.0,wl_max=20.0,function='linear'):

    """
    Replace all opacities below a cut-off wavelength by an extrapolation.

    The chosen function is fitted to the opacities in the interpolation
    range [wl_min,wl_max] and evaluated at all wavelengths below wl_min.

    @keyword wl_min: lower boundary of the interpolation range and the
                     cut-off wavelength for the extrapolation
                     (default: 10.0)
    @type wl_min: float
    @keyword wl_max: upper boundary of the interpolation range
                     (default: 20.0)
    @type wl_max: float
    @keyword function: type of function used for the inter/extrapolation.
                       See Interpol.pEval for the available function types.
                       (default: 'linear')
    @type function: string

    """

    #raise TypeError("WARNING! The CustumOpa().doFUNCTION method is obsolete! New version NYI.")

    #- Select the relevant input lines (not saving the scattering matrices)
    self.opacity_file = True
    inputsel = [line for line in self.input_data if len(line) == 4]
    inputsel = array([[float(f) for f in line] for line in inputsel])
    wl = inputsel[:,0]
    function = function.lower()

    #- Select the extrapolation and interpolation regions
    wl_low = wl[wl < wl_min]
    wlinter = wl[(wl >= wl_min) * (wl <= wl_max)]
    abs_inter = inputsel[:,2][(wl >= wl_min) * (wl <= wl_max)]
    sca_inter = inputsel[:,3][(wl >= wl_min) * (wl <= wl_max)]

    #- Fit the interpolation region and extrapolate to shorter wavelengths,
    #- clipping unphysical negative opacities to zero
    abs_p0 = [abs_inter[0],wl[0],1,1]
    sca_p0 = [sca_inter[0],wl[0],1,1]
    abs_low = Interpol.fitFunction(x_in=wlinter,y_in=abs_inter,\
                                   x_out=wl_low,func=function,\
                                   initial=abs_p0,show=1)
    abs_low = array([a > 0 and a or 0 for a in abs_low])
    sca_low = Interpol.fitFunction(x_in=wlinter,y_in=sca_inter,\
                                   x_out=wl_low,func=function,\
                                   initial=sca_p0,show=1)
    sca_low = array([s > 0 and s or 0 for s in sca_low])
    ext_low = abs_low + sca_low

    #- Set the output data: the extrapolated part followed by the untouched
    #- long-wavelength part
    self.output_data = [array([w,el,al,sl])
                        for w,el,al,sl in zip(wl_low,ext_low,\
                                              abs_low,sca_low)]
    self.output_data.extend(inputsel[wl >= wl_min])
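#-- A minimal standalone sketch (not part of the class) of the
#   fit-and-extrapolate pattern used in doFUNCTION above, assuming
#   Interpol.fitFunction behaves like an ordinary least-squares fit of the
#   named function. A plain linear fit stands in for the 'linear' case; all
#   names below are illustrative.
def _extrapolate_opacity_sketch(wl,kappa,wl_min=10.0,wl_max=20.0):
    import numpy as np
    wl, kappa = np.asarray(wl), np.asarray(kappa)
    #- Fit the interpolation region [wl_min,wl_max]
    sel = (wl >= wl_min) * (wl <= wl_max)
    slope, icept = np.polyfit(wl[sel],kappa[sel],1)
    #- Evaluate the fit below wl_min and clip negative opacities to zero,
    #- mirroring the "a > 0 and a or 0" step above
    low = slope*wl[wl < wl_min] + icept
    low = np.where(low > 0,low,0.)
    #- Join the extrapolated part to the untouched long-wavelength part
    return np.hstack([low,kappa[wl >= wl_min]])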
def dereddenData(self):

    '''
    Deredden the data.

    The interstellar extinction curve by Chiar & Tielens (2006) is used
    with an Av to Ak conversion factor of 0.112.

    The correction is done by interpolating the curve, and extrapolating it
    to longer wavelengths. The extrapolation is done with a power law
    fitted to the long-wavelength region (lambda > 22 micron).

    For short-wavelength points below 1.24 micron (the lower limit of the
    Chiar & Tielens curve), the Cardelli extinction curve is used.

    '''

    #- Read the extinction curves, then inter/extrapolate them
    ext_x,ext_y = getExtinctionCurve(self.gal_position,'chiar_tielens',\
                                     0.112)
    ext_car_x, ext_car_y = getExtinctionCurve(curve_type='cardelli',\
                                              av_to_ak_conv=0.112)

    #- Initial parameter guess for the extrapolation of the long-wavelength
    #- extinction curve: a power law fitted from 22 micron onward
    p0 = [-2, 0.01, 1, -1]
    deredfunc = 'power'
    extrapol_xmin = 22.0
    chiar_min = ext_x[0]

    #- Fit the power law to the >= 22 micron wavelength range
    plsq = leastsq(Interpol.getResiduals,p0,\
                   args=(ext_x[ext_x>=extrapol_xmin],\
                         ext_y[ext_x>=extrapol_xmin],\
                         deredfunc),maxfev=20000)[0]

    #- Calculate the extrapolation and interpolation for the data grids,
    #- then combine and apply the correction to the data
    for (dt,fn),data in self.data_raw.items():
        if len(data) == 3:
            data_x, data_y, data_ey = data[0], data[1], data[2]
        else:
            data_x, data_y = data[0], data[1]
        extra = Interpol.pEval(data_x[data_x>=extrapol_xmin],plsq,deredfunc)
        inter = interp1d(ext_x,ext_y)(data_x[(data_x<extrapol_xmin)\
                                             *(data_x>=chiar_min)])
        short = interp1d(ext_car_x,ext_car_y)(data_x[data_x<chiar_min])
        corr = hstack([short,inter,extra])
        if self.plot_extrapol_extinction:
            Plotting2.plotCols(x=[ext_x,data_x],y=[ext_y,corr],\
                               xlogscale=1,ylogscale=1)
        if len(data) == 3:
            self.data[(dt,fn)] = (data_x,\
                                  data_y*10**(corr*self.ak*0.4),\
                                  data_ey*10**(corr*self.ak*0.4))
        else:
            self.data[(dt,fn)] = (data_x,data_y*10**(corr*self.ak*0.4))
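#-- A minimal sketch (illustration only) of the dereddening step in
#   dereddenData above: interpolate an extinction curve A_lambda/A_K onto
#   the data grid, extrapolate beyond its long-wavelength end with a power
#   law, and multiply the flux by 10**(0.4*corr*ak). The two-parameter
#   power law a*x**b is an assumption standing in for the module's
#   four-parameter form; data_x is assumed sorted and >= ext_x[0].
def _deredden_sketch(data_x,data_y,ext_x,ext_y,ak):
    import numpy as np
    from scipy.interpolate import interp1d
    from scipy.optimize import curve_fit
    power = lambda x,a,b: a*x**b
    #- Fit the power law to the long-wavelength tail of the curve
    tail = ext_x >= 22.0
    (a,b),cov = curve_fit(power,ext_x[tail],ext_y[tail],p0=[1.,-2.])
    #- Interpolate inside the curve's range, extrapolate beyond it
    inside = data_x <= ext_x[-1]
    corr = np.hstack([interp1d(ext_x,ext_y)(data_x[inside]),\
                      power(data_x[~inside],a,b)])
    #- Correct the observed flux for interstellar extinction
    return data_y*10**(0.4*corr*ak)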
def doHONY(self,wl1=1.0,wl2=2.0,wl3=10.0,a_mod=0.05,q_cst=1.0):

    """
    Replace all opacities below a cut-off wavelength with values as assumed
    by Hony et al. (2003, 2004).

    @keyword wl1: first discontinuity in micron
                  (default: 1.0)
    @type wl1: float
    @keyword wl2: second discontinuity in micron
                  (default: 2.0)
    @type wl2: float
    @keyword wl3: third discontinuity in micron
                  (default: 10.0)
    @type wl3: float
    @keyword a_mod: model grain size in micron; Hony et al. (2004) use 0.01
                    (default: 0.05)
    @type a_mod: float
    @keyword q_cst: constant extinction efficiency at wavelengths < wl1
                    (default: 1.0)
    @type q_cst: float

    """

    spec_dens = DataIO.getInputData(keyword='SPEC_DENS',rindex=self.index,\
                                    filename='Dust.dat')
    #- Mass extinction coefficient kappa = 3*Q/(4*rho*a), with the grain
    #- radius converted from micron to cm
    opa_cst = q_cst/4.0*3.0/spec_dens/(a_mod*10**(-4))
    for line in self.input_data:
        if len(line) == 4 and float(line[0]) < wl1:
            self.output_data.append([line[0],str(opa_cst+1e-60),\
                                     str(opa_cst),str(1e-60)])
        elif len(line) == 4 and float(line[0]) >= wl1 \
                and float(line[0]) < wl2:
            opa = Interpol.linInterpol([wl1,wl2],[opa_cst,1e-60],\
                                       float(line[0]))
            self.output_data.append([line[0],str(opa+1e-60),str(opa),\
                                     str(1e-60)])
        elif len(line) == 4 and float(line[0]) >= wl2 \
                and float(line[0]) < wl3:
            self.output_data.append([line[0],str(2e-60),str(1e-60),\
                                     str(1e-60)])
        else:
            self.output_data.append(line)
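#-- The constant opacity above follows the standard conversion from
#   extinction efficiency Q to mass extinction coefficient for spherical
#   grains: kappa = 3*Q/(4*rho*a) in cm^2/g, with the grain radius a
#   converted from micron to cm. A minimal numerical check, with an
#   illustrative (assumed) specific density in g/cm^3:
def _hony_opacity_sketch(q_cst=1.0,spec_dens=1.8,a_mod=0.05):
    a_cm = a_mod*10**(-4)               #- grain radius in cm
    return 3.0*q_cst/(4.0*spec_dens*a_cm)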
def divideContinuum(self,w,f,dtype,frindex):

    '''
    Divide flux by the continuum flux in a dust feature.

    The continuum is fitted to two flanking wavelength ranges taken from
    the franges list, and evaluated over the full feature. A plot of the
    division is shown if the plot attribute is set.

    @param w: The wavelength grid
    @type w: list/array
    @param f: The flux grid
    @type f: list/array
    @param dtype: data type (only 'model','sws' for now)
    @type dtype: string
    @param frindex: The index in the franges list for this entry.
    @type frindex: int

    '''

    fr1,fr2 = self.franges[frindex][0],self.franges[frindex][1]
    fr3,fr4 = self.franges[frindex][2],self.franges[frindex][3]
    w_in = list(w[(w>fr1) * (w<fr2)]) + list(w[(w>fr3) * (w<fr4)])
    f_in = list(f[(w>fr1) * (w<fr2)]) + list(f[(w>fr3) * (w<fr4)])
    w_cont = w[(w>self.frmin*0.9)*(w<self.frmax*1.1)]
    f_ori = f[(w>self.frmin*0.9)*(w<self.frmax*1.1)]
    f_cont = Interpol.fitFunction(x_in=w_in,y_in=f_in,x_out=w_cont,\
                                  func=self.func[frindex])

    self.cont_division[dtype] = dict()
    self.cont_division[dtype]['w_feat'] = w_cont
    self.cont_division[dtype]['f_feat'] = f_ori
    self.cont_division[dtype]['w_fitsel_feat'] = w_in
    self.cont_division[dtype]['f_fitsel_feat'] = f_in
    self.cont_division[dtype]['f_interp'] = f_cont
    self.cont_division[dtype]['f_division'] = f_ori/f_cont

    if self.plot:
        x = [w_cont,w_cont]
        y = [f_ori,f_cont]
        Plotting2.plotCols(x=x,y=y,xmin=self.frmin*0.9,xmax=self.frmax*1.1,\
                           ylogscale=0,xlogscale=0)
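#-- A minimal sketch (illustration only) of the continuum division above:
#   fit a smooth function to the two flanking continuum windows
#   (fr1,fr2) and (fr3,fr4) and divide the feature by the evaluated fit.
#   A first-order polynomial stands in for Interpol.fitFunction here.
def _divide_continuum_sketch(w,f,fr1,fr2,fr3,fr4):
    import numpy as np
    w, f = np.asarray(w), np.asarray(f)
    sel = ((w>fr1)*(w<fr2)) | ((w>fr3)*(w<fr4))
    f_cont = np.polyval(np.polyfit(w[sel],f[sel],1),w)
    return f/f_cont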
def alignY(datalists,xmin,xmax,zeropoint=0,p0=[1,0,1.5,-2.5],func='power'):

    """
    *** WILL BE REWRITTEN ***

    Align two or more datasets by shifting their Y coordinates.

    Works on multiple data lists at a time. Each dataset is shifted to
    match the previous dataset AFTER that previous one has itself been
    shifted, e.g. for the third dataset
    y_new = zeropoint + shifts[1]*shifts[2]*y.

    The shifts between datasets are multiplicative; only the first dataset
    is shifted additively, by the keyword zeropoint.

    At least 3 points of overlap between consecutive datasets are required!

    @param datalists: two or more 2d lists of data giving (x,y)
    @type datalists: list[list[(.,.)]]
    @param xmin: the lower boundary (or boundaries) of the overlapping
                 region used for alignment: one value for 2 datasets,
                 n-1 values for n datasets
    @type xmin: float or list
    @param xmax: the upper boundary (or boundaries) of the overlapping
                 region used for alignment: one value for 2 datasets,
                 n-1 values for n datasets
    @type xmax: float or list
    @keyword zeropoint: The first dataset is shifted additively by this
                        value
                        (default: 0)
    @type zeropoint: float
    @keyword p0: initial parameters for the fitting function
                 (default: [1,0,1.5,-2.5])
    @type p0: list
    @return: The datasets as given, but with shifted Y-values, and the
             shifts that were used (first value additive, the rest
             multiplicative)
    @rtype: (list[list[(.,.)]], list)

    """

    #- Zeropoint correction of the first dataset
    shifts = [zeropoint]
    current_data = array(datalists[0])
    corrected = [[coord + array([0,zeropoint]) for coord in current_data]]

    #- A power law is fitted to the overlapping x-range of both datasets
    #- with leastsq. The x-values of the second list are evaluated with
    #- both functions, and the second list's y-values are corrected by the
    #- mean of the ratios of the two function evaluations.
    for i in xrange(len(datalists)-1):
        p_lsqlist1 = leastsq(Interpol.getResiduals,p0,\
                             args=([x for x in array(corrected[i])[:,0]
                                    if x >= xmin[i] and x <= xmax[i]],\
                                   [coord[1]
                                    for coord in array(corrected[i])
                                    if coord[0] >= xmin[i]
                                       and coord[0] <= xmax[i]],\
                                   func),\
                             maxfev=2000)[0]
        p_lsqlist2 = leastsq(Interpol.getResiduals,p0,\
                             args=([x for x in array(datalists[i+1])[:,0]
                                    if x >= xmin[i] and x <= xmax[i]],\
                                   [coord[1]
                                    for coord in array(datalists[i+1])
                                    if coord[0] >= xmin[i]
                                       and coord[0] <= xmax[i]],\
                                   func),\
                             maxfev=2000)[0]
        f1x2 = Interpol.pEval([x for x in array(datalists[i+1])[:,0]
                               if x >= xmin[i] and x <= xmax[i]],\
                              p_lsqlist1,func)
        f2x2 = Interpol.pEval([x for x in array(datalists[i+1])[:,0]
                               if x >= xmin[i] and x <= xmax[i]],\
                              p_lsqlist2,func)
        shifts.append(mean(f1x2/f2x2))
        corrected.append([coord*array([1,shifts[i+1]])
                          for coord in array(datalists[i+1])])
    return corrected,shifts
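#-- A minimal sketch (illustration only) of the alignment step above for
#   two datasets: fit the same functional form to both in the overlap
#   region [xmin,xmax], evaluate both fits on the second set's overlap
#   x-values, and scale the second set by the mean ratio. A two-parameter
#   power law a*x**b stands in for the module's four-parameter fit.
def _align_two_sketch(x1,y1,x2,y2,xmin,xmax):
    import numpy as np
    from scipy.optimize import curve_fit
    power = lambda x,a,b: a*x**b
    x1,y1,x2,y2 = [np.asarray(q) for q in (x1,y1,x2,y2)]
    s1 = (x1>=xmin)*(x1<=xmax)
    s2 = (x2>=xmin)*(x2<=xmax)
    p1,cov1 = curve_fit(power,x1[s1],y1[s1],p0=[1.,1.])
    p2,cov2 = curve_fit(power,x2[s2],y2[s2],p0=[1.,1.])
    #- The multiplicative shift that maps dataset 2 onto dataset 1
    shift = np.mean(power(x2[s2],*p1)/power(x2[s2],*p2))
    return y2*shift,shift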