def list_mesa_data(filename='profiles.index'):
    """
    Return a chronological list of *.data files in a MESA LOG directory.
    """
    number, priority, lognr = ascii.read2array(filename, skip_lines=1).T
    logfiles = [os.path.join(os.path.dirname(filename), 'profile%d.data' % (nr))
                for nr in lognr]
    return number, logfiles
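#-- Usage sketch (illustrative, not from the source): pair each profile file
#   with its model number; 'LOGS/profiles.index' assumes the standard MESA
#   directory layout.
numbers, logfiles = list_mesa_data(filename='LOGS/profiles.index')
for num, logfile in zip(numbers, logfiles):
    print('model %06d -> %s' % (num, logfile))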
def get_response(photband):
    """
    Retrieve the response curve of a photometric system 'SYSTEM.FILTER'

    OPEN.BOL represents a bolometric open filter.

    Example usage:

    >>> p = pl.figure()
    >>> for band in ['J','H','KS']:
    ...     p = pl.plot(*get_response('2MASS.%s'%(band)))

    If you defined a custom filter with the same name as an existing one and
    you want to use that one in the future, set C{prefer_file=False} in the
    C{custom_filters} module dictionary.

    @param photband: photometric passband
    @type photband: str ('SYSTEM.FILTER')
    @return: (wavelength [A], response)
    @rtype: (array, array)
    """
    photband = photband.upper()
    prefer_file = custom_filters["_prefer_file"]
    if photband == "OPEN.BOL":
        return np.array([1, 1e10]), np.array([1 / (1e10 - 1), 1 / (1e10 - 1)])
    #-- either get from file or get from dictionary
    photfile = os.path.join(basedir, "filters", photband)
    photfile_is_file = os.path.isfile(photfile)
    #-- if the file exists and files have preference
    if photfile_is_file and prefer_file:
        wave, response = ascii.read2array(photfile).T[:2]
    #-- if the custom filter exists
    elif photband in custom_filters:
        wave, response = custom_filters[photband]["response"]
    #-- if the file exists but custom filters have preference
    elif photfile_is_file:
        wave, response = ascii.read2array(photfile).T[:2]
    else:
        raise IOError("{0} does not exist {1}".format(photband,
                                                      custom_filters.keys()))
    sa = np.argsort(wave)
    return wave[sa], response[sa]
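#-- Usage sketch (illustrative, not from this module): the returned arrays
#   can be used directly for response-weighted quantities, e.g. a simple
#   effective wavelength of a passband via trapezoidal integration.
wave, response = get_response('2MASS.J')
eff_wave = np.trapz(wave * response, x=wave) / np.trapz(response, x=wave)
print('effective wavelength of 2MASS.J: %.1f A' % (eff_wave))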
def csv2recarray(filename):
    """
    Read a MAST csv (comma-sep) file into a record array.

    @param filename: name of the CSV file
    @type filename: str
    @return: catalog data columns, units, comments
    @rtype: record array, dict, list of str
    """
    data, comms = ascii.read2array(filename, dtype=np.str, splitchar=',',
                                   return_comments=True)
    results = None
    units = {}
    #-- retrieve the data and put it into a record array
    if len(data) > 1:
        #-- now convert this thing into a nice dictionary
        data = np.array(data)
        #-- retrieve the format of the columns. They are given in the
        #   Fortran format. In rare cases, columns contain multiple values
        #   themselves (so called vectors). In those cases, we interpret
        #   the contents as a long string
        formats = np.zeros_like(data[0])
        for i, fmt in enumerate(data[1]):
            if 'string' in fmt or fmt == 'datetime':
                formats[i] = 'a100'
            if fmt in ('integer', 'ra', 'dec', 'float'):
                formats[i] = 'f8'
        #-- define dtypes for record array
        dtypes = np.dtype([(i, j) for i, j in zip(data[0], formats)])
        #-- remove spaces or empty values
        cols = []
        for i, key in enumerate(data[0]):
            col = data[2:, i]
            #-- fill empty values with nan
            cols.append([(row.isspace() or not row) and np.nan or row
                         for row in col])
            #-- fix unit name
            for source in cat_info.sections():
                if cat_info.has_option(source, data[0, i] + '_unit'):
                    units[key] = cat_info.get(source, data[0, i] + '_unit')
                    break
            else:
                units[key] = 'nan'
        #-- define columns for record array and construct record array
        cols = [np.cast[dtypes[i]](cols[i]) for i in range(len(cols))]
        results = np.rec.array(cols, dtype=dtypes)
    return results, units, comms
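#-- Usage sketch (illustrative): 'mast_results.csv' is a hypothetical file
#   produced by a MAST query; the column name 'ra' is also an assumption.
results, units, comms = csv2recarray('mast_results.csv')
if results is not None:
    print(results.dtype.names)      # available catalog columns
    print(units.get('ra', 'nan'))   # unit bookkeeping per column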
def fitz2004chiar2006(Rv=3.1, curve='ism', **kwargs):
    """
    Combined and extrapolated extinction curve from Fitzpatrick (2004) and
    Chiar and Tielens (2006).

    This function returns A(lambda)/Av. To get A(lambda)/E(B-V), multiply
    the return value with Rv (A(V)=Rv*E(B-V)).

    This is only defined for Rv=3.1. If it is different, this will raise an
    AssertionError.

    Extra kwargs are to catch unwanted keyword arguments.

    @param Rv: Rv
    @type Rv: float
    @param curve: extinction curve
    @type curve: string (one of 'gc' or 'ism', galactic centre or local ISM)
    @return: wavelengths (A), A(lambda)/Av
    @rtype: (ndarray,ndarray)
    """
    if curve.lower() not in ['ism', 'gc']:
        raise ValueError('No Fitzpatrick2004/Chiar2006 curve available for %s.'
                         % (curve))
    fn = 'fitzpatrick2004_chiar2006%s_extrapol.dat' % curve
    source = os.path.join(basename, fn)
    #-- check Rv
    assert(Rv == 3.1)
    wavelengths, alam_ak = ascii.read2array(source).T
    #-- convert to AA
    wavelengths *= 1e4
    #-- convert from Ak normalization to Av normalization
    norm_reddening = model.synthetic_flux(wavelengths, alam_ak,
                                          ['JOHNSON.V', 'JOHNSON.K'])
    ak_to_av = norm_reddening[1] / norm_reddening[0]
    alam_aV = alam_ak * ak_to_av
    return wavelengths, alam_aV
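#-- Usage sketch (illustrative, not from the source): redden a flat model
#   spectrum with the combined curve. Rv=3.1 is required by the function;
#   E(B-V)=0.05 and the wavelength grid are arbitrary example values.
curve_wave, alam_av = fitz2004chiar2006(Rv=3.1, curve='ism')
wave = np.linspace(1e4, 1e5, 500)        # wavelengths in AA
flux = np.ones_like(wave)                # flat spectrum
alam = np.interp(wave, curve_wave, alam_av) * 3.1 * 0.05  # A(lam)=(A/Av)*Rv*E(B-V)
flux_red = flux * 10**(-0.4 * alam)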
def read_cles_sum(sumfile):
    """
    Read the input from a CLES '-sum.txt' file.

    Keys: C{logTeff}, C{logg}, C{M}, C{R}, C{logL}, C{age}(, C{num}, C{Xc})

    @param sumfile: filename of the '-sum.txt' file.
    @type sumfile: str
    @return: model track
    @rtype: recarray
    """
    c = {'summary': {'logTeff': 7, 'logg': 11, 'M': 2, 'R': 10,
                     'logL': 8, 'age': 6},
         'sum': {'logTeff': 2, 'logg': 8, 'M': 6, 'R': 7,
                 'logL': 3, 'age': 1, 'num': 0, 'Xc': 4}}
    data = ascii.read2array(sumfile)
    if 'summary' in sumfile:
        t = 'summary'
    else:
        t = 'sum'
    #-- not all info readily available, build it
    data_ = np.zeros((len(data[:, 0]), 3))
    mass = float(os.path.basename(sumfile).split('_')[0][1:])
    data_[:, 0] = mass
    radi = conversions.derive_radius((data[:, c[t]['logL']], '[Lsol]'),
                                     (data[:, c[t]['logTeff']], '[K]'),
                                     units='Rsol')[0]
    data_[:, 1] = radi
    logg_ = conversions.derive_logg((float(mass), 'Msol'), (data_[:, 1], 'Rsol'))
    data_[:, 2] = logg_
    data = np.hstack([data, data_])
    keys = np.array(c[t].keys())
    cols = np.array([c[t][key] for key in keys])
    sa = np.argsort(cols)
    keys = keys[sa]
    cols = cols[sa]
    mydata = np.rec.fromarrays([data[:, col] for col in cols], names=list(keys))
    logger.debug('CLES summary %s read' % (sumfile))
    return mydata
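#-- Usage sketch: the filename below is hypothetical, but note that the
#   parser derives the mass from it (first underscore-separated token with
#   its first character stripped), so 'M1.50_...-sum.txt' is read as a
#   1.5 Msol track.
track = read_cles_sum('M1.50_grid-sum.txt')
print(track.dtype.names)
print('first point: logTeff=%.3f logL=%.3f'
      % (track['logTeff'][0], track['logL'][0]))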
def fitzpatrick2004(Rv=3.1, **kwargs):
    """
    From Fitzpatrick 2004 (downloaded from FTP)

    This function returns A(lambda)/A(V).

    To get A(lambda)/E(B-V), multiply the return value with Rv (A(V)=Rv*E(B-V))

    Extra kwargs are to catch unwanted keyword arguments.

    @param Rv: Rv (2.1, 3.1 or 5.0)
    @type Rv: float
    @return: wavelengths (A), A(lambda)/Av
    @rtype: (ndarray,ndarray)
    """
    filename = 'Fitzpatrick2004_Rv_%.1f.red' % (Rv)
    myfile = os.path.join(basename, filename)
    wave_inv, elamv_ebv = ascii.read2array(myfile, skip_lines=15).T
    logger.info('Fitzpatrick2004 curve with Rv=%.2f' % (Rv))
    return 1e4 / wave_inv[::-1], ((elamv_ebv + Rv) / Rv)[::-1]
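#-- Usage sketch (illustrative): overplot the three tabulated Rv values;
#   'pl' is the module's pylab import, as elsewhere in this file.
for rv in [2.1, 3.1, 5.0]:
    wave, alam_av = fitzpatrick2004(Rv=rv)
    pl.plot(wave, alam_av, label='Rv=%.1f' % (rv))
pl.gca().set_xscale('log')
pl.legend()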
def chiar2006(Rv=3.1, curve='ism', **kwargs):
    """
    Extinction curve at infrared wavelengths from Chiar and Tielens (2006)

    We return A(lambda)/Av, by multiplying A(lambda)/Ak with Ak/Av=0.09
    (see Chiar and Tielens 2006).

    This is only defined for Rv=3.1. If it is different, this will raise an
    AssertionError.

    Extra kwargs are to catch unwanted keyword arguments.

    UNCERTAIN NORMALISATION

    @param Rv: Rv
    @type Rv: float
    @param curve: extinction curve
    @type curve: string (one of 'gc' or 'ism', galactic centre or local ISM)
    @return: wavelengths (A), A(lambda)/Av
    @rtype: (ndarray,ndarray)
    """
    source = os.path.join(basename, 'Chiar2006.red')
    #-- check Rv
    assert(Rv == 3.1)
    wavelengths, gc, ism = ascii.read2array(source).T
    if curve == 'gc':
        alam_ak = gc
    elif curve == 'ism':
        keep = ism > 0
        alam_ak = ism[keep]
        wavelengths = wavelengths[keep]
    else:
        raise ValueError('no curve %s' % (curve))
    alam_aV = alam_ak * 0.09
    return wavelengths * 1e4, alam_aV
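#-- Usage sketch (illustrative): compare the galactic-centre and local-ISM
#   infrared curves on one axis.
for curve in ['gc', 'ism']:
    wave, alam_av = chiar2006(Rv=3.1, curve=curve)
    pl.plot(wave, alam_av, label=curve.upper())
pl.legend()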
def fitzpatrick1999(Rv=3.1, **kwargs):
    """
    From Fitzpatrick 1999 (downloaded from ASAGIO database)

    This function returns A(lambda)/A(V).

    To get A(lambda)/E(B-V), multiply the return value with Rv (A(V)=Rv*E(B-V))

    Extra kwargs are to catch unwanted keyword arguments.

    @param Rv: Rv (2.1, 3.1 or 5.0)
    @type Rv: float
    @return: wavelengths (A), A(lambda)/Av
    @rtype: (ndarray,ndarray)
    """
    filename = 'Fitzpatrick1999_Rv_%.1f' % (Rv)
    filename = filename.replace('.', '_') + '.red'
    myfile = os.path.join(basename, filename)
    wave, alam_ebv = ascii.read2array(myfile).T
    alam_av = alam_ebv / Rv
    logger.info('Fitzpatrick1999 curve with Rv=%.2f' % (Rv))
    return wave, alam_av
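#-- Usage sketch: as the docstring says, multiplying the returned values by
#   Rv converts A(lambda)/Av to A(lambda)/E(B-V) (since A(V)=Rv*E(B-V)).
wave, alam_av = fitzpatrick1999(Rv=3.1)
alam_ebv = alam_av * 3.1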
def plot_logRho_logT(starl):
    """
    Plot the density-temperature diagram for one given profile.
    """
    #-- list all burning regions
    pl.xlabel('log (Density [g cm$^{-3}$]) [dex]')
    pl.ylabel('log (Temperature [K]) [dex]')
    for species in ['hydrogen', 'helium', 'carbon', 'oxygen']:
        logRho, logT = ascii.read2array(
            os.path.join(os.path.dirname(__file__), 'plot_info',
                         '%s_burn.data' % (species))).T
        pl.plot(logRho, logT, 'k--', lw=2)
        pl.annotate(species, (logRho[-1], logT[-1]), ha='right')
    #-- full list of available boundary files:
    #   ['elect','gamma_4_thirds','kap_rad_cond_eq','opal_clip','psi4','scvh_clip']
    bounds = ['gamma_4_thirds', 'opal_clip', 'scvh_clip']
    names = ['$\Gamma$<4/3', 'OPAL', 'SCVH']
    for limits, name in zip(bounds, names):
        logRho, logT = ascii.read2array(
            os.path.join(os.path.dirname(__file__), 'plot_info',
                         '%s.data' % (limits))).T
        pl.plot(logRho, logT, '--', lw=2, color='0.5')
        xann, yann = logRho.mean(), logT.mean()
        pl.annotate(name, (xann, yann), color='0.5')
    #-- plot stellar profile
    color_radiative = 'g'
    color_convective = (0.33, 0.33, 1.00)
    x = np.log10(starl['Rho'])
    y = np.log10(starl['temperature'])
    #-- first convective regions and radiative regions
    stab_type = starl['stability_type']
    #-- create a colormap for convective and radiative regions
    cmap = ListedColormap([color_radiative, color_convective])
    norm = BoundaryNorm([0, 1, 5], cmap.N)
    #-- create a set of line segments so that we can color them individually.
    #   This creates the points as an N x 1 x 2 array so that we can stack
    #   points together easily to get the segments. The segments array for
    #   LineCollection needs to be numlines x (points per line) x 2 (x and y)
    points = np.array([x, y]).T.reshape(-1, 1, 2)
    segments = np.concatenate([points[:-1], points[1:]], axis=1)
    #-- create the LineCollection object, setting the colormapping parameters.
    #   The actual values used for colormapping have to be set separately.
    lc = LineCollection(segments, cmap=cmap, norm=norm)
    lc.set_array(stab_type)
    lc.set_linewidth(10)
    pl.gca().add_collection(lc)
    #-- burning regions: color the segments by the local nuclear energy
    #   generation rate (thresholds 1, 1e3 and 1e7 erg/s/g)
    burn = starl['eps_nuc'][:-1]
    colors = np.zeros((len(segments), 4))
    colors[:, 0] = 1
    colors[:, -1] = 1
    colors[(burn <= 1e0), -1] = 0
    colors[(1e0 < burn) & (burn <= 1e3), 1] = 1
    colors[(1e3 < burn) & (burn <= 1e7), 1] = 0.5
    lc = LineCollection(segments, colors=colors)
    lc.set_linewidth(4)
    pl.gca().add_collection(lc)
    p1 = pl.Line2D([0, 0], [1, 1], color=(1, 1.0, 0), lw=3)
    p2 = pl.Line2D([0, 0], [1, 1], color=(1, 0.5, 0), lw=3)
    p3 = pl.Line2D([0, 0], [1, 1], color=(1, 0, 0), lw=3)
    p4 = pl.Line2D([0, 0], [1, 1], color='g', lw=3)
    p5 = pl.Line2D([0, 0], [1, 1], color=(0.33, 0.33, 1.0), lw=3)
    leg = pl.legend([p1, p2, p3, p4, p5],
                    [">1 erg/s/g", ">10$^3$ erg/s/g", ">10$^7$ erg/s/g",
                     'Radiative', 'Convective'], loc='best', fancybox=True)
    leg.get_frame().set_alpha(0.5)
    pl.xlim(-10, 11)
    pl.ylim(4.0, 9.4)
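#-- Usage sketch (illustrative): 'starl' must be a record array with at least
#   the columns 'Rho', 'temperature', 'stability_type' and 'eps_nuc';
#   read_mesa_profile is a hypothetical reader standing in for however this
#   module loads a MESA profile into such an array.
starl = read_mesa_profile('profile42.data')
pl.figure()
plot_logRho_logT(starl)
pl.show()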
        pi1 = ((2. * en + 1.) * amu * pi - (en + 1.) * pi0) / en
        pi0 = 0 + pi  # 0+pi because we want a hard copy of the values

    #*** Have summed sufficient terms.
    #    Now compute QSCA, QEXT, QBACK and GSCA.
    #    We have to reverse the order of the elements of the second part of
    #    s1 and s2
    s1 = np.concatenate((s1_1, s1_2[:, -2::-1]), axis=1)
    s2 = np.concatenate((s2_1, s2_2[:, -2::-1]), axis=1)
    gsca = 2. * gsca / qsca
    qsca = (2. / (x * x)) * qsca
    qext = (4. / (x * x)) * np.real(s1[:, 0])
    #-- more common definition of the backscattering efficiency,
    #   so that the backscattering cross section really
    #   has dimension of length squared
    qback = 4 * (abs(s1[:, 2 * nang - 2]) / x)**2
    return (np.array(qext, dtype=float), np.array(qsca, dtype=float),
            np.array(qback, dtype=float), np.array(gsca, dtype=float))


if __name__ == '__main__':
    from ivs.io.ascii import read2array
    lnk = read2array("/home/kristofs/python/IVSdata/optical_constants/alumino-silicates/A/ca2al2sio7_02.91_0000_001.lnk")
    wavelength = lnk[:, 0]
    refrel = lnk[:, 1] + lnk[:, 2] * complex(0, 1)
    radius = .01
    nang = 5  # number of scattering angles; must be an integer for indexing
    qext1, qsca1, qback1, gsca1 = bhmie(wavelength, refrel, nang, radius)
    qext2, qsca2, qback2, gsca2 = bhmie_slow(wavelength, refrel, nang, radius)
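    #-- Sanity checks (illustrative, not part of the module): extinction must
    #   be at least scattering (the difference is absorption), and the fast
    #   and slow implementations should agree to numerical precision.
    assert np.all(qext1 >= qsca1 - 1e-12)
    assert np.allclose(qext1, qext2) and np.allclose(qsca1, qsca2)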
        doctest.testmod()
        pl.show()
        sys.exit()
    #-- if arguments are given, we assume the user wants to run one of the
    #   functions with arguments given in the command line
    #   EXAMPLES:
    #   $:> python freqanalyse.py find_frequency infile=test.dat full_output=True
    #   $:> python freqanalyse.py time_frequency infile=test.dat full_output=True
    else:
        method, args, kwargs = argkwargparser.parse()
        print "Running method %s with arguments %s and keyword arguments %s" % \
              (method, args, kwargs)
        if '--help' in args or 'help' in args or 'help' in kwargs:
            sys.exit()
        full_output = kwargs.get('full_output', False)
        times, signal = ascii.read2array(kwargs.pop('infile')).T[:2]
        out = globals()[method](times, signal, **kwargs)
        #-- when find_frequency is called
        if method == 'find_frequency' and full_output:
            print pl.mlab.rec2txt(out[0], precision=8)
            pl.figure()
            pl.subplot(211)
            pl.plot(out[1][0], out[1][1], 'k-')
            pl.subplot(212)
            pl.plot(times, signal, 'ko', ms=2)
            pl.plot(times, out[2], 'r-', lw=2)
            pl.show()
        elif method == 'find_frequency':
            print pl.mlab.rec2txt(out)
if reference == 'VEGA':
    #-- calculate Flam based on the Vega spectrum
    hdu = pyfits.open('alpha_lyr_stis_008.fits')
    wave, flux = hdu[1].data['wavelength'], hdu[1].data['flux']
    hdu.close()
else:
    #-- calculate Flam for the AB system
    wave = np.arange(3000, 9000, 0.5)
    flux = cv.convert(cc.cc_units, 'AA/s', cc.cc) / wave**2 * 3631e-23

Flam_0 = model.synthetic_flux(wave, flux, photbands=photbands)

#-- load the calibrators
calibrators = ascii.read2array(basedir + 'calspec.ident',
                               splitchar=',', dtype=str)


def get_synthetic_photometry(calibrator):
    """
    Integrate the spectrum belonging to this calibrator and return the
    synthetic magnitudes.
    """
    hdu = pyfits.open(basedir + calibrator[1])
    wave, flux = hdu[1].data['wavelength'], hdu[1].data['flux']
    hdu.close()
    #-- integrate the flux over the 5 pass bands
    flam = model.synthetic_flux(wave, flux, photbands=photbands)
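    #-- Illustrative continuation (not from the source): the conventional
    #   final step converts the integrated fluxes to magnitudes against the
    #   zero-point fluxes Flam_0 computed above.
    mags = -2.5 * np.log10(flam / Flam_0)
    return mags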