def get_sismo_data(ID):
    """
    Retrieve CoRoT timeseries from a local data repository.

    The output record array has fields 'HJD', 'flux', 'e_flux', 'flag'.

    @param ID: ID of the target: either an integer (CoRoT ID), a
    SIMBAD-recognised target name, or a valid CoRoT FITS file
    @type ID: int or str
    @return: data, header
    @rtype: numpy recarray, dict
    """
    #-- data on one target can be spread over multiple files: collect the data
    data = []
    if isinstance(ID, str) and os.path.isfile(ID):
        header = pf.getheader(ID)
        times, flux, error, flags = fits.read_corot(ID)
        data.append([times, flux, error, flags])
    else:
        #-- resolve the target's name: it's either a target name or CoRoT ID.
        try:
            ID = int(ID)
        except ValueError:
            info = sesame.search(ID, db='S')
            IDs = [alias for alias in info['alias'] if 'HD' in alias]
            if len(IDs) != 1:
                logger.error("Data retrieval for %s not possible. Reason: no HD number resolved" % (ID))
                return
            ID = IDs[0]
        #-- collect the files containing data on the target
        catfiles = config.glob((os.sep).join(['catalogs', 'corot', 'sismo']), '*.fits')
        for catfile in catfiles:
            try:
                header = pf.getheader(catfile)
            except IOError:
                continue
            if header['starname'] == ID or header['corotid'].replace(' ', '') == '%s' % (ID):
                times, flux, error, flags = fits.read_corot(catfile)
                data.append([times, flux, error, flags])
    #-- now make a record array and sort according to times
    if not data:
        raise ValueError('target {0} not in offline CoRoT data repository'.format(ID))
    data = np.hstack(data)
    data = np.rec.fromarrays(data, dtype=[('HJD', '>f8'), ('flux', '>f8'),
                                          ('e_flux', '>f8'), ('flag', 'i')])
    sa = np.argsort(data['HJD'])
    return data[sa], header
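#-- Usage sketch (illustrative only): assumes the module-level dependencies
#   used above (os, np, pf, fits, sesame, config, logger) are importable and
#   that a local CoRoT seismo repository is configured. The target name is a
#   hypothetical example; any SIMBAD-resolvable name or CoRoT ID should work.
#
#   >>> data, header = get_sismo_data('HD 49933')
#   >>> good = data[data['flag'] == 0]            # assuming flag 0 marks valid points
#   >>> hjd, flux, e_flux = good['HJD'], good['flux'], good['e_flux']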
def VALD(elem=None, xmin=3200., xmax=4800., outputdir=None):
    """
    Request linelists from VALD for each ion separately within a specific
    wavelength range.

    elem = a list of ions, e.g. ['CI','OII'];
    xmin and xmax: wavelength range in which the spectral lines are searched;
    outputdir = output directory chosen by the user.

    If no elements are given, line lists for all available ions are returned.

    @param elem: list of ions
    @type elem: list of str

    Example usage:

    >>> x = VALD(elem=['CI','OII'], xmin=3000., xmax=4000.)
    CI
    OII
    """
    if elem is None:
        files = sorted(config.glob('VALD_individual', 'VALD_*.lijnen'))
        elem = [os.path.splitext(os.path.basename(ff))[0].split('_')[1]
                for ff in files]

    all_lines = []
    for i in range(len(elem)):
        print(elem[i])
        filename = config.get_datafile('VALD_individual',
                                       'VALD_' + elem[i] + '.lijnen')
        if not os.path.isfile(filename):
            logger.info('No data for element ' + str(elem[i]))
            return None

        newwav, newexc, newep, newgf = np.loadtxt(filename).T
        lines = np.rec.fromarrays([newwav, newexc, newep, newgf],
                                  names=['wavelength', 'ion', 'ep', 'gf'])
        keep = (xmin <= lines['wavelength']) & (lines['wavelength'] <= xmax)
        if not hasattr(keep, '__iter__'):
            continue
        lines = lines[keep]

        if len(lines) and outputdir is not None:
            ascii.write_array(lines,
                              outputdir + 'VALD_' + str(elem[i]) + '_'
                              + str(xmin) + '_' + str(xmax) + '.dat',
                              auto_width=True, header=True,
                              formats=['%.3f', '%.1f', '%.3f', '%.3f'])
        elif len(lines):
            all_lines.append(lines)
        else:
            logger.debug('No lines of ' + str(elem[i])
                         + ' in given wavelength range')
    return np.hstack(all_lines)
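#-- Usage sketch (illustrative only): retrieve all C I lines between 4000 and
#   4500 Angstrom and look at the ten entries with the largest 'gf' value.
#   Assumes the 'VALD_individual' data directory is reachable through
#   `config`; the chosen ion and wavelength range are examples.
#
#   >>> lines = VALD(elem=['CI'], xmin=4000., xmax=4500.)
#   >>> strongest = lines[np.argsort(lines['gf'])[::-1][:10]]
#   >>> strongest['wavelength']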
def get_file(integrated=False, **kwargs):
    """
    Retrieve the filename containing the specified SED grid.

    The keyword arguments are specific to the kind of grid you're using.
    Basic keywords are 'grid' for the name of the grid, and 'z' for
    metallicity. For other keywords, see the source code.

    Available grids and example keywords:
        - grid='kurucz93':
            * metallicity (z): m01 is -0.1 log metal abundance relative to
              solar (solar abundances from Anders and Grevesse 1989)
            * metallicity (z): p01 is +0.1 log metal abundance relative to
              solar (solar abundances from Anders and Grevesse 1989)
            * alpha enhancement (alpha): True means alpha enhanced (+0.4)
            * turbulent velocity (vturb): vturb in km/s
            * nover=True means no overshoot
            * odfnew=True means no overshoot but with better opacities and
              abundances

    @param integrated: choose integrated version of the grid
    @type integrated: boolean
    @keyword grid: gridname (default Kurucz)
    @type grid: str
    @return: gridfile
    @rtype: str
    """
    #-- possibly you give a filename
    grid = kwargs.get('grid', defaults['grid'])
    if os.path.isfile(grid):
        return grid

    #-- general
    z = kwargs.get('z', defaults['z'])
    #-- only for Kurucz
    vturb = int(kwargs.get('vturb', defaults['vturb']))
    odfnew = kwargs.get('odfnew', defaults['odfnew'])
    alpha = kwargs.get('alpha', defaults['alpha'])
    nover = kwargs.get('nover', defaults['nover'])

    #-- figure out what grid to use
    if grid == 'kurucz':
        if not isinstance(z, str):
            z = '%.1f' % (z)
        if not isinstance(vturb, str):
            vturb = '%d' % (vturb)
        if not alpha and not nover and not odfnew:
            basename = 'kurucz93_z%s_k%s_ld.fits' % (z, vturb)
        elif alpha and odfnew:
            basename = 'kurucz93_z%s_ak%sodfnew_ld.fits' % (z, vturb)
        elif odfnew:
            basename = 'kurucz93_z%s_k%sodfnew_ld.fits' % (z, vturb)
        elif nover:
            basename = 'kurucz93_z%s_k%snover_ld.fits' % (z, vturb)
    else:
        basename = grid

    #-- retrieve the absolute path of the file and check if it exists:
    if '*' not in basename:
        if integrated:
            grid = config.get_datafile(basedir, 'i' + basename)
        else:
            grid = config.get_datafile(basedir, basename)
    #-- we could also ask for a list of files, when wildcards are given:
    else:
        if integrated:
            grid = config.glob(basedir, 'i' + basename)
        else:
            grid = config.glob(basedir, basename)
    logger.debug('Selected %s' % (grid))
    return grid
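#-- Usage sketch (illustrative only): resolve the filename of the integrated
#   Kurucz grid for solar metallicity, vturb=2 km/s and the odfnew opacities.
#   The keyword values are examples; which combinations actually resolve to a
#   file depends on the grids installed under `basedir`.
#
#   >>> gridfile = get_file(integrated=True, grid='kurucz', z=0.0, vturb=2,
#   ...                     odfnew=True)
#   >>> print(gridfile)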