def get_photometry(ID=None,extra_fields=None,**kwargs):
    """
    Download all available photometry from a star to a record array.

    Queries every catalog section known to C{cat_info} (with a special case
    for GALEX) and merges the results via L{mast2phot}. If C{to_units} is
    given, measurements are converted to that common unit and the columns
    'cwave', 'cmeas', 'e_cmeas' and 'cunit' are appended.

    For extra kwargs, see L{_get_URI} and L{mast2phot}

    @param ID: target identifier
    @param extra_fields: extra catalog columns to propagate
      (defaults to ['_r','_RAJ2000','_DEJ2000'])
    @keyword to_units: common flux unit (default 'erg/s/cm2/AA'); set to a
      false value to skip conversion
    @keyword master: existing record array to append the new results to
    @return: record array with the photometry, or None if nothing was found
    """
    #-- fix: avoid a mutable default argument; the old default is preserved
    if extra_fields is None:
        extra_fields = ['_r','_RAJ2000','_DEJ2000']
    to_units = kwargs.pop('to_units','erg/s/cm2/AA')
    master_ = kwargs.get('master',None)
    master = None
    #-- retrieve all measurements
    for source in cat_info.sections():
        if source=='galex':
            results,units,comms = galex(ID=ID,**kwargs)
        else:
            results,units,comms = search(source,ID=ID,**kwargs)
        if results is not None:
            master = mast2phot(source,results,units,master,extra_fields=extra_fields)
    #-- convert the measurement to a common unit.
    if to_units and master is not None:
        #-- prepare columns to extend to basic master
        dtypes = [('cwave','f8'),('cmeas','f8'),('e_cmeas','f8'),('cunit','a50')]
        cols = [[],[],[],[]]
        #-- forget about 'nan' errors for the moment
        no_errors = np.isnan(master['e_meas'])
        master['e_meas'][no_errors] = 0.
        #-- extend basic master; get_info also serves to validate the photbands
        zp = filters.get_info(master['photband'])
        for i in range(len(master)):
            try:
                value,e_value = conversions.convert(master['unit'][i],to_units,
                                master['meas'][i],master['e_meas'][i],
                                photband=master['photband'][i])
            except ValueError: # calibrations not available
                value,e_value = np.nan,np.nan
            except AssertionError: # positive flux and errors!
                value,e_value = np.nan,np.nan
            try:
                eff_wave = filters.eff_wave(master['photband'][i])
            except IOError:
                eff_wave = np.nan
            cols[0].append(eff_wave)
            cols[1].append(value)
            cols[2].append(e_value)
            cols[3].append(to_units)
        master = numpy_ext.recarr_addcols(master,cols,dtypes)
        #-- reset errors
        master['e_meas'][no_errors] = np.nan
        master['e_cmeas'][no_errors] = np.nan
    #-- if a master was passed in, append the new rows to it
    if master_ is not None and master is not None:
        master = numpy_ext.recarr_addrows(master_,master.tolist())
    elif master_ is not None:
        master = master_
    #-- and return the results
    return master
def dobb(x, T, **kwargs):
    """
    Evaluate C{fctn} (a flux law from the enclosing scope) with unit handling.

    The wavelength/frequency array and the temperature are converted to the
    current convention, the function is evaluated, the result is optionally
    disc-integrated and scaled with an angular diameter, and finally converted
    to the requested flux units.

    @param x: wavelength/frequency array (in C{wave_units})
    @param T: temperature; float (assumed K) or tuple (value, unit)
    @keyword wave_units: units of C{x} (default 'AA')
    @keyword flux_units: units of the returned flux (default 'erg/s/cm2/AA')
    @keyword disc_integrated: integrate over the stellar disc (default True)
    @keyword ang_diam: angular diameter as tuple (value, unit), or None
    @return: flux in C{flux_units}
    """
    wave_units = kwargs.get('wave_units', 'AA')
    flux_units = kwargs.get('flux_units', 'erg/s/cm2/AA')
    #-- prepare input
    #-- what kind of units did we receive?
    curr_conv = constants._current_convention
    # X: wavelength/frequency
    x_unit_type = conversions.get_type(wave_units)
    x = conversions.convert(wave_units, curr_conv, x)
    # T: temperature
    if isinstance(T, tuple):
        T = conversions.convert(T[1], 'K', T[0])
    # Y: flux
    y_unit_type = conversions.change_convention('SI', flux_units)
    #-- if you give Jy vs micron, we need to first convert wavelength to frequency
    if y_unit_type == 'kg1 rad-1 s-2' and x_unit_type == 'length':
        x = conversions.convert(
            conversions._conventions[curr_conv]['length'], 'rad/s', x)
        x_unit_type = 'frequency'
    elif y_unit_type == 'kg1 m-1 s-3' and x_unit_type == 'frequency':
        x = conversions.convert(
            'rad/s', conversions._conventions[curr_conv]['length'], x)
        x_unit_type = 'length'
    #-- correct for rad
    if x_unit_type == 'frequency':
        x /= (2 * np.pi)
    #-- fix: removed leftover Python-2 debug statement "print y_unit_type"
    #-- run function
    I = fctn((x, x_unit_type), T)
    #-- prepare output
    disc_integrated = kwargs.get('disc_integrated', True)
    ang_diam = kwargs.get('ang_diam', None)
    if disc_integrated:
        I *= np.sqrt(2 * np.pi)
        if ang_diam is not None:
            scale = conversions.convert(ang_diam[1], 'sr', ang_diam[0] / 2.)
            I *= scale
    I = conversions.convert(curr_conv, flux_units, I)
    return I
def dobb(x,T,**kwargs):
    """
    Evaluate C{fctn} (a flux law from the enclosing scope) with unit handling.

    The wavelength/frequency array and the temperature are converted to the
    current convention, the function is evaluated, the result is optionally
    disc-integrated and scaled with an angular diameter, and finally converted
    to the requested flux units.

    @param x: wavelength/frequency array (in C{wave_units})
    @param T: temperature; float (assumed K) or tuple (value, unit)
    @keyword wave_units: units of C{x} (default 'AA')
    @keyword flux_units: units of the returned flux (default 'erg/s/cm2/AA')
    @keyword disc_integrated: integrate over the stellar disc (default True)
    @keyword ang_diam: angular diameter as tuple (value, unit), or None
    @return: flux in C{flux_units}
    """
    wave_units = kwargs.get('wave_units','AA')
    flux_units = kwargs.get('flux_units','erg/s/cm2/AA')
    #-- prepare input
    #-- what kind of units did we receive?
    curr_conv = constants._current_convention
    # X: wavelength/frequency
    x_unit_type = conversions.get_type(wave_units)
    x = conversions.convert(wave_units,curr_conv,x)
    # T: temperature
    if isinstance(T,tuple):
        T = conversions.convert(T[1],'K',T[0])
    # Y: flux
    y_unit_type = conversions.change_convention('SI',flux_units)
    #-- if you give Jy vs micron, we need to first convert wavelength to frequency
    if y_unit_type=='kg1 rad-1 s-2' and x_unit_type=='length':
        x = conversions.convert(conversions._conventions[curr_conv]['length'],'rad/s',x)
        x_unit_type = 'frequency'
    elif y_unit_type=='kg1 m-1 s-3' and x_unit_type=='frequency':
        x = conversions.convert('rad/s',conversions._conventions[curr_conv]['length'],x)
        x_unit_type = 'length'
    #-- correct for rad
    if x_unit_type=='frequency':
        x /= (2*np.pi)
    #-- fix: removed leftover Python-2 debug statement "print y_unit_type"
    #-- run function
    I = fctn((x,x_unit_type),T)
    #-- prepare output
    disc_integrated = kwargs.get('disc_integrated',True)
    ang_diam = kwargs.get('ang_diam',None)
    if disc_integrated:
        I *= np.sqrt(2*np.pi)
        if ang_diam is not None:
            scale = conversions.convert(ang_diam[1],'sr',ang_diam[0]/2.)
            I *= scale
    I = conversions.convert(curr_conv,flux_units,I)
    return I
def get_photometry(ID=None, extra_fields=None, **kwargs):
    """
    Download all available photometry from a star to a record array.

    For extra kwargs, see L{_get_URI} and L{gator2phot}

    Example usage:

    >>> import pylab
    >>> import vizier
    >>> name = 'kr cam'
    >>> master = vizier.get_photometry(name,to_units='erg/s/cm2/AA',extra_fields=[])
    >>> master = get_photometry(name,to_units='erg/s/cm2/AA',extra_fields=[],master=master)
    >>> p = pylab.figure()
    >>> wise = np.array(['WISE' in photband and True or False for photband in master['photband']])
    >>> p = pylab.errorbar(master['cwave'],master['cmeas'],yerr=master['e_cmeas'],fmt='ko')
    >>> p = pylab.errorbar(master['cwave'][wise],master['cmeas'][wise],yerr=master['e_cmeas'][wise],fmt='ro',ms=8)
    >>> p = pylab.gca().set_xscale('log')
    >>> p = pylab.gca().set_yscale('log')
    >>> p = pylab.show()

    Other examples:

    >>> master = get_photometry(ra=71.239527,dec=-70.589427,to_units='erg/s/cm2/AA',extra_fields=[],radius=1.)
    >>> master = get_photometry(ID='J044458.39-703522.6',to_units='W/m2',extra_fields=[],radius=1.)

    @param ID: target identifier
    @param extra_fields: extra catalog columns to propagate
      (defaults to ['dist','ra','dec'])
    @keyword to_units: common flux unit (default 'erg/s/cm2/AA'); set to a
      false value to skip conversion
    @keyword master: existing record array to append the new results to
    @return: record array with the photometry, or None if nothing was found
    """
    #-- fix: avoid a mutable default argument; the old default is preserved
    if extra_fields is None:
        extra_fields = ['dist', 'ra', 'dec']
    kwargs['ID'] = ID
    to_units = kwargs.pop('to_units', 'erg/s/cm2/AA')
    master_ = kwargs.get('master', None)
    master = None
    #-- retrieve all measurements
    for source in cat_info.sections():
        results, units, comms = search(source, **kwargs)
        if results is not None:
            master = gator2phot(source, results, units, master,
                                extra_fields=extra_fields)
    #-- convert the measurement to a common unit.
    if to_units and master is not None:
        #-- prepare columns to extend to basic master
        dtypes = [('cwave', 'f8'), ('cmeas', 'f8'),
                  ('e_cmeas', 'f8'), ('cunit', 'a50')]
        cols = [[], [], [], []]
        #-- forget about 'nan' errors for the moment
        no_errors = np.isnan(master['e_meas'])
        master['e_meas'][no_errors] = 0.
        #-- extend basic master; get_info also serves to validate the photbands
        zp = filters.get_info(master['photband'])
        for i in range(len(master)):
            try:
                value, e_value = conversions.convert(
                    master['unit'][i], to_units,
                    master['meas'][i], master['e_meas'][i],
                    photband=master['photband'][i])
            except ValueError:  # calibrations not available
                value, e_value = np.nan, np.nan
            except AssertionError:  # the error or flux must be positive number
                value, e_value = np.nan, np.nan
            try:
                eff_wave = filters.eff_wave(master['photband'][i])
            except IOError:
                eff_wave = np.nan
            cols[0].append(eff_wave)
            cols[1].append(value)
            cols[2].append(e_value)
            cols[3].append(to_units)
        master = numpy_ext.recarr_addcols(master, cols, dtypes)
        #-- reset errors
        master['e_meas'][no_errors] = np.nan
        master['e_cmeas'][no_errors] = np.nan
    #-- if a master was passed in, append the new rows to it
    if master_ is not None and master is not None:
        master = numpy_ext.recarr_addrows(master_, master.tolist())
    elif master_ is not None:
        master = master_
    #-- and return the results
    return master
def combine(list_of_spectra,R=200.,lambda0=(950.,'AA'),lambdan=(3350.,'AA')):
    """
    Combine and weight-average spectra on a common wavelength grid.

    C{list_of_spectra} should be a list of lists/arrays. Each element in the
    main list should be (wavelength,flux,error).

    If you have FUSE fits files, use L{cc.ivs.fits.read_fuse}.
    If you have IUE FITS files, use L{cc.ivs.fits.read_iue}.

    After Peter Woitke.

    @param R: resolution
    @type R: float
    @param lambda0: start wavelength, unit
    @type lambda0: tuple (float,str)
    @param lambdan: end wavelength, unit
    @type lambdan: tuple (float,str)
    @return: binned spectrum (wavelengths, flux, error, #spectra per bin)
    @rtype: array, array, array, array
    """
    l0 = conversions.convert(lambda0[1],'AA',lambda0[0])
    ln = conversions.convert(lambdan[1],'AA',lambdan[0])
    #-- STEP 1: define wavelength bins
    Delta = np.log10(1.+1./R)
    x = np.arange(np.log10(l0),np.log10(ln)+Delta,Delta)
    x = 10**x
    lamc_j = 0.5*(np.roll(x,1)+x)
    #-- STEP 2: rebinning of data onto newly defined wavelength bins
    Ns = len(list_of_spectra)
    Nw = len(lamc_j)-1
    binned_fluxes = np.zeros((Ns,Nw))
    binned_errors = np.inf*np.ones((Ns,Nw))
    for snr,(wave,flux,err) in enumerate(list_of_spectra):
        wave0 = np.roll(wave,1)
        wave1 = np.roll(wave,-1)
        lam_i0_dc = 0.5*(wave0+wave)
        lam_i1_dc = 0.5*(wave1+wave)
        for j in range(Nw):
            A = np.min(np.vstack([lamc_j[j+1]*np.ones(len(wave)),lam_i1_dc]),axis=0)
            B = np.max(np.vstack([lamc_j[j]*np.ones(len(wave)),lam_i0_dc]),axis=0)
            #-- fix: scipy.stats.threshold was removed from SciPy (0.17+);
            #   clipping negative overlaps to zero is equivalent
            overlaps = np.clip(A-B,0,None)
            norm = np.sum(overlaps)
            binned_fluxes[snr,j] = np.sum(flux*overlaps)/norm
            binned_errors[snr,j] = np.sqrt(np.sum((err*overlaps)**2))/norm
    #-- STEP 3: all available spectra sets are co-added, using the inverse
    #   square of the bin uncertainty as weight
    binned_fluxes[np.isnan(binned_fluxes)] = 0
    binned_errors[np.isnan(binned_errors)] = 1e300
    weights = 1./binned_errors**2
    totalflux = np.sum(weights*binned_fluxes,axis=0)/np.sum(weights,axis=0)
    totalerr = np.sqrt(np.sum((weights*binned_errors)**2,axis=0))/np.sum(weights,axis=0)
    totalspec = np.sum(binned_fluxes>0,axis=0)
    #-- that's it!
    return x[:-1],totalflux,totalerr,totalspec
def get_photometry(ID=None, extra_fields=None, **kwargs):
    """
    Download all available photometry from a star to a record array.

    Queries every catalog section known to C{cat_info} (with a special case
    for GALEX) and merges the results via L{mast2phot}. If C{to_units} is
    given, measurements are converted to that common unit and the columns
    'cwave', 'cmeas', 'e_cmeas' and 'cunit' are appended.

    For extra kwargs, see L{_get_URI} and L{mast2phot}

    @param ID: target identifier
    @param extra_fields: extra catalog columns to propagate
      (defaults to ['_r','_RAJ2000','_DEJ2000'])
    @keyword to_units: common flux unit (default 'erg/s/cm2/AA'); set to a
      false value to skip conversion
    @keyword master: existing record array to append the new results to
    @return: record array with the photometry, or None if nothing was found
    """
    #-- fix: avoid a mutable default argument; the old default is preserved
    if extra_fields is None:
        extra_fields = ['_r', '_RAJ2000', '_DEJ2000']
    to_units = kwargs.pop('to_units', 'erg/s/cm2/AA')
    master_ = kwargs.get('master', None)
    master = None
    #-- retrieve all measurements
    for source in cat_info.sections():
        if source == 'galex':
            results, units, comms = galex(ID=ID, **kwargs)
        else:
            results, units, comms = search(source, ID=ID, **kwargs)
        if results is not None:
            master = mast2phot(source, results, units, master,
                               extra_fields=extra_fields)
    #-- convert the measurement to a common unit.
    if to_units and master is not None:
        #-- prepare columns to extend to basic master
        dtypes = [('cwave', 'f8'), ('cmeas', 'f8'),
                  ('e_cmeas', 'f8'), ('cunit', 'a50')]
        cols = [[], [], [], []]
        #-- forget about 'nan' errors for the moment
        no_errors = np.isnan(master['e_meas'])
        master['e_meas'][no_errors] = 0.
        #-- extend basic master; get_info also serves to validate the photbands
        zp = filters.get_info(master['photband'])
        for i in range(len(master)):
            try:
                value, e_value = conversions.convert(
                    master['unit'][i], to_units,
                    master['meas'][i], master['e_meas'][i],
                    photband=master['photband'][i])
            except ValueError:  # calibrations not available
                value, e_value = np.nan, np.nan
            except AssertionError:  # positive flux and errors!
                value, e_value = np.nan, np.nan
            try:
                eff_wave = filters.eff_wave(master['photband'][i])
            except IOError:
                eff_wave = np.nan
            cols[0].append(eff_wave)
            cols[1].append(value)
            cols[2].append(e_value)
            cols[3].append(to_units)
        master = numpy_ext.recarr_addcols(master, cols, dtypes)
        #-- reset errors
        master['e_meas'][no_errors] = np.nan
        master['e_cmeas'][no_errors] = np.nan
    #-- if a master was passed in, append the new rows to it
    if master_ is not None and master is not None:
        master = numpy_ext.recarr_addrows(master_, master.tolist())
    elif master_ is not None:
        master = master_
    #-- and return the results
    return master
def get_photometry(ID=None,extra_fields=None,**kwargs):
    """
    Download all available photometry from a star to a record array.

    Extra fields will not be useful probably.

    For extra kwargs, see L{_get_URI} and L{gcpd2phot}

    @param ID: target identifier
    @param extra_fields: extra catalog columns to propagate (defaults to [])
    @keyword to_units: common flux unit (default 'erg/s/cm2/AA'); set to a
      false value to skip conversion
    @keyword master: existing record array to append the new results to
    @return: record array with the photometry, or None if nothing was found
    """
    #-- fix: avoid a mutable default argument; the old default is preserved
    if extra_fields is None:
        extra_fields = []
    to_units = kwargs.pop('to_units','erg/s/cm2/AA')
    master_ = kwargs.get('master',None)
    master = None
    #-- retrieve all measurements
    for source in cat_info.sections():
        results,units,comms = search(source,ID=ID,**kwargs)
        if results is not None:
            master = gcpd2phot(source,results,units,master,extra_fields=extra_fields)
    #-- convert the measurement to a common unit.
    if to_units and master is not None:
        #-- prepare columns to extend to basic master
        dtypes = [('cwave','f8'),('cmeas','f8'),('e_cmeas','f8'),('cunit','a50')]
        cols = [[],[],[],[]]
        #-- forget about 'nan' errors for the moment
        no_errors = np.isnan(master['e_meas'])
        master['e_meas'][no_errors] = 0.
        #-- extend basic master; get_info also serves to validate the photbands
        try:
            zp = filters.get_info(master['photband'])
        except Exception:
            #-- fix: narrowed bare except; show the offending photbands and
            #   re-raise (print() works on both Python 2 and 3)
            print(master['photband'])
            raise
        for i in range(len(master)):
            to_units_ = to_units+''
            try:
                value,e_value = conversions.convert(master['unit'][i],to_units,
                                master['meas'][i],master['e_meas'][i],
                                photband=master['photband'][i])
            except ValueError: # calibrations not available
                # if it is a magnitude color, try converting it to a flux ratio
                if 'mag' in master['unit'][i]:
                    try:
                        value,e_value = conversions.convert('mag_color','flux_ratio',
                                        master['meas'][i],master['e_meas'][i],
                                        photband=master['photband'][i])
                        to_units_ = 'flux_ratio'
                    except ValueError:
                        value,e_value = np.nan,np.nan
                # else, we are powerless...
                else:
                    value,e_value = np.nan,np.nan
            try:
                eff_wave = filters.eff_wave(master['photband'][i])
            except IOError:
                eff_wave = np.nan
            cols[0].append(eff_wave)
            cols[1].append(value)
            cols[2].append(e_value)
            cols[3].append(to_units_)
        master = numpy_ext.recarr_addcols(master,cols,dtypes)
        #-- reset errors
        master['e_meas'][no_errors] = np.nan
        master['e_cmeas'][no_errors] = np.nan
    #-- if a master is given as a keyword, and data is found in this module,
    #   append the two
    if master_ is not None and master is not None:
        master = numpy_ext.recarr_addrows(master_,master.tolist())
    elif master is None:
        master = master_
    #-- and return the results
    return master
if 'plx' in database and not ('2007' in database['plx']['r']): data,units,comms = vizier.search('I/311/hip2',ID=ID) if data is not None and len(data): if not 'plx' in database: database['plx'] = {} database['plx']['v'] = data['Plx'][0] database['plx']['e'] = data['e_Plx'][0] database['plx']['r'] = 'I/311/hip2' #-- fix the spectral type data,units,comms = vizier.search('B/mk/mktypes',ID=ID) if data is not None and len(data): database['spType'] = data['SpType'][0] if 'jpos' in database: #-- add galactic coordinates (in degrees) ra,dec = database['jpos'].split() gal = conversions.convert('equatorial','galactic',(str(ra),str(dec)),epoch='2000') gal = float(gal[0])/np.pi*180,float(gal[1])/np.pi*180 database['galpos'] = gal #-- fix the proper motions data,units,comms = vizier.search('I/317/sample',ID=ID) if data is not None and len(data): if not 'pm' in database: database['pm'] = {} database['pm']['pmRA'] = data['pmRA'][0] database['pm']['pmDE'] = data['pmDE'][0] database['pm']['epmRA'] = data['e_pmRA'][0] database['pm']['epmDE'] = data['e_pmDE'][0] database['pm']['r'] = 'I/317/sample' return database if __name__=="__main__":
def get_photometry(ID=None,extra_fields=None,**kwargs):
    """
    Download all available photometry from a star to a record array.

    For extra kwargs, see L{_get_URI} and L{gator2phot}

    Example usage:

    >>> import pylab
    >>> import vizier
    >>> name = 'kr cam'
    >>> master = vizier.get_photometry(name,to_units='erg/s/cm2/AA',extra_fields=[])
    >>> master = get_photometry(name,to_units='erg/s/cm2/AA',extra_fields=[],master=master)
    >>> p = pylab.figure()
    >>> wise = np.array(['WISE' in photband and True or False for photband in master['photband']])
    >>> p = pylab.errorbar(master['cwave'],master['cmeas'],yerr=master['e_cmeas'],fmt='ko')
    >>> p = pylab.errorbar(master['cwave'][wise],master['cmeas'][wise],yerr=master['e_cmeas'][wise],fmt='ro',ms=8)
    >>> p = pylab.gca().set_xscale('log')
    >>> p = pylab.gca().set_yscale('log')
    >>> p = pylab.show()

    Other examples:

    >>> master = get_photometry(ra=71.239527,dec=-70.589427,to_units='erg/s/cm2/AA',extra_fields=[],radius=1.)
    >>> master = get_photometry(ID='J044458.39-703522.6',to_units='W/m2',extra_fields=[],radius=1.)

    @param ID: target identifier
    @param extra_fields: extra catalog columns to propagate
      (defaults to ['dist','ra','dec'])
    @keyword to_units: common flux unit (default 'erg/s/cm2/AA'); set to a
      false value to skip conversion
    @keyword master: existing record array to append the new results to
    @return: record array with the photometry, or None if nothing was found
    """
    #-- fix: avoid a mutable default argument; the old default is preserved
    if extra_fields is None:
        extra_fields = ['dist','ra','dec']
    kwargs['ID'] = ID
    to_units = kwargs.pop('to_units','erg/s/cm2/AA')
    master_ = kwargs.get('master',None)
    master = None
    #-- retrieve all measurements
    for source in cat_info.sections():
        results,units,comms = search(source,**kwargs)
        if results is not None:
            master = gator2phot(source,results,units,master,extra_fields=extra_fields)
    #-- convert the measurement to a common unit.
    if to_units and master is not None:
        #-- prepare columns to extend to basic master
        dtypes = [('cwave','f8'),('cmeas','f8'),('e_cmeas','f8'),('cunit','a50')]
        cols = [[],[],[],[]]
        #-- forget about 'nan' errors for the moment
        no_errors = np.isnan(master['e_meas'])
        master['e_meas'][no_errors] = 0.
        #-- extend basic master; get_info also serves to validate the photbands
        zp = filters.get_info(master['photband'])
        for i in range(len(master)):
            try:
                value,e_value = conversions.convert(master['unit'][i],to_units,
                                master['meas'][i],master['e_meas'][i],
                                photband=master['photband'][i])
            except ValueError: # calibrations not available
                value,e_value = np.nan,np.nan
            except AssertionError: # the error or flux must be positive number
                value,e_value = np.nan,np.nan
            try:
                eff_wave = filters.eff_wave(master['photband'][i])
            except IOError:
                eff_wave = np.nan
            cols[0].append(eff_wave)
            cols[1].append(value)
            cols[2].append(e_value)
            cols[3].append(to_units)
        master = numpy_ext.recarr_addcols(master,cols,dtypes)
        #-- reset errors
        master['e_meas'][no_errors] = np.nan
        master['e_cmeas'][no_errors] = np.nan
    #-- if a master was passed in, append the new rows to it
    if master_ is not None and master is not None:
        master = numpy_ext.recarr_addrows(master_,master.tolist())
    elif master_ is not None:
        master = master_
    #-- and return the results
    return master
def read_corot(fits_file, return_header=False, type_data='hel',
               remove_flagged=True):
    """
    Read CoRoT data from a CoRoT FITS file.

    Both SISMO and EXO data are recognised and extracted accordingly.

    type_data is one of:
        - type_data='raw'
        - type_data='hel': heliocentric unequidistant
        - type_data='helreg': heliocentric equidistant

    @param fits_file: CoRoT FITS file name
    @type fits_file: string
    @param return_header: return header information as dictionary
    @type return_header: bool
    @param type_data: type of data to return
    @type type_data: string (one of 'raw','hel' or 'helreg')
    @param remove_flagged: remove flagged datapoints
    @type remove_flagged: True
    @return: CoRoT data (times, flux, error, flags)
    @rtype: array, array, array, array(, header)
    """
    #-- read in the FITS file
    # headers: ['DATE', 'DATEJD', 'DATEHEL', 'STATUS', 'WHITEFLUX',
    #           'WHITEFLUXDEV', 'BG', 'CORREC']
    fits_file_ = pyfits.open(fits_file)
    #-- fix: grab the primary header up front, so that EXO files can also
    #   return it (previously only the SISMO branch assigned 'header',
    #   giving a NameError for EXO files with return_header=True)
    header = fits_file_[0].header
    if fits_file_[0].header['hlfccdid'][0]=='A':
        #-- SISMO data
        times,flux,error,flags = fits_file_[type_data].data.field(0),\
                                 fits_file_[type_data].data.field(1),\
                                 fits_file_[type_data].data.field(2),\
                                 fits_file_[type_data].data.field(3)
        fits_file_.close()
        logger.debug('Read CoRoT SISMO file %s'%(fits_file))
    elif fits_file_[0].header['hlfccdid'][0]=='E':
        #-- EXO data
        times = fits_file_['bintable'].data.field('datehel')
        if 'blueflux' in fits_file_['bintable'].columns.names:
            blueflux,e_blueflux = fits_file_['bintable'].data.field('blueflux'),\
                                  fits_file_['bintable'].data.field('bluefluxdev')
            greenflux,e_greenflux = fits_file_['bintable'].data.field('greenflux'),\
                                    fits_file_['bintable'].data.field('greenfluxdev')
            redflux,e_redflux = fits_file_['bintable'].data.field('redflux'),\
                                fits_file_['bintable'].data.field('redfluxdev')
            #-- chromatic light curves
            if type_data=='colors':
                flux = np.column_stack([blueflux,greenflux,redflux])
                error = np.column_stack([e_blueflux,e_greenflux,e_redflux]).min(axis=1)
            #-- white light curves
            else:
                flux = blueflux + greenflux + redflux
                error = np.sqrt(e_blueflux**2 + e_greenflux**2 + e_redflux**2)
        else:
            flux,error = fits_file_['bintable'].data.field('whiteflux'),\
                         fits_file_['bintable'].data.field('whitefluxdev')
        flags = fits_file_['bintable'].data.field('status')
        #-- fix: the EXO branch never closed the file (resource leak)
        fits_file_.close()
    # remove flagged datapoints if asked
    if remove_flagged:
        keep1 = (flags==0)
        keep2 = (error!=-1)
        logger.info('Remove: flagged (%d) no valid error (%d) datapoints (%d)'%(len(keep1)-keep1.sum(),len(keep2)-keep2.sum(),len(keep1)))
        keep = keep1 & keep2
        times,flux,error,flags = times[keep], flux[keep], error[keep], flags[keep]
    # convert times to heliocentric JD
    times = conversions.convert('MJD','JD',times,jtype='corot')
    if return_header:
        return times, flux, error, flags, header
    else:
        return times, flux, error, flags
def combine(list_of_spectra, R=200., lambda0=(950., 'AA'),
            lambdan=(3350., 'AA')):
    """
    Combine and weight-average spectra on a common wavelength grid.

    C{list_of_spectra} should be a list of lists/arrays. Each element in the
    main list should be (wavelength,flux,error).

    If you have FUSE fits files, use L{cc.ivs.fits.read_fuse}.
    If you have IUE FITS files, use L{cc.ivs.fits.read_iue}.

    After Peter Woitke.

    @param R: resolution
    @type R: float
    @param lambda0: start wavelength, unit
    @type lambda0: tuple (float,str)
    @param lambdan: end wavelength, unit
    @type lambdan: tuple (float,str)
    @return: binned spectrum (wavelengths, flux, error, #spectra per bin)
    @rtype: array, array, array, array
    """
    l0 = conversions.convert(lambda0[1], 'AA', lambda0[0])
    ln = conversions.convert(lambdan[1], 'AA', lambdan[0])
    #-- STEP 1: define wavelength bins
    Delta = np.log10(1. + 1. / R)
    x = np.arange(np.log10(l0), np.log10(ln) + Delta, Delta)
    x = 10**x
    lamc_j = 0.5 * (np.roll(x, 1) + x)
    #-- STEP 2: rebinning of data onto newly defined wavelength bins
    Ns = len(list_of_spectra)
    Nw = len(lamc_j) - 1
    binned_fluxes = np.zeros((Ns, Nw))
    binned_errors = np.inf * np.ones((Ns, Nw))
    for snr, (wave, flux, err) in enumerate(list_of_spectra):
        wave0 = np.roll(wave, 1)
        wave1 = np.roll(wave, -1)
        lam_i0_dc = 0.5 * (wave0 + wave)
        lam_i1_dc = 0.5 * (wave1 + wave)
        for j in range(Nw):
            A = np.min(np.vstack(
                [lamc_j[j + 1] * np.ones(len(wave)), lam_i1_dc]), axis=0)
            B = np.max(np.vstack([lamc_j[j] * np.ones(len(wave)), lam_i0_dc]),
                       axis=0)
            #-- fix: scipy.stats.threshold was removed from SciPy (0.17+);
            #   clipping negative overlaps to zero is equivalent
            overlaps = np.clip(A - B, 0, None)
            norm = np.sum(overlaps)
            binned_fluxes[snr, j] = np.sum(flux * overlaps) / norm
            binned_errors[snr, j] = np.sqrt(np.sum((err * overlaps)**2)) / norm
    #-- STEP 3: all available spectra sets are co-added, using the inverse
    #   square of the bin uncertainty as weight
    binned_fluxes[np.isnan(binned_fluxes)] = 0
    binned_errors[np.isnan(binned_errors)] = 1e300
    weights = 1. / binned_errors**2
    totalflux = np.sum(weights * binned_fluxes, axis=0) / np.sum(weights, axis=0)
    totalerr = np.sqrt(np.sum(
        (weights * binned_errors)**2, axis=0)) / np.sum(weights, axis=0)
    totalspec = np.sum(binned_fluxes > 0, axis=0)
    #-- that's it!
    return x[:-1], totalflux, totalerr, totalspec
data, units, comms = vizier.search('I/311/hip2', ID=ID) if data is not None and len(data): if not 'plx' in database: database['plx'] = {} database['plx']['v'] = data['Plx'][0] database['plx']['e'] = data['e_Plx'][0] database['plx']['r'] = 'I/311/hip2' #-- fix the spectral type data, units, comms = vizier.search('B/mk/mktypes', ID=ID) if data is not None and len(data): database['spType'] = data['SpType'][0] if 'jpos' in database: #-- add galactic coordinates (in degrees) ra, dec = database['jpos'].split() gal = conversions.convert('equatorial', 'galactic', (str(ra), str(dec)), epoch='2000') gal = float(gal[0]) / np.pi * 180, float(gal[1]) / np.pi * 180 database['galpos'] = gal #-- fix the proper motions data, units, comms = vizier.search('I/317/sample', ID=ID) if data is not None and len(data): if not 'pm' in database: database['pm'] = {} database['pm']['pmRA'] = data['pmRA'][0] database['pm']['pmDE'] = data['pmDE'][0] database['pm']['epmRA'] = data['e_pmRA'][0] database['pm']['epmDE'] = data['e_pmDE'][0] database['pm']['r'] = 'I/317/sample' return database
def get_law(name,norm='E(B-V)',wave_units='AA',photbands=None,**kwargs):
    """
    Retrieve an interstellar reddening law.

    C{name} is looked up among the law functions defined in this module and
    evaluated with the remaining keyword arguments (C{Rv} defaults to 3.1
    and is passed along). The resulting curve can be interpolated onto a
    user-supplied wavelength grid (keyword C{wave}, in angstrom), normalised
    with respect to E(B-V) (default) or to the flux in a passband ('Av' and
    'Ak' are shortcuts for JOHNSON.V and JOHNSON.K), optionally integrated
    in photometric passbands, and returned with wavelengths in C{wave_units}.

    Example usage:

    >>> wave = np.r_[1e3:1e5:10]
    >>> wave,mag = get_law('cardelli1989',wave=wave,Rv=3.1)

    @param name: name of the interstellar law
    @type name: str, one of the functions defined here
    @param norm: type of normalisation of the curve
    @type norm: str (one of E(B-V), Av)
    @param wave_units: wavelength units
    @type wave_units: str (interpretable for units.conversions.convert)
    @param photbands: list of photometric passbands
    @type photbands: list of strings
    @keyword wave: wavelength array to interpolate the law on
    @type wave: ndarray
    @return: wavelength, reddening magnitude
    @rtype: (ndarray,ndarray)
    """
    #-- collect the inputs; Rv stays inside kwargs so the law function sees it
    custom_wave = kwargs.pop('wave',None)
    Rv = kwargs.setdefault('Rv',3.1)
    #-- evaluate the law (dispatch by function name) and keep a pristine copy
    wave,mag = globals()[name.lower()](**kwargs)
    wave_orig,mag_orig = wave.copy(),mag.copy()
    #-- when the user supplied a grid, interpolate the curve onto it
    if custom_wave is not None:
        if wave_units != 'AA':
            custom_wave = conversions.convert(wave_units,'AA',custom_wave)
        mag = np.interp(custom_wave,wave,mag,right=0)
        wave = custom_wave
    #-- normalisation: either per unit E(B-V), or per unit flux in a passband
    if norm.lower()=='e(b-v)':
        mag *= Rv
    else:
        #-- 'ak' and 'av' are shortcuts for the JOHNSON K and V bands
        if norm.lower()=='ak':
            norm = 'JOHNSON.K'
        elif norm.lower()=='av':
            norm = 'JOHNSON.V'
        norm_reddening = model.synthetic_flux(wave_orig,mag_orig,[norm])[0]
        logger.info('Normalisation via %s: Av/%s = %.6g'%(norm,norm,1./norm_reddening))
        mag /= norm_reddening
    #-- optionally express the curve in photometric passbands
    if photbands is not None:
        mag = model.synthetic_flux(wave,mag,photbands)
        wave = filters.get_info(photbands)['eff_wave']
    #-- finally, convert the wavelengths to the requested units
    if wave_units != 'AA':
        wave = conversions.convert('AA',wave_units,wave)
    return wave,mag
def get_law(name, norm='E(B-V)', wave_units='AA', photbands=None, **kwargs):
    """
    Retrieve an interstellar reddening law.

    C{name} is looked up among the law functions defined in this module and
    evaluated with the remaining keyword arguments (C{Rv} defaults to 3.1
    and is passed along). The resulting curve can be interpolated onto a
    user-supplied wavelength grid (keyword C{wave}, in angstrom), normalised
    with respect to E(B-V) (default) or to the flux in a passband ('Av' and
    'Ak' are shortcuts for JOHNSON.V and JOHNSON.K), optionally integrated
    in photometric passbands, and returned with wavelengths in C{wave_units}.

    Example usage:

    >>> wave = np.r_[1e3:1e5:10]
    >>> wave,mag = get_law('cardelli1989',wave=wave,Rv=3.1)

    @param name: name of the interstellar law
    @type name: str, one of the functions defined here
    @param norm: type of normalisation of the curve
    @type norm: str (one of E(B-V), Av)
    @param wave_units: wavelength units
    @type wave_units: str (interpretable for units.conversions.convert)
    @param photbands: list of photometric passbands
    @type photbands: list of strings
    @keyword wave: wavelength array to interpolate the law on
    @type wave: ndarray
    @return: wavelength, reddening magnitude
    @rtype: (ndarray,ndarray)
    """
    #-- collect the inputs; Rv stays inside kwargs so the law function sees it
    custom_wave = kwargs.pop('wave', None)
    Rv = kwargs.setdefault('Rv', 3.1)
    #-- evaluate the law (dispatch by function name) and keep a pristine copy
    wave, mag = globals()[name.lower()](**kwargs)
    wave_orig, mag_orig = wave.copy(), mag.copy()
    #-- when the user supplied a grid, interpolate the curve onto it
    if custom_wave is not None:
        if wave_units != 'AA':
            custom_wave = conversions.convert(wave_units, 'AA', custom_wave)
        mag = np.interp(custom_wave, wave, mag, right=0)
        wave = custom_wave
    #-- normalisation: either per unit E(B-V), or per unit flux in a passband
    if norm.lower() == 'e(b-v)':
        mag *= Rv
    else:
        #-- 'ak' and 'av' are shortcuts for the JOHNSON K and V bands
        if norm.lower() == 'ak':
            norm = 'JOHNSON.K'
        elif norm.lower() == 'av':
            norm = 'JOHNSON.V'
        norm_reddening = model.synthetic_flux(wave_orig, mag_orig, [norm])[0]
        logger.info('Normalisation via %s: Av/%s = %.6g' %
                    (norm, norm, 1. / norm_reddening))
        mag /= norm_reddening
    #-- optionally express the curve in photometric passbands
    if photbands is not None:
        mag = model.synthetic_flux(wave, mag, photbands)
        wave = filters.get_info(photbands)['eff_wave']
    #-- finally, convert the wavelengths to the requested units
    if wave_units != 'AA':
        wave = conversions.convert('AA', wave_units, wave)
    return wave, mag
def get_photometry(ID=None, extra_fields=None, **kwargs):
    """
    Download all available photometry from a star to a record array.

    Extra fields will not be useful probably.

    For extra kwargs, see L{_get_URI} and L{gcpd2phot}

    @param ID: target identifier
    @param extra_fields: extra catalog columns to propagate (defaults to [])
    @keyword to_units: common flux unit (default 'erg/s/cm2/AA'); set to a
      false value to skip conversion
    @keyword master: existing record array to append the new results to
    @return: record array with the photometry, or None if nothing was found
    """
    #-- fix: avoid a mutable default argument; the old default is preserved
    if extra_fields is None:
        extra_fields = []
    to_units = kwargs.pop('to_units', 'erg/s/cm2/AA')
    master_ = kwargs.get('master', None)
    master = None
    #-- retrieve all measurements
    for source in cat_info.sections():
        results, units, comms = search(source, ID=ID, **kwargs)
        if results is not None:
            master = gcpd2phot(source, results, units, master,
                               extra_fields=extra_fields)
    #-- convert the measurement to a common unit.
    if to_units and master is not None:
        #-- prepare columns to extend to basic master
        dtypes = [('cwave', 'f8'), ('cmeas', 'f8'),
                  ('e_cmeas', 'f8'), ('cunit', 'a50')]
        cols = [[], [], [], []]
        #-- forget about 'nan' errors for the moment
        no_errors = np.isnan(master['e_meas'])
        master['e_meas'][no_errors] = 0.
        #-- extend basic master; get_info also serves to validate the photbands
        try:
            zp = filters.get_info(master['photband'])
        except Exception:
            #-- fix: narrowed bare except; show the offending photbands and
            #   re-raise (print() works on both Python 2 and 3)
            print(master['photband'])
            raise
        for i in range(len(master)):
            to_units_ = to_units + ''
            try:
                value, e_value = conversions.convert(
                    master['unit'][i], to_units,
                    master['meas'][i], master['e_meas'][i],
                    photband=master['photband'][i])
            except ValueError:  # calibrations not available
                # if it is a magnitude color, try converting it to a flux ratio
                if 'mag' in master['unit'][i]:
                    try:
                        value, e_value = conversions.convert(
                            'mag_color', 'flux_ratio',
                            master['meas'][i], master['e_meas'][i],
                            photband=master['photband'][i])
                        to_units_ = 'flux_ratio'
                    except ValueError:
                        value, e_value = np.nan, np.nan
                # else, we are powerless...
                else:
                    value, e_value = np.nan, np.nan
            try:
                eff_wave = filters.eff_wave(master['photband'][i])
            except IOError:
                eff_wave = np.nan
            cols[0].append(eff_wave)
            cols[1].append(value)
            cols[2].append(e_value)
            cols[3].append(to_units_)
        master = numpy_ext.recarr_addcols(master, cols, dtypes)
        #-- reset errors
        master['e_meas'][no_errors] = np.nan
        master['e_cmeas'][no_errors] = np.nan
    #-- if a master is given as a keyword, and data is found in this module,
    #   append the two
    if master_ is not None and master is not None:
        master = numpy_ext.recarr_addrows(master_, master.tolist())
    elif master is None:
        master = master_
    #-- and return the results
    return master