def waterFraction1StepProfiler(model_id,path_gastronoom,fraction,rfrac):
    '''
    Create a 1-step fractional profile for water.

    The original water abundance profile is taken from the output of the
    original model without fractional abundances.

    These fraction profiles can be used for CHANGE_ABUNDANCE_FRACTION in
    mline.

    @param model_id: The model id of the original cooling model
    @type model_id: string
    @param path_gastronoom: The model subfolder in ~/GASTRoNOoM/
    @type path_gastronoom: string
    @param fraction: the fraction used
    @type fraction: float
    @param rfrac: the radius at the step to the fractional abundance [cm]
    @type rfrac: float

    '''

    rfrac = float(rfrac)
    fraction = float(fraction)
    filename = os.path.join(cc.path.gastronoom,path_gastronoom,'models',\
                            model_id,'coolfgr_all%s.dat'%model_id)
    rad = Gastronoom.getGastronoomOutput(filename=filename,keyword='RADIUS',\
                                         return_array=1)
    fraction_profile = np.ones(len(rad))
    step_index = np.argmin(abs(rad-rfrac))
    fraction_profile[step_index:] = fraction
    output_filename = os.path.join(cc.path.gastronoom,path_gastronoom,\
                                   'profiles',\
                                   'water_fractions_%s_%.2f_r%.3e.dat'\
                                   %(model_id,fraction,rfrac))
    DataIO.writeCols(output_filename,[rad,fraction_profile])
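#-- A minimal usage sketch for waterFraction1StepProfiler (added for
#   illustration; the model id and subfolder below are hypothetical
#   placeholders, not values from this codebase). This would write a profile
#   that keeps the original water abundance up to ~1e16 cm and scales it by
#   0.05 beyond that radius.
def exampleWaterFractionProfile():
    waterFraction1StepProfiler(model_id='model_2010-01-01h00-00-00',\
                               path_gastronoom='runTest',\
                               fraction=0.05,\
                               rfrac=1e16)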
def setStarPars(self):
    """
    Set some standard stellar parameters such as Ak and galactic position.

    """

    self.star_index = DataIO.getInputData().index(self.star_name)
    ll = DataIO.getInputData(keyword='LONG',rindex=self.star_index)
    bb = DataIO.getInputData(keyword='LAT',rindex=self.star_index)
    if self.distance is not None:
        self.ak = em.findext_marshall(ll=ll,bb=bb,distance=self.distance,\
                                      norm='Ak')
        if self.ak is None:
            self.ak = em.findext_drimmel(lng=ll,lat=bb,norm='Ak',\
                                         distance=self.distance)
    if self.ak is None:
        self.ak = DataIO.getInputData(keyword='A_K',rindex=self.star_index)
    snp = DataIO.getInputData(keyword='STAR_NAME_PLOTS',\
                              remove_underscore=1,rindex=self.star_index)
    self.star_name_plots = snp
    if (abs(ll) < 5.0 or ll > 355.0) and abs(bb) < 5.0:
        self.gal_position = 'GC'
    else:
        self.gal_position = 'ISM'
def readTelescopeProperties(telescope):
    """
    Read the telescope properties from Telescope.dat.

    This currently includes the telescope size in m, and the default
    absolute flux calibration uncertainty.

    @param telescope: The telescope requested
    @type telescope: str

    @return: The telescope size and absolute flux calibration uncertainty
    @rtype: (float,float)

    """

    all_telescopes = DataIO.getInputData(keyword='TELESCOPE',start_index=5,\
                                         filename='Telescope.dat')
    #-- All PACS bands are looked up as the single generic PACS entry.
    if 'PACS' in telescope:
        telescope = 'PACS'
    try:
        tel_index = all_telescopes.index(telescope)
    except ValueError:
        raise ValueError('%s not found in Telescope.dat.'%telescope)
    size = DataIO.getInputData(keyword='SIZE',start_index=5,\
                               filename='Telescope.dat',\
                               rindex=tel_index)
    abs_err = DataIO.getInputData(keyword='ABS_ERR',start_index=5,\
                                  filename='Telescope.dat',\
                                  rindex=tel_index)
    return (size,abs_err)
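#-- A usage sketch for readTelescopeProperties (added for illustration; the
#   band name below is a hypothetical example). Any telescope name containing
#   PACS collapses onto the generic PACS entry in Telescope.dat; an unknown
#   telescope raises a ValueError.
def exampleTelescopeProperties():
    size, abs_err = readTelescopeProperties(telescope='PACS-B2A')
    print 'Telescope size: %.1f m, flux calibration uncertainty: %.2f'\
          %(size,abs_err)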
def getKey(k,data=None,fn=None):
    '''
    Retrieve data from an ALI inputfile.

    Returns the line following the line that contains the given key.

    @param k: The unique input keyword for which the ALI inputfile is
              searched.
    @type k: str

    @keyword data: The data, ie a file read by readFile with delimiter set to
                   ''. A filename must be given if data is None.

                   (default: None)
    @type data: list[str]
    @keyword fn: The ALI input filename. Only used if data is None.

                 (default: None)
    @type fn: str

    @return: The line following the line that contains the given key
    @rtype: str

    '''

    if data is None:
        data = DataIO.readFile(filename=fn,delimiter=None,replace_spaces=0)
    i = DataIO.findKey(0,data,k)
    return data[i+1].replace('\n','')
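#-- A usage sketch for getKey (added for illustration; the key and filename
#   are hypothetical placeholders). Pre-reading the data once avoids
#   re-reading the inputfile for every key lookup.
def exampleGetKey():
    data = DataIO.readFile(filename='inputALI.dat',delimiter=None,\
                           replace_spaces=0)
    val = getKey(k='OUTPUT FILE',data=data)
    return val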
def makeMCMaxStars(self,models):
    '''
    Set parameters for star_list taken from the MCMax database.

    Based on the model id of MCMax.

    @param models: model_ids for the MCMax db
    @type models: list(string)

    @return: The model instances
    @rtype: list(Star())

    '''

    star_grid = Star.makeStars(models=models,code='MCMax',id_type='MCMax',\
                               path=self.path)
    for star,model in zip(star_grid,models):
        filepath = os.path.join(cc.path.mout,'models',\
                                star['LAST_MCMAX_MODEL'])
        denstemp = os.path.join(filepath,'denstemp.dat')
        logfile = os.path.join(filepath,'log.dat')
        grid_shape = DataIO.getMCMaxOutput(filename=denstemp,incr=1,\
                                           keyword='NGRAINS',single=0)[0]
        star.update({'NTHETA':int(grid_shape[1]),\
                     'NRAD':int(grid_shape[0]),\
                     'T_STAR':float(DataIO.getMCMaxOutput(filename=logfile,\
                                        incr=0,\
                                        keyword='STELLAR TEMPERATURE',\
                                        single=0)[0][2]),\
                     'R_STAR':float(DataIO.getMCMaxOutput(filename=logfile,\
                                        incr=0,\
                                        keyword='STELLAR RADIUS',\
                                        single=0)[0][2])})
    return star_grid
def doMline(self,star):
    """
    Run mline.

    First, database is checked for retrieval of old models.

    @param star: The parameter set for this session
    @type star: Star()

    """

    model_bools = self.checkMlineDatabase()
    del self.command_list['R_OUTER']
    del self.command_list['OUTER_R_MODE']
    for molec,model_bool in zip(self.molec_list,model_bools):
        if not model_bool:
            self.updateModel(molec.getModelId())
            commandfile = ['%s=%s'%(k,v)
                           for k,v in sorted(self.command_list.items())
                           if k != 'R_POINTS_MASS_LOSS'] +\
                          ['####'] +\
                          ['%s=%s'%(k,v)
                           for k,v in sorted(molec.makeDict().items())] +\
                          ['####']
            if 'R_POINTS_MASS_LOSS' in self.command_list:
                commandfile.extend(['%s=%s'%('R_POINTS_MASS_LOSS',v)
                                    for v in self.command_list\
                                                 ['R_POINTS_MASS_LOSS']] +\
                                   ['####'])
            filename = os.path.join(cc.path.gout,'models',\
                                    'gastronoom_%s.inp'%molec.getModelId())
            DataIO.writeFile(filename,commandfile)
            self.execGastronoom(subcode='mline',filename=filename)
            self.mline_done = True
            if len([f for f in glob(os.path.join(cc.path.gout,'models',\
                    molec.getModelId(),'ml*%s_%s.dat'\
                    %(molec.getModelId(),molec.molecule)))]) == 3:
                self.ml_db[self.model_id][molec.getModelId()]\
                          [molec.molecule] = molec.makeDict()
                self.ml_db.addChangedKey(self.model_id)
                self.ml_db.sync()
            else:
                print 'Mline model calculation failed for '\
                      '%s. No entry is added to the database.'\
                      %(molec.molecule)
                molec.setModelId('')
    if set([molec.getModelId() for molec in self.molec_list]) == set(['']):
        #- no mline models calculated: stop GASTRoNOoM here
        self.model_id = ''
        print 'Mline model calculation failed for all requested ' + \
              'molecules. Stopping GASTRoNOoM here!'
    else:
        #- at least one molecule was successfully calculated, so start
        #- Sphinx, hence if vic is requested, the cooling model_id can now
        #- be added to the models list
        if self.vic is not None and self.sphinx:
            #- add the command list to the vic models list
            self.vic.addModel(self.model_id,self.command_list)
def parseProfile(self):
    '''
    Parse the sphinx file 2, which includes all line profile info.

    The output is stored in dict self['sph2'].

    '''

    self['sph2'] = dict()
    self['sph2']['nobeam'] = dict()
    self['sph2']['beam'] = dict()
    self['sph2']['nobeam_cont'] = dict()
    self['sph2']['beam_cont'] = dict()
    data = self.getFile(wildcard='2',delimiter=' ')
    data_col_1 = [d[0] for d in data]
    data_i = 6
    data_j = DataIO.findString(data_i,data_col_1)
    self['sph2']['nobeam']['velocity'] = array([float(line[0])
                                                for line in data[data_i:data_j]])

    #-- Reverse this flux grid. Sphinx output files give the mirrored flux
    #   grid for the associated velocity grid.
    self['sph2']['nobeam']['flux'] = array([DataIO.convertFloat(line[-1],\
                                                                nans=1)
                                            for line in data[data_i:data_j]])
    self['sph2']['nobeam']['flux'] = self['sph2']['nobeam']['flux'][::-1]
    data_k = data_j + 4
    data_l = DataIO.findString(data_k,data_col_1)
    self['sph2']['beam']['velocity'] = array([float(line[0])
                                              for line in data[data_k:data_l]])
    self['sph2']['beam']['flux'] = array([float(line[-1])
                                          for line in data[data_k:data_l]])
    self['sph2']['beam']['norm_flux'] = array([float(line[1])
                                               for line in data[data_k:data_l]])
    self['sph2']['beam']['tmb'] = array([float(line[2])
                                         for line in data[data_k:data_l]])

    #-- Set the continuum value for the different profiles
    self.setContinuum('nobeam','flux')
    for lp in ['flux','norm_flux','tmb']:
        self.setContinuum('beam',lp)

    #-- Check that the velocity grid is monotonically increasing.
    if self['sph2']['beam']['velocity'][0] > self['sph2']['beam']['velocity'][-1]:
        self['sph2']['beam']['velocity'] = self['sph2']['beam']['velocity'][::-1]
        self['sph2']['beam']['flux'] = self['sph2']['beam']['flux'][::-1]
        self['sph2']['beam']['norm_flux'] = self['sph2']['beam']['norm_flux'][::-1]
        self['sph2']['beam']['tmb'] = self['sph2']['beam']['tmb'][::-1]
    if self['sph2']['nobeam']['velocity'][0] > self['sph2']['nobeam']['velocity'][-1]:
        self['sph2']['nobeam']['velocity'] = self['sph2']['nobeam']['velocity'][::-1]
        self['sph2']['nobeam']['flux'] = self['sph2']['nobeam']['flux'][::-1]

    #-- Check for NaNs in the profile.
    if True in list(isnan(self['sph2']['nobeam']['flux'])):
        self.nans_present = True
        print "WARNING! There are NaN's in the intrinsic line profile " + \
              "with model id %s:"\
              %(os.path.split(os.path.split(self.fn)[0])[1])
        print os.path.split(self.fn.replace('sph*','sph2'))[1]
def setData(self,**kwargs):
    '''
    Select available data.

    Based on the data file types in Sed.dat and the available data files.

    Also calls the buildPhotometry method to create a photometry file from
    the IvS Sed builder tool.

    Any keywords required for buildPhotometry can be passed here.

    '''

    data_types = DataIO.getInputData(keyword='DATA_TYPES',\
                                     filename='Sed.dat')
    abs_errs = DataIO.getInputData(keyword='ABS_ERR',filename='Sed.dat')

    if 'Photometric_IvS' in data_types:
        buildPhotometry(self.star_name,**kwargs)

    self.data_types = []
    self.data_filenames = []
    self.abs_err = dict()
    for dt,ierr in zip(data_types,abs_errs):
        searchpath = os.path.join(cc.path.dsed,'%s_*%s*.dat'\
                                  %(dt,self.star_name))
        add_files = glob(searchpath)
        for ff in add_files:
            if ff not in self.data_filenames:
                self.data_filenames.append(ff)
                self.data_types.append(dt)
                self.abs_err[dt] = ierr
def readTelescopeProperties(self):
    """
    Read the telescope properties from Telescope.dat.

    This currently includes the telescope size in m, and the default
    absolute flux calibration uncertainty.

    """

    all_telescopes = DataIO.getInputData(keyword='TELESCOPE',start_index=5,\
                                         filename='Telescope.dat')
    try:
        tel_index = all_telescopes.index(self.instrument.upper())
    except ValueError:
        raise ValueError('%s not found in Telescope.dat.'\
                         %self.instrument.upper())
    self.telescope_size = DataIO.getInputData(keyword='SIZE',start_index=5,\
                                              filename='Telescope.dat',\
                                              rindex=tel_index)
    self.absflux_err = DataIO.getInputData(keyword='ABS_ERR',start_index=5,\
                                           filename='Telescope.dat',\
                                           rindex=tel_index)
def __init__(self,path_chemistry='runTest',replace_db_entry=0,db=None,\
             single_session=0):
    """
    Initializing an instance of ModelingSession.

    @keyword db: the Chemistry database

                 (default: None)
    @type db: Database()
    @keyword replace_db_entry: replace an entry in the Chemistry database
                               with a newly calculated model with a new
                               model id (for instance if some general data
                               not included in the inputfiles is changed)

                               (default: 0)
    @type replace_db_entry: bool
    @keyword path_chemistry: modeling folder in Chemistry home

                             (default: 'runTest')
    @type path_chemistry: string
    @keyword single_session: If this is the only CC session. Speeds up db
                             check.

                             (default: 0)
    @type single_session: bool

    """

    super(Chemistry, self).__init__(code='Chemistry',path=path_chemistry,\
                                    replace_db_entry=replace_db_entry,\
                                    single_session=single_session)
    #-- Convenience path
    cc.path.cout = os.path.join(cc.path.chemistry,self.path)
    self.db = db

    #-- If a chemistry model is in progress, the model manager will hold
    #   until the other cc session is finished.
    self.in_progress = False

    #- Read standard input file with all parameters that should be included
    #- as well as some dust specific information
    self.inputfilename = os.path.join(cc.path.aux,'inputChemistry.dat')
    self.standard_inputfile = DataIO.readDict(self.inputfilename,\
                                              convert_floats=1,\
                                              convert_ints=1,\
                                              comment_chars=['#','*'])
    chemistry_keys = os.path.join(cc.path.aux,'Input_Keywords_Chemistry.dat')
    self.chemistry_keywords = [line.strip()
                               for line in DataIO.readFile(chemistry_keys)
                               if line]
def parseProfile(self):
    '''
    Parse the sphinx file 2, which includes all line profile info.

    The output is stored in dict self.sph2.

    '''

    self.sph2 = dict()
    self.contents['sph2'] = self.sph2
    self.sph2['nobeam'] = dict()
    self.sph2['beam'] = dict()
    self.sph2['nobeam_cont'] = dict()
    self.sph2['beam_cont'] = dict()
    data = self.getFile(self.filename.replace('*','2'))
    data_col_1 = [d[0] for d in data]
    data_i = 6
    data_j = DataIO.findString(data_i,data_col_1)
    self.sph2['nobeam']['velocity'] = array([float(line[0])
                                             for line in data[data_i:data_j]])

    #-- Reverse this flux grid. Sphinx output files give the mirrored
    #   flux grid for the associated velocity grid.
    self.sph2['nobeam']['flux'] = array([DataIO.convertFloat(line[-1],\
                                                             nans=1)
                                         for line in data[data_i:data_j]])
    self.sph2['nobeam']['flux'] = self.sph2['nobeam']['flux'][::-1]
    data_k = data_j + 4
    data_l = DataIO.findString(data_k,data_col_1)
    self.sph2['beam']['velocity'] = array([float(line[0])
                                           for line in data[data_k:data_l]])
    self.sph2['beam']['flux'] = array([float(line[-1])
                                       for line in data[data_k:data_l]])
    self.sph2['beam']['norm_flux'] = array([float(line[1])
                                            for line in data[data_k:data_l]])
    self.sph2['beam']['tmb'] = array([float(line[2])
                                      for line in data[data_k:data_l]])

    self.setContinuum('nobeam','flux')
    for lp in ['flux','norm_flux','tmb']:
        self.setContinuum('beam',lp)

    if self.sph2['beam']['velocity'][0] > self.sph2['beam']['velocity'][-1]:
        self.sph2['beam']['velocity'] = self.sph2['beam']['velocity'][::-1]
        self.sph2['beam']['flux'] = self.sph2['beam']['flux'][::-1]
        self.sph2['beam']['norm_flux'] = self.sph2['beam']['norm_flux'][::-1]
        self.sph2['beam']['tmb'] = self.sph2['beam']['tmb'][::-1]
    if self.sph2['nobeam']['velocity'][0] > self.sph2['nobeam']['velocity'][-1]:
        self.sph2['nobeam']['velocity'] = self.sph2['nobeam']['velocity'][::-1]
        self.sph2['nobeam']['flux'] = self.sph2['nobeam']['flux'][::-1]

    if True in list(isnan(self.sph2['nobeam']['flux'])):
        self.nans_present = True
        print "WARNING! There are NaN's in the intrinsic line profile " + \
              "with model id %s:"\
              %(os.path.split(os.path.split(self.filename)[0])[1])
        print os.path.split(self.filename.replace('sph*','sph2'))[1]
def combineRedLaw(ofn,chiar_curve='ism',power=-1.8):
    '''
    A method to combine the Fitzpatrick 2004 and Chiar & Tielens 2006
    reddening laws as well as to extrapolate Chiar and Tielens 2006 to longer
    wavelengths.

    The result is saved in a file and used by the IvS repository as a valid
    reddening law.

    @param ofn: The output filename with path
    @type ofn: str

    @keyword chiar_curve: The curve type for Chiar & Tielens 2006. Either
                          'gc' or 'ism'.

                          (default: 'ism')
    @type chiar_curve: str
    @keyword power: The power for the power law extrapolation. The default
                    is taken from Chiar and Tielens 2006 as a typical value
                    for the local ISM between 2 and 5 micron. 'gc' may
                    require a different value, but the effect is minor.

                    (default: -1.8)
    @type power: float

    '''

    chiar_curve = chiar_curve.lower()

    #-- Extract the two relevant extinction laws.
    xchiar, a_ak_chiar = red.get_law('chiar2006',norm='Ak',\
                                     wave_units='micron',curve=chiar_curve)
    xfitz, a_ak_fitz = red.get_law('fitzpatrick2004',norm='Ak',\
                                   wave_units='micron')

    #-- Define a power law for the extrapolation
    def power_law(x,scale,power):
        return scale*(x)**power

    #-- Determine the scaling factor from the chosen Chiar & Tielens law
    scale = a_ak_chiar[-1]/(xchiar[-1]**power)

    #-- Create an x grid for longer wavelengths.
    xlong = np.linspace(xchiar[-1]+0.1,1000,1000)
    a_ak_long = power_law(xlong,scale,power)

    #-- Combine the three sections
    xcom = hstack([xfitz[xfitz<xchiar[0]],xchiar,xlong])
    a_ak_com = hstack([a_ak_fitz[xfitz<xchiar[0]],a_ak_chiar,a_ak_long])

    #-- Write the result to a file
    comments = '#-- wavelength (micron)   A_lambda/A_k\n'
    DataIO.writeCols(filename=ofn,cols=[[comments]])
    DataIO.writeCols(filename=ofn,cols=[xcom,a_ak_com],mode='a')
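#-- A usage sketch for combineRedLaw (added for illustration; the output
#   path is a hypothetical placeholder). This writes the combined
#   Fitzpatrick / Chiar & Tielens law, extrapolated with the default -1.8
#   power, to a two-column file.
def exampleCombineRedLaw():
    ofn = os.path.join(cc.path.usr,'combined_redlaw_ism.dat')
    combineRedLaw(ofn=ofn,chiar_curve='ism',power=-1.8)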
def setStarPars(self):
    """
    Set some standard stellar parameters such as Ak and galactic position.

    """

    self.star_index = DataIO.getInputData().index(self.star_name)
    self.ll = DataIO.getInputData(keyword='LONG',rindex=self.star_index)
    self.bb = DataIO.getInputData(keyword='LAT',rindex=self.star_index)
    snp = DataIO.getInputData(keyword='STAR_NAME_PLOTS',\
                              remove_underscore=1,rindex=self.star_index)
    self.star_name_plots = snp
def combineRedLaw(ofn, chiar_curve="ism", power=-1.8): """ A method to combine the Fitzpatrick 2004 and Chiar & Tielens 2006 reddening laws as well as to extrapolate Chiar and Tielens 2006 to longer wavelengths. The result is saved in a file and used by the IvS repository as a valid reddening law. @param ofn: The output filename with path @type ofn: str @keyword chiar_curve: The curve type for Chiar & Tielens 2004. Either 'gc' or 'ism'. (default: 'ism') @type chiar_curve: str @keyword power: The power for the power law extrapolation. Default is taken from Chiar and Tielens 2006, as a typical value for local ISM between 2 and 5 micron. gc may require different value but not very important. (default: -1.8) @type power: float """ chiar_curve = chiar_curve.lower() # -- Extract the two relevant extinction laws. xchiar, a_ak_chiar = red.get_law("chiar2006", norm="Ak", wave_units="micron", curve=chiar_curve) xfitz, a_ak_fitz = red.get_law("fitzpatrick2004", norm="Ak", wave_units="micron") # -- Define a power law for the extrapolation def power_law(x, scale, power): return scale * (x) ** power # -- Determine the scaling factor from specific chiar/tielens law scale = a_ak_chiar[-1] / (xchiar[-1] ** power) # -- Create an x grid for longer wavelengths. xlong = np.linspace(xchiar[-1] + 0.1, 1000, 1000) a_ak_long = power_law(xlong, scale, power) # -- Combine the three sections xcom = hstack([xfitz[xfitz < xchiar[0]], xchiar, xlong]) a_ak_com = hstack([a_ak_fitz[xfitz < xchiar[0]], a_ak_chiar, a_ak_long]) # -- Write the result to a file comments = "#-- wavelength (micron) A_lambda/A_k\n" DataIO.writeCols(filename=ofn, cols=[[comments]]) DataIO.writeCols(filename=ofn, cols=[xcom, a_ak_com], mode="a")
def mergeOpacity(species,lowres='nom_res',highres='high_res'):
    '''
    Merge high-res opacities into a grid of low-res opacities.

    The wavelength range of the inserted high res opacities is taken from
    the given high res grid.

    @param species: The dust species for which this is done. This is also
                    the name of the folder in ~/MCMax/DustOpacities/ that
                    contains the data files.
    @type species: string

    @keyword lowres: The subfolder in ~/MCMax/DustOpacities/species
                     containing the low resolution datafiles.

                     (default: 'nom_res')
    @type lowres: string
    @keyword highres: The subfolder in ~/MCMax/DustOpacities/species
                      containing the high resolution datafiles.

                      (default: 'high_res')
    @type highres: string

    '''

    path = os.path.join(cc.path.mopac,species)
    lowres_files = [f for f in glob(os.path.join(path,lowres,'*'))
                    if f[-5:] == '.opac']
    highres_files = [f for f in glob(os.path.join(path,highres,'*'))
                     if f[-5:] == '.opac']
    files = set([os.path.split(f)[1] for f in lowres_files] + \
                [os.path.split(f)[1] for f in highres_files])

    for f in files:
        hdfile = os.path.join(path,highres,f)
        ldfile = os.path.join(path,lowres,f)
        if os.path.isfile(ldfile) and os.path.isfile(hdfile):
            hd = DataIO.readCols(hdfile)
            ld = DataIO.readCols(ldfile)
            hdw = hd[0]
            ldw = ld[0]
            wmin = hdw[0]
            wmax = hdw[-1]
            ld_low = [list(col[ldw<wmin]) for col in ld]
            ld_high = [list(col[ldw>wmax]) for col in ld]
            hd = [list(col) for col in hd]
            merged = [ld_low[i] + hd[i] + ld_high[i]
                      for i in range(len(hd))]
            DataIO.writeCols(filename=os.path.join(path,f),cols=merged)
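#-- A usage sketch for mergeOpacity (added for illustration; the species
#   short name below is a hypothetical placeholder). Files with the same
#   name in the nom_res and high_res subfolders are merged and written to
#   ~/MCMax/DustOpacities/<species>/.
def exampleMergeOpacity():
    mergeOpacity(species='AMCDHSPREI',lowres='nom_res',highres='high_res')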
def copyOutput(self,entry,old_id,new_id):
    '''
    Copy modelling output based on model_id.

    @param entry: the modeling object for which output is copied
    @type entry: Molecule() or Transition()
    @param old_id: The old model_id
    @type old_id: string
    @param new_id: the new model_id
    @type new_id: string

    '''

    folder_old = os.path.join(cc.path.gout,'models',old_id)
    folder_new = os.path.join(cc.path.gout,'models',new_id)
    lsprocess = subprocess.Popen('ls %s'%folder_old,shell=True,\
                                 stdout=subprocess.PIPE)
    lsfile = lsprocess.communicate()[0].split('\n')
    lsfile = [os.path.split(line)[1]
              for line in lsfile
              if ((line[0:2] == 'ml' or line[0:4] == 'cool') \
                      and not entry.isMolecule()) \
                  or line[0:7] == 'coolfgr' \
                  or line[0:4] == 'para' \
                  or line[0:5] == 'input']
    if not entry.isMolecule():
        lsfile = [line
                  for line in lsfile
                  if not (line[0:2] == 'ml' \
                          and line.split('_')[-1].replace('.dat','') \
                              != entry.molecule.molecule)]
        lsfile = [line
                  for line in lsfile
                  if not (line[0:4] == 'cool' \
                          and (line.split('_')[-1].replace('.dat','') \
                                   != entry.molecule.molecule \
                               or line.split('_')[-1].replace('.dat','') \
                                   == 'sampling' \
                               or line[0:7] == 'coolfgr'))]
    new_lsfile = [line.replace(old_id,new_id) for line in lsfile]
    DataIO.testFolderExistence(folder_new)
    lsprocess = subprocess.Popen('ls %s'%folder_new,shell=True,\
                                 stdout=subprocess.PIPE)
    already_done = lsprocess.communicate()[0].split('\n')
    for ls,nls in zip(lsfile,new_lsfile):
        if nls not in already_done:
            subprocess.call(['ln -s %s %s'%(os.path.join(folder_old,ls),\
                                            os.path.join(folder_new,nls))],\
                            shell=True)
def __init__(self,code,path,replace_db_entry=0,new_entries=[],\
             single_session=0):
    """
    Initializing an instance of ModelingSession.

    @param code: code for which the modelingsession is created
    @type code: string
    @param path: modeling output folder in the code's home folder
    @type path: string

    @keyword replace_db_entry: replace an entry in the database with a newly
                               calculated model with a new model id
                               (eg if some general data not included in the
                               inputfiles is changed)

                               (default: 0)
    @type replace_db_entry: bool
    @keyword new_entries: The new model_ids when replace_db_entry is 1
                          of other models in the grid. These are not
                          replaced!

                          (default: [])
    @type new_entries: list[str]
    @keyword single_session: If this is the only CC session. Speeds up db
                             check.

                             (default: 0)
    @type single_session: bool

    """

    self.path = path
    self.code = code
    self.model_id = ''
    self.replace_db_entry = replace_db_entry
    self.new_entries = new_entries
    self.single_session = single_session
    if code == 'Chemistry':
        self.mutable = []
    else:
        mutablefile = os.path.join(cc.path.aux,\
                                   'Mutable_Parameters_%s.dat'%code)
        self.mutable = [line[0]
                        for line in DataIO.readFile(mutablefile,\
                                                    delimiter=' ')
                        if ' '.join(line)]
        self.mutable = [line for line in self.mutable if line[0] != '#']
    fout = os.path.join(getattr(cc.path,self.code.lower()),self.path)
    DataIO.testFolderExistence(os.path.join(fout,'models'))
def writeChi2(self,fn,sort=1,parameters=[]):
    '''
    Write the Chi^2 values to a file. Lists the model id in the first column
    with the chi^2 value in the second.

    The chi^2 values can be requested to be sorted.

    Parameters from the Star() objects can be added as additional columns.
    Given parameters must be valid.

    @param fn: The output filename
    @type fn: str

    @keyword sort: Sort the star_grid according to the chi^2 values from
                   lowest to highest. Requires calcChi2 to be run first.

                   (default: 1)
    @type sort: bool
    @keyword parameters: The additional model parameters to be added as
                         columns in the file.

                         (default: [])
    @type parameters: list(str)

    '''

    #-- If no chi^2 was calculated, do nothing
    if not self.chi2.size:
        return

    #-- Write the header
    comments = ['# '] + ['ID','RedChi^2'] + parameters + ['\n']
    DataIO.writeFile(filename=fn,input_lines=comments,delimiter='\t')

    #-- Define the columns
    cols = [[s['LAST_MCMAX_MODEL'] for s in self.getStarGrid(sort=sort)]]
    if sort:
        isort = np.argsort(self.chi2)
        cols.append(self.chi2[isort])
    else:
        cols.append(self.chi2)

    #-- Add additional model parameters if requested
    for par in parameters:
        cols.append([s[par] for s in self.getStarGrid(sort=sort)])

    #-- Append the columns to the file after the header
    DataIO.writeCols(filename=fn,cols=cols,mode='a')
def finalizeVic(self):
    '''
    Finalize a modeling procedure on VIC: successful and failed results
    are printed to a file, including the transitions.

    This log file can be used as input for ComboCode again by putting
    LINE_LISTS=2.

    '''

    for trans in self.trans_in_progress:
        filename = os.path.join(cc.path.gastronoom,\
                                self.path,'models',trans.getModelId(),\
                                trans.makeSphinxFilename(2))
        if not os.path.isfile(filename):
            trans.setModelId('')
    if self.models.keys():
        time_stamp = '%.4i-%.2i-%.2ih%.2i:%.2i:%.2i'\
                     %(gmtime()[0],gmtime()[1],gmtime()[2],\
                       gmtime()[3],gmtime()[4],gmtime()[5])
        results = ['# Successfully calculated models:'] \
                  + [self.models[current_model]
                     for current_model in self.models.keys()
                     if current_model not in self.failed.keys()] \
                  + ['# Unsuccessfully calculated models (see 3 logfiles ' + \
                     'for these models):'] \
                  + [self.models[current_model]
                     for current_model in self.models.keys()
                     if current_model in self.failed.keys()]
        DataIO.writeFile(os.path.join(cc.path.gastronoom,self.path,\
                                      'vic_results','log_' + time_stamp),\
                         results)
        for current_model,model_id in self.models.items():
            model_results = ['# Successfully calculated transitions:'] + \
                            ['Sphinx %s: %s'%(trans.getModelId(),str(trans))
                             for trans in self.finished[current_model]] + \
                            ['# Unsuccessfully calculated transitions ' + \
                             '(see 2 other logfiles for these ' + \
                             'transitions):'] + \
                            ['Sphinx %s: %s'%(trans.getModelId(),str(trans))
                             for trans in self.failed[current_model]]
            DataIO.writeFile(os.path.join(cc.path.gastronoom,self.path,\
                                          'vic_results','log_results%s_%i'\
                                          %(time_stamp,current_model)),\
                             model_results)
            for this_id in self.sphinx_model_ids[current_model]:
                sphinx_files = os.path.join(cc.path.gastronoom,self.path,\
                                            'models',this_id,'sph*')
                subprocess.call(['chmod a+r %s'%sphinx_files],shell=True)
def __init__(self):
    """
    Initiating an instance of the KappaReader.

    """

    self.lspecies = DataIO.getInputData(path=cc.path.usr,\
                                        keyword='SPECIES_SHORT',\
                                        filename='Dust.dat')
    self.lfilenames = DataIO.getInputData(path=cc.path.usr,\
                                          keyword='PART_FILE',\
                                          filename='Dust.dat')
    self.lspec_dens = DataIO.getInputData(path=cc.path.usr,\
                                          keyword='SPEC_DENS',\
                                          filename='Dust.dat')
    self.kappas = dict()
    self.qext_a = dict()
    self.waves = dict()
    self.fns = dict()
    self.spec_dens = dict()
def makeOpa(self,mode='ZERO',**args):
    """
    Making custom .particle files.

    Every method called here will put the results in self.output_data.

    @keyword mode: type of extrapolation (ZERO,FUNCTION,HONY,ZHANG)

                   (default: 'ZERO')
    @type mode: string
    @keyword args: Optional keywords required for the other methods of the
                   class
    @type args: dict

    """

    self.output_data = []
    mode = mode.upper()
    if hasattr(self,'do' + mode):
        getattr(self,'do' + mode)(**args)
        #-- Join the elements of every output line into one string. (The
        #   original ' '.join(str(line)) joined the *characters* of
        #   str(line), which cannot have been the intention.)
        self.output_data = [' '.join([str(el) for el in line])
                            for line in self.output_data]
        output_filename = '_'.join(['customOpacity',mode] + \
                                   sorted(args.values()) + \
                                   [self.filename])
        if self.opacity_file:
            #-- str.replace returns a new string, so reassign the result.
            output_filename = output_filename.replace('.particle',\
                                                      '.opacity')
        DataIO.writeFile(filename=os.path.join(cc.path.mopac,\
                                               output_filename),\
                         input_lines=self.output_data)
        new_short = self.species + mode
        #- filename is already present: the new file should have the same
        #- parameters and the short name can be kept,
        #- nothing is changed in the Dust.dat file
        try:
            DataIO.getInputData(keyword='PART_FILE',filename='Dust.dat')\
                  .index(output_filename)
        #- filename is not present: do the normal procedure, ie check if
        #- short name is already present
        except ValueError:
            i = 0
            while ' '.join(DataIO.getInputData(keyword='SPECIES_SHORT',\
                                               filename='Dust.dat'))\
                     .find(new_short) != -1:
                i += 1
                new_short = new_short + str(i)
            adding_line = [new_short] + \
                          [str(DataIO.getInputData(keyword=key,\
                                                   filename='Dust.dat',\
                                                   rindex=self.index))
                           for key in ['SPEC_DENS','T_DES','T_DESA',\
                                       'T_DESB']]
            adding_line.insert(2,output_filename)
            adding_line = '\t\t'.join(adding_line)
            DataIO.writeFile(os.path.join(cc.path.usr,'Dust.dat'),\
                             [adding_line+'\n'],mode='a')
    else:
        print 'Mode "' + mode + '" not available. Aborting.'
def readModelSpectrum(dpath,rt_spec=1,fn_spec='spectrum45.0.dat'):
    '''
    Read the model output spectrum.

    If no ray-tracing is requested or no ray-tracing output is found, the
    average of the MC spectra is taken.

    @param dpath: folder that contains the MCMax outputfiles
    @type dpath: string

    @keyword rt_spec: If a ray-traced spectrum is requested

                      (default: 1)
    @type rt_spec: bool
    @keyword fn_spec: The filename of the ray-traced spectrum. Typically
                      this is the default name, but can be different
                      depending on the ray-tracing angle (inclination) that
                      is used. Not used if MCSpec are used.

                      (default: spectrum45.0.dat)
    @type fn_spec: str

    @return: The wavelength and flux grids (micron,Jy)
    @rtype: (array,array)

    '''

    rt_spec = int(rt_spec)
    try:
        if rt_spec:
            dfile = os.path.join(dpath,fn_spec)
            this_data = DataIO.readCols(dfile)
            #- if the lists are not empty
            if list(this_data[0]) and list(this_data[1]):
                w = this_data[0]
                f = this_data[1]
            else:
                raise IOError
        else:
            raise IOError
    except IOError:
        print 'No spectrum was found or ray-tracing is off for ' + \
              'this model. Taking average of theta-grid MCSpectra.'
        dfiles = glob(os.path.join(dpath,'MCSpec*.dat'))
        w = DataIO.readCols(filename=dfiles[0])[0]
        mcy_list = [DataIO.readCols(f)[1] for f in dfiles]
        f = sum(mcy_list)/len(mcy_list)
    return (w,f)
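#-- A usage sketch for readModelSpectrum (added for illustration; the model
#   folder below is a hypothetical placeholder). If the ray-traced spectrum
#   is missing or empty, the function falls back to the average of the
#   MCSpec* files in the same folder.
def exampleReadModelSpectrum():
    dpath = os.path.join(cc.path.mout,'models','model_2010-01-01h00-00-00')
    w, f = readModelSpectrum(dpath=dpath,rt_spec=1,\
                             fn_spec='spectrum45.0.dat')
    return (w,f)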
def readCDMS(self):
    '''
    Read data from CDMS line list catalogs for a specific molecule.

    '''

    data = DataIO.readFile(os.path.join(cc.path.ll,\
                                        self.molecule.molecule+'_CDMS.dat'),\
                           replace_spaces=0)
    print 'Reading data from CDMS database for %s.'%self.molecule.molecule
    uncertainties = [float(line[13:21]) for line in data]
    if min(uncertainties) < 0 and max(uncertainties) == 0:
        input_xmin = self.x_min
        input_xmax = self.x_max
        self.x_min = (self.c/(input_xmin*10**6))**-1
        self.x_max = (self.c/(input_xmax*10**6))**-1
        self.input_unit = 'cm-1'
    elif min(uncertainties) < 0 and max(uncertainties) > 0:
        raise ValueError('Uncertainties in CDMS input file for ' + \
                         'molecule %s are ambiguous.'\
                         %self.molecule.molecule)
    data = self.parseStandardCatalog(data,'CDMS')
    if self.input_unit == 'cm-1':
        data = sorted([[i == 0 \
                            and self.c*entry*10**-6 \
                            or entry
                        for i,entry in enumerate(line)]
                       for line in data])
    self.line_list.extend(data)
def readFile(self, wildcard="*", *args, **kwargs): """ Read a filename and store its contents in the Reader object. The contents are stored in a dictionary, with the filename as key. Note that one filename can refer to multiple files with the use of a wildcard character. This character can be replaced upon calling this method. The contents can be returned with the method getFile. If the file is not found, an empty list is stored instead. Additional args/kwargs are passed to DataIO.readFile (such as delimiter and replace_spaces) @keyword wildcard: if a wildcard character is present in the filename, it can be replaced here. (default: '*') @type wildcard: string """ # -- Replace the wildcard if it is present. fn = self.fn.replace("*", wildcard) # -- Read the file self["contents"][fn] = DataIO.readFile(fn, *args, **kwargs)
def plotExtinction(self,star_grid=[],models=[],plot_default=1,cfg=''):
    """
    Plotting wavelength dependent extinction efficiencies wrt grain size.

    This always depends on a star_grid or one created from a list of MCMax
    model ids.

    Plotted are the total efficiencies, including relative weights between
    the included dust species. This is the input for GASTRoNOoM!

    @keyword star_grid: List of Star() instances. If default, model ids
                        have to be given.

                        (default: [])
    @type star_grid: list[Star()]
    @keyword models: The model ids, only required if star_grid is []

                     (default: [])
    @type models: list[string]
    @keyword cfg: path to the Plotting2.plotCols config file. If default,
                  the hard-coded default plotting options are used.

                  (default: '')
    @type cfg: string

    """

    print '***********************************'
    print '** Plotting Q_ext/a.'
    if not star_grid and not models:
        print 'Input is undefined. Aborting.'
        return
    elif not star_grid and models:
        star_grid = self.makeMCMaxStars(models=models)
    x = []
    y = []
    keys = []
    for star in star_grid:
        try:
            inputfile = os.path.join(cc.path.gdata,\
                                     star['TEMDUST_FILENAME'])
            opacities = DataIO.readCols(filename=inputfile)
            x.append(opacities[0])
            y.append(opacities[1])
            keys.append('$Q_\mathrm{ext}/a$ for MCMax %s'\
                        %star['LAST_MCMAX_MODEL'].replace('_','\_'))
        except IOError:
            pass
    filename = os.path.join(self.pplot,'gastronoom_opacities_%s'\
                            %star['LAST_MCMAX_MODEL'])
    title = 'GASTRoNOoM Extinction Efficiencies in %s'\
            %(self.star_name_plots)
    filename = Plotting2.plotCols(x=x,y=y,cfg=cfg,filename=filename,\
                                  xaxis='$\lambda$ ($\mu$m)',keytags=keys,\
                                  yaxis='$Q_{ext}/a$ (cm$^{-1}$)',\
                                  plot_title=title,\
                                  key_location=(0.7,0.6),\
                                  xlogscale=1,ylogscale=1,fontsize_key=20)
    print '** The extinction efficiency plot can be found at:'
    print filename
    print '***********************************'
def __readCDMS(self):
    '''
    Read data from CDMS line list catalogs for a specific molecule.

    '''

    data = DataIO.readFile(self.fn,replace_spaces=0)
    print 'Reading data from CDMS database for'
    print self.fn

    #-- If the uncertainties are negative, change the unit of min/max to
    #   cm-1
    uncertainties = [float(line[13:21]) for line in data]
    if min(uncertainties) < 0 and max(uncertainties) == 0:
        self.x_min = self.x_min.to(1./u.cm,equivalencies=u.spectral())
        self.x_max = self.x_max.to(1./u.cm,equivalencies=u.spectral())
    elif min(uncertainties) < 0 and max(uncertainties) > 0:
        raise ValueError('Uncertainties in CDMS input file for ' + \
                         'file %s are ambiguous.'%self.fn)

    data = self.__parseCatalog(data)

    #-- If unit was changed, change the f values to MHz, the default unit
    rcm = u.Unit("1 / cm")
    if self.x_min.unit == rcm:
        data = sorted([[(entry*rcm).to(u.MHz,equivalencies=u.spectral())
                        if not i else entry
                        for i,entry in enumerate(line)]
                       for line in data])

    self.line_list = data
def updateDustMCMaxDatabase(filename):
    '''
    Update dust filenames in MCMax database with the new OPAC_PATH system.

    @param filename: The file and path to the MCMax database.
    @type filename: str

    '''

    new_filename = '%s_new'%(filename)
    db_old = Database(filename)
    db_new = Database(new_filename)

    path = os.path.join(cc.path.usr,'Dust_updatefile.dat')
    dustfiles = DataIO.readCols(path)
    pfn_old = list(dustfiles[0])
    pfn_new = list(dustfiles[1])

    for k,v in db_old.items():
        dd = v['dust_species']
        dd_new = dict()
        for pfn,cont in dd.items():
            try:
                new_key = pfn_new[pfn_old.index(pfn)]
                dd_new[new_key] = cont
            except ValueError:
                dd_new[pfn] = cont
        v['dust_species'] = dd_new
        db_new[k] = v
    db_new.sync()
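#-- A usage sketch for updateDustMCMaxDatabase (added for illustration; the
#   database path below is a hypothetical placeholder). The updated database
#   is written alongside the original with a _new suffix, so the original is
#   left untouched.
def exampleUpdateDustDb():
    fn = os.path.join(cc.path.mcmax,'MCMax_models.db')
    updateDustMCMaxDatabase(filename=fn)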
def readData(self):
    '''
    Read the raw SED data.

    '''

    for dt,fn in zip(self.data_types,self.data_filenames):
        data = DataIO.readCols(fn,nans=0)
        #-- Currently, error bars only available for these types of data.
        if 'Photometric' in dt or 'MIDI' in dt or 'Sacha' in dt:
            #-- Sort MIDI data
            if 'MIDI' in dt:
                cdat = [dd[(data[0]<=13.)*(data[0]>=8.)] for dd in data]
                i = argsort(cdat[0])
                self.data[(dt,fn)] = (cdat[0][i],cdat[1][i],cdat[2][i])
            else:
                self.data[(dt,fn)] = (data[0],data[1],data[2])
            if dt == 'Photometric_IvS':
                self.photbands = data[3]
                self.photwave = data[0]
        else:
            #-- Still sorting for PACS. Obsolete when separate bands for
            #   PACS are available.
            i = argsort(data[0])
            self.data[(dt,fn)] = (data[0][i],data[1][i])
def getSphinxConvolution(self,star,fn):
    '''
    Read the sphinx convolution and return it if it has already been done.

    Returns empty arrays if the convolution is not available.

    @param star: The Star() object
    @type star: Star()
    @param fn: The filename of the dataset (band) for which the convolution
               is to be returned.
    @type fn: str

    @return: The sphinx convolution result. (wavelength, flux)
    @rtype: (array,array)

    '''

    this_id = star['LAST_PACS_MODEL']
    if not this_id:
        return ([],[])
    fn = os.path.split(fn)[1]
    sphinx_file = os.path.join(cc.path.gout,'stars',self.star_name,\
                               'PACS_results',this_id,'%s_%s'\
                               %('sphinx',fn))
    return DataIO.readCols(sphinx_file)
def readDustInfo(self):
    """
    Read all column densities, min/max temperatures and min/max radii for
    the species involved in the MCMax model.

    Note that the self.coldens dictionary does not give real column
    densities! This dict merely gives column densities in a prescribed
    shell with given min and max radius, in order to compare with the H2
    col density.

    """

    dens = self.star.getDustDensity()
    temp = self.star.getDustTemperature()
    compf = os.path.join(cc.path.mcmax,self.star.path_mcmax,'models',\
                         self.star['LAST_MCMAX_MODEL'],'composition.dat')
    comp = DataIO.readCols(compf)
    self.rad = comp.pop(0)*self.au
    self.r_outer = self.rad[-1]

    for species in self.star.getDustList():
        #- Save the actual density profile for this dust species, as well
        #- as calculating the full column density of a dust species.
        self.dustfractions[species] = comp.pop(0)
        self.compd[species] = self.dustfractions[species]*dens
        self.fullcoldens[species] = trapz(x=self.rad,\
                                          y=self.compd[species])
        #- Determine the column density from 90% of the dust species formed
        #- onward, based on the mass fractions!
        #- Not before, because the comparison with H2 must be made,
        #- and this will skew the result if not solely looking at where the
        #- dust has (almost) all been formed.
        #- We also save min and max radii, for use with the H2 calculation.
        a_species = self.star['A_%s'%species]
        maxdens = max(self.compd[species])
        mindens = maxdens*10**(-10)
        radsel = self.rad[(self.dustfractions[species]>0.9*a_species)*\
                          (self.compd[species]>mindens)]
        denssel = self.compd[species]\
                      [(self.dustfractions[species]>0.9*a_species)*\
                       (self.compd[species]>mindens)]
        self.coldens[species] = trapz(x=radsel,y=denssel)
        if radsel.size:
            self.r_min_cd[species] = radsel[0]
            self.r_max_cd[species] = radsel[-1]
        else:
            print 'Threshold dust mass fraction not reached for %s.'\
                  %species
            self.r_min_cd[species] = 0
            self.r_max_cd[species] = 0
        #- Determine the actual destruction radius and temperature.
        #- Taken where the density reaches 1% of the maximum density
        #- (not mass fraction).
        self.r_des[species] = self.rad[self.compd[species]\
                                       >(maxdens*0.01)][0]
        self.t_des[species] = temp[self.compd[species]>(maxdens*0.01)][0]
        #- e-10 as limit for minimum is ok, because if the shell is
        #- 100000 R*, mass conservation dictates ~ (10^5)^2 = 10^10
        #- (r^2 law) decrease in density. Shells this big don't occur
        #- anyway.
        self.r_max[species] = self.rad[self.compd[species]>mindens][-1]
        self.t_min[species] = temp[self.compd[species]>mindens][-1]
def readFile(self, wildcard='*', *args, **kwargs):
    '''
    Read a filename and store its contents in the Reader object.

    The contents are stored in a dictionary, with the filename as key.
    Note that one filename can refer to multiple files with the use of a
    wildcard character. This character can be replaced upon calling this
    method. The contents can be returned with the method getFile.

    If the file is not found, an empty list is stored instead.

    Additional args/kwargs are passed to DataIO.readFile (such as
    delimiter and replace_spaces).

    @keyword wildcard: if a wildcard character is present in the filename,
                       it can be replaced here.

                       (default: '*')
    @type wildcard: string

    '''

    #-- Replace the wildcard if it is present.
    fn = self.fn.replace('*', wildcard)

    #-- Read the file.
    self['contents'][fn] = DataIO.readFile(fn, *args, **kwargs)
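def _exampleReaderWildcard(reader):
    '''
    A usage sketch, not part of the original code: assuming a Reader
    instance was created with a wildcarded filename such as
    'spectrum_*.dat' (hypothetical), the wildcard is filled in per call,
    and the extra kwargs are passed through to DataIO.readFile.
    '''
    reader.readFile(wildcard='band1', delimiter=None, replace_spaces=0)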
def readData(self):
    '''
    Read in data, taking special care of NaNs.

    Four columns are taken as input! wave - contsub - original - continuum

    Two-column input still works, but may result in errors elsewhere in
    the code.

    Data are always read in Jy versus micron, for both SPIRE and PACS.

    '''

    self.data_wave_list = []
    self.data_flux_list = []
    self.data_original_list = []
    self.data_continuum_list = []
    for filename in self.data_filenames:
        data = DataIO.readCols(filename=filename, nans=1)
        self.data_wave_list.append(data[0])
        self.data_flux_list.append(data[1])
        if len(data) == 2:
            continue
        self.data_original_list.append(data[2])
        self.data_continuum_list.append(data[3])
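def _exampleReadDataInput():
    '''
    A sketch of the four-column input expected by readData, not part of
    the original code; the filename is hypothetical. NaNs are preserved by
    readCols (nans=1), so gaps in the spectra survive the read:

        wave [micron]   contsub [Jy]   original [Jy]   continuum [Jy]
        55.0000         0.1234         1.2345          1.1111
        55.0125         nan            nan             nan
    '''
    return DataIO.readCols(filename='pacs_spectrum.dat', nans=1)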
def readData(self):
    '''
    Read the raw SED data.

    '''

    for dt, fn in zip(self.data_types, self.data_filenames):
        data = DataIO.readCols(fn, nans=0)
        #-- Currently, error bars are only available for these data types.
        if 'Photometric' in dt or 'MIDI' in dt or 'Sacha' in dt:
            #-- Sort MIDI data and clip them to the 8-13 micron N band.
            if 'MIDI' in dt:
                cdat = [dd[(data[0] <= 13.) * (data[0] >= 8.)] for dd in data]
                i = argsort(cdat[0])
                self.data[(dt, fn)] = (cdat[0][i], cdat[1][i], cdat[2][i])
            else:
                self.data[(dt, fn)] = (data[0], data[1], data[2])
            if dt == 'Photometric_IvS':
                self.photbands = data[3]
                self.photwave = data[0]
        else:
            #-- Still sorting for PACS. Obsolete when separate bands for
            #   PACS are available.
            i = argsort(data[0])
            self.data[(dt, fn)] = (data[0][i], data[1][i])
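def _exampleMidiSelection():
    '''
    A self-contained sketch (synthetic numbers, not part of the original
    code) of the MIDI treatment in readData: clip the spectrum to the
    8-13 micron N band, then sort by wavelength before storing.
    '''
    import numpy as np
    wave = np.array([13.5, 9.0, 12.0, 7.5, 10.5])
    flux = np.array([5., 2., 4., 1., 3.])
    sel = (wave <= 13.) * (wave >= 8.)
    i = np.argsort(wave[sel])
    return wave[sel][i], flux[sel][i]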
def setOutputFolders(self):
    '''
    Set the output folders. If the folders do not already exist, they are
    created.

    The locations are saved in cc.path for later use, but this is
    generally only done within a ComboCode session. Each module sets these
    paths itself.

    '''

    cc.path.gout = os.path.join(cc.path.gastronoom, self.path_gastronoom)
    cc.path.mout = os.path.join(cc.path.mcmax, self.path_mcmax)
    DataIO.testFolderExistence(cc.path.gout)
    DataIO.testFolderExistence(cc.path.mout)
def __init__(self, path_mcmax='runTest', replace_db_entry=0, db=None,
             new_entries=[]):
    """
    Initialize an MCMax-specific instance of ModelingSession.

    @keyword db: the MCMax database

                 (default: None)
    @type db: Database()
    @keyword replace_db_entry: replace an entry in the MCMax database with
                               a newly calculated model with a new model id
                               (for instance if some general data not
                               included in the inputfiles is changed)

                               (default: 0)
    @type replace_db_entry: bool
    @keyword path_mcmax: modeling folder in MCMax home

                         (default: 'runTest')
    @type path_mcmax: string
    @keyword new_entries: The new model_ids when replace_db_entry is 1 of
                          other models in the grid. These are not replaced!

                          (default: [])
    @type new_entries: list[str]

    """

    super(MCMax, self).__init__(code='MCMax', path=path_mcmax,
                                replace_db_entry=replace_db_entry,
                                new_entries=new_entries)

    #-- Convenience path
    cc.path.mout = os.path.join(cc.path.mcmax, self.path)
    DataIO.testFolderExistence(os.path.join(cc.path.mout,
                                            'data_for_gastronoom'))

    self.db = db
    self.mcmax_done = False

    #-- Read the standard input file with all parameters that should be
    #   included, as well as some dust-specific information.
    inputfilename = os.path.join(cc.path.aux, 'inputMCMax.dat')
    self.standard_inputfile = DataIO.readDict(inputfilename,
                                              convert_floats=1,
                                              convert_ints=1,
                                              comment_chars=['#', '*'])
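def _exampleMCMaxSession():
    '''
    A usage sketch, not part of the original code: set up an MCMax
    modeling session in the 'runTest' subfolder with an existing database.
    The database filename is hypothetical.
    '''
    db_fn = os.path.join(cc.path.mcmax, 'runTest', 'MCMax_models.db')
    session = MCMax(path_mcmax='runTest', replace_db_entry=0,
                    db=Database(db_fn))
    return session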
def checkVlsr(self):
    """
    Check if the vlsr was set correctly. If not, it is taken from Star.dat.

    It is assumed the vlsr in the fits file is correct within 25%. If it
    is not, the value from Star.dat is used.

    """

    try:
        star_index = DataIO.getInputData(keyword='STAR_NAME')\
                          .index(self.star_name)
        vlsr = DataIO.getInputData(keyword='V_LSR', rindex=star_index)
    except (KeyError, ValueError):
        print('Star not found in Star.dat for %s. ' % (self.filename) +
              'Add star to Star.dat!')
        raise IOError()
def coolingDbRetrieval(path_gastronoom, r_outer=None):
    '''
    Reconstruct a cooling database based on the mline database and the
    GASTRoNOoM inputfiles.

    Only works if the water MOLECULE convenience keywords, the MOLECULE
    R_OUTER and/or the MOLECULE ENHANCE_ABUNDANCE_FACTOR keywords were not
    adapted!

    @param path_gastronoom: The path_gastronoom to the output folder
    @type path_gastronoom: string

    @keyword r_outer: The outer radius used for the cooling model,
                      regardless of the outer_r_mode parameter.

                      (default: None)
    @type r_outer: float

    '''

    #-- Convenience path
    cc.path.gout = os.path.join(cc.path.gastronoom, path_gastronoom)

    coolkeys_path = os.path.join(cc.path.aux, 'Input_Keywords_Cooling.dat')
    coolkeys = DataIO.readCols(coolkeys_path, make_float=0, make_array=0)[0]
    extra_keys = ['ENHANCE_ABUNDANCE_FACTOR', 'MOLECULE_TABLE',
                  'ISOTOPE_TABLE', 'ABUNDANCE_FILENAME',
                  'NUMBER_INPUT_ABUNDANCE_VALUES', 'KEYWORD_TABLE']
    coolkeys = [k for k in coolkeys if k not in extra_keys]

    cool_db_path = os.path.join(cc.path.gout, 'GASTRoNOoM_cooling_models.db')
    ml_db_path = os.path.join(cc.path.gout, 'GASTRoNOoM_mline_models.db')

    #-- Keep the old cooling database as a backup before rebuilding it.
    subprocess.call('mv %s %s_backupCoolDbRetrieval'
                    % (cool_db_path, cool_db_path), shell=True)

    cool_db = Database(db_path=cool_db_path)
    ml_db = Database(db_path=ml_db_path)
    for ml_id in ml_db.keys():
        file_path = os.path.join(cc.path.gout, 'models',
                                 'gastronoom_%s.inp' % ml_id)
        input_dict = DataIO.readDict(file_path)
        input_dict = dict([(k, v) for k, v in input_dict.items()
                           if k in coolkeys])
        cool_db[ml_id] = input_dict
        if r_outer is not None:
            cool_db[ml_id]['R_OUTER'] = r_outer
    cool_db.sync()
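def _exampleCoolingDbRetrieval():
    '''
    A usage sketch, not part of the original code: rebuild the cooling
    database for a (hypothetical) model folder 'runTest', forcing the
    outer radius that was used for the grid. The old cooling database
    survives as the '_backupCoolDbRetrieval' copy.
    '''
    coolingDbRetrieval(path_gastronoom='runTest', r_outer=2e16)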
def setSpecies(self, species):
    """
    Change the species of the current CustomOpacity instance.

    @param species: the species short name
    @type species: string

    """

    self.species = species
    self.index = DataIO.getInputData(keyword='SPECIES_SHORT',
                                     filename='Dust.dat')\
                       .index(self.species)
    self.filename = DataIO.getInputData(keyword='PART_FILE',
                                        filename='Dust.dat',
                                        rindex=self.index)
    fn = os.path.join(cc.path.mopac, self.filename)
    self.input_data = DataIO.readFile(filename=fn, delimiter=' ')
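def _exampleSetSpecies(opac):
    '''
    A usage sketch, not part of the original code: repoint an existing
    CustomOpacity instance to a different dust species, assuming the
    (hypothetical) short name 'AMCSPH' is listed under SPECIES_SHORT in
    Dust.dat.
    '''
    opac.setSpecies(species='AMCSPH')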