def _parse_yanny(self):
    """
    Parse the yanny file (provided by :attr:`file`) for the bandhead
    database.

    Returns:
        :obj:`list`: The list of
        :class:`mangadap.par.parset.ParSet` instances for each line
        of the database.
    """
    # Read the yanny parameter file
    par = yanny(filename=self.file, raw=True)
    if len(par['DAPABI']['index']) == 0:
        raise ValueError('Could not find DAPABI entries in {0}!'.format(
                            self.file))

    # Flag dummy bands (any negative wavelength in any of the three
    # band definitions) and warn the user about them.
    self.dummy = numpy.any(numpy.array(par['DAPABI']['blueside']) < 0, axis=1)
    for side in ('redside', 'primary'):
        self.dummy |= numpy.any(numpy.array(par['DAPABI'][side]) < 0, axis=1)
    if numpy.sum(self.dummy) > 0:
        warnings.warn(
            'Bands with negative wavelengths are used to insert dummy values.'
            ' Ignoring input bands with indices: {0}'.format(
                numpy.array(par['DAPABI']['index'])[self.dummy]))

    # Build one BandPassFilterPar per database row
    self.size = len(par['DAPABI']['index'])
    parlist = []
    for i in range(self.size):
        invac = par['DAPABI']['waveref'][i] == 'vac'
        comp = par['DAPABI']['component'][i] != 0
        # Convert band edges from air to vacuum wavelengths if needed
        _vac = (lambda w: w) if invac else (lambda w: airtovac(numpy.array(w)))
        parlist.append(BandPassFilterPar(index=par['DAPABI']['index'][i],
                                         name=par['DAPABI']['name'][i],
                                         blueside=_vac(par['DAPABI']['blueside'][i]),
                                         redside=_vac(par['DAPABI']['redside'][i]),
                                         primary=_vac(par['DAPABI']['primary'][i]),
                                         units=par['DAPABI']['units'][i],
                                         integrand='flambda',
                                         component=comp))
    return parlist
def _parse_yanny(self):
    """
    Parse the yanny file (provided by :attr:`file`) for the
    emission-line database.

    Returns:
        :obj:`list`: The list of
        :class:`~mangadap.par.parset.ParSet` instances for each line
        of the database.
    """
    # Read the yanny parameter file
    par = yanny(filename=self.file, raw=True)
    db = par['DAPEML']
    if len(db['index']) == 0:
        raise ValueError('Could not find DAPEML entries in {0}!'.format(
                            self.file))

    # Build one EmissionLinePar per database row
    self.size = len(db['index'])
    parlist = []
    for i in range(self.size):
        invac = db['waveref'][i] == 'vac'
        # First tie entry is the index of the tied line ('None' -> untied);
        # the remaining entries are the tied-parameter flags.
        tie = db['tie'][i]
        tie_index = -1 if tie[0] == 'None' else int(tie[0])
        tie_par = [None if t == 'None' else t for t in tie[1:]]
        # Convert wavelengths from air to vacuum if needed
        restwave = db['restwave'][i] if invac else airtovac(db['restwave'][i])
        blueside = db['blueside'][i] if invac \
                        else airtovac(numpy.array(db['blueside'][i]))
        redside = db['redside'][i] if invac \
                        else airtovac(numpy.array(db['redside'][i]))
        parlist.append(EmissionLinePar(index=db['index'][i],
                                       name=db['name'][i],
                                       restwave=restwave,
                                       action=db['action'][i],
                                       tie_index=tie_index,
                                       tie_par=tie_par,
                                       blueside=blueside,
                                       redside=redside))
    return parlist
def _parse_yanny(self):
    """
    Parse the yanny file (provided by :attr:`file`) for the artifact
    database.

    Returns:
        :obj:`list`: The list of
        :class:`mangadap.par.parset.ParSet` instances for each line
        of the database.

    Raises:
        ValueError: Raised if no DAPART entries are found in the file.
    """
    # Read the yanny file
    par = yanny(filename=self.file, raw=True)
    if len(par['DAPART']['index']) == 0:
        raise ValueError('Could not find DAPART entries in {0}!'.format(self.file))

    # Setup the array of artifact database parameters
    self.size = len(par['DAPART']['index'])
    parlist = []
    for i in range(self.size):
        invac = par['DAPART']['waveref'][i] == 'vac'
        # BUG FIX: the air-wavelength branch previously indexed the
        # 'DAPEML' (emission-line) table instead of 'DAPART', which would
        # fail (or read the wrong data) for any artifact database with
        # air-referenced wavelengths.  Both branches now use 'DAPART' and
        # return a numpy array.
        waverange = numpy.asarray(par['DAPART']['waverange'][i])
        if not invac:
            waverange = airtovac(waverange)
        parlist += [ArtifactPar(index=par['DAPART']['index'][i],
                                name=par['DAPART']['name'][i],
                                waverange=waverange)]
    return parlist
def available_reduction_assessments():
    r"""
    Return the list of available reduction assessment methods.

    To get a list of default methods provided by the DAP do::

        from mangadap.proc.reductionassessments import available_reduction_assessments
        rdx_methods = available_reduction_assessments()
        print(rdx_methods)

    Each element in the `rdx_methods` list is an instance of
    :class:`ReductionAssessmentDef`, which is printed using the
    :class:`ParSet` base class representation function.

    New methods can be included by adding ini config files to
    `$MANGADAP_DIR/mangadap/config/reduction_assessments`. See an
    example file at
    `$MANGADAP_DIR/example_ini/example_reduction_assessment_config.ini`.

    Returns:
        :obj:`list`: A list of :class:`ReductionAssessmentDef`
        objects, each defining a separate assessment method.

    Raises:
        IOError:
            Raised if no reduction assessment configuration files
            could be found.
        KeyError:
            Raised if the assessment method keywords are not all
            unique.
        ValueError:
            Raised if a wavelength range or a response function are
            not defined by any of the methods.
    """
    # Find the configuration files; fault if there are none
    search_dir = os.path.join(defaults.dap_config_root(), 'reduction_assessments')
    ini_files = glob.glob(os.path.join(search_dir, '*.ini'))
    if not ini_files:
        raise IOError('Could not find any configuration files in {0} !'.format(
                       search_dir))

    # Build the list of library definitions
    assessment_methods = []
    for f in ini_files:
        # Read the config file
        cnfg = DefaultConfig(f=f, interpolate=True)
        # Ensure it has the necessary elements to define the template library
        def_range, def_response = validate_reduction_assessment_config(cnfg)
        in_vacuum = cnfg.getbool('in_vacuum', default=False)

        # Keywords common to both definition types
        common = dict(key=cnfg['key'],
                      covariance=cnfg.getbool('covariance', default=False),
                      minimum_frac=cnfg.getfloat('minimum_frac', default=0.8))

        if def_range:
            # Method defined by a wavelength range
            waverange = cnfg.getlist('wave_limits', evaluate=True)
            if not in_vacuum:
                waverange = airtovac(waverange)
            assessment_methods.append(
                    ReductionAssessmentDef(waverange=waverange, **common))
        elif def_response:
            # Method defined by a response function read from file
            response = numpy.genfromtxt(cnfg['response_function_file'])[:, :2]
            if not in_vacuum:
                response[:, 0] = airtovac(response[:, 0])
            assessment_methods.append(
                    ReductionAssessmentDef(response_func=response, **common))
        else:
            raise ValueError('Must define a wavelength range or a response function.')

    # Check the keywords of the libraries are all unique
    keys = [method['key'] for method in assessment_methods]
    if len(numpy.unique(numpy.array(keys))) != len(assessment_methods):
        raise KeyError('Reduction assessment method keywords are not all unique!')

    # Return the default list of assessment methods
    return assessment_methods
# NOTE(review): this fragment ends with a dangling `else:` whose matching
# `if` header is outside the visible chunk — presumably a check that the
# vacuum-wavelength output directory does not already exist (see the `else`
# message).  The indentation below reconstructs the only structure under
# which the fragment parses; confirm against the full file.

    # Convert each spectrum (and its redshift table) from air to vacuum
    # wavelengths, writing '<dir>_spec1D_vac' copies of both files.
    for filename_redshifts in filelist_redshifts:
        print(filename_redshifts)
        # The 1D-spectrum file shares the redshift file's name without the
        # '_redshift' suffix.
        filename_spec1D = filename_redshifts.replace(
            '_redshift.fits', '.fits')
        print(filename_spec1D)
        print(filename_redshifts)
        redshifts = Table.read(filename_redshifts)
        spec = Table.read(filename_spec1D)
        print(redshifts)
        print(spec)
        # Shift the redshifts by the air->vacuum velocity offset dv_vacair
        # (km/s, defined elsewhere), scaled by (1 + z).
        redshifts['z'] = redshifts['z'] + dv_vacair / c_kms * (
            1 + redshifts['z'])
        # overwrite=False: fail rather than clobber an existing vacuum copy
        redshifts.write(filename_redshifts.replace('_spec1D', '_spec1D_vac'),
                        overwrite=False)
        # Convert the wavelength column itself from air to vacuum
        spec['wave'] = airtovac(spec['wave'] * u.Angstrom)
        spec.write(filename_spec1D.replace('_spec1D', '_spec1D_vac'),
                   overwrite=False)
        print('')
    # Swap the directories: keep the air-wavelength originals under
    # '<m>_spec1D_air' and promote the vacuum copies to '<m>_spec1D'.
    os.rename('{}_spec1D'.format(args.m), '{}_spec1D_air'.format(args.m))
    os.rename('{}_spec1D_vac'.format(args.m), '{}_spec1D'.format(args.m))
else:
    print('Vacuum directory already exists, will not overwrite')