def onto_raster(self, output_raster, resample_errors=True, resample_mask=True):
    """
    Resample this spectrum onto a user-specified wavelength raster.

    :param output_raster:
        The raster we should resample this Spectrum onto.
    :param resample_errors:
        Should we bother resampling the errors as well as the data itself? If not, the errors will be
        meaningless after the resampling operation, but the function will return 30% quicker.
    :param resample_mask:
        Should we bother resampling the spectrum's mask as well as the data itself? If not, the mask will
        be cleared, but the function will return 30% quicker.
    :return:
        New Spectrum object.
    """

    new_values = self._resample(x_new=output_raster, x_in=self._input.wavelengths, y_in=self._input.values)

    if resample_errors:
        new_value_errors = self._resample(x_new=output_raster, x_in=self._input.wavelengths,
                                          y_in=self._input.value_errors)
    else:
        new_value_errors = np.zeros_like(new_values)

    output = Spectrum(wavelengths=output_raster,
                      values=new_values,
                      value_errors=new_value_errors,
                      metadata=self._input.metadata.copy())

    if resample_mask and self._input.mask_set:
        output.mask = self._resample(x_new=output_raster, x_in=self._input.wavelengths,
                                     y_in=self._input.mask) > 0.5
        output.mask_set = not np.all(output.mask)

    return output
def gaussian_convolve(self, sigma):
    """
    Convolve this spectrum with a Gaussian PSF.

    :param sigma:
        Standard deviation of the point spread function, in pixels.
    :return:
        New Spectrum object.
    """

    new_values = gaussian_filter1d(input=self._input.values, sigma=sigma)

    output = Spectrum(wavelengths=self._input.wavelengths,
                      values=new_values,
                      value_errors=self._input.value_errors,
                      metadata=self._input.metadata.copy())

    if self._input.mask_set:
        output.copy_mask_from(self._input)

    return output
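# The two helpers above are designed to chain: degrading a high-resolution
# spectrum means convolving it, then resampling onto a coarser raster -- the
# same pattern the noise-adding wrapper further down uses for each wavelength
# arm. A minimal sketch; the import paths, raster values and sigma are
# illustrative assumptions, not part of this module.
# from fourgp_speclib import Spectrum                              # assumed import path
# from fourgp_degrade import SpectrumConvolver, SpectrumResampler  # assumed import path
import numpy as np

raster_in = np.linspace(4000, 7000, 8192)  # Angstroms; illustrative
spectrum = Spectrum(wavelengths=raster_in,
                    values=np.ones_like(raster_in),
                    value_errors=np.zeros_like(raster_in))

convolved = SpectrumConvolver(spectrum).gaussian_convolve(sigma=1.5)  # sigma in pixels
degraded = SpectrumResampler(convolved).onto_raster(np.linspace(4000, 7000, 2048))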
else:
    header_dictionary.update(abundance_data[star_name])

# 1. Extract continuum-normalised spectrum from FITS file
data = f[1].data
wavelengths = data['Arg']
flux = data['Fun']
flux_errors = data['Var']

# Create a unique ID for this PEPSI spectrum
unique_id = hashlib.md5(os.urandom(32)).hexdigest()[:16]
header_dictionary["uid"] = unique_id

pepsi_spectrum = Spectrum(wavelengths=wavelengths,
                          values=flux,
                          value_errors=flux_errors,
                          metadata=header_dictionary)

output_libraries['original'].insert(spectra=pepsi_spectrum,
                                    filenames=star_name,
                                    metadata_list={'continuum_normalised': 1})

output_libraries['original'].insert(spectra=pepsi_spectrum,
                                    filenames=star_name,
                                    metadata_list={'continuum_normalised': 0})

# 2. Correct radial velocity
# pepsi_spectrum_rest_frame = pepsi_spectrum.correct_radial_velocity(radial_velocity)
]

grid_axis_index_combinations = itertools.product(*grid_axis_indices)

# Turn Brani's set of templates into a spectrum library with path specified above
library_path = os_path.join(workspace, target_library_name)
library = SpectrumLibrarySqlite(path=library_path, create=True)

# Brani's template spectra do not have any error vectors associated with them, so add an array of zeros
errors_dummy = np.zeros_like(wavelength_raster)

# Import each template spectrum in turn
for i, axis_indices in enumerate(grid_axis_index_combinations):
    filename = "template{:06d}".format(i)
    metadata = {"Starname": filename}
    item = flux_templates
    for axis_counter, index in enumerate(axis_indices):
        metadata_key = grid_axes[axis_counter][0]
        metadata_value = grid_axis_values[axis_counter][index]
        metadata[metadata_key] = metadata_value
        metadata[metadata_key + "_index"] = index
        item = item[index]

    # Turn data into a Spectrum object
    spectrum = Spectrum(wavelengths=wavelength_raster,
                        values=item,
                        value_errors=errors_dummy,
                        metadata=metadata)

    # Import spectrum into our SpectrumLibrary
    library.insert(spectra=spectrum, filenames=filename)
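# For clarity, here is how the index combinations walk the nested template
# grid. The two axes and their values below are hypothetical; the indexing
# logic mirrors the loop above, where each successive index descends one
# level into the nested flux_templates structure.
import itertools

grid_axes = [("Teff", None), ("[Fe/H]", None)]
grid_axis_values = [[4000.0, 5000.0, 6000.0], [-1.0, 0.0]]
grid_axis_indices = [range(len(values)) for values in grid_axis_values]

for axis_indices in itertools.product(*grid_axis_indices):
    # axis_indices runs (0, 0), (0, 1), (1, 0), ... (2, 1): one tuple per grid cell
    metadata = {}
    for axis_counter, index in enumerate(axis_indices):
        metadata[grid_axes[axis_counter][0]] = grid_axis_values[axis_counter][index]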
def do_synthesis(self):
    # Iterate over the spectra we're supposed to be synthesizing
    with open(self.logfile, "w") as result_log:
        for star in self.star_list:
            star_name = star['name']
            unique_id = hashlib.md5(os.urandom(32)).hexdigest()[:16]

            metadata = {
                "Starname": str(star_name),
                "uid": str(unique_id),
                "Teff": float(star['Teff']),
                "[Fe/H]": float(star['[Fe/H]']),
                "logg": float(star['logg']),
                "microturbulence": float(star["microturbulence"])
            }

            # User can specify that we should only do every nth spectrum, if we're running in parallel
            self.counter_output += 1
            if (self.args.limit > 0) and (self.counter_output > self.args.limit):
                break
            if (self.counter_output - self.args.skip) % self.args.every != 0:
                continue

            # Pass list of the abundances of individual elements to TurboSpectrum
            free_abundances = dict(star['free_abundances'])
            for element, abundance in list(free_abundances.items()):
                metadata["[{}/H]".format(element)] = float(abundance)

            # Propagate all ionisation states into metadata
            metadata.update(star['extra_metadata'])

            # Configure Turbospectrum with the stellar parameters of the next star
            self.synthesizer.configure(
                t_eff=float(star['Teff']),
                metallicity=float(star['[Fe/H]']),
                log_g=float(star['logg']),
                stellar_mass=1 if "stellar_mass" not in star else star["stellar_mass"],
                turbulent_velocity=1 if "microturbulence" not in star else star["microturbulence"],
                free_abundances=free_abundances
            )

            # Make spectrum
            time_start = time.time()
            turbospectrum_out = self.synthesizer.synthesise()
            time_end = time.time()

            # Log synthesizer status
            logfile_this = os.path.join(self.args.log_to, "{}.log".format(star_name))
            with open(logfile_this, "w") as log_this:
                log_this.write(json.dumps(turbospectrum_out))

            # Check for errors
            errors = turbospectrum_out['errors']
            if errors:
                result_log.write("[{}] {:6.0f} sec {}: {}\n".format(time.asctime(), time_end - time_start,
                                                                    star_name, errors))
                logging.warning("Star <{}> could not be synthesised. Errors were: {}".
                                format(star_name, errors))
                result_log.flush()
                continue
            else:
                logging.info("Synthesis completed without error.")

            # Fetch filename of the spectrum we just generated
            filepath = os_path.join(turbospectrum_out["output_file"])

            # Insert spectrum into SpectrumLibrary
            try:
                filename = "spectrum_{:08d}".format(self.counter_output)

                # First import continuum-normalised spectrum, which is in columns 1 and 2
                metadata['continuum_normalised'] = 1
                spectrum = Spectrum.from_file(filename=filepath, metadata=metadata, columns=(0, 1),
                                              binary=False)
                self.library.insert(spectra=spectrum, filenames=filename)

                # Then import version with continuum, which is in columns 1 and 3
                metadata['continuum_normalised'] = 0
                spectrum = Spectrum.from_file(filename=filepath, metadata=metadata, columns=(0, 2),
                                              binary=False)
                self.library.insert(spectra=spectrum, filenames=filename)
            except (ValueError, IndexError):
                result_log.write("[{}] {:6.0f} sec {}: {}\n".format(time.asctime(), time_end - time_start,
                                                                    star_name, "Could not read bsyn output"))
                result_log.flush()
                continue

            # Update log file to show our progress
            result_log.write("[{}] {:6.0f} sec {}: {}\n".format(time.asctime(), time_end - time_start,
                                                                star_name, "OK"))
            result_log.flush()
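# For reference, the Turbospectrum ASCII column convention used by the two
# imports above (zero-based indices; the filename here is hypothetical):
# column 0 is wavelength, column 1 the continuum-normalised flux, and
# column 2 the absolute flux.
continuum_normalised = Spectrum.from_file(filename="star.spec", metadata={},
                                          columns=(0, 1), binary=False)
with_continuum = Spectrum.from_file(filename="star.spec", metadata={},
                                    columns=(0, 2), binary=False)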
parser.set_defaults(create=True)
args = parser.parse_args()

# Set path to workspace where we create libraries of spectra
our_path = os_path.split(os_path.abspath(__file__))[0]
workspace = args.workspace if args.workspace else os_path.join(our_path, "../../../workspace")
os.system("mkdir -p {}".format(workspace))

# Create new spectrum library
library_name = re.sub("/", "_", args.library)
library_path = os_path.join(workspace, library_name)
library = SpectrumLibrarySqlite(path=library_path, create=args.create)

# Open FITS spectrum
f = fits.open(args.filename)
data = f[1].data
wavelengths = data['LAMBDA']
fluxes = data['FLUX']

# Create 4GP spectrum object
spectrum = Spectrum(wavelengths=wavelengths,
                    values=fluxes,
                    value_errors=np.zeros_like(wavelengths),
                    metadata={
                        "imported_from": args.filename
                    })

# Insert spectrum object into spectrum library
library.insert(spectra=spectrum, filenames=os_path.split(args.filename)[1])
library_path = os_path.join(workspace, out_library)
library = SpectrumLibrarySqlite(path=library_path, create=True)

# Import each star in turn
for star in training_set:
    filepath = os_path.join(test_spectra_path, training_set_dir,
                            "{}_SNR250.txt".format(star["Starname"]))
    filename = os_path.split(filepath)[1]

    metadata = astropy_row_to_dict(star)
    metadata["continuum_normalised"] = 1
    metadata["SNR"] = 250

    # Read star from text file and import it into our SpectrumLibrary
    spectrum = Spectrum.from_file(filename=filepath, metadata=metadata, binary=False)
    library.insert(spectra=spectrum, filenames=filename)

# Import high- and low-resolution test sets into spectrum libraries
for test_set_dir, out_library in (("testset/HRS", "hawkins_apokasc_test_set_hrs"),
                                  ("testset/LRS", "hawkins_apokasc_test_set_lrs")):
    # Turn test set into a spectrum library with path specified above
    library_path = os_path.join(workspace, out_library)
    library = SpectrumLibrarySqlite(path=library_path, create=True)

    # Import each star in turn
    test_set = glob.glob(
# Recreate a Cannon instance, using the saved state
model = CannonInstance_2018_01_09(training_set=training_spectra,
                                  load_from_file=args.cannon + ".cannon",
                                  label_names=cannon_output["labels"],
                                  censors=censoring_masks,
                                  threads=None)
cannon = model._model

# Create new spectrum library for output
library_name = re.sub("/", "_", args.output_library)
library_path = os_path.join(workspace, library_name)
output_library = SpectrumLibrarySqlite(path=library_path, create=args.create)

# Query Cannon's internal model of each test spectrum in turn
for test_item in cannon_output['spectra']:
    label_values = test_item['cannon_output'].copy()
    label_vector = np.asarray([label_values[key] for key in cannon_output["labels"]])
    cannon_predicted_spectrum = cannon.predict(label_vector)[0]

    spectrum_object = Spectrum(wavelengths=cannon.dispersion[overall_mask],
                               values=cannon_predicted_spectrum[overall_mask],
                               value_errors=cannon.s2[overall_mask])

    output_library.insert(spectra=spectrum_object,
                          filenames=test_item['Starname'],
                          metadata_list=dict_merge(test_item['spectrum_metadata'],
                                                   test_item['cannon_output']))
def process_spectra(self, spectra_list):
    """
    Add Gaussian noise to a list of 4GP Spectrum objects.

    :param spectra_list:
        A list of the spectra we should add noise to. Each entry in the list should be a list or tuple
        with two entries: (input_spectrum, input_spectrum_continuum_normalised). These reflect the
        contents of the third and second columns of Turbospectrum's ASCII output respectively.
    :type spectra_list:
        (list, tuple) of (list, tuple) of Spectrum objects
    """

    output = []  # output[ spectrum_number ][ snr ] = [ full_spectrum, continuum_normalised ]

    for spectrum in spectra_list:
        # Convolve and resample onto new wavelength raster.
        # Each wavelength arm is separately convolved with a kernel set by its mean pixel spacing.
        resampled_spectrum = []  # resampled_spectrum[ 0=full spectrum ; 1=continuum normalised ][ wavelength_arm ]
        for index, item in enumerate(spectrum):
            resampled_spectrum.append([])
            for (raster, pixel_spacing) in self.wavelength_arms:
                convolver = SpectrumConvolver(item)
                convolved = convolver.gaussian_convolve(pixel_spacing)
                resampler = SpectrumResampler(convolved)
                resampled = resampler.onto_raster(raster)
                resampled_spectrum[-1].append(resampled.values)

        # Calculate continuum spectrum by dividing the flux-normalised spectrum by the continuum-normalised spectrum
        continuum_per_arm = []
        for index_arm in range(len(self.wavelength_arms)):
            continuum_per_arm.append(resampled_spectrum[0][index_arm] / resampled_spectrum[1][index_arm])
        continuum = np.concatenate(continuum_per_arm)

        # Measure the integrated signal within the range of each SNR definition
        mean_signal_per_pixel = {}
        for snr_definition_name, wavelength_min, wavelength_max in self.snr_definitions:
            indices = (wavelength_min <= self.wavelength_raster) * (self.wavelength_raster <= wavelength_max)
            pixel_count = np.sum(indices)
            pixel_sum = np.sum(continuum[indices])
            mean_signal_per_pixel[snr_definition_name] = pixel_sum / pixel_count

        # Add noise to spectra at each SNR in turn
        output_item = {}
        output.append(output_item)
        for snr_value in self.snr_list:
            output_values = np.zeros(0)
            output_value_errors = np.zeros(0)
            output_values_cn = np.zeros(0)
            output_value_errors_cn = np.zeros(0)

            # Add noise to each wavelength arm individually
            for index_arm, (raster, pixel_spacing) in enumerate(self.wavelength_arms):
                snr_definition = self.use_snr_definitions[index_arm]
                signal_level = mean_signal_per_pixel[snr_definition]
                noise_level = signal_level / snr_value
                arm_length = len(raster)

                # Synthesize some Gaussian noise
                noise = np.random.normal(loc=0, scale=noise_level, size=arm_length)

                # Add noise into flux-normalised spectrum
                noised_signal = resampled_spectrum[0][index_arm] + noise
                noised_signal_errors = np.ones_like(noised_signal) * noise_level

                # Compute the new continuum-normalised spectrum by dividing by the pure continuum we computed
                noised_signal_cn = noised_signal / continuum_per_arm[index_arm]
                noised_signal_errors_cn = noised_signal_errors / continuum_per_arm[index_arm]

                # Concatenate the various wavelength arms together
                output_values = np.append(output_values, noised_signal)
                output_value_errors = np.append(output_value_errors, noised_signal_errors)
                output_values_cn = np.append(output_values_cn, noised_signal_cn)
                output_value_errors_cn = np.append(output_value_errors_cn, noised_signal_errors_cn)

            # Convert output spectra into Spectrum objects
            metadata = spectrum[0].metadata.copy()
            metadata['continuum_normalised'] = 0
            metadata['SNR'] = float(snr_value)
            output_spectrum = Spectrum(wavelengths=self.wavelength_raster,
                                       values=output_values,
                                       value_errors=output_value_errors,
                                       metadata=metadata.copy())

            metadata['continuum_normalised'] = 1
            output_spectrum_cn = Spectrum(wavelengths=self.wavelength_raster,
                                          values=output_values_cn,
                                          value_errors=output_value_errors_cn,
                                          metadata=metadata.copy())

            # Add to output data structure
            output_item[snr_value] = (output_spectrum, output_spectrum_cn)

    # Return output spectra to user
    # output[ spectrum_number ][ snr ] = [ flux normalised , continuum normalised ]
    return output
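# Self-contained sketch of the noise model above: per-pixel Gaussian noise at
# a level of (mean continuum signal) / (target SNR), with the quoted errors
# set to that constant level. Numbers are illustrative.
import numpy as np

signal = np.ones(1000)                  # flux-normalised spectrum, continuum ~ 1
snr_value = 50.0                        # target signal-to-noise ratio
noise_level = signal.mean() / snr_value

noised_signal = signal + np.random.normal(loc=0, scale=noise_level, size=signal.size)
noised_signal_errors = np.ones_like(signal) * noise_level

print(signal.mean() / noised_signal.std())  # recovered SNR; close to 50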
np.linspace(6, 10, 20)])


# An analytic function for a spectral line
def lorentzian(x, x0, fwhm):
    return 1 / pi * (0.5 * fwhm) / (np.square(x - x0) + np.square(0.5 * fwhm))


# Create a dummy spectrum
x_in = raster_original
absorption = (lorentzian(x_in, 3, 0.5) + lorentzian(x_in, 4.5, 0.2) + lorentzian(x_in, 6, 0.01) +
              lorentzian(x_in, 8, 0.2) + lorentzian(x_in, 9, 0.01))
spectrum_original = np.exp(-absorption)

spectrum_original_object = Spectrum(wavelengths=raster_original,
                                    values=spectrum_original,
                                    value_errors=np.zeros_like(spectrum_original))

# Create list of the spectra we're going to save to disk
output = [spectrum_original_object]

# Create a more sensible raster to sample the spectrum onto
resampler = SpectrumResampler(input_spectrum=spectrum_original_object)
for raster_new in [np.linspace(0, 12, 240),
                   np.linspace(0, 12, 48),
                   np.linspace(0, 12, 24)]:
    spectrum_new_object = resampler.onto_raster(output_raster=raster_new)
    output.append(spectrum_new_object)
def combine_spectra(self, template_numbers, path='outdir_LRS/', setup='LRS'):
    """
    4FS produces three output FITS files for each of 4MOST's LRS and HRS modes. These contain the three
    spectral arms. However, the Cannon expects all its data in a single unified spectrum. This function
    stitches the three arms into a single data set.

    Combine the multiple FITS files from 4FS into a single ASCII file with all arms included.

    NOTE: The point at which one arm is favoured over another in the stitching process is hardcoded and
    should be changed in future versions.

    :param template_numbers:
        Each spectrum is allocated an ID number by the function `generate_4FS_template_list` above when it
        is prepared for input into 4FS. These ID numbers are also present in the output filenames of the
        observed spectra that 4FS produces. This parameter is a list of the IDs of the spectra we are to
        post-process.
    :type template_numbers:
        List or tuple of ints
    :param path:
        Set the path to the 4FS output FITS files we are to post-process. This path is relative to
        `self.tmp_dir`.
    :type path:
        str
    :param setup:
        Set to either (1) 'LRS' - low res or (2) 'HRS' - high res. Specifies the mode in which 4MOST is
        working.
    :type setup:
        str
    :return:
        A list of 4GP Spectrum objects.
    """

    bands = ("blue", "green", "red")

    if setup == "LRS":
        snr_definitions = self.lrs_use_snr_definitions
    else:
        snr_definitions = self.hrs_use_snr_definitions

    # SNR definitions are red, green, blue. But we index the bands (blue, green, red).
    snr_definitions = snr_definitions[::-1]

    # Extract exposure times from 4FS summary file
    exposure_times = {}
    with open(os_path.join(path, '4FS_ETC_summary.txt')) as f:
        for line in f:
            words = line.split()
            if len(words) < 6:
                continue

            # First column of data file lists the run number
            try:
                run_counter = int(words[0])
            except ValueError:
                continue

            # Sixth column is exposure time in seconds
            if words[5] == "nan":
                t_exposure = np.nan
            else:
                t_exposure = float(words[5])
            exposure_times[str(run_counter)] = t_exposure

    # Start extracting spectra from FITS files
    output = {}
    run_counter = 0
    for i in template_numbers:
        output[i] = {}
        for snr in self.snr_list:
            run_counter += 1

            # Load in the three output spectra -- blue, green, red arms
            d = []
            for j, band in enumerate(bands):
                snr_definition = snr_definitions[j]
                if (snr_definition is not None) and (len(snr_definition) > 0):
                    fits_data = fits.open(os_path.join(path,
                                                       'specout_template_template_{}_SNR{:.1f}_{}_{}_{}.fits'.
                                                       format(i, snr, snr_definitions[j], setup, band)))
                    data = fits_data[2].data
                else:
                    data = {
                        'LAMBDA': np.zeros(0),
                        'REALISATION': np.zeros(0),
                        'FLUENCE': np.zeros(0),
                        'SKY': np.zeros(0),
                        'SNR': np.zeros(0)
                    }
                d.append(data)

            # Read the data from the FITS files
            wavelengths = [item['LAMBDA'] for item in d]
            fluxes = [(item['REALISATION'] - item['SKY']) for item in d]
            fluences = [item['FLUENCE'] for item in d]
            snrs = [item['SNR'] for item in d]

            # In 4MOST LRS mode, the wavelength bands overlap, so we cut off the ends of the bands.
            # Stitch arms based on where the SNR is best -- hardcoded here; may need changing in future versions
            if setup == 'LRS':
                indices = (
                    np.where(wavelengths[0] <= 5327.7)[0],
                    np.where((wavelengths[1] > 5327.7) & (wavelengths[1] <= 7031.7))[0],
                    np.where(wavelengths[2] > 7031.7)[0]
                )

                wavelengths = [item[indices[j]] for j, item in enumerate(wavelengths)]
                fluxes = [item[indices[j]] for j, item in enumerate(fluxes)]
                fluences = [item[indices[j]] for j, item in enumerate(fluences)]
                snrs = [item[indices[j]] for j, item in enumerate(snrs)]

            # Append the three arms of 4MOST together into a single spectrum
            wavelengths_final = np.concatenate(wavelengths)
            fluxes_final = np.concatenate(fluxes)
            # fluences_final = np.concatenate(fluences)
            snrs_final = np.concatenate(snrs)

            # Load continuum spectra
            continuum_filenames = [os_path.join(path,
                                                'specout_template_template_{}_c_{}_{}.fits'.
                                                format(i, setup, band))
                                   for band in bands]
            have_continuum_version = os.path.exists(continuum_filenames[0])
            if have_continuum_version:
                d_c = [fits.open(item)[2].data for item in continuum_filenames]

                # Read the data from the FITS files
                wavelengths_c = [item['LAMBDA'] for item in d_c]
                # fluxes_c = [(item['REALISATION'] - item['SKY']) for item in d_c]
                fluences_c = [item['FLUENCE'] for item in d_c]

                if setup == 'LRS':
                    indices = (
                        np.where(wavelengths_c[0] <= 5327.7)[0],
                        np.where((wavelengths_c[1] > 5327.7) & (wavelengths_c[1] <= 7031.7))[0],
                        np.where(wavelengths_c[2] > 7031.7)[0]
                    )

                    # wavelengths_c = [item[indices[j]] for j, item in enumerate(wavelengths_c)]
                    # fluxes_c = [item[indices[j]] for j, item in enumerate(fluxes_c)]
                    fluences_c = [item[indices[j]] for j, item in enumerate(fluences_c)]

                # Combine everything into one set of arrays to be saved
                # wavelengths_final_c = np.concatenate(wavelengths_c)
                fluxes_final_c = np.concatenate([fluence_c * max(fluence) / max(fluence_c)
                                                 for fluence, fluence_c in zip(fluences, fluences_c)
                                                 if len(fluence) > 0])

                # Do continuum normalisation
                normalised_fluxes_final = fluxes_final / fluxes_final_c

                # Remove bad pixels
                # Any pixels where flux > 2 or flux <= 0 get reset to zero for other downstream codes
                normalised_fluxes_final[
                    np.where((normalised_fluxes_final > 2.0) | (normalised_fluxes_final <= 0.00))[0]] = 0

            # Turn data into a 4GP Spectrum object
            metadata = self.metadata_store[i]

            # Add metadata about 4FS settings
            metadata['continuum_normalised'] = 0
            metadata['SNR'] = float(snr)
            metadata['SNR_per'] = "pixel" if self.snr_per_pixel else "A"
            metadata['magnitude'] = float(self.magnitude)
            metadata['exposure'] = exposure_times.get(str(run_counter), np.nan)

            if (snr_definitions[0] == snr_definitions[1]) and (snr_definitions[1] == snr_definitions[2]):
                metadata['snr_definition'] = snr_definitions[0]
            else:
                metadata['snr_definition'] = ",".join(snr_definitions)

            # Insert spectrum into library
            spectrum = Spectrum(wavelengths=wavelengths_final,
                                values=fluxes_final,
                                value_errors=fluxes_final / snrs_final,
                                metadata=metadata.copy())

            if have_continuum_version:
                metadata['continuum_normalised'] = 1
                spectrum_continuum_normalised = Spectrum(wavelengths=wavelengths_final,
                                                         values=normalised_fluxes_final,
                                                         value_errors=normalised_fluxes_final / snrs_final,
                                                         metadata=metadata.copy())
            else:
                spectrum_continuum_normalised = None

            output[i][snr] = {
                "spectrum": spectrum,
                "spectrum_continuum_normalised": spectrum_continuum_normalised
            }

    return output
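# Standalone sketch of the LRS stitching rule used above: keep each arm on one
# side of the hardcoded crossover wavelengths (5327.7 A and 7031.7 A), then
# concatenate. The dummy rasters are illustrative; real ones come from the 4FS
# FITS files.
import numpy as np

blue = np.linspace(4000, 5500, 100)
green = np.linspace(5200, 7200, 100)
red = np.linspace(7000, 9000, 100)

keep = (blue <= 5327.7,
        (green > 5327.7) & (green <= 7031.7),
        red > 7031.7)

stitched = np.concatenate([arm[mask] for arm, mask in zip((blue, green, red), keep)])
assert np.all(np.diff(stitched) > 0)  # stitched raster is strictly increasing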
def make_fits_spectral_template(self, input_spectrum, input_spectrum_continuum_normalised,
                                output_filename='test1.fits', resolution=50000, continuum_only=False):
    """
    Generate a 4FS-readable FITS template from the Turbospectrum output.

    :param input_spectrum:
        Spectrum object that we want to pass through 4FS.
    :type input_spectrum:
        Spectrum
    :param input_spectrum_continuum_normalised:
        Continuum-normalised version of the Spectrum object that we want to pass through 4FS. This can be
        null, in which case we only produce a flux-normalised spectrum as output.
    :type input_spectrum_continuum_normalised:
        Spectrum
    :param output_filename:
        Output filename for the FITS file we store in our temporary workspace.
    :type output_filename:
        str
    :param resolution:
        The spectral resolution of the input spectrum, stored in the FITS headers.
    :type resolution:
        float
    :param continuum_only:
        Select whether to output the full spectrum, including spectral lines, or just the continuum outline.
    :type continuum_only:
        bool
    :return:
        None
    """

    # Extract data from input spectra
    wavelength_raster = input_spectrum.wavelengths
    flux = input_spectrum.values

    if input_spectrum_continuum_normalised is not None:
        assert input_spectrum.raster_hash == input_spectrum_continuum_normalised.raster_hash, \
            "Continuum-normalised spectrum needs to have the same wavelength raster as the original."
        continuum_normalised_flux = input_spectrum_continuum_normalised.values
        if not continuum_only:
            data = flux
        else:
            # If we only want the continuum, without lines, then divide by the continuum-normalised flux
            data = flux / continuum_normalised_flux
    else:
        data = flux

    lambda_min = np.min(wavelength_raster)
    delta_lambda = wavelength_raster[1] - wavelength_raster[0]

    fits_spectrum = Spectrum(wavelengths=wavelength_raster,
                             values=data,
                             value_errors=np.zeros_like(wavelength_raster),
                             metadata={})

    # Renormalise spectrum to the standard reference magnitude in our chosen photometric band
    magnitude = fits_spectrum.photometry(self.photometric_band)
    if self.magnitude_unreddened:
        magnitude -= input_spectrum.metadata["A_{}".format(self.photometric_band)]
    data *= pow(10, -0.4 * (self.reference_magnitude - magnitude))

    # Turn spectrum into a FITS file
    hdu_1 = fits.PrimaryHDU(data)
    hdu_1.header['CRTYPE1'] = "LINEAR "
    hdu_1.header['CRPIX1'] = 1.0
    hdu_1.header['CRVAL1'] = lambda_min
    hdu_1.header['CDELT1'] = delta_lambda
    hdu_1.header['CUNIT1'] = "Angstrom"
    hdu_1.header['BUNIT'] = "erg/s/cm2/Angstrom"
    hdu_1.header['ABMAG'] = self.reference_magnitude
    if resolution is not None:
        fwhm_res = np.mean(wavelength_raster / resolution)
        hdu_1.header['RESOLUTN'] = fwhm_res

    hdu_list = fits.HDUList([hdu_1])
    hdu_list.writeto(os_path.join(self.tmp_dir, output_filename))
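# Worked example of the magnitude rescaling above: a change of delta_m
# magnitudes multiplies the flux by 10**(-0.4 * delta_m). Values illustrative.
measured_magnitude = 14.0   # what fits_spectrum.photometry() might return
reference_magnitude = 15.0  # plays the role of self.reference_magnitude

scale = 10 ** (-0.4 * (reference_magnitude - measured_magnitude))
# scale ~= 0.398: dimming the star by one magnitude cuts its flux to ~40%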
def deredden(self, e_bv, r=3.1):
    """
    Deredden this spectrum.

    :param e_bv:
        E(B-V). Positive values deredden a spectrum. Negative values increase the reddening of a spectrum.
    :type e_bv:
        float
    :param r:
        A(V)/E(B-V). Typically assumed to be 3.1 for a standard dust grain size distribution.
    :type r:
        float
    :return:
        New Spectrum object.
    """

    x = 1e4 / self._input.wavelengths
    extinction = np.zeros_like(self._input.wavelengths)

    # Regime 1
    mask = (x >= 8)
    h = x - 8
    a = -1.073 - 0.628 * h + 0.137 * h * h - 0.070 * h * h * h
    b = 13.670 + 4.257 * h - 0.420 * h * h + 0.374 * h * h * h
    extinction[mask] = r * a[mask] + b[mask]

    # Regime 2
    mask = (x >= 5.9) * (x < 8)
    h = x - 5.9
    fa = -0.04473 * h * h - 0.009779 * h * h * h
    fb = 0.2130 * h * h + 0.1207 * h * h * h
    a = 1.752 - 0.316 * x - 0.104 / ((x - 4.67) ** 2 + 0.341) + fa
    b = -3.090 + 1.825 * x + 1.206 / ((x - 4.62) ** 2 + 0.263) + fb
    extinction[mask] = r * a[mask] + b[mask]

    # Regime 3
    mask = (x >= 3.3) * (x < 5.9)
    a = 1.752 - 0.316 * x - 0.104 / ((x - 4.67) ** 2 + 0.341)
    b = -3.090 + 1.825 * x + 1.206 / ((x - 4.62) ** 2 + 0.263)
    extinction[mask] = r * a[mask] + b[mask]

    # Regime 4
    mask = (x >= 1.1) * (x < 3.3)
    y = x - 1.82
    a = (1 + 0.17699 * y - 0.50447 * y * y - 0.02427 * y ** 3 + 0.72085 * y ** 4 +
         0.01979 * y ** 5 - 0.77530 * y ** 6 + 0.32999 * y ** 7)
    b = (1.41338 * y + 2.28305 * y * y + 1.07233 * y ** 3 - 5.38434 * y ** 4 -
         0.62251 * y ** 5 + 5.30260 * y ** 6 - 2.09002 * y ** 7)
    extinction[mask] = r * a[mask] + b[mask]

    # Regime 5
    mask = (x >= 0.2) * (x < 1.1)
    a = 0.574 * x ** 1.61
    b = -0.527 * x ** 1.61
    extinction[mask] = r * a[mask] + b[mask]

    # Regime 6
    mask = (x < 0.2)
    # This is the far-IR range. We use Howarth (1983), but normalized
    # to Cardelli et al. (1989) at x=0.2
    # 0.05056 is A_lambda/E(B-V) of Howarth (1983) at x=0.2
    xx = 0.2 ** 1.61
    hilfy = (r * 0.574 - 0.527) * xx / 0.05056
    extinction_6 = hilfy * x * ((1.86 - 0.48 * x) * x - 0.1)
    extinction[mask] = extinction_6[mask]

    multiplier = 10 ** (0.4 * (extinction * e_bv))
    output = Spectrum(wavelengths=self._input.wavelengths,
                      values=self._input.values * multiplier,
                      value_errors=self._input.value_errors * multiplier,
                      metadata=self._input.metadata.copy())

    if self._input.mask_set:
        output.copy_mask_from(self._input)

    return output
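# Minimal usage sketch for deredden(). The wrapper class name SpectrumReddener
# is an assumption for illustration (mirroring the other degradation wrappers
# in this package); the E(B-V) values are illustrative.
reddener = SpectrumReddener(input_spectrum=spectrum)
dereddened = reddener.deredden(e_bv=0.05)             # remove reddening; r defaults to 3.1
more_reddened = reddener.deredden(e_bv=-0.05, r=3.1)  # negative E(B-V) adds reddening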
# Open FITS spectrum
f = fits.open(template)
data = f[1].data
wavelengths = data['LAMBDA']
fluxes = data['FLUX']

# Open ASCII spectrum
# f = np.loadtxt(template).T
# wavelengths = f[0]
# fluxes = f[1]

# Create 4GP spectrum object
spectrum = Spectrum(wavelengths=wavelengths,
                    values=fluxes,
                    value_errors=np.zeros_like(wavelengths),
                    metadata={
                        "Starname": name,
                        "imported_from": template
                    })

# Work out magnitude
mag_intrinsic = spectrum.photometry(args.photometric_band)

# Pass template to 4FS
degraded_spectra = etc_wrapper.process_spectra(
    spectra_list=((spectrum, None),)
)

# Loop over LRS and HRS
for mode in degraded_spectra:
    # Loop over the spectra we simulated (there was only one!)
result_log.write("\n[{}] {}... ".format(time.asctime(), object_name)) result_log.flush() # Convolve spectrum flux_data = input_spectrum.values flux_data_convolved = np.convolve(a=flux_data, v=convolution_kernel, mode='same') flux_errors = input_spectrum.value_errors flux_errors_convolved = np.convolve(a=flux_errors, v=convolution_kernel, mode='same') output_spectrum = Spectrum(wavelengths=input_spectrum.wavelengths, values=flux_data_convolved, value_errors=flux_errors_convolved, metadata=input_spectrum.metadata) # Import degraded spectra into output spectrum library output_library.insert(spectra=output_spectrum, filenames=input_spectrum_id['filename'], metadata_list={ "convolution_width": kernel_width, "convolution_kernel": args.kernel }) # If we put database in /tmp while adding entries to it, now return it to original location if args.db_in_tmp: del output_library os.system("mv /tmp/tmp_{}.db {}".format( library_name, os_path.join(library_path, "index.db")))