print(removed_points)
print(removed_flux)

grid.fluxes = np.delete(values, i, axis=0)
grid.index = np.delete(points, i, axis=0)

print(len(grid.index))

starspectrum = Spectrum1D.from_array(dispersion=wavelengths,
                                     flux=removed_flux,
                                     dispersion_unit=u.angstrom,
                                     uncertainty=removed_flux * (1 / 100.))

interp1 = Interpolate(starspectrum)
norm1 = Normalize(starspectrum, 2)

model = grid | interp1 | norm1

setattr(model, 'teff_0', removed_points[0])
setattr(model, 'logg_0', removed_points[1])
setattr(model, 'mh_0', removed_points[2])
setattr(model, 'alpha_0', removed_points[3])
'''

result = mtf.fit_array(starspectrum, model, R_fixed=25000.)
print(result.median)

for a in result.median.keys():
    setattr(model, a, result.median[a])
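# Hedged sketch (not in the original fragment): after the leave-one-out refit above,
# the recovered medians can be compared against the grid point that was removed.
# The parameter ordering of removed_points follows the setattr calls above.
# for name, grid_val in zip(['teff_0', 'logg_0', 'mh_0', 'alpha_0'], removed_points):
#     print('%s: removed grid value %f, refit median %f' % (name, grid_val, result.median[name]))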
def sl_response_plot_three(starname, g, specdir='/group/data/nirspec/spectra/',
                           snr=30., nnorm=2):
    file1 = glob.glob(specdir + starname + '_order34*.dat')
    file2 = glob.glob(specdir + starname + '_order35*.dat')
    file3 = glob.glob(specdir + starname + '_order36*.dat')

    starspectrum34 = read_fits_file.read_nirspec_dat(
        file1, desired_wavelength_units='Angstrom', wave_range=[2.245, 2.275])
    starspectrum34.uncertainty = (np.zeros(len(starspectrum34.flux.value)) +
                                  1.0 / float(snr)) * starspectrum34.flux.unit

    starspectrum35 = read_fits_file.read_nirspec_dat(
        file2, desired_wavelength_units='Angstrom', wave_range=[2.181, 2.2103])
    starspectrum35.uncertainty = (np.zeros(len(starspectrum35.flux.value)) +
                                  1.0 / float(snr)) * starspectrum35.flux.unit

    starspectrum36 = read_fits_file.read_nirspec_dat(
        file3, desired_wavelength_units='Angstrom', wave_range=[2.1168, 2.145])
    starspectrum36.uncertainty = (np.zeros(len(starspectrum36.flux.value)) +
                                  1.0 / float(snr)) * starspectrum36.flux.unit

    interp1 = Interpolate(starspectrum34)
    convolve1 = InstrumentConvolveGrating.from_grid(g, R=24000)
    rot1 = RotationalBroadening.from_grid(g, vrot=np.array([10.0]))
    norm1 = Normalize(starspectrum34, nnorm)

    interp2 = Interpolate(starspectrum35)
    convolve2 = InstrumentConvolveGrating.from_grid(g, R=24000)
    #rot2 = RotationalBroadening.from_grid(g, vrot=np.array([10.0]))
    norm2 = Normalize(starspectrum35, nnorm)

    interp3 = Interpolate(starspectrum36)
    convolve3 = InstrumentConvolveGrating.from_grid(g, R=24000)
    #rot3 = RotationalBroadening.from_grid(g, vrot=np.array([10.0]))
    norm3 = Normalize(starspectrum36, nnorm)

    model = g | rot1 | Splitter3() | \
        DopplerShift(vrad=0) & DopplerShift(vrad=0) & DopplerShift(vrad=0) | \
        convolve1 & convolve2 & convolve3 | \
        interp1 & interp2 & interp3 | \
        norm1 & norm2 & norm3

    h5_files_us = glob.glob(
        '/u/rbentley/metallicity/spectra_fits/masked_fit_results/orders34-35-36-37/masked*.h5')

    cut_lis = []
    for filename in h5_files_us:
        print(filename.split('_'))
        cut_lis += [(float(filename.split('_')[6]), filename)]

    cut_lis = sorted(cut_lis, key=getKey)
    h5_files = [i[1] for i in cut_lis]

    sl_val = []
    print(h5_files)

    for filename in h5_files:
        print(filename.split('_')[6])
        gc_result = MultiNestResult.from_hdf5(filename)
        print(gc_result)
        for a in apogee_vals.keys():
            setattr(model, a, apogee_vals[a])

        sl_mh1, sl_mh2, sl_mh3 = mtf.s_lambda_three_order(
            model, 'mh', model.mh_0.value, 0.1)
        w1, f1, w2, f2, w3, f3 = model()

        # combine all of the sl_mh, wavelength, and flux lists
        sl_mh = np.concatenate((sl_mh1, sl_mh2, sl_mh3))
        w = np.concatenate((w1, w2, w3))
        f = np.concatenate((f1, f2, f3))

        starfluxall = np.concatenate((starspectrum34.flux.value,
                                      starspectrum35.flux.value,
                                      starspectrum36.flux.value))
        starwaveall = np.concatenate((starspectrum34.wavelength.value,
                                      starspectrum35.wavelength.value,
                                      starspectrum36.wavelength.value))

        sigma_bounds = gc_result.calculate_sigmas(1)
        sigmas = []
        for a in sigma_bounds.keys():
            print(a)
            sigmas += [(sigma_bounds[a][1] - sigma_bounds[a][0]) / 2.]
        print(sigmas)

        abs_sl_mh = []
        mask_sl_f = []
        mask_sl_w = []
        data_sl_f = []
        for i in range(len(sl_mh)):
            abs_sl_mh += [np.abs(sl_mh[i])]
            if abs(sl_mh[i]) < float(filename.split('_')[6]):
                mask_sl_f += [starfluxall[i]]
                mask_sl_w += [starwaveall[i]]
            else:
                data_sl_f += [starfluxall[i]]

        print(sigmas)
        sl_val += [(float(filename.split('_')[6]), len(mask_sl_f),
                    gc_result.median['vrad_3'], gc_result.median['vrad_4'],
                    gc_result.median['vrad_5'], gc_result.median['logg_0'],
                    gc_result.median['mh_0'], gc_result.median['alpha_0'],
                    gc_result.median['teff_0'], sigmas)]
        print(len(starfluxall))

    return sl_val
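# Hedged usage sketch (assumptions: 'examplestar' is a placeholder star name, g is a
# grid already loaded with load_grid(), and the masked_*.h5 MultiNest results read
# inside the function already exist on disk).
# sl_val = sl_response_plot_three('examplestar', g, snr=30., nnorm=2)
# cutoffs = [v[0] for v in sl_val]
# n_masked_pixels = [v[1] for v in sl_val]
# plt.plot(cutoffs, n_masked_pixels, 'o')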
#g = load_grid('/u/rbentley/metallicity/grids/phoenix_t2500_6000_w22350_22900_R40000_o34.h5')  # for order 34

w, f = g()
testspec_w = np.linspace(w[0], w[-1], np.amax(w) - np.amin(w))
testspec_f = np.ones(len(testspec_w))
testspec_u = np.ones(len(testspec_w)) * 0.001

testspec = SKSpectrum1D.from_array(
    wavelength=testspec_w * u.angstrom,
    flux=testspec_f * u.Unit('erg/s/cm^2/angstrom'),
    uncertainty=testspec_u * u.Unit('erg/s/cm^2/angstrom'))

interp1 = Interpolate(starspectrum35)
convolve1 = InstrumentConvolveGrating.from_grid(g, R=24000)
rot1 = RotationalBroadening.from_grid(g, vrot=np.array([10.0]))
norm1 = Normalize(starspectrum35, 2)

# concatenate the spectral grid (which will have the stellar parameters) with the
# other model components that you want to fit
model = g | rot1 | DopplerShift(vrad=0.0) | convolve1 | interp1 | norm1

# add likelihood parts
like1 = Chi2Likelihood(starspectrum35)
#like1_l1 = SpectralL1Likelihood(spectrum)

fit_model = model | like1
#print fit_model

# This is the fit itself. gc_result is a set of parameters from the best MultiNest
# fit to the data. This cell takes time to evaluate.
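# Hedged sketch of that fit (assumption: modeled on the MultiNest calls used elsewhere
# in this file, e.g. fit_star_multi_order and fit below; the prior ranges here are
# illustrative, not the values used in any particular run).
# teff_prior = priors.UniformPrior(2500, 6000)
# logg_prior = priors.UniformPrior(0., 4.5)
# mh_prior = priors.UniformPrior(-2., 1.0)
# alpha_prior = priors.UniformPrior(-1., 1.)
# vrot_prior = priors.UniformPrior(0, 20)
# vrad_prior = priors.UniformPrior(-600, 600)
# R_prior = priors.FixedPrior(24000.)
# fitobj = MultiNest(fit_model, [teff_prior, logg_prior, mh_prior, alpha_prior,
#                                vrot_prior, vrad_prior, R_prior])
# fitobj.run(verbose=True, importance_nested_sampling=False, n_live_points=400)
# gc_result = fitobj.result
# print(gc_result.median)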
def assemble_model(spectral_grid=None, spectrum=None, normalize_parts=None,
                   normalize_npol=None, filter_set=None, mag_type='vega',
                   **kwargs):
    """
    Parameters
    ----------
    spectral_grid : ~specgrid.SpectralGrid
        spectral grid to be used in observation
    spectrum : ~specutils.Spectrum1D
        spectrum to be used for interpolation; if None, neither interpolation
        nor normalization will be performed [default None]
    normalize_npol : int
        degree of polynomial to be used for normalization; the normalization
        plugin is only used if this is not None and spectrum is not None
        [default None]
    plugin_names : ~list of ~str
        select between the following available plugin choices:
        {stellar_operations}
        {instrument_operations}

    Returns
    -------
    : ~Model
        Model with the requested operations
    """

    parameters = kwargs.copy()

    def assemble_model_part(operations):
        observation_model = None
        for operation in operations:
            if ((filter_set is not None)
                    and (operation.__name__.startswith('CCM89'))):
                continue
            param_values = {}
            for param_name in operation.param_names:
                if param_name in parameters:
                    param_values[param_name] = parameters.pop(param_name)

            if param_values != {}:
                if getattr(operation, 'requires_observed_spectrum', False):
                    param_values['observed'] = spectrum

                if hasattr(operation, 'from_grid'):
                    current_stellar_operation = operation.from_grid(
                        spectral_grid, **param_values)
                else:
                    current_stellar_operation = operation(**param_values)

                if observation_model is None:
                    observation_model = current_stellar_operation
                else:
                    observation_model = (observation_model
                                         | current_stellar_operation)
        return observation_model

    stellar_operations = assemble_model_part(
        StellarOperationModel.__subclasses__())
    spectrograph_operations = assemble_model_part(
        SpectrographOperationModel.__subclasses__())

    if spectrum is not None:
        if spectrograph_operations is None:
            spectrograph_operations = Interpolate(spectrum)
        else:
            spectrograph_operations = (spectrograph_operations
                                       | Interpolate(spectrum))
        if normalize_npol is not None:
            if normalize_parts:
                spectrograph_operations = (
                    spectrograph_operations
                    | NormalizeParts(spectrum, normalize_parts, normalize_npol))
            else:
                spectrograph_operations = (spectrograph_operations
                                           | Normalize(spectrum, normalize_npol))

    if filter_set is not None:
        if 'a_v' in kwargs:
            imager_operations = (CCM89Extinction(a_v=parameters.pop('a_v'))
                                 | Photometry.from_grid(spectral_grid,
                                                        filter_set=filter_set,
                                                        mag_type=mag_type))
        else:
            imager_operations = Photometry.from_grid(spectral_grid,
                                                     filter_set=filter_set,
                                                     mag_type=mag_type)
    else:
        imager_operations = None

    if parameters != {}:
        raise ValueError('Given parameters {0} not understood - please do not '
                         'add grid parameters as these are already given by '
                         'the grid'.format(', '.join(list(parameters.keys()))))

    if imager_operations is not None and spectrograph_operations is not None:
        if spectral_grid is not None:
            starkit_model = spectral_grid | stellar_operations
        else:
            starkit_model = stellar_operations
        starkit_model = starkit_model | DoubleSpectrum()
        starkit_model = starkit_model | (spectrograph_operations
                                         & imager_operations)
    elif imager_operations is not None:
        if spectral_grid is not None:
            starkit_model = (spectral_grid | stellar_operations
                             | imager_operations)
        else:
            starkit_model = (stellar_operations | imager_operations)
    elif spectrograph_operations is not None:
        if spectral_grid is not None:
            starkit_model = (spectral_grid | stellar_operations
                             | spectrograph_operations)
        else:
            starkit_model = (stellar_operations | spectrograph_operations)
    else:
        if spectral_grid is not None:
            starkit_model = spectral_grid | stellar_operations
        else:
            starkit_model = stellar_operations

    return starkit_model

    ObservationModel.__class__.fit_parameters = property(fit_parameters_property)
    ObservationModel.rename('ObservationModel')
    return ObservationModel
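# Hedged usage sketch (assumptions: g and starspectrum35 are a loaded grid and an
# observed spectrum as used elsewhere in this file, and vrot/vrad/R are parameter
# names exposed by the registered StellarOperationModel/SpectrographOperationModel
# subclasses, as the kwargs handling above implies).
# model = assemble_model(spectral_grid=g, spectrum=starspectrum35,
#                        normalize_npol=2, vrot=10.0, vrad=0.0, R=24000.0)
# wave, flux = model()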
def sl_response_plot_four(starname, g, specdir='/group/data/nirspec/spectra/',
                          snr=30., nnorm=2):
    file1 = glob.glob(specdir + starname + '_order34*.dat')
    file2 = glob.glob(specdir + starname + '_order35*.dat')
    file3 = glob.glob(specdir + starname + '_order36*.dat')
    file4 = glob.glob(specdir + starname + '_order37*.dat')

    starspectrum34 = read_fits_file.read_nirspec_dat(
        file1, desired_wavelength_units='micron')
    starspectrum35 = read_fits_file.read_nirspec_dat(
        file2, desired_wavelength_units='micron')
    starspectrum36 = read_fits_file.read_nirspec_dat(
        file3, desired_wavelength_units='micron')
    starspectrum37 = read_fits_file.read_nirspec_dat(
        file4, desired_wavelength_units='micron')

    waverange34 = [np.amin(starspectrum34.wavelength.value[:970]),
                   np.amax(starspectrum34.wavelength.value[:970])]
    waverange35 = [np.amin(starspectrum35.wavelength.value[:970]),
                   np.amax(starspectrum35.wavelength.value[:970])]
    waverange36 = [np.amin(starspectrum36.wavelength.value[:970]),
                   np.amax(starspectrum36.wavelength.value[:970])]
    waverange37 = [np.amin(starspectrum37.wavelength.value[:970]),
                   np.amax(starspectrum37.wavelength.value[:970])]

    starspectrum34 = read_fits_file.read_nirspec_dat(
        file1, desired_wavelength_units='Angstrom', wave_range=waverange34)
    starspectrum34.uncertainty = (np.zeros(len(starspectrum34.flux.value)) +
                                  1.0 / float(snr)) * starspectrum34.flux.unit

    starspectrum35 = read_fits_file.read_nirspec_dat(
        file2, desired_wavelength_units='Angstrom', wave_range=waverange35)
    starspectrum35.uncertainty = (np.zeros(len(starspectrum35.flux.value)) +
                                  1.0 / float(snr)) * starspectrum35.flux.unit

    starspectrum36 = read_fits_file.read_nirspec_dat(
        file3, desired_wavelength_units='Angstrom', wave_range=waverange36)
    starspectrum36.uncertainty = (np.zeros(len(starspectrum36.flux.value)) +
                                  1.0 / float(snr)) * starspectrum36.flux.unit

    starspectrum37 = read_fits_file.read_nirspec_dat(
        file4, desired_wavelength_units='Angstrom', wave_range=waverange37)
    starspectrum37.uncertainty = (np.zeros(len(starspectrum37.flux.value)) +
                                  1.0 / float(snr)) * starspectrum37.flux.unit

    interp1 = Interpolate(starspectrum34)
    convolve1 = InstrumentConvolveGrating.from_grid(g, R=24000)
    rot1 = RotationalBroadening.from_grid(g, vrot=np.array([10.0]))
    norm1 = Normalize(starspectrum34, nnorm)

    interp2 = Interpolate(starspectrum35)
    convolve2 = InstrumentConvolveGrating.from_grid(g, R=24000)
    #rot2 = RotationalBroadening.from_grid(g, vrot=np.array([10.0]))
    norm2 = Normalize(starspectrum35, nnorm)

    interp3 = Interpolate(starspectrum36)
    convolve3 = InstrumentConvolveGrating.from_grid(g, R=24000)
    #rot3 = RotationalBroadening.from_grid(g, vrot=np.array([10.0]))
    norm3 = Normalize(starspectrum36, nnorm)

    interp4 = Interpolate(starspectrum37)
    convolve4 = InstrumentConvolveGrating.from_grid(g, R=24000)
    #rot4 = RotationalBroadening.from_grid(g, vrot=np.array([10.0]))
    norm4 = Normalize(starspectrum37, nnorm)

    model = g | rot1 | Splitter4() | \
        DopplerShift(vrad=0) & DopplerShift(vrad=0) & DopplerShift(vrad=0) & DopplerShift(vrad=0) | \
        convolve1 & convolve2 & convolve3 & convolve4 | \
        interp1 & interp2 & interp3 & interp4 | \
        norm1 & norm2 & norm3 & norm4

    h5_files_us = glob.glob(
        '/u/rbentley/metallicity/spectra_fits/masked_fit_results/orders34-35-36-37/masked*'
        + starname + '_order34-37.h5')

    cut_lis = []
    for filename in h5_files_us:
        print(filename.split('_'))
        cut_lis += [(float(filename.split('_')[6]), filename)]

    cut_lis = sorted(cut_lis, key=getKey)
    h5_files = [i[1] for i in cut_lis]

    sl_val = []
    combined_data_mask_model = {}

    for filename in h5_files:
        print(filename)
        print(filename.split('_')[6])
        gc_result = MultiNestResult.from_hdf5(filename)
        print(gc_result)
        for a in apogee_vals.keys():
            setattr(model, a, apogee_vals[a])

        w1, f1, w2, f2, w3, f3, w4, f4 = model()
        sl_mh1, sl_mh2, sl_mh3, sl_mh4 = mtf.s_lambda_four_order(
            model, 'mh', model.mh_0.value, 0.1)

        # combine all of the sl_mh, wavelength, and flux lists
        sl_mh = np.concatenate((sl_mh1, sl_mh2, sl_mh3, sl_mh4))
        w = np.concatenate((w1 / (gc_result.median['vrad_3'] / 3e5 + 1.0),
                            w2 / (gc_result.median['vrad_4'] / 3e5 + 1.0),
                            w3 / (gc_result.median['vrad_5'] / 3e5 + 1.0),
                            w4 / (gc_result.median['vrad_6'] / 3e5 + 1.0)))
        f = np.concatenate((f1, f2, f3, f4))

        starfluxall = np.concatenate((starspectrum34.flux.value,
                                      starspectrum35.flux.value,
                                      starspectrum36.flux.value,
                                      starspectrum37.flux.value))
        starwaveall = np.concatenate(
            (starspectrum34.wavelength.value / (gc_result.median['vrad_3'] / 3e5 + 1.0),
             starspectrum35.wavelength.value / (gc_result.median['vrad_4'] / 3e5 + 1.0),
             starspectrum36.wavelength.value / (gc_result.median['vrad_5'] / 3e5 + 1.0),
             starspectrum37.wavelength.value / (gc_result.median['vrad_6'] / 3e5 + 1.0)))

        sigma_bounds = gc_result.calculate_sigmas(1)
        sigmas = []
        for a in sigma_bounds.keys():
            #print(a)
            sigmas += [(sigma_bounds[a][1] - sigma_bounds[a][0]) / 2.]
        #print(sigmas)

        abs_sl_mh = []
        mask_sl_f = []
        mask_sl_w = []
        data_sl_f = []
        for i in range(len(sl_mh)):
            abs_sl_mh += [np.abs(sl_mh[i])]
            if abs(sl_mh[i]) < float(filename.split('_')[6]):
                mask_sl_f += [starfluxall[i]]
                mask_sl_w += [starwaveall[i]]
            else:
                data_sl_f += [starfluxall[i]]

        print(sigmas)

        combined_data_mask_model.update({
            filename.split('_')[6]: [(starwaveall, starfluxall), (w, f),
                                     (mask_sl_w, mask_sl_f)]
        })

        sl_val += [(float(filename.split('_')[6]), len(mask_sl_f),
                    gc_result.median['vrad_3'], gc_result.median['vrad_4'],
                    gc_result.median['vrad_5'], gc_result.median['vrad_6'],
                    gc_result.median['logg_0'], gc_result.median['mh_0'],
                    gc_result.median['alpha_0'], gc_result.median['teff_0'],
                    sigmas)]
        print(len(starfluxall))

    return sl_val, combined_data_mask_model
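# Hedged usage sketch (assumptions: 'examplestar' is a placeholder star name, g is a
# loaded grid, and the masked_*_<star>_order34-37.h5 results exist on disk).
# sl_val, data_mask_model = sl_response_plot_four('examplestar', g, snr=30., nnorm=2)
# cutoffs = [v[0] for v in sl_val]
# n_masked_pixels = [v[1] for v in sl_val]
# plt.plot(cutoffs, n_masked_pixels, 'o')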
def plot_multi_order_fit(starname, g=None, savefile=None,
                         specdir='/group/data/nirspec/spectra/',
                         savedir='../nirspec_fits/', snr=30.0, nnorm=2,
                         save_model=False, plot_maximum=False):
    # plot the results of a multiple-order fit on the observed spectrum
    file1 = glob.glob(specdir + starname + '_order34*.dat')
    file2 = glob.glob(specdir + starname + '_order35*.dat')
    file3 = glob.glob(specdir + starname + '_order36*.dat')

    if savefile is None:
        # like_str is expected to be defined in the enclosing scope
        # ('' for chi2 fits, '_l1norm' for L1 fits, matching fit_star_multi_order)
        savefile = os.path.join(
            savedir, 'unmasked_' + starname + '_order34-36' + like_str + '.h5')

    # restore the MultiNest savefile
    result = MultiNestResult.from_hdf5(savefile)

    starspectrum34 = read_fits_file.read_nirspec_dat(
        file1, desired_wavelength_units='Angstrom', wave_range=[2.245, 2.275])
    starspectrum34.uncertainty = (np.zeros(len(starspectrum34.flux.value)) +
                                  1.0 / float(snr)) * starspectrum34.flux.unit

    starspectrum35 = read_fits_file.read_nirspec_dat(
        file2, desired_wavelength_units='Angstrom', wave_range=[2.181, 2.2103])
    starspectrum35.uncertainty = (np.zeros(len(starspectrum35.flux.value)) +
                                  1.0 / float(snr)) * starspectrum35.flux.unit

    starspectrum36 = read_fits_file.read_nirspec_dat(
        file3, desired_wavelength_units='Angstrom', wave_range=[2.1168, 2.145])
    starspectrum36.uncertainty = (np.zeros(len(starspectrum36.flux.value)) +
                                  1.0 / float(snr)) * starspectrum36.flux.unit

    if g is not None:
        interp1 = Interpolate(starspectrum34)
        convolve1 = InstrumentConvolveGrating.from_grid(g, R=24000)
        rot1 = RotationalBroadening.from_grid(g, vrot=np.array([10.0]))
        norm1 = Normalize(starspectrum34, nnorm)

        interp2 = Interpolate(starspectrum35)
        convolve2 = InstrumentConvolveGrating.from_grid(g, R=24000)
        #rot2 = RotationalBroadening.from_grid(g, vrot=np.array([10.0]))
        norm2 = Normalize(starspectrum35, nnorm)

        interp3 = Interpolate(starspectrum36)
        convolve3 = InstrumentConvolveGrating.from_grid(g, R=24000)
        #rot3 = RotationalBroadening.from_grid(g, vrot=np.array([10.0]))
        norm3 = Normalize(starspectrum36, nnorm)

        model = g | rot1 | Splitter3() | \
            DopplerShift(vrad=0) & DopplerShift(vrad=0) & DopplerShift(vrad=0) | \
            convolve1 & convolve2 & convolve3 | \
            interp1 & interp2 & interp3 | \
            norm1 & norm2 & norm3

        if plot_maximum:
            model.teff_0 = result.maximum['teff_0']
            model.logg_0 = result.maximum['logg_0']
            model.mh_0 = result.maximum['mh_0']
            model.alpha_0 = result.maximum['alpha_0']
            model.vrot_1 = result.maximum['vrot_1']
            model.vrad_3 = result.maximum['vrad_3']
            model.vrad_4 = result.maximum['vrad_4']
            model.vrad_5 = result.maximum['vrad_5']
            model.R_6 = result.maximum['R_6']
            model.R_7 = result.maximum['R_7']
            model.R_8 = result.maximum['R_8']
            #model.R_9 = result.median['R_9']
        else:
            model.teff_0 = result.median['teff_0']
            model.logg_0 = result.median['logg_0']
            model.mh_0 = result.median['mh_0']
            model.alpha_0 = result.median['alpha_0']
            model.vrot_1 = result.median['vrot_1']
            model.vrad_3 = result.median['vrad_3']
            model.vrad_4 = result.median['vrad_4']
            model.vrad_5 = result.median['vrad_5']
            model.R_6 = result.median['R_6']
            model.R_7 = result.median['R_7']
            model.R_8 = result.median['R_8']
            #model.R_9 = result.median['R_9']

        w1, f1, w2, f2, w3, f3 = model()

    else:
        file1 = os.path.join(savedir, starname + '_order34_model.txt')
        file2 = os.path.join(savedir, starname + '_order35_model.txt')
        file3 = os.path.join(savedir, starname + '_order36_model.txt')

        w1, f1 = np.loadtxt(file1, usecols=(0, 1), unpack=True)
        w2, f2 = np.loadtxt(file2, usecols=(0, 1), unpack=True)
        w3, f3 = np.loadtxt(file3, usecols=(0, 1), unpack=True)

    # teff_0    3363.211996
    # logg_0    1.691725
    # mh_0      0.936003
    # alpha_0   -0.027917
    # vrot_1    1.378488
    # vrad_3    -538.550269
    # vrad_4    -239.851862
    # vrad_5    -541.044943
    # vrad_6    -540.432821
    # R_7       20000.000000
    # R_8       20000.000000
    # R_9       20000.000000
    # R_10      20000.000000

    plt.clf()

    observed_wave = (starspectrum36.wavelength, starspectrum35.wavelength,
                     starspectrum34.wavelength)
    observed_flux = (starspectrum36.flux, starspectrum35.flux,
                     starspectrum34.flux)
    model_wave = (w3, w2, w1)
    model_flux = (f3, f2, f1)

    max_result = result.maximum
    vels = (max_result['vrad_5'], max_result['vrad_4'], max_result['vrad_3'])

    print('maximum likelihood:')
    print(max_result)
    print('median:')
    print(result.median)
    print('1 sigma:')
    print(result.calculate_sigmas(1))

    for i in range(len(observed_wave)):
        plt.subplot(4, 1, i + 1)
        velfac = 1.0 / (vels[i] / 3e5 + 1.0)
        xwave = observed_wave[i] * velfac
        plt.plot(xwave, observed_flux[i])
        plt.plot(model_wave[i] * velfac, model_flux[i])
        plt.ylim(0.2, 1.2)
        plt.xlabel('Wavelength (Angstrom)')
        plt.ylabel('Flux')
        plt.xlim(np.min(xwave.value), np.max(xwave.value))
        plotlines.oplotlines(angstrom=True, arcturus=True, alpha=0.5,
                             molecules=False, size=12)

    plt.tight_layout()
    plt.show()

    if save_model:
        comment1 = 'teff %f,logg %f,mh %f,alpha %f,vrot %f,vrad %f,R %f' % \
            (model.teff_0.value, model.logg_0.value, model.mh_0.value,
             model.alpha_0.value, model.vrot_1.value, model.vrad_3.value,
             model.R_6.value)
        comment2 = 'teff %f,logg %f,mh %f,alpha %f,vrot %f,vrad %f,R %f' % \
            (model.teff_0.value, model.logg_0.value, model.mh_0.value,
             model.alpha_0.value, model.vrot_1.value, model.vrad_4.value,
             model.R_7.value)
        comment3 = 'teff %f,logg %f,mh %f,alpha %f,vrot %f,vrad %f,R %f' % \
            (model.teff_0.value, model.logg_0.value, model.mh_0.value,
             model.alpha_0.value, model.vrot_1.value, model.vrad_5.value,
             model.R_8.value)

        file1 = os.path.join(savedir, starname + '_order34_model.txt')
        file2 = os.path.join(savedir, starname + '_order35_model.txt')
        file3 = os.path.join(savedir, starname + '_order36_model.txt')

        write_spectrum.write_txt(w1, f1, file1, comments=comment1)
        write_spectrum.write_txt(w2, f2, file2, comments=comment2)
        write_spectrum.write_txt(w3, f3, file3, comments=comment3)
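# Hedged usage sketch (assumptions: 'examplestar' is a placeholder star name, g is a
# loaded grid, and the unmasked_<star>_order34-36 result written by
# fit_star_multi_order below already exists in savedir).
# plot_multi_order_fit('examplestar', g=g, snr=30.0, save_model=False)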
def fit_star_multi_order(starname, g, specdir='/group/data/nirspec/spectra/',
                         savedir='../nirspec_fits/', snr=30.0, nnorm=2,
                         teff_range=[2500, 6000], vrad_range=[-600, 600],
                         logg_range=[0., 4.5], mh_range=[-2., 1.0],
                         vrot_range=[0, 20], R=40000, verbose=True,
                         alpha_range=[-1., 1.], r_range=[15000.0, 40000.0],
                         R_fixed=None, logg_fixed=None, l1norm=False):
    # fit a spectrum of a star with multiple orders that can have different velocities
    file1 = glob.glob(specdir + starname + '_order34*.dat')
    file2 = glob.glob(specdir + starname + '_order35*.dat')
    file3 = glob.glob(specdir + starname + '_order36*.dat')

    starspectrum34 = read_fits_file.read_nirspec_dat(
        file1, desired_wavelength_units='Angstrom', wave_range=[2.245, 2.275])
    starspectrum34.uncertainty = (np.zeros(len(starspectrum34.flux.value)) +
                                  1.0 / float(snr)) * starspectrum34.flux.unit

    starspectrum35 = read_fits_file.read_nirspec_dat(
        file2, desired_wavelength_units='Angstrom', wave_range=[2.181, 2.2103])
    starspectrum35.uncertainty = (np.zeros(len(starspectrum35.flux.value)) +
                                  1.0 / float(snr)) * starspectrum35.flux.unit

    starspectrum36 = read_fits_file.read_nirspec_dat(
        file3, desired_wavelength_units='Angstrom', wave_range=[2.1168, 2.145])
    starspectrum36.uncertainty = (np.zeros(len(starspectrum36.flux.value)) +
                                  1.0 / float(snr)) * starspectrum36.flux.unit

    interp1 = Interpolate(starspectrum34)
    convolve1 = InstrumentConvolveGrating.from_grid(g, R=24000)
    rot1 = RotationalBroadening.from_grid(g, vrot=np.array([10.0]))
    norm1 = Normalize(starspectrum34, nnorm)

    interp2 = Interpolate(starspectrum35)
    convolve2 = InstrumentConvolveGrating.from_grid(g, R=24000)
    #rot2 = RotationalBroadening.from_grid(g, vrot=np.array([10.0]))
    norm2 = Normalize(starspectrum35, nnorm)

    interp3 = Interpolate(starspectrum36)
    convolve3 = InstrumentConvolveGrating.from_grid(g, R=24000)
    #rot3 = RotationalBroadening.from_grid(g, vrot=np.array([10.0]))
    norm3 = Normalize(starspectrum36, nnorm)

    model = g | rot1 | Splitter3() | \
        DopplerShift(vrad=0) & DopplerShift(vrad=0) & DopplerShift(vrad=0) | \
        convolve1 & convolve2 & convolve3 | \
        interp1 & interp2 & interp3 | \
        norm1 & norm2 & norm3

    w1, f1, w2, f2, w3, f3 = model()

    plt.clf()
    plt.plot(w1, f1)
    plt.plot(starspectrum34.wavelength, starspectrum34.flux)
    plt.plot(w2, f2)
    plt.plot(starspectrum35.wavelength, starspectrum35.flux)
    plt.plot(w3, f3)
    plt.plot(starspectrum36.wavelength, starspectrum36.flux)

    # likelihoods
    if l1norm:
        like1 = L1Likelihood(starspectrum34)
        like2 = L1Likelihood(starspectrum35)
        like3 = L1Likelihood(starspectrum36)
    else:
        like1 = Chi2Likelihood(starspectrum34)
        like2 = Chi2Likelihood(starspectrum35)
        like3 = Chi2Likelihood(starspectrum36)

    fit_model = model | like1 & like2 & like3 | Combiner3()
    print(fit_model.__class__)
    print(fit_model())

    teff_prior = priors.UniformPrior(*teff_range)
    if logg_fixed is not None:
        logg_prior = priors.FixedPrior(logg_fixed)
    else:
        logg_prior = priors.UniformPrior(*logg_range)
    mh_prior = priors.UniformPrior(*mh_range)
    alpha_prior = priors.UniformPrior(*alpha_range)
    vrot_prior = priors.UniformPrior(*vrot_range)
    vrad_prior1 = priors.UniformPrior(*vrad_range)
    vrad_prior2 = priors.UniformPrior(*vrad_range)
    vrad_prior3 = priors.UniformPrior(*vrad_range)

    # R_prior1 = priors.FixedPrior(R)
    # R_prior2 = priors.FixedPrior(R)
    # R_prior3 = priors.FixedPrior(R)
    # R_prior4 = priors.FixedPrior(R)

    if R_fixed is not None:
        R_prior1 = priors.FixedPrior(R_fixed)
        R_prior2 = priors.FixedPrior(R_fixed)
        R_prior3 = priors.FixedPrior(R_fixed)
    else:
        R_prior1 = priors.UniformPrior(*r_range)
        R_prior2 = priors.UniformPrior(*r_range)
        R_prior3 = priors.UniformPrior(*r_range)

    fitobj = MultiNest(fit_model,
                       [teff_prior, logg_prior, mh_prior, alpha_prior,
                        vrot_prior, vrad_prior1, vrad_prior2, vrad_prior3,
                        R_prior1, R_prior2, R_prior3])
    fitobj.run(verbose=verbose, importance_nested_sampling=False,
               n_live_points=400)
    result = fitobj.result

    if l1norm:
        like_str = '_l1norm'
    else:
        like_str = ''
    result.to_hdf(os.path.join(
        savedir, 'unmasked_' + starname + '_order34-36' + like_str + '.h5'))

    print(result.calculate_sigmas(1))
    print(result.median)

    # save the individual model spectra with the maximum-posterior values
    model.teff_0 = result.maximum['teff_0']
    model.logg_0 = result.maximum['logg_0']
    model.mh_0 = result.maximum['mh_0']
    model.alpha_0 = result.maximum['alpha_0']
    model.vrot_1 = result.maximum['vrot_1']
    model.vrad_3 = result.maximum['vrad_3']
    model.vrad_4 = result.maximum['vrad_4']
    model.vrad_5 = result.maximum['vrad_5']
    model.R_7 = result.maximum['R_6']
    model.R_8 = result.maximum['R_7']
    model.R_9 = result.maximum['R_8']

    w1, f1, w2, f2, w3, f3 = model()

    comment1 = 'teff %f,logg %f,mh %f,alpha %f,vrot %f,vrad %f,R %f' % \
        (model.teff_0.value, model.logg_0.value, model.mh_0.value,
         model.alpha_0.value, model.vrot_1.value, model.vrad_3.value,
         model.R_7.value)
    comment2 = 'teff %f,logg %f,mh %f,alpha %f,vrot %f,vrad %f,R %f' % \
        (model.teff_0.value, model.logg_0.value, model.mh_0.value,
         model.alpha_0.value, model.vrot_1.value, model.vrad_4.value,
         model.R_8.value)
    comment3 = 'teff %f,logg %f,mh %f,alpha %f,vrot %f,vrad %f,R %f' % \
        (model.teff_0.value, model.logg_0.value, model.mh_0.value,
         model.alpha_0.value, model.vrot_1.value, model.vrad_5.value,
         model.R_9.value)

    file1 = os.path.join(savedir, starname + '_order34_model.txt')
    file2 = os.path.join(savedir, starname + '_order35_model.txt')
    file3 = os.path.join(savedir, starname + '_order36_model.txt')

    write_spectrum.write_txt(w1, f1, file1, comments=comment1)
    write_spectrum.write_txt(w2, f2, file2, comments=comment2)
    write_spectrum.write_txt(w3, f3, file3, comments=comment3)
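# Hedged usage sketch (assumptions: 'examplestar' is a placeholder star name and g is
# a loaded grid; R_fixed pins the instrumental resolution via the FixedPrior branch above).
# fit_star_multi_order('examplestar', g, snr=30.0, R_fixed=24000.0,
#                      logg_fixed=None, l1norm=False)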
# load the spectral grid (a PHOENIX grid here; a BOSZ test grid is commented out below).
# Do this only ONCE!! It takes a long time and lots of memory.
g = load_grid('/u/rbentley/metallicity/spectra_fits/phoenix_t2500_6000_w20000_24000_R40000.h5')
# alternative: '/u/rbentley/metallicity/spectra_fits/test_bosz_t2500_6000_w20000_24000_R40000.h5'
print('grid loaded')

interp1 = Interpolate(starspectrum35)
print('interpolated')
convolve1 = InstrumentConvolveGrating.from_grid(g, R=24000)
print('convolved')
rot1 = RotationalBroadening.from_grid(g, vrot=np.array([10.0]))
print('rot broadened')
norm1 = Normalize(starspectrum35, 2)
print('normalized')

# concatenate the spectral grid (which will have the stellar parameters) with the
# other model components that you want to fit
model = g | rot1 | DopplerShift(vrad=radv) | convolve1 | interp1 | norm1
print('model concatenated')

w, f = model()
#plt.plot(w, f)

model.teff_0 = teff
model.logg_0 = logg
model.mh_0 = mh
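# Hedged sketch (assumptions: teff, logg, mh, and radv were defined earlier in the
# session, and starspectrum35 is the observed order-35 spectrum used above). After
# the parameters are updated the model has to be re-evaluated before plotting.
# w, f = model()
# plt.plot(w, f, label='model')
# plt.plot(starspectrum35.wavelength, starspectrum35.flux, label='data')
# plt.legend()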
def fit(input_file, spectrum=None, teff_prior=[10000.0, 35000.0],
        logg_prior=[2.0, 5.0], mh_prior=[-1.0, 0.8], alpha_prior=[-0.25, 0.5],
        vrot_prior=[0, 350.0], vrad_prior=[-5000, 5000], R_prior=4000.0,
        wave_range=None, outdir='./', snr=30.0, norm_order=2, g=None,
        molecfit=False, wavelength_units='micron', debug=False, **kwargs):
    '''
    Given a fits file, read in and fit the spectrum using a grid.

    Passes keyword arguments into read_fits_file using **kwargs.
    '''

    if g is None:
        print('need to input grid in g keyword')
        return 0

    # build the priors from the ranges passed in as keyword arguments
    teff_prior1 = priors.UniformPrior(teff_prior[0], teff_prior[1])
    logg_prior1 = priors.UniformPrior(logg_prior[0], logg_prior[1])
    mh_prior1 = priors.UniformPrior(mh_prior[0], mh_prior[1])
    alpha_prior1 = priors.UniformPrior(alpha_prior[0], alpha_prior[1])
    vrot_prior1 = priors.UniformPrior(vrot_prior[0], vrot_prior[1])
    vrad_prior1 = priors.UniformPrior(vrad_prior[0], vrad_prior[1])
    #R_prior1 = priors.UniformPrior(1500, 10000)
    R_prior1 = priors.FixedPrior(R_prior)

    # wavelength range for the fit
    #wave_range = None

    file_part = os.path.splitext(os.path.split(input_file)[-1])[0]
    file_part = os.path.join(outdir, file_part)
    extension = os.path.splitext(input_file)[-1]

    spectrum_file = file_part + extension
    fit_file = file_part + '.h5'
    plot_file = file_part + '.pdf'
    corner_file = file_part + '_corner.pdf'
    model_file = file_part + '_model.txt'  # best-fit model

    print('copying file from %s to %s' % (input_file, spectrum_file))
    shutil.copyfile(input_file, spectrum_file)

    # read in the spectrum and set the uncertainty as 1/SNR
    if spectrum is None:
        if molecfit:
            spectrum = read_fits_file.read_txt_file(
                spectrum_file, desired_wavelength_units=wavelength_units,
                wave_range=wave_range, molecfit=True)
        else:
            if (extension == '.csv') or (extension == '.txt'):
                if extension == '.csv':
                    delimiter = ','
                else:
                    delimiter = None
                spectrum = read_fits_file.read_txt_file(
                    spectrum_file, desired_wavelength_units='angstrom',
                    delimiter=delimiter, wave_range=wave_range,
                    wavelength_units=wavelength_units, **kwargs)
            else:
                spectrum = read_fits_file.read_fits_file(
                    spectrum_file, desired_wavelength_units='angstrom',
                    wavelength_units=wavelength_units, wave_range=wave_range)
        spectrum.uncertainty = np.zeros(len(spectrum.flux)) + 1.0 / snr

    # set up the model
    interp1 = Interpolate(spectrum)
    convolve1 = InstrumentConvolveGrating.from_grid(g, R=R_prior)
    rot1 = RotationalBroadening.from_grid(g, vrot=np.array([10.0]))
    norm1 = Normalize(spectrum, norm_order)

    model = g | rot1 | DopplerShift(vrad=0) | convolve1 | interp1 | norm1

    # add likelihood parts
    like1 = Chi2Likelihood(spectrum)
    #like1_l1 = SpectralL1Likelihood(spectrum)

    fit_model = model | like1

    fitobj = MultiNest(fit_model,
                       [teff_prior1, logg_prior1, mh_prior1, alpha_prior1,
                        vrot_prior1, vrad_prior1, R_prior1])
    fitobj.run(verbose=debug)

    result = fitobj.result
    logging.info('saving results to: ' + fit_file)
    result.to_hdf(fit_file)

    # print some of the results into the log
    m = result.maximum
    sig = result.calculate_sigmas(1)
    for k in sig.keys():
        print('%s\t %f\t %f\t %f\t %f' %
              (k, m[k], sig[k][0], sig[k][1], (sig[k][1] - sig[k][0]) / 2.0))

    # evaluate the model at the best-fit parameters
    model.teff_0 = result.maximum.teff_0
    model.logg_0 = result.maximum.logg_0
    model.mh_0 = result.maximum.mh_0
    model.vrot_1 = result.maximum.vrot_1
    model.vrad_2 = result.maximum.vrad_2
    model.R_3 = result.maximum.R_3

    model_wave, model_flux = model()
    logging.info('saving model spectrum to: ' + model_file)
    save_spectrum(model_wave, model_flux, model_file)

    plt.figure(figsize=(12, 6))
    plt.plot(model_wave, model_flux, label='Best Fit Model')
    plt.plot(spectrum.wavelength, spectrum.flux, label='Data')
    plt.ylim(np.nanmin(spectrum.flux.value) - 0.2,
             np.nanmax(spectrum.flux.value) + 0.2)
    plt.xlabel('Wavelength (Angstrom)')
    plt.ylabel('Flux')
    plt.title(spectrum_file)
    plt.legend()
    plt.savefig(plot_file)

    result.plot_triangle(
        parameters=['teff_0', 'logg_0', 'mh_0', 'alpha_0', 'vrot_1', 'vrad_2'])
    logging.info('saving corner plot to: ' + corner_file)
    plt.savefig(corner_file)

    # try to free up memory
    fitobj = 0
    fit_model = 0

    return result
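# Hedged usage sketch (assumptions: the input spectrum filename is a placeholder and
# the prior ranges shown are illustrative for a cool-star grid like the one loaded above).
# g = load_grid('/u/rbentley/metallicity/spectra_fits/phoenix_t2500_6000_w20000_24000_R40000.h5')
# result = fit('examplestar_spectrum.fits', g=g, teff_prior=[2500.0, 6000.0],
#              logg_prior=[0.0, 4.5], R_prior=24000.0, snr=30.0, outdir='./fits/')
# print(result.median)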