def test_load_line_lists():
    line_lists = arcl_io.load_line_lists(['HgI', 'ZnI'])
    # Unknown
    line_lists = arcl_io.load_line_lists(['HgI', 'ZnI'], unknown=True)
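
# Illustrative addition (not part of the original test): a hedged sketch of a
# stricter check, assuming load_line_lists returns an astropy Table with
# 'wave' and 'ion' columns, as the routines below rely on.
def test_line_list_columns():
    line_lists = arcl_io.load_line_lists(['HgI', 'ZnI'])
    for key in ['ion', 'wave']:
        assert key in line_lists.keys()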
def generate_hdf(sav_file, instr, lamps, outfil, dtoler=0.6):
    """ Given an input LowRedux IDL save file, generate an hdf5 file of the
    archived arc spectra.  IDs arc lines too.

    Parameters
    ----------
    sav_file : str
      Root name of the IDL save file from LowRedux, e.g. lris_blue_600.sav
    instr : str
      Instrument name
    lamps : list
      Arc lamps used
    outfil : str
      Name of the output hdf5 file
    dtoler : float, optional
      Tolerance (Ang) for matching a line to the line list

    Returns
    -------
    """
    from pypit import pyputils
    msgs = pyputils.get_dummy_logger()
    from pypit import arwave
    from pypit import arutils
    arutils.dummy_settings()
    #
    from arclines.pypit_utils import find_peaks
    from arclines.io import load_line_lists
    #
    # Read IDL save file
    sav_file = os.getenv('LONGSLIT_DIR') + 'calib/linelists/' + sav_file
    s = readsav(sav_file)
    ctbl = Table(s['calib'])  # For writing later

    # Line list
    alist = load_line_lists(lamps)

    # One spectrum?
    ashape = s['archive_arc'].shape
    if len(ashape) == 1:
        nspec = 1
        npix = ashape[0]
    else:
        nspec = s['archive_arc'].shape[0]
        npix = ashape[1]

    # Meta data
    mdict = dict(npix=npix, instr=instr,
                 lamps=[str(ilamp) for ilamp in lamps],  # For writing to hdf5
                 nspec=nspec, infil=sav_file, IDairvac='vac')
    print("Processing {:d} spectra in {:s}".format(mdict['nspec'], sav_file))

    # Start output
    outh5 = h5py.File(out_path + outfil, 'w')
    outh5.create_group('arcs')

    # Loop on spectra
    for ss in range(mdict['nspec']):
        sss = str(ss)
        # Parse
        if nspec == 1:
            spec = s['archive_arc']
        else:
            spec = s['archive_arc'][ss]
        calib = s['calib'][ss]
        # Peaks
        tampl, tcent, twid, w, yprep = find_peaks(spec)
        pixpk = tcent[w]
        pixampl = tampl[w]

        # Wavelength solution
        if calib['func'] == 'CHEBY':
            wv_air = cheby_val(calib['ffit'], np.arange(mdict['npix']),
                               calib['nrm'], calib['nord'])
        elif calib['func'] == 'POLY':
            wv_air = poly_val(calib['ffit'], np.arange(mdict['npix']), calib['nrm'])
        # Check blue->red or vice-versa
        if ss == 0:
            if wv_air[0] > wv_air[-1]:
                mdict['bluered'] = False
            else:
                mdict['bluered'] = True

        # Peak waves
        if calib['func'] == 'CHEBY':
            twave_air = cheby_val(calib['ffit'], pixpk, calib['nrm'], calib['nord'])
        else:
            twave_air = poly_val(calib['ffit'], pixpk, calib['nrm'])
        # Air to Vac
        twave_vac = arwave.airtovac(twave_air * u.AA)
        wave_vac = arwave.airtovac(wv_air * u.AA)
        if ss == 0:
            disp = np.median(np.abs(wave_vac - np.roll(wave_vac, 1)))
            print("Average dispersion = {:g}".format(disp))

        # IDs
        idwv = np.zeros_like(pixpk)
        idsion = np.array([str('12345')] * len(pixpk))
        for kk, twv in enumerate(twave_vac.value):
            # diff
            diff = np.abs(twv - alist['wave'])
            if np.min(diff) < dtoler:
                imin = np.argmin(diff)
                idwv[kk] = alist['wave'][imin]
                #idsion[kk] = alist['Ion'][imin]  NIST
                idsion[kk] = alist['ion'][imin]

        # Red to blue?
        if mdict['bluered'] is False:
            pixpk = mdict['npix'] - 1 - pixpk
            # Re-sort
            asrt = np.argsort(pixpk)
            pixpk = pixpk[asrt]
            idwv = idwv[asrt]
            idsion = idsion[asrt]  # Keep ion labels aligned with the re-sorted IDs
            # Reverse
            spec = spec[::-1]
            wave_vac = wave_vac[::-1]

        # Output
        outh5['arcs'].create_group(sss)
        # Datasets
        outh5['arcs'][sss]['wave'] = wave_vac
        outh5['arcs'][sss]['wave'].attrs['airvac'] = 'vac'
        outh5['arcs'][sss]['spec'] = spec
        outh5['arcs'][sss]['spec'].attrs['flux'] = 'counts'
        outh5['arcs'][sss]['pixpk'] = pixpk
        outh5['arcs'][sss]['ID'] = idwv
        outh5['arcs'][sss]['ID'].attrs['airvac'] = 'vac'
        outh5['arcs'][sss]['Ion'] = idsion
        # LR wavelengths
        outh5['arcs'][sss]['LR_wave'] = wv_air
        outh5['arcs'][sss]['LR_wave'].attrs['airvac'] = 'air'
        # LR Fit
        outh5['arcs'][sss].create_group('LR_fit')
        for key in ctbl.keys():
            outh5['arcs'][sss]['LR_fit'][key] = ctbl[ss][key]

    # Meta data
    outh5.create_group('meta')
    for key in mdict.keys():
        try:
            outh5['meta'][key] = mdict[key]
        except TypeError:  # Probably a unicode thing
            pdb.set_trace()
    # Close
    outh5.close()
    print('Wrote {:s}'.format(out_path + outfil))
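
# Example call (the output filename is illustrative, not a shipped default):
# generate_hdf reads the IDL save file from $LONGSLIT_DIR/calib/linelists/ and
# writes the hdf5 archive to out_path.  For an LRISb 600/4000 setup one might run:
#
#   generate_hdf('lris_blue_600.sav', 'LRISb', ['CdI', 'HgI', 'ZnI'],
#                'LRISb_600.hdf5')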
def tst_quad_match_with_lowredux(low_redux_hdf, instr, swv_uncertainty=500.):
    """
    Returns
    -------
    """
    # Load for instrument
    if instr == 'LRISb':
        llist = arcl_io.load_line_lists(['CdI', 'HgI', 'ZnI'], unknown=True)
        # Cut
        gdwv = (llist['wave'] > 3000.) & (llist['wave'] < 5600.)
        cut_llist = llist[gdwv]
        #
        wvdata = cut_llist['wave'].data
        # Add a key missing line
        #wvdata = np.append(wvdata, [3404.6978])
        # Sort
        wvdata.sort()
        #
        disp = 1.26  # Ang/binned pix
        pix_tol = 2
        cut_choice = 2  # 0 might be a touch better..
    elif instr == 'LRISr':
        #llist = arcl_io.load_line_lists(['ArI','HgI','KrI','NeI','XeI'], unknown=True)
        llist = arcl_io.load_line_lists(['ArI', 'HgI', 'NeI'], unknown=True)  # No Kr or Xe in LowRedux
        # Cut
        gdwv = (llist['wave'] > 5600.) & (llist['wave'] < 9000.)  # WILL WANT TO EXTEND
        cut_llist = llist[gdwv]
        #
        wvdata = cut_llist['wave'].data
        # Add a key missing line
        #wvdata = np.append(wvdata, [3404.6978])
        # Sort
        wvdata.sort()
        #
        disp = 1.6  # Ang/binned pix
        pix_tol = 1
        cut_choice = 2  # ARBITRARY
    else:
        raise IOError("Not ready for this instrument")

    # Open
    hdf = h5py.File(low_redux_hdf, 'r')
    mdict = {}
    for key in hdf['meta'].keys():
        mdict[key] = hdf['meta'][key].value

    # Loop on spec
    extras = []
    for ispec in range(mdict['nspec']):
        all_tcent = hdf['arcs/' + str(ispec) + '/pixpk'].value
        spec = hdf['arcs/' + str(ispec) + '/spec'].value
        wave = hdf['arcs/' + str(ispec) + '/wave'].value  # vacuum
        npix = wave.size
        #
        if False:
            from scipy.interpolate import interp1d
            fwv = interp1d(np.arange(npix), wave, kind='cubic')
            #fwv(all_tcent[3])
            pdb.set_trace()
        amps = []
        for itc in all_tcent:
            pix = int(np.round(itc))
            amps.append(spec[pix])
        amps = np.array(amps)
        # Trim tcent on amplitude
        if cut_choice == 0:
            cut_amp = amps > 1000.
        elif cut_choice == 1:
            mxa = np.max(amps)
            cut_amp = amps > 0.2 * mxa
        elif cut_choice == 2:
            cut_amp = amps > 500.
        tcent = all_tcent[cut_amp]
        nlin = tcent.size

        # init with Truth
        final_idx = {}
        for ii in range(nlin):
            final_idx[ii] = {}
            final_idx[ii]['matches'] = []
            # Truth (if any)
            widx = int(np.round(tcent[ii]))
            mtw = np.where(np.abs(wvdata - wave[widx]) < 2 * disp)[0]
            # Catches bad LRISb line
            if len(mtw) == 0:
                if (instr == 'LRISb') & (wave[widx] < 5600):  # LRISb only
                    extras.append(wave[widx])
                    # print("No match for index={:d}, wave={:g}, amp={:g}".format(ii, wave[widx], spec[widx]))
                final_idx[ii]['truth'] = -1
            elif len(mtw) == 1:
                final_idx[ii]['truth'] = mtw[0]
            else:
                if instr == 'LRISb':
                    final_idx[ii]['truth'] = mtw[0]  # Might have had this wrong!  Not 4681.45
                else:
                    pdb.set_trace()
        #
        for idx in range(nlin - 4):
            for jj in range(4):
                sub_idx = idx + np.arange(5).astype(int)
                msk = np.array([True] * 5)
                msk[jj + 1] = False
                # Setup
                sidx = sub_idx[msk]
                spec_lines = np.array(tcent)[sidx]
                #
                widx = int(np.round(tcent[idx]))
                wvmnx = [wave[widx] - swv_uncertainty, wave[widx] + swv_uncertainty]
                if idx == 0:
                    twv_min = wave[widx]
                # Run
                matches = arch_patt.match_quad_to_list(spec_lines, wvdata, wvmnx,
                                                       disp, tol=pix_tol)
                # Save
                for match in matches:
                    for ii in range(4):
                        final_idx[sidx[ii]]['matches'].append(match[ii])
        '''
        for idx in range(nlin-4):
            # Setup
            spec_lines = np.array(tcent[idx:idx+4])
            #
            widx = int(np.round(tcent[idx]))
            wvmnx = [wave[widx]-swv_uncertainty, wave[widx]+swv_uncertainty]
            if idx == 0:
                twv_min = wave[widx]
            # Run
            matches = arch_patt.match_quad_to_list(spec_lines, wvdata, wvmnx, disp, tol=pix_tol)
            # Save
            for match in matches:
                for ii in range(4):
                    final_idx[idx+ii]['matches'].append(match[ii])
        '''
        # Grade
        grades = grade_fidx_results(final_idx)
        # PRINT
        if ispec == 0:
            print("II nDet nPerf nGood nOK nRisk nAmb nFail wvmin")
        print("{:2d} {:3d} {:3d} {:3d} {:3d} {:3d} {:3d} {:3d} {:g}".format(
            ispec, grades['ndetect'], grades['nPerf'], grades['nGood'],
            grades['nOK'], grades['nRisk'], grades['nAmb'], grades['nFail'],
            twv_min))
        if ispec == 94:
            pdb.set_trace()

    extras = np.array(extras)
    extras.sort()
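
# Example (hypothetical filename): run the quad-match check against an archive
# written by generate_hdf() above, e.g.
#
#   tst_quad_match_with_lowredux('LRISb_600.hdf5', 'LRISb')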
def vette_unkwn_against_lists(U_lines, uions, tol_NIST=0.2, NIST_only=False,
                              tol_llist=2., verbose=False):
    """ Query unknown lines against the NIST database and the arclines line lists

    Parameters
    ----------
    U_lines : Table
      Table of unknown lines (requires a 'wave' column)
    uions : list or ndarray
      List of ions to check against
    tol_NIST : float, optional
      Tolerance for a match with NIST
    tol_llist : float, optional
      Tolerance for a match with arclines line lists

    Returns
    -------
    mask : int array
      2 = NIST (and add)
      1 = Add these
      0 = Do not add these
    wv_match : ndarray
      str array
    """
    from arclines import io as arcl_io
    mask = np.ones(len(U_lines)).astype(int)
    wv_match = np.array(['XXI 12233.2312'] * len(U_lines))
    # Loop on NIST
    for ion in uions:
        # Load
        nist = arcl_io.load_nist(ion)
        # Try to match
        for ss, row in enumerate(U_lines):
            dwv = np.abs(nist['wave'] - row['wave'])
            imin = np.argmin(np.abs(dwv))
            #if verbose:
            #    print("Closest match to ion={:s} for {:g} is".format(ion, row['wave']))
            #    print(nist[['Ion','wave','RelInt']][imin])
            # Match?
            if dwv[imin] < tol_NIST:
                wv_match[ss] = '{:s} {:.4f}'.format(ion, nist['wave'][imin])
                mask[ss] = 2
                if verbose:
                    print("UNKNWN Matched to NIST: ion={:s} {:g} with {:g}".format(
                        ion, nist['wave'][imin], row['wave']))
                    #print(nist[['Ion','wave','RelInt','Aki']][imin])
    if NIST_only:
        return mask, wv_match

    # Our line lists
    line_list = arcl_io.load_line_lists(uions, skip=True)
    if line_list is None:
        return mask, wv_match
    for ss, row in enumerate(U_lines):
        dwv = np.abs(line_list['wave'] - row['wave'])
        imin = np.argmin(np.abs(dwv))
        # Match?
        if dwv[imin] < tol_llist:
            mask[ss] = 0
            if verbose:
                print("UNKNWN Matched to arclines: ion={:s} {:g} with {:g}".format(
                    line_list['ion'][imin], line_list['wave'][imin], row['wave']))
                print(" ---- Will not add it")
    return mask, wv_match
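
# Minimal usage sketch (the wavelengths below are placeholders): vet a small
# table of unidentified lines against NIST and the existing arclines lists.
#
#   U_lines = Table()
#   U_lines['wave'] = [5462.27, 6402.25]
#   mask, wv_match = vette_unkwn_against_lists(U_lines, ['HgI', 'NeI'], verbose=True)
#   # mask: 2 = NIST match (add), 1 = add as UNKNWN, 0 = already listed (skip)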
def basic(spec, lines, wv_cen, disp, siglev=20., min_ampl=300.,
          swv_uncertainty=350., pix_tol=2, plot_fil=None, min_match=5,
          **kwargs):
    """ Basic holy grail algorithm

    Parameters
    ----------
    spec : spectrum
    lines : list
      List of arc lamps on
    wv_cen : float
      Guess at central wavelength
    disp : float
      Dispersion A/pix
    siglev
    min_ampl
    swv_uncertainty
    pix_tol
    plot_fil

    Returns
    -------
    status : int
    """
    # Init line-lists and wavelength 'guess'
    npix = spec.size
    wave = wv_cen + (np.arange(npix) - npix / 2.) * disp

    line_lists = arcl_io.load_line_lists(lines, unknown=True)
    wvdata = line_lists['wave'].data  # NIST + Extra
    isrt = np.argsort(wvdata)
    wvdata = wvdata[isrt]

    # Find peaks
    all_tcent, cut_tcent, icut = arch_utils.arc_lines_from_spec(
        spec, siglev=siglev, min_ampl=min_ampl)

    # Matching
    match_idx, scores = arch_patt.run_quad_match(
        cut_tcent, wave, wvdata, disp,
        swv_uncertainty=swv_uncertainty, pix_tol=pix_tol)

    # Check quadrants
    xquad = npix // 4 + 1
    print("================================================================")
    print("Checking quadrants:")
    print("----------------------------------------------------------------")
    for jj in range(4):
        tc_in_q = (cut_tcent >= jj * xquad) & (cut_tcent < (jj + 1) * xquad)
        cstat = 'quad {:d}: ndet={:d}'.format(jj, np.sum(tc_in_q))
        # Stats
        for key in ['Perf', 'Good', 'OK', 'Amb']:
            in_stat = scores[tc_in_q] == key
            cstat += ' {:s}={:d}'.format(key, np.sum(in_stat))
        # Print
        print(cstat)
    print("----------------------------------------------------------------")

    # Go for it!?
    mask = np.array([False] * len(all_tcent))
    IDs = []
    for kk, score in enumerate(scores):
        if score in ['Perf', 'Good', 'Ok']:
            mask[icut[kk]] = True
            uni, counts = np.unique(match_idx[kk]['matches'], return_counts=True)
            imx = np.argmax(counts)
            IDs.append(wvdata[uni[imx]])
    ngd_match = np.sum(mask)
    if ngd_match < min_match:
        print("Insufficient matches to continue")
        status = -1
        return status, ngd_match, match_idx, scores, None

    # Fit
    NIST_lines = line_lists['NIST'] > 0
    ifit = np.where(mask)[0]
    final_fit = arch_fit.iterative_fitting(spec, all_tcent, ifit, IDs,
                                           line_lists[NIST_lines], disp,
                                           plot_fil=plot_fil)
    # Return
    status = 1
    return status, ngd_match, match_idx, scores, final_fit
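
# Usage sketch (the numbers are placeholders, not recommended values): run the
# basic solver on a 1D arc spectrum given guesses at the central wavelength
# and dispersion; status == 1 signals a successful fit.
#
#   status, ngd_match, match_idx, scores, final_fit = basic(
#       spec, ['ArI', 'HgI', 'NeI'], wv_cen=7000., disp=1.6)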
def semi_brute(spec, lines, wv_cen, disp, siglev=20., min_ampl=300.,
               outroot=None, debug=False, do_fit=True, verbose=False,
               fit_parm=None, min_nmatch=0, lowest_ampl=200.):
    """
    Parameters
    ----------
    spec
    lines
    wv_cen
    disp
    siglev
    min_ampl
    outroot
    debug
    do_fit
    verbose
    fit_parm
    min_nmatch
    lowest_ampl

    Returns
    -------
    best_dict : dict
    final_fit : dict
    """
    # imports
    from astropy.table import vstack
    from linetools import utils as ltu
    from arclines import plots as arcl_plots

    # Load line lists
    line_lists = arcl_io.load_line_lists(lines)
    unknwns = arcl_io.load_unknown_list(lines)

    npix = spec.size

    # Lines
    all_tcent, cut_tcent, icut = arch_utils.arc_lines_from_spec(spec, min_ampl=min_ampl)

    # Best
    best_dict = dict(nmatch=0, ibest=-1, bwv=0., min_ampl=min_ampl)

    # 3 things to fiddle:
    #   pix_tol -- higher for fewer lines  1/2
    #   unknowns -- on for fewer lines  off/on
    #   scoring -- weaken for more lines ??

    # Loop on unknowns
    for unknown in [False, True]:
        if unknown:
            tot_list = vstack([line_lists, unknwns])
        else:
            tot_list = line_lists
        wvdata = np.array(tot_list['wave'].data)  # Removes mask if any
        wvdata.sort()
        sav_nmatch = best_dict['nmatch']

        # Loop on pix_tol
        for pix_tol in [1., 2.]:
            # Scan on wavelengths
            arch_patt.scan_for_matches(wv_cen, disp, npix, cut_tcent, wvdata,
                                       best_dict=best_dict, pix_tol=pix_tol)
            # Lower minimum amplitude
            ampl = min_ampl
            while (best_dict['nmatch'] < min_nmatch):
                ampl /= 2.
                if ampl < lowest_ampl:
                    break
                all_tcent, cut_tcent, icut = arch_utils.arc_lines_from_spec(
                    spec, min_ampl=ampl)
                arch_patt.scan_for_matches(wv_cen, disp, npix, cut_tcent, wvdata,
                                           best_dict=best_dict, pix_tol=pix_tol,
                                           ampl=ampl)

        # Save linelist?
        if best_dict['nmatch'] > sav_nmatch:
            best_dict['line_list'] = tot_list
            best_dict['unknown'] = unknown
            best_dict['ampl'] = unknown

    if best_dict['nmatch'] == 0:
        print('---------------------------------------------------')
        print('Report:')
        print(':: No matches!  Could be you input a bad wvcen or disp value')
        print('---------------------------------------------------')
        return

    # Report
    print('---------------------------------------------------')
    print('Report:')
    print(':: Number of lines recovered = {:d}'.format(all_tcent.size))
    print(':: Number of lines analyzed = {:d}'.format(cut_tcent.size))
    print(':: Number of Perf/Good/Ok matches = {:d}'.format(best_dict['nmatch']))
    print(':: Best central wavelength = {:g}A'.format(best_dict['bwv']))
    print(':: Best solution used pix_tol = {}'.format(best_dict['pix_tol']))
    print(':: Best solution had unknown = {}'.format(best_dict['unknown']))
    print('---------------------------------------------------')

    if debug:
        match_idx = best_dict['midx']
        for kk in match_idx.keys():
            uni, counts = np.unique(match_idx[kk]['matches'], return_counts=True)
            print('kk={}, {}, {}, {}'.format(kk, uni, counts, np.sum(counts)))

    # Write scores
    #out_dict = best_dict['scores']
    #jdict = ltu.jsonify(out_dict)
    #ltu.savejson(pargs.outroot+'.scores', jdict, easy_to_read=True, overwrite=True)

    # Write IDs
    if outroot is not None:
        out_dict = dict(pix=cut_tcent, IDs=best_dict['IDs'])
        jdict = ltu.jsonify(out_dict)
        ltu.savejson(outroot + '.json', jdict, easy_to_read=True, overwrite=True)
        print("Wrote: {:s}".format(outroot + '.json'))

    # Plot
    if outroot is not None:
        arcl_plots.match_qa(spec, cut_tcent, best_dict['line_list'],
                            best_dict['IDs'], best_dict['scores'], outroot + '.pdf')
        print("Wrote: {:s}".format(outroot + '.pdf'))

    # Fit
    final_fit = None
    if do_fit:
        # Read in Full NIST Tables
        full_NIST = arcl_io.load_line_lists(lines, NIST=True)
        # KLUDGE!!!!!
        keep = full_NIST['wave'] > 8800.
        line_lists = vstack([line_lists, full_NIST[keep]])
        #
        NIST_lines = line_lists['NIST'] > 0
        ifit = np.where(best_dict['mask'])[0]
        if outroot is not None:
            plot_fil = outroot + '_fit.pdf'
        else:
            plot_fil = None
        # Purge UNKNOWNS from ifit
        imsk = np.array([True] * len(ifit))
        for kk, idwv in enumerate(np.array(best_dict['IDs'])[ifit]):
            if np.min(np.abs(line_lists['wave'][NIST_lines] - idwv)) > 0.01:
                imsk[kk] = False
        ifit = ifit[imsk]
        # Allow for weaker lines in the fit
        all_tcent, weak_cut_tcent, icut = arch_utils.arc_lines_from_spec(
            spec, min_ampl=lowest_ampl)
        add_weak = []
        for weak in weak_cut_tcent:
            if np.min(np.abs(cut_tcent - weak)) > 5.:
                add_weak += [weak]
        if len(add_weak) > 0:
            cut_tcent = np.concatenate([cut_tcent, np.array(add_weak)])
        # Fit
        final_fit = arch_fit.iterative_fitting(spec, cut_tcent, ifit,
                                               np.array(best_dict['IDs'])[ifit],
                                               line_lists[NIST_lines], disp,
                                               plot_fil=plot_fil, verbose=verbose,
                                               aparm=fit_parm)
        if plot_fil is not None:
            print("Wrote: {:s}".format(plot_fil))

    # Return
    return best_dict, final_fit
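
# Usage sketch (placeholder guesses and output root): semi_brute scans pixel
# tolerance and the unknown-line list around the supplied wv_cen/disp, then
# optionally fits and writes <outroot>.json / <outroot>.pdf.
#
#   best_dict, final_fit = semi_brute(spec, ['ArI', 'HgI', 'NeI'],
#                                     wv_cen=7000., disp=1.6, outroot='lrisr_semi')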
def general(spec, lines, min_ampl=300., outroot=None, debug=False, do_fit=True,
            verbose=False, fit_parm=None, lowest_ampl=200.):
    """
    Parameters
    ----------
    spec
    lines
    siglev
    min_ampl
    outroot
    debug
    do_fit
    verbose
    fit_parm
    min_nmatch
    lowest_ampl

    Returns
    -------
    best_dict : dict
    final_fit : dict
    """
    # imports
    from astropy.table import vstack
    from linetools import utils as ltu
    from arclines import plots as arcl_plots
    # Import the triangles algorithm
    from arclines.holy.patterns import triangles

    # Load line lists
    line_lists = arcl_io.load_line_lists(lines)
    unknwns = arcl_io.load_unknown_list(lines)

    npix = spec.size

    # Lines
    all_tcent, cut_tcent, icut = arch_utils.arc_lines_from_spec(spec, min_ampl=min_ampl)
    use_tcent = all_tcent.copy()
    #use_tcent = cut_tcent.copy()  # min_ampl is having no effect at present

    # Best
    best_dict = dict(nmatch=0, ibest=-1, bwv=0., min_ampl=min_ampl)

    ngrid = 1000

    # Loop on unknowns
    for unknown in [False, True]:
        if unknown:
            tot_list = vstack([line_lists, unknwns])
        else:
            tot_list = line_lists
        wvdata = np.array(tot_list['wave'].data)  # Removes mask if any
        wvdata.sort()
        sav_nmatch = best_dict['nmatch']

        # Loop on pix_tol
        for pix_tol in [1.]:  #, 2.]:
            # Triangle pattern matching
            dindex, lindex, wvcen, disps = triangles(use_tcent, wvdata, npix, 5, 10, pix_tol)

            # Remove any invalid results
            ww = np.where((wvcen > 0.0) & (disps > 0.0))
            dindex = dindex[ww[0], :]
            lindex = lindex[ww[0], :]
            disps = disps[ww]
            wvcen = wvcen[ww]

            # Setup the grids and histogram
            binw = np.linspace(max(np.min(wvcen), np.min(wvdata)),
                               min(np.max(wvcen), np.max(wvdata)), ngrid)
            bind = np.linspace(np.min(np.log10(disps)), np.max(np.log10(disps)), ngrid)
            histimg, xed, yed = np.histogram2d(wvcen, np.log10(disps), bins=[binw, bind])
            histimg = gaussian_filter(histimg, 3)

            # Find the best combination of central wavelength and dispersion
            bidx = np.unravel_index(np.argmax(histimg), histimg.shape)

            debug = False
            if debug:
                from matplotlib import pyplot as plt
                plt.clf()
                plt.imshow(histimg[:, ::-1].T,
                           extent=[binw[0], binw[-1], bind[0], bind[-1]], aspect='auto')
                plt.axvline(binw[bidx[0]], color='r', linestyle='--')
                plt.axhline(bind[bidx[1]], color='r', linestyle='--')
                plt.show()
                print(histimg[bidx], binw[bidx[0]], 10.0**bind[bidx[1]])
                pdb.set_trace()

            # Find all good solutions
            nsel = 5  # Select all solutions around the best solution within a square of side 2*nsel
            wlo = binw[bidx[0] - nsel]
            whi = binw[bidx[0] + nsel]
            dlo = 10.0 ** bind[bidx[1] - 5*nsel]
            dhi = 10.0 ** bind[bidx[1] + 5*nsel]
            wgd = np.where((wvcen > wlo) & (wvcen < whi) & (disps > dlo) & (disps < dhi))
            dindex = dindex[wgd[0], :].flatten()
            lindex = lindex[wgd[0], :].flatten()

            # Given this solution, fit for all detlines
            arch_patt.solve_triangles(use_tcent, wvdata, dindex, lindex, best_dict)
            if best_dict['nmatch'] > sav_nmatch:
                best_dict['pix_tol'] = pix_tol

        # Save linelist?
        if best_dict['nmatch'] > sav_nmatch:
            best_dict['bwv'] = binw[bidx[0]]
            best_dict['bdisp'] = 10.0**bind[bidx[1]]
            best_dict['line_list'] = tot_list.copy()
            best_dict['unknown'] = unknown
            best_dict['ampl'] = unknown

    # Try to pick up some extras by turning off/on unknowns
    if best_dict['unknown']:
        tot_list = line_lists
    else:
        tot_list = vstack([line_lists, unknwns])
    # Retrieve the wavelengths of the linelist and sort
    wvdata = np.array(tot_list['wave'].data)  # Removes mask if any
    wvdata.sort()

    if best_dict['nmatch'] == 0:
        print('---------------------------------------------------')
        print('Report:')
        print(':: No matches!  Try another algorithm')
        print('---------------------------------------------------')
        return

    # Report
    print('---------------------------------------------------')
    print('Report:')
    print(':: Number of lines recovered = {:d}'.format(all_tcent.size))
    print(':: Number of lines analyzed = {:d}'.format(use_tcent.size))
    print(':: Number of acceptable matches = {:d}'.format(best_dict['nmatch']))
    print(':: Best central wavelength = {:g}A'.format(best_dict['bwv']))
    print(':: Best dispersion = {:g}A/pix'.format(best_dict['bdisp']))
    print(':: Best solution used pix_tol = {}'.format(best_dict['pix_tol']))
    print(':: Best solution had unknown = {}'.format(best_dict['unknown']))
    print('---------------------------------------------------')

    # Write IDs
    if outroot is not None:
        out_dict = dict(pix=use_tcent, IDs=best_dict['IDs'])
        jdict = ltu.jsonify(out_dict)
        ltu.savejson(outroot+'.json', jdict, easy_to_read=True, overwrite=True)
        print("Wrote: {:s}".format(outroot+'.json'))

    # Plot
    if outroot is not None:
        tmp_list = vstack([line_lists, unknwns])
        arcl_plots.match_qa(spec, use_tcent, tmp_list,
                            best_dict['IDs'], best_dict['scores'], outroot+'.pdf')
        print("Wrote: {:s}".format(outroot+'.pdf'))

    # Fit
    final_fit = None
    if do_fit:
        # Good lines = NIST or OH
        good_lines = np.any([line_lists['NIST'] > 0, line_lists['ion'] == 'OH'], axis=0)
        #
        ifit = np.where(best_dict['mask'])[0]
        if outroot is not None:
            plot_fil = outroot+'_fit.pdf'
        else:
            plot_fil = None
        # Purge UNKNOWNS from ifit
        imsk = np.array([True]*len(ifit))
        for kk, idwv in enumerate(np.array(best_dict['IDs'])[ifit]):
            if np.min(np.abs(line_lists['wave'][good_lines]-idwv)) > 0.01:
                imsk[kk] = False
        ifit = ifit[imsk]
        # Allow for weaker lines in the fit
        all_tcent, weak_cut_tcent, icut = arch_utils.arc_lines_from_spec(spec, min_ampl=lowest_ampl)
        use_weak_tcent = all_tcent.copy()
        add_weak = []
        for weak in use_weak_tcent:
            if np.min(np.abs(use_tcent-weak)) > 5.:
                add_weak += [weak]
        if len(add_weak) > 0:
            use_tcent = np.concatenate([use_tcent, np.array(add_weak)])
        # Fit
        final_fit = arch_fit.iterative_fitting(spec, use_tcent, ifit,
                                               np.array(best_dict['IDs'])[ifit],
                                               line_lists[good_lines],
                                               best_dict['bdisp'], plot_fil=plot_fil,
                                               verbose=verbose, aparm=fit_parm)
        if plot_fil is not None:
            print("Wrote: {:s}".format(plot_fil))

    # Return
    return best_dict, final_fit
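
# Usage sketch: unlike semi_brute(), general() needs no central-wavelength or
# dispersion guess; both are recovered from the triangle-pattern histogram.
# The output root below is illustrative.
#
#   best_dict, final_fit = general(spec, ['ArI', 'HgI', 'NeI'], outroot='lrisr_general')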
def load_low_redux(version, src_file, ions, plot=False, min_hist=10,
                   cut_amp_val=400., wvmnx=[0., 1e9]):
    """
    Parameters
    ----------
    version : int
      1 : Find extras from set of LowRedux fits
    src_file : str
    plot

    Returns
    -------
    """
    import warnings
    if version != 1:
        raise IOError("Unimplemented version!")

    import h5py
    from scipy.interpolate import interp1d
    from arclines.pypit_utils import find_peaks
    from arclines.io import load_line_lists

    # Load existing line lists
    line_list = load_line_lists(ions, skip=True)
    if line_list is None:  # Should be a 'by scratch case'
        warnings.warn("No line lists found matching your ions: {}".format(ions))
        print("I hope you are building from scratch here..")
        return mk_src_dict()
    wvdata = line_list['wave'].data

    # Open
    hdf = h5py.File(src_path + src_file, 'r')
    mdict = {}
    for key in hdf['meta'].keys():
        mdict[key] = hdf['meta'][key].value

    # Loop on spec
    extras = []
    eamps = []
    for ispec in range(mdict['nspec']):
        spec = hdf['arcs/' + str(ispec) + '/spec'].value
        wave = hdf['arcs/' + str(ispec) + '/wave'].value  # vacuum
        disp = np.median(np.abs(wave - np.roll(wave, 1)))
        npix = wave.size
        # Find peaks for extras
        tampl, tcent, twid, w, yprep = find_peaks(spec)
        all_tcent = tcent[w]
        # Function for more precise wavelengths
        fwv = interp1d(np.arange(npix), wave)  #, kind='cubic')
        #
        amps = []
        for itc in all_tcent:
            pix = int(np.round(itc))
            amps.append(np.max(spec[pix - 1:pix + 2]))
        amps = np.array(amps)
        # Trim tcent on amplitude
        cut_amp = amps > cut_amp_val  # 500.
        tcent = all_tcent[cut_amp]
        nlin = tcent.size
        # init with Truth
        for ii in range(nlin):
            wvt = float(fwv(tcent[ii]))
            mtw = np.where(np.abs(wvdata - wvt) < 4 * disp)[0]
            # Deals with bad wavelength solutions
            if len(mtw) == 0:
                if (wvt > wvmnx[0]) & (wvt < wvmnx[1]):  # LRISb only
                    extras.append(wvt)
                    eamps.append(amps[ii])

    # Repackage
    extras = np.array(extras)
    isort = np.argsort(extras)
    extras = extras[isort]
    eamps = np.array(eamps)[isort]

    # Group -- Super-crude friends of friends
    final_extras = []
    final_amps = []
    cnt = 0
    while cnt <= extras.size:
        # First try
        ingroup = np.abs(extras - extras[cnt]) < 2 * disp
        for ii in range(3):  # For some convergence
            # Median
            mngroup = np.median(extras[ingroup])
            ingroup = np.abs(extras - mngroup) < 2 * disp
        if np.sum(ingroup) > min_hist:
            final_extras.append(np.median(extras[ingroup]))
            final_amps.append(np.median(eamps[ingroup]))
        # Step
        newe = mngroup + 5 * disp
        gdcnt = np.where(extras > newe)[0]
        if len(gdcnt) == 0:
            break
        else:
            cnt = gdcnt[0]

    # Table
    U_lines = Table()
    U_lines['wave'] = final_extras
    U_lines['ion'] = str('UNKNWN').rjust(str_len_dict['ion'])
    U_lines['NIST'] = 0
    U_lines['amplitude'] = final_amps

    # Find the best spectrum
    max_nex = 0
    for ispec in range(mdict['nspec']):
        spec = hdf['arcs/' + str(ispec) + '/spec'].value
        wave = hdf['arcs/' + str(ispec) + '/wave'].value  # vacuum
        minwv, maxwv = np.min(wave), np.max(wave)
        nex = np.sum((final_extras > minwv) & (final_extras < maxwv))
        if nex > max_nex:
            svi = ispec
            max_nex = nex

    # Find pixel values
    spec = hdf['arcs/' + str(svi) + '/spec'].value
    wave = hdf['arcs/' + str(svi) + '/wave'].value  # vacuum
    # Extras
    fpix = interp1d(wave, np.arange(npix))  #, kind='cubic')
    epix = fpix(final_extras)

    # Plot??
    if plot:
        # Match to NIST
        mask, wv_match = arcl_utils.vette_unkwn_against_lists(U_lines, ions)
        npix = wave.size
        pextras = dict(x=epix, IDs=[])  # Pixel positions and labels passed to arc_ids() below (assumed structure)
        for ss, fex in enumerate(final_extras):
            if mask[ss] == 2:  # Matched to NIST
                lbl = '{:.4f}'.format(fex) + ' [{:s}]'.format(wv_match[ss])
            else:
                lbl = 'UNKNWN {:.4f}'.format(fex)
            pextras['IDs'].append(lbl)
        # Plot
        arcl_plots.arc_ids(spec, [], [], src_file.replace('.hdf5', '.pdf'),
                           title=src_file.replace('.hdf5', ''), extras=pextras)

    # Return
    return mk_src_dict(U_lines=U_lines, epix=epix, spec=spec, wave=wave)
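
# Usage sketch (hypothetical source file): harvest UNKNWN lines from a set of
# LowRedux solutions stored under src_path, restricting to a wavelength window.
#
#   src_dict = load_low_redux(1, 'LRISb_600.hdf5', ['CdI', 'HgI', 'ZnI'],
#                             plot=True, wvmnx=[3000., 5600.])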