def fix_snr_measurements(db_file):
    con = sq3.connect(db_file)
    cur = con.cursor()
    sql_input = "SELECT * from Spectra inner join Events ON Spectra.SN = Events.SN"
    print 'Updating metadata step 2/7'
    bad_ivars = []
    SN_Array = composite.grab(sql_input, multi_epoch=True, make_corr=False,
                              selection='max_coverage', grab_all=True,
                              db_file=db_file)
    for SN in SN_Array:
        nan_bool_flux = np.isnan(SN.flux)
        non_nan_data = np.where(nan_bool_flux == False)
        non_nan_data = np.array(non_nan_data[0])
        if len(non_nan_data) > 0:
            x1 = non_nan_data[0]
            x2 = non_nan_data[-1]
            error = 1. / np.sqrt(SN.ivar[x1:x2])
            snr = np.nanmedian(SN.flux[x1:x2] / error)
            # print SN.filename, SN.SNR, snr
            if not np.isnan(snr):
                cur.execute("UPDATE Spectra SET snr = ? where filename = ?",
                            (snr, SN.filename))
    con.commit()
def grab(query, multi_epoch=True, make_corr=False, selection='max_coverage',
         grab_all=False, verbose=False, db_file=None):
    """This function takes a SQL query and provides a list of spectrum objects
        (defined in composite.py) that satisfy this query.

    Args:
        query: The SQL query string

    Keyword Args:
        multi_epoch: If True, include all spectra for a given SN that satisfy
            the query. If False, choose one spectrum per SN based on the
            selection keyword.
        make_corr: If True, remove spectra that have been marked as
            'questionable', peculiar events (Iax), and spectra that do not
            have host extinction estimates.
        selection: If multi_epoch is False, this string defines the selection
            criteria for choosing a single spectrum from a SN. Options are:
            'maximum_coverage' (default): largest wavelength range
            'maximum_coverage_choose_uv': largest wavelength range, but
                prioritize hst and swift spectra
            'choose_bluest': smallest minimum wavelength
            'max_snr': highest signal to noise
            'accurate_phase': closest to middle of the phase bin. TODO:
                implement this without parsing the query (currently requires
                uncommenting code)
            'max_coverage_splice': allows multiple spectra from the same SN
                as long as overlap is < 500 A
        grab_all: If True, ignore other arguments and return all data that
            satisfy the SQL query. This also ignores metadata and sets a very
            basic list of spectrum attributes.

    Returns:
        An array of spectrum objects populated with metadata retrieved from
        the SQL query.
    """
    if db_file is None:
        db_file = glob.glob('../data/*.db')[0]
        print 'Using: ' + db_file

    spec_array = composite.grab(query, multi_epoch=multi_epoch,
                                make_corr=make_corr, grab_all=grab_all,
                                db_file=db_file)
    spec_array = composite.prelim_norm(spec_array)
    if verbose:
        print "Name", "Filename", "Source", "SNR", "Phase", "MJD", "MJD_max", "z", "Host Morphology", "Minwave", "Maxwave"
        for spec in spec_array:
            print spec.name, spec.filename, spec.source, spec.SNR, spec.phase, spec.mjd, spec.mjd_max, spec.redshift, spec.ned_host, spec.wavelength[spec.x1], spec.wavelength[spec.x2]
    return spec_array
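
# A minimal usage sketch of grab() above. The phase cut below is only an
# illustrative example query (it is not part of the pipeline), and the
# function assumes a .db file is present under ../data/ so that db_file
# can be resolved automatically.
def _example_grab_usage():
    query = ("SELECT * from Spectra inner join Events ON Spectra.SN = Events.SN "
             "where phase between -1 and 1")
    # One spectrum per SN, chosen by maximum wavelength coverage
    spec_array = grab(query, multi_epoch=False, selection='max_coverage',
                      verbose=True)
    print len(spec_array), 'spectra returned'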
def repair_bad_variance_spectra(db_file):
    con = sq3.connect(db_file)
    cur = con.cursor()
    sql_input = "SELECT * from Spectra inner join Events ON Spectra.SN = Events.SN"
    print 'querying'
    bad_ivars = []
    SN_Array = composite.grab(sql_input, multi_epoch=True, make_corr=False,
                              selection='max_coverage', grab_all=True)
    for SN in SN_Array:
        # Flag spectra whose inverse variance is NaN across the entire interpolated grid
        if len(np.where(np.isnan(SN.ivar))[0]) == 5500:
            bad_ivars.append(SN)

    for SN in bad_ivars:
        print SN.source
        if SN.source == 'bsnip':
            path = '../data/spectra/bsnip/' + SN.filename
        elif SN.source == 'cfa':
            path = ('../data/spectra/cfa/' + SN.filename.split('-')[0] + '/'
                    + SN.filename)
        elif SN.source == 'other':
            path = '../data/spectra/other/' + SN.filename
        elif SN.source == 'uv':
            path = '../data/spectra/uv/' + SN.filename

        spectrum = np.loadtxt(path)
        newdata, snr = prep.compprep(spectrum, SN.name, SN.redshift, SN.source,
                                     use_old_error=False, testing=False)
        try:
            interped = msg.packb(newdata)
            cur.execute("UPDATE Spectra SET snr = ? where filename = ?",
                        (snr.value, SN.filename))
            cur.execute("UPDATE Spectra SET Interpolated_Spectra = ? where filename = ?",
                        (buffer(interped), SN.filename))
            print "Added: ", SN.filename
        except Exception, e:
            print "Interp failed: ", SN.filename
def sky_correlation():
    test_query = " "
    query = "SELECT * FROM Supernovae " + test_query
    SN_Array = comp.grab(query)

    # Keck sky emission spectrum
    sky = pyfits.open('../personal/AdamSnyder/kecksky.fits')
    crval = sky[0].header['CRVAL1']
    delta = sky[0].header['CDELT1']
    skyflux = sky[0].data[0]
    start = crval
    stop = crval + ceil(len(skyflux) * delta)
    skywave = [(start + delta * i) for i in range(len(skyflux))]

    # Find wavelength overlap
    spline_rep = interpolate.splrep(skywave, skyflux)

    # Telluric absorption spectrum
    telluric_spec = np.loadtxt('../data/etc/telluric_spec.dat')
    telluric_spec = np.transpose(telluric_spec)
    tell_wave = telluric_spec[0]
    tell_flux = telluric_spec[1]
    spline_rep_tell = interpolate.splrep(tell_wave, tell_flux)

    corr_dict = {}
    for SN in SN_Array:
        good = np.where((SN.wavelength >= 7550.) & (SN.wavelength <= 7650.))
        # good = np.where((SN.wavelength >= skywave[0]) & (SN.wavelength <= skywave[-1]))
        sky_flux_interp = interpolate.splev(SN.wavelength[good], spline_rep_tell)
        # SN.sky_corr = np.corrcoef(SN.flux[good]*1./np.average(SN.flux[good]), sky_flux_interp)[0][1]
        SN.tell_corr = np.corrcoef(SN.flux[good] * 1. / np.average(SN.flux[good]),
                                   sky_flux_interp)[0][1]
        print SN.tell_corr

    for SN in SN_Array:
        if np.absolute(SN.tell_corr) > .4:
            plt.plot(SN.wavelength, SN.flux)
            plt.show()
#        if bands[i] == 'I':
#            plt.plot(times[i], mags[i], 'o', label = bands[i])
        plt.gca().invert_yaxis()
        plt.legend()
        plt.show()
    else:
        print 'Insufficient Data'


PH_Array = fed.grab_phot_data("SELECT * FROM Photometry")
print len(PH_Array)
print PH_Array[1].name
plot_light_curves(PH_Array[1].light_curves)

SN_Array = comp.grab("SELECT * from Supernovae inner join Photometry ON Supernovae.SN = Photometry.SN "
                     "where velocity between -98 and -12", make_corr=False)
print len(SN_Array)
print SN_Array[1].name
plot_light_curves(SN_Array[1].light_curves)

# Photometric metadata:
# (SN, RA, DEC, zCMB_salt, e_zCMB_salt, Bmag_salt, e_Bmag_salt, s_salt, e_s_salt, c_salt, e_c_salt, mu_salt, e_mu_salt,
#  zCMB_salt2, e_zCMB_salt2, Bmag_salt2, e_Bmag_salt2, x1_salt2, e_x1_salt2, c_salt2, e_c_salt2, mu_salt2, e_mu_salt2,
#  zCMB_mlcs31, e_zCMB_mlcs31, mu_mlcs31, e_mu_mlcs31, delta_mlcs31, e_delta_mlcs31, av_mlcs31, e_av_mlcs31,
#  zCMB_mlcs17, e_zCMB_mlcs17, mu_mlcs17, e_mu_mlcs17, delta_mlcs17, e_delta_mlcs17, av_mlcs17, e_av_mlcs17,
#  glon_host, glat_host, cz_host, czLG_host, czCMB_host, mtype_host, xpos_host, ypos_host, t1_host, filt_host, Ebv_host,
#  zCMB_lc, zhel_lc, mb_lc, e_mb_lc, c_lc, e_c_lc, x1_lc, e_x1_lc, logMst_lc, e_logMst_lc, tmax_lc, e_tmax_lc,
#  cov_mb_s_lc, cov_mb_c_lc, cov_s_c_lc, bias_lc,
#  av_25, dm15_cfa, dm15_from_fits, separation,
#  Photometry)
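
# A hedged example of cutting on one of the photometric columns listed above,
# following the comp.grab call earlier in this script. The dm15_cfa column and
# the range used here are purely illustrative; which columns are populated
# depends on the light-curve fits available for each event.
def _example_dm15_cut():
    query = ("SELECT * from Supernovae inner join Photometry ON Supernovae.SN = Photometry.SN "
             "where dm15_cfa between 1.0 and 1.5")
    sn_with_dm15 = comp.grab(query, make_corr=False)
    print len(sn_with_dm15), 'events with 1.0 < dm15_cfa < 1.5'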
def add_homogenized_photometry(db_file):
    csp_phot = '../data/info_files/CSP_phot.txt'
    phot_dict = {}
    with open(csp_phot) as csp:
        lines = csp.readlines()
        for line in lines[1:]:
            name = line.split()[0].lower()[2:]
            date = float(line.split()[1])
            band = line.split()[2]
            mag = float(line.split()[3])
            mag_err = float(line.split()[4])
            if name not in phot_dict.keys():
                phot_dict[name] = {}
                phot_dict[name][band] = [[], [], [], 'CSP']
                phot_dict[name][band][0].append(date)
                phot_dict[name][band][1].append(mag)
                phot_dict[name][band][2].append(mag_err)
            else:
                if band in phot_dict[name].keys():
                    phot_dict[name][band][0].append(date)
                    phot_dict[name][band][1].append(mag)
                    phot_dict[name][band][2].append(mag_err)
                else:
                    phot_dict[name][band] = [[], [], [], 'CSP']
                    phot_dict[name][band][0].append(date)
                    phot_dict[name][band][1].append(mag)
                    phot_dict[name][band][2].append(mag_err)

    # Sort each light curve by MJD
    for name in phot_dict.keys():
        for band in phot_dict[name].keys():
            mjd_order = np.argsort(np.asarray(phot_dict[name][band][0]))
            phot_dict[name][band][0] = [phot_dict[name][band][0][i] for i in mjd_order]
            phot_dict[name][band][1] = [phot_dict[name][band][1][i] for i in mjd_order]
            phot_dict[name][band][2] = [phot_dict[name][band][2][i] for i in mjd_order]

    con = sq3.connect(db_file)
    cur = con.cursor()
    cur.execute('PRAGMA TABLE_INFO({})'.format("Events"))
    names = [tup[1] for tup in cur.fetchall()]
    # print names
    # Add the column if it does not already exist, then populate it for every event
    if 'Homogenized_Photometry' not in names:
        cur.execute("""ALTER TABLE Events ADD COLUMN Homogenized_Photometry TEXT""")

    sql_input = "SELECT * from Spectra inner join Events ON Spectra.SN = Events.SN"
    # print 'querying'
    SN_Array = composite.grab(sql_input, multi_epoch=False, make_corr=False,
                              selection='max_coverage', grab_all=True)
    for SN in SN_Array:
        if SN.name in phot_dict.keys():
            phot_blob = msg.packb(phot_dict[SN.name])
            cur.execute("UPDATE Events SET Homogenized_Photometry = ? where SN = ?",
                        (buffer(phot_blob), SN.name))
        else:
            cur.execute("UPDATE Events SET Homogenized_Photometry = ? where SN = ?",
                        (None, SN.name))
    con.commit()
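
# A minimal sketch of reading a stored photometry blob back out of the
# database, assuming the same sqlite3 (sq3) and msgpack (msg) imports used
# above. The default SN name is an illustrative placeholder, not a guaranteed
# table entry.
def _example_read_homogenized_photometry(db_file, sn_name='2005kc'):
    con = sq3.connect(db_file)
    cur = con.cursor()
    cur.execute("SELECT Homogenized_Photometry FROM Events WHERE SN = ?", (sn_name,))
    row = cur.fetchone()
    if row is not None and row[0] is not None:
        # The unpacked dict maps band -> [mjd list, mag list, mag-err list, source]
        phot = msg.unpackb(str(row[0]))
        print sn_name, sorted(phot.keys())
    con.close()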
def add_salt2_survey_ID_column(db_file):
    salt2 = ascii.read("../data/info_files/SALT2mu_fpan.fitres", delimiter=r'\s',
                       guess=False)
    salt2_ID_dict = build_salt2_ID_dict(salt2)

    id_file = "../data/info_files/SURVEY.DEF"
    id_dict = {}
    with open(id_file) as file:
        id_lines = file.readlines()
        for line in id_lines:
            if (not line.startswith('#') and len(line.split()) > 2
                    and line.split()[0] == 'SURVEY:'):
                id_dict[line.split()[2]] = line.split()[1]

    sn_id_dict = {}
    for sn in salt2_ID_dict.keys():
        sn_id_dict[sn] = id_dict[salt2_ID_dict[sn]]

    con = sq3.connect(db_file)
    cur = con.cursor()
    cur.execute('PRAGMA TABLE_INFO({})'.format("Events"))
    names = [tup[1] for tup in cur.fetchall()]
    # print names
    # Add the column if it does not already exist, then populate it for every event
    if 'salt2_phot_source' not in names:
        cur.execute("""ALTER TABLE Events ADD COLUMN salt2_phot_source TEXT""")

    sql_input = "SELECT * from Spectra inner join Events ON Spectra.SN = Events.SN"
    # print 'querying'
    SN_Array = composite.grab(sql_input, multi_epoch=False, make_corr=False,
                              selection='max_coverage', grab_all=True)
    for SN in SN_Array:
        if SN.name in sn_id_dict.keys():
            # print SN.name, sn_id_dict[SN.name]
            cur.execute("UPDATE Events SET salt2_phot_source = ? where SN = ?",
                        (sn_id_dict[SN.name], SN.name))
        else:
            cur.execute("UPDATE Events SET salt2_phot_source = ? where SN = ?",
                        (None, SN.name))
    con.commit()
    file.write('\caption{CAPTION}\n')
    file.write('\label{tab:1}\n')
    file.write('\end{table*}')
    file.close()


def write_ascii_table(filename, table, caption=None):
    with open('../../../Paper_Drafts/' + filename, 'w') as file:
        file.write(table)


if __name__ == "__main__":
    SN_Array = composite.grab(
        "SELECT * from Spectra inner join Events ON Spectra.SN = Events.SN order by Spectra.SN",
        multi_epoch=True, make_corr=False, grab_all=True)
    tab_arr = []
    refs = []
    for SN in SN_Array:
        if SN.ref is not None:
            ref = SN.ref
        else:
            ref = 'Unknown'
        refs.append(ref)

    ref_set = sorted(set(refs), key=refs.index)
    ref_nums = []
    for i in range(len(ref_set)):