def output_source_catalogue(output_folder):
    vo_files = glob.glob('day*/*_src_comp.vot')
    sources = None
    for vof in vo_files:
        sources = read_sources(vof, sources)

    # Write out the catalogue
    sources.meta['name'] = 'THOR Sources ' + str(datetime.date.today())
    vot = votable.from_table(sources)
    vot.to_xml(output_folder + "/thor-sources.vot")

    vo_files = glob.glob('day*/*_src_isle.vot')
    islands = None
    for vof in vo_files:
        islands = read_sources(vof, islands)

    # Write out the catalogue
    islands.meta['name'] = 'THOR Islands ' + str(datetime.date.today())
    vot = votable.from_table(islands)
    vot.to_xml(output_folder + "/thor-islands.vot")

    # Create a map by day of the islands
    isle_day_map = {}
    for isle in islands:
        day = isle['Day']
        if day not in isle_day_map:
            isle_day_map[day] = []
        day_list = isle_day_map[day]
        day_list.append(isle)
    return isle_day_map
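# `read_sources` is defined elsewhere in the original module. A minimal sketch
# of the accumulator pattern it presumably implements -- parse one VOTable and
# stack its rows onto the running catalogue. The name and behaviour here are
# assumptions, not the project's actual helper:
from astropy.io.votable import parse_single_table
from astropy.table import vstack

def read_sources_sketch(vo_file, sources=None):
    # Parse a single-table VOTable file into an astropy Table
    table = parse_single_table(vo_file).to_table()
    # Start a new catalogue, or append to the accumulated one
    return table if sources is None else vstack([sources, table])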
def votable_format(ra, dec, radius, nearest, result):
    ztfdic = {}
    votable = result.text.encode(encoding='UTF-8')
    bio = io.BytesIO(votable)
    table = parse_single_table(bio).to_table()
    if len(table) <= 0:
        ztfdic['0'] = 'not found'
        return ztfdic

    tablas = table.group_by('oid')

    # the object closest to (ra, dec) within the radius
    if nearest is True:
        minztf = id_nearest(ra, dec, radius, tablas)
        buf = io.BytesIO()
        votable = from_table(tablas.groups[minztf])
        writeto(votable, buf)
        ztfdic[str(tablas.groups[minztf]['oid'][0])] = (
            buf.getvalue().decode("utf-8"))
        return ztfdic
    # all objects within the radius
    else:
        for group in tablas.groups:
            buf = io.BytesIO()
            votable = from_table(group)
            writeto(votable, buf)
            ztfdic[str(group['oid'][0])] = buf.getvalue().decode("utf-8")
        return ztfdic
def dict_tovot(tabdata, tabname='votable.xml', phot=False, binary=True):
    """
    Converts dictionary table **tabdata** to a VOTable with name **tabname**

    Parameters
    ----------
    tabdata: list
        SQL query dictionary list from running query_dict.execute()
    tabname: str
        The name of the VOTable to be created
    phot: bool
        Parameter specifying if the table contains photometry to be merged
    binary: bool
        Parameter specifying if the VOTable should be saved as a binary.
        This is necessary for tables with lots of text columns.
    """
    # Check if input is a dictionary
    if not isinstance(tabdata[0], dict):
        raise TypeError('Table must be a dictionary. Call the SQL query with query_dict.execute()')

    # Create an empty table to store the data
    t = Table()
    colnames = list(tabdata[0].keys())

    # If this is a photometry table, parse it and make sure to have the full list of columns
    if phot:
        tabdata = photparse(tabdata)
        colnames = list(tabdata[0].keys())
        for i in range(len(tabdata)):
            tmpcol = tabdata[i].keys()
            for elem in tmpcol:
                if elem not in colnames:
                    colnames.append(elem)

        # No need for band column any more
        try:
            colnames.remove('band')
        except ValueError:
            pass

    # Run through all the columns and create them
    for elem in colnames:
        table_add(t, tabdata, elem)

    # Output to a file
    print('Creating table...')
    votable = from_table(t)
    # Required in some cases (ie, for lots of text columns)
    if binary:
        votable.set_all_tables_format('binary')
    votable.to_xml(tabname)
    print('Table created: {}'.format(tabname))
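# For the simple non-photometry case, astropy can build a Table from a list of
# dictionaries directly, which sidesteps the per-column `table_add` helper
# (defined elsewhere in the original module). A minimal sketch with made-up rows:
from astropy.table import Table
from astropy.io.votable import from_table

rows = [{'name': 'obj1', 'ra': 10.5, 'dec': -23.1},
        {'name': 'obj2', 'ra': 11.2, 'dec': -22.8}]
t = Table(rows=rows)                 # one column per dictionary key
from_table(t).to_xml('votable.xml')  # same output path as the default above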
def output_emission_spectra(filename, longitude, latitude, velocity, em_mean,
                            em_std, ems):
    """
    Write the emission spectrum (velocity, flux and opacity) to a votable
    format file.

    :param filename: The filename to be created
    :param longitude: The galactic longitude of the target object
    :param latitude: The galactic latitude of the target object
    :param velocity: The velocity axis of the spectra
    :param em_mean: The mean emission brightness temperature at each velocity
    :param em_std: The standard deviation of the emission at each velocity
    :param ems: The list of individual emission spectra
    """
    table = Table(meta={'name': filename, 'id': 'emission'})
    table.add_column(Column(name='velocity', data=velocity, unit='m/s'))
    table.add_column(Column(name='em_mean', data=em_mean, unit='K'))
    table.add_column(Column(name='em_std', data=em_std, unit='K'))
    for i in range(len(ems)):
        table.add_column(Column(name='em_' + str(i), data=ems[i].flux, unit='K'))

    votable = from_table(table)
    votable.infos.append(Info('longitude', 'longitude', longitude.value))
    votable.infos.append(Info('latitude', 'latitude', latitude.value))
    writeto(votable, filename)
def output_spectra(spectrum, opacity, filename, longitude, latitude, em_mean,
                   em_std, temp_bright, beam_area, sigma_tau):
    """
    Write the spectrum (velocity, flux and opacity) to a votable format file.

    :param spectrum: The spectrum to be output.
    :param opacity: The opacity to be output.
    :param filename: The filename to be created
    :param longitude: The galactic longitude of the target object
    :param latitude: The galactic latitude of the target object
    """
    table = Table(meta={'name': filename, 'id': 'opacity'})
    table.add_column(Column(name='plane', data=spectrum.plane))
    table.add_column(Column(name='velocity', data=spectrum.velocity, unit='m/s'))
    table.add_column(Column(name='opacity', data=opacity))
    table.add_column(Column(name='flux', data=spectrum.flux, unit='Jy',
                            description='Flux per beam'))
    table.add_column(Column(name='temp_brightness', data=temp_bright, unit='K'))
    table.add_column(Column(name='sigma_tau', data=sigma_tau,
                            description='Noise in the absorption profile'))
    if len(em_mean) > 0:
        # The emission may not be available, so only include it when present
        table.add_column(Column(name='em_mean', data=em_mean, unit='K'))
        table.add_column(Column(name='em_std', data=em_std, unit='K'))

    votable = from_table(table)
    votable.infos.append(Info('longitude', 'longitude', longitude.value))
    votable.infos.append(Info('latitude', 'latitude', latitude.value))
    votable.infos.append(Info('beam_area', 'beam_area', beam_area))
    writeto(votable, filename)
def get(self):
    get_vars = self.request.arguments
    ra = -1
    dec = -1
    radius = -1
    if 'RA' in get_vars:
        ra = float(get_vars['RA'][0])
    if 'DEC' in get_vars:
        dec = float(get_vars['DEC'][0])
    if 'SR' in get_vars:
        radius = float(get_vars['SR'][0])
    if min([ra, dec, radius]) == -1:
        print("ERROR: Incorrect request")
        return

    result = newtable()
    for row in conesearch(ra, dec, radius):
        result.add_row(row)
    vo_out = votable.from_table(result)
    # Should not have to do this! Bad API for votable!
    vo_out.to_xml("tempfile.xml")
    outstring = ""
    with open("tempfile.xml", "r") as vofile:
        for line in vofile:
            outstring += line
    self.write(outstring)
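# Hedged alternative to the temporary-file round trip above: astropy's
# VOTableFile.to_xml() also accepts a writable file-like object, so the XML
# can be rendered in memory. A minimal self-contained sketch (the table here
# is a stand-in for the cone-search result):
import io
from astropy.table import Table
from astropy.io.votable import from_table

t = Table({'ra': [10.0], 'dec': [-30.0]})
buf = io.BytesIO()
from_table(t).to_xml(buf)
outstring = buf.getvalue().decode('utf-8')  # XML string, no tempfile needed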
def mkcol(file_in, file_out, ra_cen, dec_cen):
    # import the vot file
    t = parse_single_table(str(file_in)).to_table()
    print("reading the file: " + str(file_in))
    print("please wait")

    # new columns of SNR and area (in units of the beam)
    t['SNR'] = t['peak_flux'] / t['local_rms']
    t['area'] = t['int_flux'] / t['peak_flux']

    # calculating separation from the field centre
    c0 = SkyCoord(Angle(ra_cen * u.deg), Angle(dec_cen * u.deg), frame='fk5')
    seplist = []
    for i in range(len(t)):
        c = SkyCoord(Angle(t['ra'][i] * u.deg), Angle(t['dec'][i] * u.deg),
                     frame='fk5')
        sep = c.separation(c0)
        seplist.append(sep.degree)
    t['sep'] = seplist
    t['sep'].unit = 'deg'

    # saving file
    votable = from_table(t)
    writeto(votable, str(file_out))
    print(str(file_out) + " saved successfully :)")
    print()
    return
def output_emission_spectra(filename, longitude, latitude, velocity, em_mean,
                            em_std, ems):
    """
    Write the emission spectrum (velocity, flux and opacity) to a votable
    format file.

    :param filename: The filename to be created
    :param longitude: The galactic longitude of the target object
    :param latitude: The galactic latitude of the target object
    :param velocity: The velocity axis of the spectra
    :param em_mean: The mean emission brightness temperature at each velocity
    :param em_std: The standard deviation of the emission at each velocity
    :param ems: The list of individual emission spectra
    """
    table = Table(meta={'name': filename, 'id': 'emission'})
    table.add_column(
        Column(name='velocity', data=velocity, unit='m/s',
               description='velocity relative to LSRK'))
    table.add_column(Column(name='em_mean', data=em_mean, unit='K'))
    table.add_column(Column(name='em_std', data=em_std, unit='K'))
    for i in range(len(ems)):
        table.add_column(
            Column(name='em_' + str(i), data=ems[i].flux, unit='K'))

    votable = from_table(table)
    votable.infos.append(Info('ra', 'longitude', longitude.value))
    votable.infos.append(Info('dec', 'latitude', latitude.value))
    writeto(votable, filename)
def make_Dias2014_cut_vot():
    inpath = (
        '/home/luke/local/tess-trex/catalogs/'
        'Dias_2014_prob_gt_50_pct_vizier.vot')
    outpath = ('../data/cluster_data/'
               'Dias_2014_prob_gt_50_pct_to_gaia_archive.vot')
    if os.path.exists(outpath):
        print('found {}, skipping'.format(outpath))
        return

    tab = parse(inpath)
    t = tab.get_first_table().to_table()

    J_mag = np.array(t['Jmag'])
    Ks_mag = np.array(t['Kmag'])  # is actually Ks
    G_mag_estimate = estimate_Gaia_G_given_2mass_J_Ks(J_mag, Ks_mag)
    okinds = np.isfinite(G_mag_estimate)

    d14_ucac4_ids = np.array(t['UCAC4'])[okinds]
    # TODO: check if J2015 transformation needed.
    ra_deg = np.array(t['RAJ2000'])[okinds]
    dec_deg = np.array(t['DEJ2000'])[okinds]

    # import IPython; IPython.embed()  # stray debugging breakpoint

    outtab = Table()
    outtab['ra'] = ra_deg * u.deg
    outtab['dec'] = dec_deg * u.deg
    outtab['gmag_estimate'] = G_mag_estimate[okinds] * u.mag
    outtab['ucac_id'] = d14_ucac4_ids
    v_outtab = from_table(outtab)
    writeto(v_outtab, outpath)
    print('--> made {}'.format(outpath))
def mksub(file_in, file_out):
    # import the vot file
    t = parse_single_table(str(file_in)).to_table()
    print("reading the file: " + str(file_in))
    print("the original file has " + str(len(t)) + " rows")
    print("please wait")

    dellist = []
    # rule for making the subset (edit here)
    for i in range(len(t)):
        if t['sep'][i] >= 10.0:
            dellist.append(i)
    t.remove_rows(dellist)
    print(str(len(dellist)) + " rows deleted")
    print("the subset has " + str(len(t)) + " rows")

    # saving file
    votable = from_table(t)
    writeto(votable, str(file_out))
    print("subset created :)")
    print()
    return
def find_j19_matches(gaskap_table, no_match_cat=None):
    print('\nCross-matching with Jameson et al 2019', no_match_cat)
    j19_table = ascii.read('jameson2019.csv', format='csv')
    col_index = Column(name='index', data=1 + np.arange(len(j19_table)))
    j19_table.add_column(col_index)

    coo_j19 = SkyCoord(j19_table['ra'] * u.deg, j19_table['dec'] * u.deg)
    coo_gaskap = SkyCoord(gaskap_table['ra'], gaskap_table['dec'])

    idx_j19, d2d_j19, d3d_j19 = coo_gaskap.match_to_catalog_sky(coo_j19)
    # This cutoff allows for the widest separation without adding duplicates
    matched = d2d_j19 <= 18.5 * u.arcsec
    matched_j19_idx = idx_j19[matched]
    un_matched_j19_idx = [i for i in np.arange(len(j19_table))
                          if i not in matched_j19_idx]
    j19_unmatched = j19_table[un_matched_j19_idx]
    print("Found {} sources in Jameson et al 2019 not in GASKAP data.".format(
        len(j19_unmatched)))

    coo_j19_unm = SkyCoord(j19_unmatched['ra'] * u.deg,
                           j19_unmatched['dec'] * u.deg)
    idx_gaskap, d2d_gaskap, d3d_gaskap = coo_j19_unm.match_to_catalog_sky(coo_gaskap)
    close_gaskap_comp_names = gaskap_table[idx_gaskap]['comp_name']
    col_closest = Column(name='closest_gaskap', data=close_gaskap_comp_names)
    col_gaskap_ra = Column(name='gaskap_ra', data=gaskap_table[idx_gaskap]['ra'])
    col_gaskap_dec = Column(name='gaskap_dec', data=gaskap_table[idx_gaskap]['dec'])
    col_sep = Column(name='gaskap_sep', data=d2d_gaskap.to(u.arcsec))
    j19_unmatched.add_columns([col_closest, col_gaskap_ra, col_gaskap_dec, col_sep])
    if no_match_cat:
        print(j19_unmatched)
        j19_unm_vo_table = from_table(j19_unmatched)
        writeto(j19_unm_vo_table, no_match_cat)

    return j19_table, idx_j19, d2d_j19, matched, j19_unmatched
def output_spectra(spectrum, opacity, filename, longitude, latitude, em_mean,
                   em_std, temp_bright, beam_area, sigma_tau,
                   sigma_tau_smooth, mean):
    """
    Write the spectrum (velocity, flux and opacity) to a votable format file.

    :param spectrum: The spectrum to be output.
    :param opacity: The opacity to be output.
    :param filename: The filename to be created
    :param longitude: The galactic longitude of the target object
    :param latitude: The galactic latitude of the target object
    """
    table = Table(meta={'name': filename, 'id': 'opacity'})
    table.add_column(Column(name='plane', data=spectrum.plane))
    table.add_column(
        Column(name='velocity', data=spectrum.velocity, unit='m/s',
               description='velocity relative to LSRK'))
    table.add_column(Column(name='opacity', data=opacity))

    # The following smoothing must use the same parameters as the spectrum
    # smoothing elsewhere in the module (line 950 of the original file)
    hann_window = np.hanning(31)
    hann_kernel = CustomKernel(hann_window)
    smooth_y = convolve(opacity, hann_kernel, boundary='extend')
    table.add_column(
        Column(name='smooth_opacity', data=smooth_y,
               description='opacity smoothed with a 31 channel Hanning window'))

    table.add_column(
        Column(name='flux', data=spectrum.flux, unit='Jy',
               description='Flux per beam'))
    table.add_column(Column(name='temp_brightness', data=temp_bright, unit='K'))
    table.add_column(
        Column(name='sigma_tau', data=sigma_tau,
               description='Noise in the absorption profile, before smoothing'))
    table.add_column(
        Column(name='sigma_tau_smooth', data=sigma_tau_smooth,
               description='Noise in the absorption profile, after smoothing'))
    if len(em_mean) > 0:
        # The emission may not be available, so only include it when present
        table.add_column(Column(name='em_mean', data=em_mean, unit='K'))
        table.add_column(Column(name='em_std', data=em_std, unit='K'))

    votable = from_table(table)
    votable.infos.append(Info('ra', 'longitude', longitude.value))
    votable.infos.append(Info('dec', 'latitude', latitude.value))
    votable.infos.append(Info('beam_area', 'beam_area', beam_area))
    votable.infos.append(Info('cont', 'continuum', mean))
    writeto(votable, filename)
def get_image_params_table(dest_folder, sbid, targets, beams, beam_locs,
                           max_sep_close=0.55 * u.deg, max_sep_far=0.8 * u.deg):
    comp_names = []
    comp_ra = []
    comp_dec = []
    included_beam_nums = []
    included_beam_interleaves = []
    included_beam_ids = []
    included_beam_sep = []
    for tgt in targets:
        target_loc = SkyCoord(ra=tgt['ra_deg_cont'] * u.degree,
                              dec=tgt['dec_deg_cont'] * u.degree, frame='icrs')
        src_beams, src_beam_sep = get_beams_near_src(
            target_loc, beam_locs, beams, max_sep=max_sep_close)
        if len(src_beams) == 0:
            # print("No beams close to {}, checking out to {}".format(
            #     tgt['component_name'], max_sep_far))
            src_beams, src_beam_sep = get_beams_near_src(
                target_loc, beam_locs, beams, max_sep=max_sep_far)
        for i in range(len(src_beams)):
            comp_names.append(tgt['component_name'])
            comp_ra.append(tgt['ra_deg_cont'])
            comp_dec.append(tgt['dec_deg_cont'])
            included_beam_nums.append(src_beams['beam_id'].data[i])
            included_beam_interleaves.append(src_beams['interleave'].data[i])
            included_beam_ids.append(src_beams['beam_id'].data[i]
                                     + src_beams['interleave'].data[i])
            included_beam_sep.append(src_beam_sep.to(u.deg).value[i])

    image_params = Table()
    image_params['component_name'] = comp_names
    image_params['comp_ra'] = comp_ra
    image_params['comp_dec'] = comp_dec
    image_params['beam_nums'] = included_beam_nums
    image_params['beam_interleaves'] = included_beam_interleaves
    image_params['beam_ids'] = included_beam_ids
    image_params['beam_sep'] = included_beam_sep

    image_params_vot = votable.from_table(image_params)
    filename = "{}/sb{}_srcs_image_params.vot".format(dest_folder, sbid)
    votable.writeto(image_params_vot, filename)
    print("Produced VO table file {} with {} target beam combos.".format(
        filename, len(image_params)))

    return image_params
def output_field_catalogue(fields, used_fields, output_folder):
    """
    Write out a catalogue of the fields observed under the MAGMO project
    with some basic stats for each field.

    :param fields: The fields to be written.
    :param used_fields: An array of field ids which had spectra which were used.
    :param output_folder: The folder in which the catalogue will be written.
    :return: None
    """
    rows = len(fields)
    days = np.zeros(rows, dtype=int)
    field_names = np.empty(rows, dtype=object)
    longitudes = np.zeros(rows)
    latitudes = np.zeros(rows)
    max_fluxes = np.zeros(rows)
    sn_ratios = np.zeros(rows)
    strong = np.empty(rows, dtype=bool)
    used = np.empty(rows, dtype=bool)
    duplicate = np.empty(rows, dtype=bool)

    i = 0
    for field in fields:
        days[i] = int(field.day)
        field_names[i] = field.name
        longitudes[i] = field.longitude
        latitudes[i] = field.latitude
        max_fluxes[i] = field.max_flux
        sn_ratios[i] = field.sn_ratio
        strong[i] = True if field.used == 'Y' else False
        used[i] = field.get_field_id() in used_fields
        duplicate[i] = field.duplicate
        i += 1

    coords = SkyCoord(longitudes, latitudes, frame='galactic', unit="deg")

    fields_table = Table(
        [days, field_names, longitudes, latitudes, max_fluxes, sn_ratios,
         strong, used, duplicate, coords.icrs.ra.degree,
         coords.icrs.dec.degree],
        names=['Day', 'Field', 'Longitude', 'Latitude', 'Max_Flux',
               'SN_Ratio', 'Strong', 'Used', 'Duplicate', 'ra', 'dec'],
        meta={'ID': 'thor_fields',
              'name': 'THOR Fields ' + str(datetime.date.today())})
    votable = from_table(fields_table)
    filename = output_folder + '/thor-fields.vot'
    writeto(votable, filename)

    print("Wrote out", i, "fields to", filename)
def make_votable_given_source_ids(source_ids, outpath=None):
    t = Table()
    t['source_id'] = source_ids

    votable = from_table(t)
    writeto(votable, outpath)
    print('made {}'.format(outpath))

    return outpath
def make_votable_given_ids(gaiaids,
                           outpath='../data/rms_vs_mag/proj1301_gaiaids.xml'):
    t = Table()
    t['source_id'] = gaiaids.astype(int)

    votable = from_table(t)
    writeto(votable, outpath)
    print('made {}'.format(outpath))

    return outpath
def output_emission_spectrum(source, comp_name, velocity, em_mean, em_std, filename):
    title = 'Primary beam emission for source #{} {}'.format(source['id'], comp_name)
    em_out_tab = Table(meta={'name': title})
    em_out_tab.add_column(Column(name='velocity', data=velocity, unit='m/s',
                                 description='LSRK velocity'))
    em_out_tab.add_column(Column(name='pb_em_mean', data=em_mean, unit='K',
                                 description='Mean brightness temperature in the primary beam'))
    em_out_tab.add_column(Column(name='pb_em_std', data=em_std, unit='K',
                                 description='Noise level in the brightness temperature in the primary beam'))

    votable = from_table(em_out_tab)
    votable.params.append(Param(votable, id='id', value=source['id'],
                                datatype='int'))
    votable.params.append(Param(votable, id='comp_name', value=comp_name,
                                datatype='char', arraysize='*'))
    votable.params.append(Param(votable, id='ra', value=source['ra'],
                                unit='deg', datatype='double'))
    votable.params.append(Param(votable, id='dec', value=source['dec'],
                                unit='deg', datatype='double'))
    writeto(votable, filename)
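# Reading the PARAM metadata back from a file written by the function above;
# a usage sketch assuming the params land at the VOTABLE level, as appended
# above ('spectrum.vot' is a hypothetical filename):
from astropy.io.votable import parse

vot = parse('spectrum.vot')
params = {p.ID: p.value for p in vot.params}
print(params.get('comp_name'))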
def compare_strasser_2007(magmo_gas):
    print("## Comparing with Strasser et al 2007 ##")

    # Read in s07 data
    s07_files = glob.glob('../strasser2007/*.dat')
    s07_data = None
    for file_name in s07_files:
        data = ascii.read(file_name)
        spin_temp = np.array(data['Ts'])
        subset = data[spin_temp > 0]
        if s07_data is None:
            s07_data = subset
        else:
            s07_data = vstack([s07_data, subset])
    s07_spin_temp = s07_data['Ts']
    s07_spin_temp = s07_spin_temp[s07_spin_temp < 1E4]
    print("Sample had {} values, mean {:0.3f}, median {:0.3f}, sd {:0.3f}".format(
        len(s07_spin_temp), np.mean(s07_spin_temp), np.median(s07_spin_temp),
        np.std(s07_spin_temp)))
    votable = from_table(s07_data)
    writeto(votable, 'strasser_2007_spin.vot')

    fig = plt.figure(figsize=(7.5, 3))
    gs = matplotlib.gridspec.GridSpec(1, 2)
    gas_array = magmo_gas

    # Spin Temperature
    ax1 = fig.add_subplot(gs[0, 0])
    sample = np.ma.array(gas_array['temp_spin']).compressed()
    bins = np.linspace(0, 450, 19)
    hist, edges = build_hist_fraction(sample, bins, 450)
    ax1.step(edges, hist)

    s07_sample = np.array(s07_data['Ts'])
    s07_sample = s07_sample[s07_sample < 1E4]
    hist, edges = build_hist_fraction(s07_sample, bins, 450)
    ax1.step(edges, hist, color='black', ls='--')
    ax1.set_xlabel('Spin Temperature (K)')
    ax1.set_ylabel('Fraction of components')

    statistic, p_value = stats.ks_2samp(np.ma.filled(sample),
                                        np.ma.filled(s07_sample))
    print('Spin temp population similarity p_value={}'.format(p_value))

    gs.update(wspace=0.5, hspace=0.5)
    filename = 'magmo-strasser_2007_comp.pdf'
    plt.savefig(filename, bbox_inches="tight")
    plt.close()
    return
def __init__(self, content_path, format="pandas", ok=True):
    if format == "pandas":
        with open(content_path) as f:
            expected_result = str.encode(f.read())
    else:
        t = Table.read(content_path, format="csv")
        votable = from_table(t)
        f = io.BytesIO()
        votable.to_xml(f)
        f.seek(0)
        expected_result = f.read()
    self.content = expected_result
    self.ok = ok
def output_j19_comparison(sbid, gaskap_table, j19_table, idx_j19, d2d_j19,
                          j19_match, j19_unmatched, title, filename,
                          match_cat=None):
    print(title, filename)

    gaskap_targets = gaskap_table[j19_match]
    j19_targets = j19_table[idx_j19]
    j19_targets = j19_targets[j19_match]
    sort_order = gaskap_targets.argsort(['comp_name'])
    # comp_names = sorted(targets['comp_name'])
    gaskap_tgt_ordered = gaskap_targets[sort_order]
    j19_tgt_ordered = j19_targets[sort_order]

    with open(filename, 'w') as f:
        output_header(f, title)

        for rating in 'ABCDEF':
            mask = gaskap_tgt_ordered['rating'] == rating
            subset = gaskap_tgt_ordered[mask]
            j19_subset = j19_tgt_ordered[mask]
            print('Rating {} has {} spectra'.format(rating, len(subset)))
            output_block_title(f, rating, rating == 'A', len(subset))

            for idx, gaskap_src in enumerate(subset):
                gaskap_name = gaskap_src['comp_name']
                j19_name = j19_subset[idx]['Source']
                output_j19_img(f, gaskap_name, j19_name, rating)

        # Add a section for missed spectra
        output_block_title(f, None, False, len(j19_unmatched))
        for row in j19_unmatched:
            gaskap_name = row['closest_gaskap']
            j19_name = row['Source']
            output_j19_img(f, gaskap_name, j19_name, rating,
                           sep=row['gaskap_sep'])

        output_footer(f)

    if match_cat:
        augmented_table = Table(gaskap_tgt_ordered)
        close_j19_comp_names = j19_tgt_ordered['Source']
        col_closest = Column(name='closest_j19', data=close_j19_comp_names)
        col_gaskap_ra = Column(name='j19_ra', data=j19_tgt_ordered['ra'] * u.deg)
        col_gaskap_dec = Column(name='j19_dec', data=j19_tgt_ordered['dec'] * u.deg)
        sep_vals = d2d_j19[j19_match]
        sep_vals_sorted = sep_vals[sort_order]
        col_sep = Column(name='j19_sep', data=sep_vals_sorted.to(u.arcsec))
        augmented_table.add_columns([col_closest, col_gaskap_ra,
                                     col_gaskap_dec, col_sep])
        # print(augmented_table)
        j19_match_vo_table = from_table(augmented_table)
        writeto(j19_match_vo_table, match_cat)
def output_single_phase_catalogue(spectra):
    """
    Create a catalogue of the spin temperature of each channel of each
    spectrum based on a naive single phase assumption.

    :param spectra: The list of all spectra
    :return: None
    """
    spectra_by_long = sorted(spectra, key=lambda spectrum: spectrum.longitude)

    longitudes = []
    latitudes = []
    velocities = []
    emission_temps = []
    opacities = []
    spin_temperatures = []
    for spectrum in spectra_by_long:
        for i in range(len(spectrum.velocity)):
            if spectrum.em_temps[i] > 0:
                longitudes.append(spectrum.longitude)
                latitudes.append(spectrum.latitude)
                velocities.append(spectrum.velocity[i])
                emission_bright_temp = spectrum.em_temps[i]
                emission_temps.append(emission_bright_temp)
                opacities.append(spectrum.opacities[i])
                spin_t = None
                if emission_bright_temp:
                    spin_t = emission_bright_temp / spectrum.opacities[i]
                spin_temperatures.append(spin_t)

    temp_table = Table(
        [longitudes, latitudes, velocities, spin_temperatures,
         emission_temps, opacities],
        names=['Longitude', 'Latitude', 'Velocity', 'Spin_Temp',
               'Emission_Bright_Temp', 'Opacity'],
        meta={'ID': 'thor_single_phase_spin_temp',
              'name': 'THOR 1P Spin Temp ' + str(datetime.date.today())})
    votable = from_table(temp_table)
    filename = "thor-1p-temp.vot"
    writeto(votable, filename)

    print("Wrote out", len(spin_temperatures), "channel temperatures to",
          filename)
def make_votable_given_cols(name, assoc, RA, dec, pm_RA, pm_dec, outpath=None):
    t = Table()
    t['name'] = name.astype(str)
    t['assoc'] = assoc.astype(str)
    t['ra'] = RA * u.deg
    t['dec'] = dec * u.deg
    t['pm_ra'] = pm_RA * (u.mas / u.year)
    t['pm_dec'] = pm_dec * (u.mas / u.year)

    votable = from_table(t)
    writeto(votable, outpath)
    print('made {}'.format(outpath))

    return outpath
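# Usage sketch with made-up values; reading the file back with
# parse_single_table verifies the round trip. The names and path are
# hypothetical:
import numpy as np
from astropy.io.votable import parse_single_table

path = make_votable_given_cols(np.array(['HD 1']), np.array(['assoc1']),
                               np.array([10.0]), np.array([-45.0]),
                               np.array([5.0]), np.array([-3.0]),
                               outpath='targets.vot')
t_back = parse_single_table(path).to_table()  # round-trip check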
def write_table(tab, filename):
    """
    Write a VOTable using a binary format.

    Parameters
    ----------
    tab : `astropy.table.Table`
        The table to be written

    filename : str
        The output filename. Should end with .xml or .vot
    """
    vot = from_table(tab)
    vot.set_all_tables_format('binary')
    vot.to_xml(filename)
    print("Wrote {0}".format(filename))
    return
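# Usage sketch: a small two-column table written in the binary VOTable
# serialization (the filename is hypothetical):
from astropy.table import Table

tab = Table({'ra': [10.0, 11.0], 'dec': [-45.0, -46.0]})
write_table(tab, 'catalogue.vot')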
def outputSpectra(spectrum, opacity, filename, longitude):
    """
    Write the spectrum (velocity, flux and opacity) to a votable format file.

    :param spectrum: The spectrum to be output.
    :param opacity: The opacity to be output.
    :param filename: The filename to be created
    :param longitude: The galactic longitude of the target object
    """
    table = Table([spectrum.plane, spectrum.velocity, opacity, spectrum.flux],
                  names=('plane', 'velocity', 'opacity', 'flux'),
                  meta={'name': 'Opacity', 'id': 'opacity'})
    votable = from_table(table)
    votable.infos.append(Info('longitude', 'longitude', longitude))
    # votable.params.append(Param(votable, 'longitude', 'longitude', longitude,
    #                             datatype='char', arraysize=str(len(str(longitude)))))
    writeto(votable, filename)
def output_decomposition_catalogue(folder, spectra, data, data_decomposed,
                                   alpha1, alpha2):
    names = []
    days = []
    field_names = []
    sources = []
    longitudes = []
    latitudes = []
    cont_sd = []
    residual_rms = []
    ratings = []
    num_comps = []

    for i in range(len(data_decomposed['fwhms_fit'])):
        spectrum = spectra[data['spectrum_idx'][i]]
        names.append(spectrum['Name'])
        days.append(int(spectrum['Day']))
        field_names.append(spectrum['Field'])
        sources.append(spectrum['Source'])
        longitudes.append(spectrum['Longitude'])
        latitudes.append(spectrum['Latitude'])
        ratings.append(spectrum['Rating'])
        cont_sd.append(spectrum['Continuum_SD'])

        fit_fwhms = data_decomposed['fwhms_fit'][i]
        fit_means = data_decomposed['means_fit'][i]
        fit_amps = data_decomposed['amplitudes_fit'][i]
        num_comps.append(len(fit_fwhms))

        velo = data['x_values'][i]
        # opacity = convert_to_ratio(data['data_list'][i])
        residual = calc_residual(velo, data['data_list'][i], fit_amps,
                                 fit_fwhms, fit_means)
        residual_rms.append(np.sqrt(np.mean(np.square(residual))))

    temp_table = Table(
        [names, days, field_names, sources, longitudes, latitudes,
         residual_rms, ratings, num_comps, cont_sd],
        names=['Spectra_Name', 'Day', 'Field', 'Source', 'Longitude',
               'Latitude', 'Residual_RMS', 'Rating', 'Num_Comp',
               'Continuum_SD'],
        meta={'ID': 'magmo_decomposition',
              'name': 'MAGMO Decomposition ' + str(datetime.date.today()),
              'alpha1': alpha1,
              'alpha2': alpha2})
    votable = from_table(temp_table)
    filename = folder + "/magmo-decomposition.vot"
    writeto(votable, filename)
def output_metrics_xml(reporter, dest_folder):
    titles = []
    descs = []
    values = []
    statuses = []
    for metric in reporter.metrics:
        # title, description, value, status
        titles.append(metric.title)
        descs.append(metric.description)
        values.append(metric.value)
        statuses.append(metric.status)
    temp_table = Table(
        [titles, descs, values, statuses],
        names=['metric_name', 'metric_description', 'metric_value',
               'metric_status'],
        dtype=['str', 'str', 'double', 'int'])
    votable = from_table(temp_table)
    table = votable.get_first_table()
    if reporter.project:
        table.params.append(
            Param(votable, name="project", datatype="char", arraysize="*",
                  value=reporter.project))
    if reporter.sbid:
        table.params.append(
            Param(votable, name="sbid", datatype="char", arraysize="*",
                  value=reporter.sbid))
    table.get_field_by_id('metric_name').datatype = 'char'
    table.get_field_by_id('metric_description').datatype = 'char'
    table.get_field_by_id('metric_status').datatype = 'int'

    filename = dest_folder + '/gaskap-metrics.xml'
    writeto(votable, filename)
    return
def writer(filename, catalog, fmt=None):
    """
    construct a dict of the data
    this method preserves the data types in the VOTable
    """
    tab_dict = {}
    name_list = []
    for name in catalog[0].names:
        col_name = name
        if catalog[0].galactic:
            if name.startswith('ra'):
                col_name = 'lon' + name[2:]
            elif name.endswith('ra'):
                col_name = name[:-2] + 'lon'
            elif name.startswith('dec'):
                col_name = 'lat' + name[3:]
            elif name.endswith('dec'):
                col_name = name[:-3] + 'lat'
        col_name = pre + col_name
        tab_dict[col_name] = [getattr(c, name, None) for c in catalog]
        name_list.append(col_name)
    t = Table(tab_dict, meta=meta)
    # re-order the columns
    t = t[[n for n in name_list]]

    if fmt is not None:
        if fmt in ["vot", "vo", "xml"]:
            vot = from_table(t)
            # description of this votable
            vot.description = repr(meta)
            writetoVO(vot, filename)
        elif fmt in ['hdf5']:
            t.write(filename, path='data', overwrite=True)
        elif fmt in ['fits']:
            writeFITSTable(filename, t)
        else:
            ascii.write(t, filename, fmt, overwrite=True)
    else:
        ascii.write(t, filename, overwrite=True)
    return
def handleSiaRequest(posString, sizeString, formatString):
    info = 'You asked for POS = {}, SIZE = {}, FORMAT = {}'.format(
        posString, sizeString, formatString)
    ra, dec = parsePos(posString)
    size = parseSize(sizeString)
    if ra is None:
        return Response(status="Invalid RA from POS: " + posString)
    elif dec is None:
        return Response(status="Invalid Dec from POS: " + posString)
    elif size[0] is None:
        return Response(status="Invalid Size from SIZE: " + sizeString)
    queryRegion = makeQueryRegion(ra, dec, size)

    # Get the image database table
    db = getDb()
    # Remove rows that don't match the query.
    intersectedTable = findIntersectedRows(queryRegion, db)
    dbvotable = from_table(intersectedTable)
    # Get rid of the fixed size arrays
    fixArraysize(dbvotable)
    # Set the UCDs for the columns.
    mapUcds(dbvotable)

    # Write the VOTABLE xml to a string, then use that to create the response
    # object with content-type of xml.
    xmlStringObj = StringIO.StringIO()
    dbvotable.to_xml(xmlStringObj)
    xmlString = xmlStringObj.getvalue()
    resp = Response(xmlString, mimetype='text/xml')
    return resp
def output_spectra(spectrum, opacity, filename, longitude, latitude, em_mean,
                   em_std, temp_bright, beam_area, sigma_tau, src_id):
    """
    Write the spectrum (velocity, flux and opacity) to a votable format file.

    :param spectrum: The spectrum to be output.
    :param opacity: The opacity to be output.
    :param filename: The filename to be created
    :param longitude: The galactic longitude of the target object
    :param latitude: The galactic latitude of the target object
    """
    table = Table(meta={'name': filename, 'id': 'opacity'})
    table.add_column(Column(name='plane', data=spectrum.plane))
    table.add_column(
        Column(name='velocity', data=spectrum.velocity, unit='m/s'))
    table.add_column(Column(name='opacity', data=opacity))
    table.add_column(
        Column(name='flux', data=spectrum.flux, unit='Jy',
               description='Flux per beam'))
    table.add_column(Column(name='temp_brightness', data=temp_bright, unit='K'))
    table.add_column(
        Column(name='sigma_tau', data=sigma_tau,
               description='Noise in the absorption profile'))
    if len(em_mean) > 0:
        # The emission may not be available, so only include it when present
        table.add_column(Column(name='em_mean', data=em_mean, unit='K'))
        table.add_column(Column(name='em_std', data=em_std, unit='K'))

    votable = from_table(table)
    votable.infos.append(Info('longitude', 'longitude', longitude.value))
    votable.infos.append(Info('latitude', 'latitude', latitude.value))
    votable.infos.append(Info('beam_area', 'beam_area', beam_area))
    votable.infos.append(Info('src_gname', 'gname', src_id))
    writeto(votable, filename)
def writer(filename, catalog, fmt=None):
    # construct a dict of the data
    # this method preserves the data types in the VOTable
    tab_dict = {}
    for name in catalog[0].names:
        tab_dict[name] = [getattr(c, name, None) for c in catalog]
    t = Table(tab_dict, meta=meta)
    # re-order the columns
    t = t[[n for n in catalog[0].names]]

    if fmt is not None:
        if fmt in ["vot", "vo", "xml"]:
            vot = from_table(t)
            # description of this votable
            vot.description = repr(meta)
            writetoVO(vot, filename)
        elif fmt in ['hdf5']:
            t.write(filename, path='data', overwrite=True)
        elif fmt in ['fits']:
            writeFITSTable(filename, t)
        else:
            ascii.write(t, filename, fmt)
    else:
        ascii.write(t, filename)
    return
def savevot(table, output_name):
    writeto(from_table(table), str(output_name))
    print(str(output_name) + " saved in votable successfully!")
    return
def output_component_catalogue(spectra, data, data_decomposed, folder):
    names = []
    comp_names = []
    days = []
    field_names = []
    sources = []
    longitudes = []
    latitudes = []
    amps = []
    fwhms = []
    means = []
    best_fit_rchi2s = []
    amps_fit_errs = []
    fwhms_fit_errs = []
    means_fit_errs = []
    num_no_comps = {}

    for i in range(len(data_decomposed['fwhms_fit'])):
        if i >= len(data['spectrum_idx']):
            print("Error: data index of %d is invalid for data array of len %d" % (
                i, len(data['spectrum_idx'])))
        spectrum_idx = data['spectrum_idx'][i]
        if spectrum_idx >= len(spectra):
            print("Error: spectra index of %d at row %d is invalid for spectra array of len %d" % (
                spectrum_idx, i, len(spectra)))
        spectrum = spectra[spectrum_idx]
        fit_fwhms = data_decomposed['fwhms_fit'][i]
        fit_means = data_decomposed['means_fit'][i]
        fit_amps = data_decomposed['amplitudes_fit'][i]
        best_fit_rchi2 = data_decomposed['best_fit_rchi2'][i]
        means_fit_err = data_decomposed['means_fit_err'][i]
        fwhms_fit_err = data_decomposed['fwhms_fit_err'][i]
        amplitudes_fit_err = data_decomposed['amplitudes_fit_err'][i]

        if len(fit_amps) > 0:
            for j in range(len(fit_amps)):
                days.append(int(spectrum['Day']))
                field_names.append(spectrum['Field'])
                sources.append(spectrum['Source'])
                longitudes.append(spectrum['Longitude'])
                latitudes.append(spectrum['Latitude'])
                amps.append(fit_amps[j])
                fwhms.append(fit_fwhms[j])
                means.append(fit_means[j])
                best_fit_rchi2s.append(best_fit_rchi2[0])
                amps_fit_errs.append(amplitudes_fit_err[j])
                fwhms_fit_errs.append(fwhms_fit_err[j])
                means_fit_errs.append(means_fit_err[j])
                names.append(spectrum['Name'])
                suffix = chr(ord('A') + j)
                comp_names.append(spectrum['Name'] + suffix)
        else:
            rating = spectrum['Rating']
            num_no_comps[rating] = num_no_comps.get(rating, 0) + 1
            print("Unable to find components for", spectrum['Name'])

    temp_table = Table(
        [comp_names, names, days, field_names, sources, longitudes, latitudes,
         amps, fwhms, means, best_fit_rchi2s, amps_fit_errs, fwhms_fit_errs,
         means_fit_errs],
        names=['Comp_Name', 'Spectra_Name', 'Day', 'Field', 'Source',
               'Longitude', 'Latitude', 'Amplitude', 'FWHM', 'Mean',
               'Best_Fit_Rchi2', 'Amplitude_Fit_Err', 'FWHM_Fit_Err',
               'Mean_Fit_Err'],
        meta={'ID': 'magmo_components',
              'name': 'MAGMO Components ' + str(datetime.date.today())})
    votable = from_table(temp_table)
    filename = "magmo-components.vot"
    writeto(votable, filename)
    filename = folder + "/magmo-components.vot"
    writeto(votable, filename)

    total_nc = 0
    for rating, count in num_no_comps.items():
        total_nc += count

    print("Wrote out", len(fwhms), "components to", filename,
          "No components generated for", total_nc)
    for rating in sorted(num_no_comps.keys()):
        print("%s: %3d" % (rating, num_no_comps[rating]))
def compare_heiles_2003(magmo_gas):
    print("## Comparing with Heiles & Troland 2003 ##")

    # Read in ht03 data
    rdr = ascii.get_reader(Reader=ascii.Csv)
    ht03_table = rdr.read('../Millennium_data.csv')

    # filter for just the CNM data with |b| < 10
    cnm = np.array(ht03_table['CNM'])
    sample = ht03_table[cnm >= '0']
    abs_lat = np.absolute(np.array(sample['GLAT']))
    ht03_low_cnm = sample[abs_lat <= 10]

    spin_temp = np.array(ht03_low_cnm['Ts'])
    print("Sample had {} values, mean {:0.3f}, median {:0.3f}, sd {:0.3f}".format(
        len(spin_temp), np.mean(spin_temp), np.median(spin_temp),
        np.std(spin_temp)))
    votable = from_table(ht03_low_cnm)
    writeto(votable, 'millenium_spin.vot')

    # comparative histogram of the two CNM spin temp sets
    fig = plt.figure(figsize=(7.5, 3))
    gs = matplotlib.gridspec.GridSpec(1, 2)
    gas_array = magmo_gas

    # Spin Temperature
    ax1 = fig.add_subplot(gs[0, 0])
    sample = np.ma.array(gas_array['temp_spin']).compressed()
    bins = np.linspace(0, 450, 19)
    hist, edges = build_hist_fraction(sample, bins, 450)
    ax1.step(edges, hist)

    ht03_sample = np.array(ht03_low_cnm['Ts'])
    hist, edges = build_hist_fraction(ht03_sample, bins, 450)
    ax1.step(edges, hist, color='black', ls='--')
    ax1.set_xlabel('Spin Temperature (K)')
    ax1.set_ylabel('Fraction of components')

    statistic, p_value = stats.ks_2samp(np.ma.filled(sample),
                                        np.ma.filled(ht03_sample))
    print('Spin temp population similarity p_value={}'.format(p_value))

    # Column Density
    ax2 = fig.add_subplot(gs[0, 1])
    sample = np.ma.array(gas_array['column_density']).compressed()
    sample = np.log10(sample)
    bins = np.linspace(19, 24, 21)
    hist, edges = build_hist_fraction(sample, bins, 24)

    sample = np.array(ht03_low_cnm['NHI']) * 1E20
    sample = np.log10(sample[sample > 0])
    ht03_hist, edges = build_hist_fraction(sample, bins, 24)

    ax2.step(edges, hist)  # , width=edges[1]-edges[0]
    ax2.step(edges, ht03_hist, color='black', ls=':')  # , width=edges[1]-edges[0]
    label = 'Column Density $\\log_{10}(N_{H})$ (cm$^{-2}$)'
    ax2.set_xlabel(label)
    ax2.set_ylabel('Fraction of components')

    statistic, p_value = stats.ks_2samp(
        np.ma.filled(gas_array['column_density']),
        np.ma.filled(np.array(ht03_low_cnm['NHI']) * 1E20))
    print('Column density population similarity p_value={}'.format(p_value))

    gs.update(wspace=0.5, hspace=0.5)
    filename = 'magmo-heiles_2003_comp.pdf'
    plt.savefig(filename, bbox_inches="tight")
    plt.close()
    return
    raStr = "%sh%sm%ss" % (row['RAh'], row['RAm'], row['RAs'])
    decStr = "%s%sd%sm%ss" % (row['DE_sign'], row['DEd'], row['DEm'], row['DEs'])
    coord = SkyCoord(raStr, decStr, frame='icrs')
    row['ra_deg'] = coord.ra.deg
    row['dec_deg'] = coord.dec.degree

print "Final first table row: ", table.array[0]

# Table 4
print "------ Starting table 4 ----------"
SourceFilename = BaseT4Filename + ".txt"
DestFilename = BaseT4Filename + ".xml"
data = ascii.read(SourceFilename)
sanitiseColNames(data)
print data
votable = from_table(data)
votable.version = '1.3'
addCoords(votable.get_first_table())
for field in votable.get_first_table().fields:
    print "Field %s %s %s %s %s" % (field.ID, field.name, field.datatype,
                                    field.width, field.ucd)
votable.to_xml(DestFilename)
print "Written to ", DestFilename

# Table 6
print "\n\n------ Starting table 6 ----------"
SourceFilename = BaseT6Filename + ".txt"
DestFilename = BaseT6Filename + ".xml"
data = ascii.read(SourceFilename)
sanitiseColNames(data)
print data
def csv_to_votable(filename, save_filename):
    data = Table.read(filename, format='ascii.csv')
    votable = from_table(data)
    writeto(votable, save_filename)
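# Usage sketch for csv_to_votable (both filenames are hypothetical):
if __name__ == '__main__':
    csv_to_votable('catalogue.csv', 'catalogue.vot')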
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 28 12:29:24 2020

Convert textfile to votable.

@author: mbattley
"""
from astropy.table import Table
from astropy.io.votable import from_table, writeto
import numpy as np
from astropy.io import ascii
import pandas as pd

save_path = "/Users/mbattley/Documents/PhD/Young Star Lists/"
# filename = "BANYAN_XI-III_members_with_TIC.csv"
filename = save_path + "Bell17_simplified.txt"

# data = pd.read_table(filename)
data = Table.read(filename, format='ascii')
# data = np.fromfile(filename, dtype=float)

votable = from_table(data)
writeto(votable, save_path + "Bell17.xml")
def download(self, jobid=None, filename=None, format=None):
    """
    A public function to download data from a job with COMPLETED phase.

    Keyword Args
    ------------
    jobid :
        Completed jobid to be downloaded
    filename : string
        If left blank, downloaded to the terminal. If specified, data is
        written out to file (directory can be included here).

    Returns
    -------
    headers, data : list, list
    """
    if jobid is None:
        try:
            jobid = self.current_job
        except:
            raise

    self.check_all_jobs()
    completed_job_responses = self.completed_job_info(jobid)
    soup = BeautifulSoup(completed_job_responses[0].content)
    tableurl = soup.find("uws:result").get("xlink:href")

    # This is where the request.content parsing happens
    raw_table_data = self.session.get(tableurl,
                                      auth=(self.username, self.password))
    raw_headers = raw_table_data.content.split('\n')[0]
    num_cols = len(raw_headers.split(','))
    num_rows = len(raw_table_data.content.split('\n')) - 2
    headers = [raw_headers.split(',')[i].strip('"') for i in range(num_cols)]
    raw_data = [raw_table_data.content.split('\n')[i + 1].split(",")
                for i in range(num_rows)]
    data = [map(eval, raw_data[i]) for i in range(num_rows)]

    if format is not None:
        tbl = Table(data=map(list, zip(*data)), names=headers)
        if format in ['VOTable', 'votable']:
            votbl = votable.from_table(tbl)
            if filename is None:
                return votbl
            else:
                if '.xml' in filename:
                    filename = filename.split('.')[0]
                votable.writeto(votbl, "{}.xml".format(filename))
                print("Data written to file: {}.xml".format(filename))
        elif format in ['FITS', 'fits']:
            print("Need to implement...")
    else:
        if filename is None:
            return headers, data
        else:
            with open(filename, 'wb') as fh:
                raw_table_data = self.session.get(
                    tableurl, auth=(self.username, self.password), stream=True)
                for block in raw_table_data.iter_content(1024):
                    if not block:
                        break
                    fh.write(block)
            print("Data written to file: {}".format(filename))
    return headers, data
def createFootprintsTable(catalog, xy0=None, insertColumn=4):
    """make a VOTable of SourceData table and footprints

    Parameters:
    -----------
    catalog : `lsst.afw.table.SourceCatalog`
        Source catalog from which to display footprints.
    xy0 : tuple or list or None
        Pixel origin to subtract off from the footprint coordinates.
        If None, the value used is (0,0)
    insertColumn : `int`
        Column at which to insert the "family_id" and "category" columns

    Returns:
    --------
    `astropy.io.votable.voTableFile`
        VOTable object to upload to Firefly
    """
    if xy0 is None:
        xy0 = afwGeom.Point2I(0, 0)

    _catalog = afwTable.SourceCatalog(catalog.table.clone())
    _catalog.extend(catalog, deep=True)
    sourceTable = _catalog.asAstropy()

    # Change int64 dtypes so they convert to VOTable
    for colName in sourceTable.colnames:
        if sourceTable[colName].dtype.num == 9:
            sourceTable[colName].dtype = np.dtype('long')

    inputColumnNames = sourceTable.colnames

    x0, y0 = xy0
    spanList = []
    peakList = []
    familyList = []
    categoryList = []
    fpxll = []
    fpyll = []
    fpxur = []
    fpyur = []
    for record in catalog:
        footprint = record.getFootprint()
        recordId = record.getId()
        spans = footprint.getSpans()
        scoords = [(s.getY() - y0, s.getX0() - x0, s.getX1() - x0) for s in spans]
        scoords = np.array(scoords).flatten()
        scoords = np.ma.MaskedArray(scoords,
                                    mask=np.zeros(len(scoords), dtype=bool))
        fpbbox = footprint.getBBox()
        corners = [(c.getX() - x0, c.getY() - y0) for c in fpbbox.getCorners()]
        fpxll.append(corners[0][0])
        fpyll.append(corners[0][1])
        fpxur.append(corners[2][0])
        fpyur.append(corners[2][1])
        peaks = footprint.getPeaks()
        pcoords = [(p.getFx() - x0, p.getFy() - y0) for p in peaks]
        pcoords = np.array(pcoords).flatten()
        pcoords = np.ma.MaskedArray(pcoords,
                                    mask=np.zeros(len(pcoords), dtype=bool))
        parentId = record.getParent()
        nChild = record.get('deblend_nChild')
        if parentId == 0:
            familyList.append(recordId)
            if nChild > 0:
                # blended parent
                categoryList.append('blended parent')
            else:
                # isolated
                categoryList.append('isolated')
        else:
            # deblended child
            familyList.append(parentId)
            categoryList.append('deblended child')
        spanList.append(scoords)
        peakList.append(pcoords)

    sourceTable.add_column(Column(np.array(familyList)), name='family_id',
                           index=insertColumn)
    sourceTable.add_column(Column(np.array(categoryList)), name='category',
                           index=insertColumn + 1)
    sourceTable.add_column(Column(np.array(spanList)), name='spans')
    sourceTable.add_column(Column(np.array(peakList)), name='peaks')
    sourceTable.add_column(Column(np.array(fpxll)), name='footprint_corner1_x')
    sourceTable.add_column(Column(np.array(fpyll)), name='footprint_corner1_y')
    sourceTable.add_column(Column(np.array(fpxur)), name='footprint_corner2_x')
    sourceTable.add_column(Column(np.array(fpyur)), name='footprint_corner2_y')

    outputVO = from_table(sourceTable)
    outTable = outputVO.get_first_table()

    outTable.infos.append(Info(name='contains_lsst_footprints', value='true'))
    outTable.infos.append(Info(name='contains_lsst_measurements', value='true'))
    outTable.infos.append(Info(name='FootPrintColumnNames',
                               value='id;footprint_corner1_x;footprint_corner1_y;' +
                                     'footprint_corner2_x;footprint_corner2_y;spans;peaks'))
    outTable.infos.append(Info(name='pixelsys', value='zero-based'))

    # Check whether the coordinates are included and are valid
    if (('slot_Centroid_x' in inputColumnNames) and
            ('slot_Centroid_y' in inputColumnNames) and
            np.isfinite(outTable.array['slot_Centroid_x']).any() and
            np.isfinite(outTable.array['slot_Centroid_y']).any()):
        coord_column_string = 'slot_Centroid_x;slot_Centroid_y;ZERO_BASED'
    elif (('coord_ra' in inputColumnNames) and
            ('coord_dec' in inputColumnNames) and
            np.isfinite(outTable.array['coord_ra']).any() and
            np.isfinite(outTable.array['coord_dec']).any()):
        coord_column_string = 'coord_ra;coord_dec;EQ_J2000'
    elif (('base_SdssCentroid_x' in inputColumnNames) and
            ('base_SdssCentroid_y' in inputColumnNames) and
            np.isfinite(outTable.array['base_SdssCentroid_x']).any() and
            np.isfinite(outTable.array['base_SdssCentroid_y']).any()):
        coord_column_string = 'base_SdssCentroid_x;base_SdssCentroid_y;ZERO_BASED'
    elif (('base_NaiveCentroid_x' in inputColumnNames) and
            ('base_NaiveCentroid_y' in inputColumnNames) and
            np.isfinite(outTable.array['base_NaiveCentroid_x']).any() and
            np.isfinite(outTable.array['base_NaiveCentroid_y']).any()):
        coord_column_string = 'base_NaiveCentroid_x;base_NaiveCentroid_y;ZERO_BASED'
    else:
        raise RuntimeError('No valid coordinate columns in catalog')

    outTable.infos.append(Info(name='CatalogCoordColumns',
                               value=coord_column_string))

    for f in outTable.fields:
        if f.datatype == 'bit':
            f.datatype = 'boolean'

    outTable._config['version_1_3_or_later'] = True
    outputVO.set_all_tables_format('binary2')

    return outputVO
def output_gas_catalogue(all_gas):
    num_gas = len(all_gas)
    names = []
    days = []
    field_names = []
    sources = []
    longitudes = []
    latitudes = []
    ras = []
    decs = []
    velocities = np.zeros(num_gas)
    em_velocities = np.ma.array(np.zeros(num_gas))
    optical_depths = np.zeros(num_gas)
    comp_widths = np.zeros(num_gas)
    temps_off = np.ma.array(np.zeros(num_gas))
    delta_temps_off = np.ma.array(np.zeros(num_gas))
    temps_spin = np.ma.array(np.zeros(num_gas))
    delta_temps_spin = np.ma.array(np.zeros(num_gas))
    temps_kmax = np.zeros(num_gas)
    tau = np.zeros(num_gas)
    continuum_sd = np.zeros(num_gas)
    maser_region = np.empty(num_gas, dtype=bool)
    ratings = np.empty(num_gas, dtype=object)
    filenames = np.empty(num_gas, dtype=object)
    local_paths = np.empty(num_gas, dtype=object)
    local_emission_paths = np.empty(num_gas, dtype=object)
    local_spectra_paths = np.empty(num_gas, dtype=object)
    base_path = os.path.realpath('.')

    for i in range(len(all_gas)):
        gas = all_gas[i]
        names.append(gas.name)
        days.append(gas.day)
        field_names.append(gas.field)
        sources.append(gas.src)
        longitudes.append(gas.longitude)
        latitudes.append(gas.latitude)
        ras.append(gas.ra)
        decs.append(gas.dec)
        velocities[i] = gas.comp_vel
        em_velocities[i] = gas.em_vel / 1000 if gas.em_vel else np.ma.masked
        optical_depths[i] = gas.optical_depth
        comp_widths[i] = gas.comp_width
        temps_kmax[i] = gas.t_kmax
        if gas.t_off is None:
            temps_off[i] = np.ma.masked
            delta_temps_off[i] = np.ma.masked
        else:
            temps_off[i] = gas.t_off
            delta_temps_off[i] = gas.delta_t_off
        if gas.t_s is None:
            temps_spin[i] = np.ma.masked
            delta_temps_spin[i] = np.ma.masked
        else:
            temps_spin[i] = gas.t_s
            delta_temps_spin[i] = gas.delta_t_s
        tau[i] = gas.tau
        maser_region[i] = is_gas_near_maser(gas)
        ratings[i] = gas.rating
        continuum_sd[i] = gas.continuum_sd

        # Need to read in spectra to get rating and include it in the
        # catalogue and link to the fit preview:
        # e.g. plots/A/012.909-0.260_19_src4-1_fit
        prefix = 'day' + str(gas.day) + '/' + gas.field + \
                 "_src" + gas.src
        filenames[i] = prefix + "_plot.png"
        em_filename = prefix + "_emission.png"
        spectra_path = 'run2/plots/{}/{}_{}_src{}_fit.png'.format(
            gas.rating, gas.field, gas.day, gas.src)
        local_paths[i] = base_path + '/' + filenames[i]
        local_emission_paths[i] = base_path + '/' + em_filename
        local_spectra_paths[i] = base_path + '/' + spectra_path

    # bulk calc fields
    vel_diff = np.abs(velocities - em_velocities)
    equiv_width = np.abs((1 - optical_depths) * comp_widths)
    fwhm = np.abs(comp_widths)
    column_density = tau * fwhm * temps_spin * 1.823E18 * 1.064
    sigma = fwhm / (2 * math.sqrt(2 * math.log(2)))
    mach_num = np.sqrt(4.2 * ((temps_kmax / temps_spin) - 1))
    n_wnm = ((column_density * temps_spin) / 50) - column_density

    temp_table = Table(
        [names, days, field_names, sources, velocities, em_velocities,
         optical_depths, temps_off, temps_spin, temps_kmax, longitudes,
         latitudes, ras, decs, fwhm, sigma, vel_diff, equiv_width, tau,
         maser_region, column_density, mach_num, n_wnm, ratings,
         delta_temps_spin, delta_temps_off, continuum_sd, filenames,
         local_paths, local_emission_paths, local_spectra_paths],
        names=['Comp_Name', 'Day', 'Field', 'Source', 'Velocity',
               'em_velocity', 'optical_depth', 'temp_off', 'temp_spin',
               'temp_kmax', 'longitude', 'latitude', 'ra', 'dec', 'fwhm',
               'sigma', 'vel_diff', 'equiv_width', 'tau', 'near_maser',
               'column_density', 'mach', 'n_wnm', 'Rating',
               'delta_temp_spin', 'delta_temp_off', 'delta_optical_depth',
               'Filename', 'Local_Path', 'Local_Emission_Path',
               'Local_Spectrum_Path'],
        meta={'ID': 'magmo_gas',
              'name': 'MAGMO Gas ' + str(datetime.date.today())})
    votable = from_table(temp_table)
    table = votable.get_first_table()
    set_field_metadata(table.get_field_by_id('longitude'), 'pos.galactic.lon',
                       'deg', 'Galactic longitude of the background source')
    set_field_metadata(table.get_field_by_id('latitude'), 'pos.galactic.lat',
                       'deg', 'Galactic latitude of the background source')
    set_field_metadata(table.get_field_by_id('ra'), 'pos.eq.ra;meta.main',
                       'deg', 'Right ascension of the background source (J2000)')
    set_field_metadata(table.get_field_by_id('dec'), 'pos.eq.dec;meta.main',
                       'deg', 'Declination of the background source (J2000)')
    set_field_metadata(table.get_field_by_id('fwhm'), '', 'km/s',
                       'Full width at half maximum of the Gaussian component')
    set_field_metadata(table.get_field_by_id('sigma'), '', 'km/s',
                       'Sigma value of the Gaussian component')
    set_field_metadata(table.get_field_by_id('temp_off'),
                       'phys.temperature;stat.mean', 'K',
                       'The mean temperature for the gas immediately adjacent to the source')
    set_field_metadata(table.get_field_by_id('temp_spin'),
                       'phys.temperature;stat.mean', 'K',
                       'The excitation or spin temperature of the gas')
    set_field_metadata(table.get_field_by_id('optical_depth'), '', '',
                       'The peak optical depth of the component ($e^(-\\tau)$)')
    set_field_metadata(table.get_field_by_id('column_density'), '', 'cm-2',
                       'The density of Cold HI gas in the Gaussian component')
    set_field_metadata(table.get_field_by_id('n_wnm'), '', 'cm-2',
                       'The density of Warm HI gas in the Gaussian component')
    set_field_metadata(table.get_field_by_id('mach'), '', '',
                       'The turbulent mach number of the gas in the Gaussian component')

    filename = "magmo-gas.vot"
    writeto(votable, filename)
    return table
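# `set_field_metadata` is defined elsewhere in the original module; a plausible
# sketch of what it does, inferred from the call sites above (the body is an
# assumption, not the project's actual helper):
def set_field_metadata_sketch(field, ucd, unit, description):
    # Attach VOTable FIELD metadata: UCD, unit string and free-text description
    field.ucd = ucd
    field.unit = unit
    field.description = description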
mytable

## The column 'radial_velocity' is c*z but doesn't include the unit; it is km/s
## Get the speed of light from astropy.constants and express in km/s
c = const.c.to(u.km/u.s).value
redshifts = mytable['radial_velocity'] / c
mytable['redshift'] = redshifts
physdist = 0.05 * u.Mpc  # 50 kpc physical distance
angDdist = Planck15.angular_diameter_distance(mytable['redshift'])
angDrad = np.arctan(physdist / angDdist)
mytable['angDdeg'] = angDrad.to(u.deg)
mytable

## In memory only, use an IO stream.
vot_obj = io.BytesIO()
apvot.writeto(apvot.from_table(mytable), vot_obj)
## (Reset the "file-like" object to the beginning.)
vot_obj.seek(0)

query = """SELECT mt.ra, mt.dec, cat.ra, cat.dec, cat.Radial_Velocity, cat.morph_type, cat.bmag
    FROM zcat cat, tap_upload.mytable mt
    WHERE contains(point('ICRS',cat.ra,cat.dec),circle('ICRS',mt.ra,mt.dec,mt.angDdeg))=1
    and cat.Radial_Velocity > 0 and cat.radial_velocity != mt.radial_velocity
    ORDER by cat.ra"""
# Currently broken due to a bug.
# mytable2 = heasarc_tap_services[0].service.run_async(query, uploads={'mytable': vot_obj})
mytable2 = heasarc_tap_services[0].search(query, uploads={'mytable': vot_obj})
vot_obj.close()
mytable2.to_table()
def save_table(table, tabname):
    results = VOTableFile()
    resource = Resource()
    results.resources.append(resource)
    resource.tables.append(from_table(table).get_first_table())
    results.to_xml(tabname)
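# Note that from_table() already returns a complete VOTableFile, so when only
# a single table is being saved the resource shuffling above can reduce to a
# one-liner. A minimal sketch:
def save_table_simple(table, tabname):
    # from_table() wraps the astropy Table in a VOTableFile that can be
    # serialized directly.
    from_table(table).to_xml(tabname)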