Example 1
def output_emission_spectra(filename, longitude, latitude, velocity, em_mean,
                            em_std, ems):
    """
    Write the emission spectrum (velocity, flux and opacity) to a votable format
    file.

    :param filename:  The filename to be created
    :param longitude: The galactic longitude of the target object
    :param latitude: The galactic latitude of the target object
    :param velocity: The velocity axis of the spectrum, relative to the LSRK, in m/s
    :param em_mean: The mean emission brightness temperature at each velocity, in K
    :param em_std: The standard deviation of the emission brightness temperature at each velocity, in K
    :param ems: The list of individual emission spectra whose fluxes are to be included
    """
    table = Table(meta={'name': filename, 'id': 'emission'})
    table.add_column(
        Column(name='velocity',
               data=velocity,
               unit='m/s',
               description='velocity relative to LSRK'))
    table.add_column(Column(name='em_mean', data=em_mean, unit='K'))
    table.add_column(Column(name='em_std', data=em_std, unit='K'))
    for i in range(len(ems)):
        table.add_column(
            Column(name='em_' + str(i), data=ems[i].flux, unit='K'))

    votable = from_table(table)
    votable.infos.append(Info('ra', 'longitude', longitude.value))
    votable.infos.append(Info('dec', 'latitude', latitude.value))
    writeto(votable, filename)
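A file written this way can be read back with astropy's VOTable parser. A minimal sketch, assuming the filename and the 'ra'/'dec' INFO elements written above (everything else is illustrative):

from astropy.io.votable import parse

votable = parse('emission_spectrum.vot')  # hypothetical filename
for info in votable.infos:
    # The position recorded above as top-level INFO elements
    print(info.ID, info.name, info.value)
table = votable.get_first_table().to_table()
print(table['velocity'].unit, table['em_mean'].unit)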
Example 2
def output_spectra(spectrum, opacity, filename, longitude, latitude, em_mean, em_std, temp_bright, beam_area,
                   sigma_tau):
    """
    Write the spectrum (velocity, flux and opacity) to a votable format file.

    :param spectrum: The spectrum to be output.
    :param opacity:  The opacity to be output.
    :param filename:  The filename to be created
    :param longitude: The galactic longitude of the target object
    :param latitude: The galactic latitude of the target object
    """
    table = Table(meta={'name': filename, 'id': 'opacity'})
    table.add_column(Column(name='plane', data=spectrum.plane))
    table.add_column(Column(name='velocity', data=spectrum.velocity, unit='m/s'))
    table.add_column(Column(name='opacity', data=opacity))
    table.add_column(Column(name='flux', data=spectrum.flux, unit='Jy', description='Flux per beam'))
    table.add_column(Column(name='temp_brightness', data=temp_bright, unit='K'))
    table.add_column(Column(name='sigma_tau', data=sigma_tau, description='Noise in the absorption profile'))
    if len(em_mean) > 0:
        # The emission may not be available, so don't include it if not
        table.add_column(Column(name='em_mean', data=em_mean, unit='K'))
        table.add_column(Column(name='em_std', data=em_std, unit='K'))

    votable = from_table(table)
    votable.infos.append(Info('longitude', 'longitude', longitude.value))
    votable.infos.append(Info('latitude', 'latitude', latitude.value))
    votable.infos.append(Info('beam_area', 'beam_area', beam_area))
    writeto(votable, filename)
Example 3
def votable_format(ra, dec, radius, nearest, result):
    ztfdic = {}
    votable = result.text.encode(encoding='UTF-8')
    bio = io.BytesIO(votable)
    table = parse_single_table(bio).to_table()

    if len(table) <= 0:
        ztfdic['0'] = 'not found'
        return ztfdic  #'not found'

    tablas = table.group_by('oid')

    # the closest object within the search radius
    if nearest is True:

        minztf = id_nearest(ra, dec, radius, tablas)

        buf = io.BytesIO()
        votable = from_table(tablas.groups[minztf])
        writeto(votable, buf)
        ztfdic[str(tablas.groups[minztf]['oid'][0])] = (
            buf.getvalue().decode("utf-8"))
        return ztfdic
    # all objects in radius
    else:
        for group in tablas.groups:
            buf = io.BytesIO()
            votable = from_table(group)
            writeto(votable, buf)
            ztfdic[str(group['oid'][0])] = (buf.getvalue().decode("utf-8"))
        return ztfdic
Example 4
def output_emission_spectra(filename, longitude, latitude, velocity, em_mean,
                            em_std, ems):
    """
    Write the emission spectrum (velocity, flux and opacity) to a votable format
    file.

    :param filename:  The filename to be created
    :param longitude: The galactic longitude of the target object
    :param latitude: The galactic latitude of the target object
    :param velocity: The velocity axis of the spectrum, in m/s
    :param em_mean: The mean emission brightness temperature at each velocity, in K
    :param em_std: The standard deviation of the emission brightness temperature at each velocity, in K
    :param ems: The list of individual emission spectra whose fluxes are to be included
    """
    table = Table(meta={'name': filename, 'id': 'emission'})
    table.add_column(Column(name='velocity', data=velocity, unit='m/s'))
    table.add_column(Column(name='em_mean', data=em_mean, unit='K'))
    table.add_column(Column(name='em_std', data=em_std, unit='K'))
    for i in range(len(ems)):
        table.add_column(Column(name='em_'+str(i), data=ems[i].flux, unit='K'))

    votable = from_table(table)
    votable.infos.append(Info('longitude', 'longitude', longitude.value))
    votable.infos.append(Info('latitude', 'latitude', latitude.value))
    writeto(votable, filename)
Example 5
def mkcol(file_in, file_out, ra_cen, dec_cen):

    #import the vot file
    t = parse_single_table(str(file_in)).to_table()
    print("reading the file: " + str(file_in))
    print("please wait")

    #new columns of SNR and area (in units of beams)
    t['SNR'] = t['peak_flux'] / t['local_rms']
    t['area'] = t['int_flux'] / t['peak_flux']

    #calculating separation

    c0 = SkyCoord(Angle(ra_cen * u.deg), Angle(dec_cen * u.deg), frame='fk5')
    seplist = []

    for i in range(len(t)):
        c = SkyCoord(Angle(t['ra'][i] * u.deg),
                     Angle(t['dec'][i] * u.deg),
                     frame='fk5')
        sep = c.separation(c0)
        seplist.append(sep.degree)
    t['sep'] = seplist
    t['sep'].unit = 'deg'

    #saving file
    votable = from_table(t)
    writeto(votable, str(file_out))
    print(str(file_out) + " saved successfully :)")
    print()

    return
Example 6
def make_Dias2014_cut_vot():

    inpath = ( '/home/luke/local/tess-trex/catalogs/'
              'Dias_2014_prob_gt_50_pct_vizier.vot')

    outpath = ('../data/cluster_data/'
               'Dias_2014_prob_gt_50_pct_to_gaia_archive.vot')
    if os.path.exists(outpath):
        print('found {}, skipping'.format(outpath))
        return

    tab = parse(inpath)
    t = tab.get_first_table().to_table()

    J_mag = np.array(t['Jmag'])
    Ks_mag = np.array(t['Kmag']) # is actually Ks
    G_mag_estimate = estimate_Gaia_G_given_2mass_J_Ks(J_mag, Ks_mag)
    okinds = np.isfinite(G_mag_estimate)

    d14_ucac4_ids = np.array(t['UCAC4'])[okinds]

    #TODO: check if J2015 transformation needed.
    ra_deg = np.array(t['RAJ2000'])[okinds]
    dec_deg = np.array(t['DEJ2000'])[okinds]

    import IPython; IPython.embed()
    outtab = Table()
    outtab['ra'] = ra_deg*u.deg
    outtab['dec'] = dec_deg*u.deg
    outtab['gmag_estimate'] = G_mag_estimate[okinds]*u.mag
    outtab['ucac_id'] = d14_ucac4_ids
    v_outtab = from_table(outtab)
    writeto(v_outtab, outpath)
    print('--> made {}'.format(outpath))
Example 7
def mksub(file_in, file_out):

    #import the vot file
    t = parse_single_table(str(file_in)).to_table()
    print("reading the file: " + str(file_in))
    print("the original file has " + str(len(t)) + " rows")
    print("please wait")

    dellist = []

    #rule of making subset (edit here)
    for i in range(len(t)):
        if t['sep'][i] >= 10.0:
            dellist.append(i)

    del t[dellist]
    print(str(len(dellist)) + " rows deleted")
    print("the subset has " + str(len(t)) + " rows")

    #saving file
    votable = from_table(t)
    writeto(votable, str(file_out))
    print("subset created :)")
    print()

    return
Example 8
def find_j19_matches(gaskap_table, no_match_cat=None):
    print ('\nCross-matching with Jameson et al 2019', no_match_cat)
    j19_table = ascii.read('jameson2019.csv', format='csv')
    col_index = Column(name='index', data=1+np.arange(len(j19_table)))
    j19_table.add_column(col_index)

    coo_j19 = SkyCoord(j19_table['ra']*u.deg, j19_table['dec']*u.deg)
    coo_gaskap = SkyCoord(gaskap_table['ra'], gaskap_table['dec'])

    idx_j19, d2d_j19, d3d_j19 = coo_gaskap.match_to_catalog_sky(coo_j19)
    matched = d2d_j19 <= 18.5*u.arcsec # This cutoff allows for the widest separation without adding duplicates

    matched_j19_idx = idx_j19[matched]
    un_matched_j19_idx = [i for i in np.arange(len(j19_table)) if i not in matched_j19_idx]
    j19_unmatched = j19_table[un_matched_j19_idx]
    print ("Found {} sources in Jameson et al 2019 not in GASKAP data.".format(len(j19_unmatched)))
    coo_j19_unm = SkyCoord(j19_unmatched['ra']*u.deg, j19_unmatched['dec']*u.deg)
    idx_gaskap, d2d_gaskap, d3d_gaskap = coo_j19_unm.match_to_catalog_sky(coo_gaskap)
    close_gaskap_comp_names = gaskap_table[idx_gaskap]['comp_name']
    col_closest = Column(name='closest_gaskap', data=close_gaskap_comp_names)
    col_gaskap_ra = Column(name='gaskap_ra', data=gaskap_table[idx_gaskap]['ra'])
    col_gaskap_dec = Column(name='gaskap_dec', data=gaskap_table[idx_gaskap]['dec'])
    col_sep = Column(name='gaskap_sep', data=d2d_gaskap.to(u.arcsec))
    j19_unmatched.add_columns([col_closest, col_gaskap_ra, col_gaskap_dec, col_sep])
    if no_match_cat:
        print (j19_unmatched)
        j19_unm_vo_table = from_table(j19_unmatched)
        writeto(j19_unm_vo_table, no_match_cat)

    return j19_table, idx_j19, d2d_j19, matched, j19_unmatched
Example 9
def output_spectra(spectrum, opacity, filename, longitude, latitude, em_mean,
                   em_std, temp_bright, beam_area, sigma_tau, sigma_tau_smooth,
                   mean):
    """
    Write the spectrum (velocity, flux and opacity) to a votable format file.

    :param spectrum: The spectrum to be output.
    :param opacity:  The opacity to be output.
    :param filename:  The filename to be created
    :param longitude: The galactic longitude of the target object
    :param latitude: The galactic latitude of the target object
    """
    table = Table(meta={'name': filename, 'id': 'opacity'})
    table.add_column(Column(name='plane', data=spectrum.plane))
    table.add_column(
        Column(name='velocity',
               data=spectrum.velocity,
               unit='m/s',
               description='velocity relative to LSRK'))
    table.add_column(Column(name='opacity', data=opacity))

    # The following smoothing should use the same parameters as line 950
    hann_window = np.hanning(31)
    hann_kernel = CustomKernel(hann_window)
    smooth_y = convolve(opacity, hann_kernel, boundary='extend')
    table.add_column(
        Column(name='smooth_opacity',
               data=smooth_y,
               description='opacity smoothed with a 31 channel Hanning window.'))

    table.add_column(
        Column(name='flux',
               data=spectrum.flux,
               unit='Jy',
               description='Flux per beam'))
    table.add_column(Column(name='temp_brightness', data=temp_bright,
                            unit='K'))
    table.add_column(
        Column(
            name='sigma_tau',
            data=sigma_tau,
            description='Noise in the absorption profile, before smoothing'))
    table.add_column(
        Column(name='sigma_tau_smooth',
               data=sigma_tau_smooth,
               description='Noise in the absorption profile, after smoothing'))
    if len(em_mean) > 0:
        # The emission may not be available, so don't include it if not
        table.add_column(Column(name='em_mean', data=em_mean, unit='K'))
        table.add_column(Column(name='em_std', data=em_std, unit='K'))

    votable = from_table(table)
    votable.infos.append(Info('ra', 'longitude', longitude.value))
    votable.infos.append(Info('dec', 'latitude', latitude.value))
    votable.infos.append(Info('beam_area', 'beam_area', beam_area))
    votable.infos.append(Info('cont', 'continuum', mean))
    writeto(votable, filename)
Example 10
def get_image_params_table(dest_folder,
                           sbid,
                           targets,
                           beams,
                           beam_locs,
                           max_sep_close=0.55 * u.deg,
                           max_sep_far=0.8 * u.deg):
    comp_names = []
    comp_ra = []
    comp_dec = []
    included_beam_nums = []
    included_beam_interleaves = []
    included_beam_ids = []
    included_beam_sep = []

    for tgt in targets:
        target_loc = SkyCoord(ra=tgt['ra_deg_cont'] * u.degree,
                              dec=tgt['dec_deg_cont'] * u.degree,
                              frame='icrs')
        src_beams, src_beam_sep = get_beams_near_src(target_loc,
                                                     beam_locs,
                                                     beams,
                                                     max_sep=max_sep_close)
        if len(src_beams) == 0:
            #print ("No beams close to {}, checking out to {}".format(tgt['component_name'], max_sep_far))
            src_beams, src_beam_sep = get_beams_near_src(target_loc,
                                                         beam_locs,
                                                         beams,
                                                         max_sep=max_sep_far)

        for i in range(len(src_beams)):
            comp_names.append(tgt['component_name'])
            comp_ra.append(tgt['ra_deg_cont'])
            comp_dec.append(tgt['dec_deg_cont'])
            included_beam_nums.append(src_beams['beam_id'].data[i])
            included_beam_interleaves.append(src_beams['interleave'].data[i])
            included_beam_ids.append(src_beams['beam_id'].data[i] +
                                     src_beams['interleave'].data[i])
            included_beam_sep.append(src_beam_sep.to(u.deg).value[i])

    image_params = Table()
    image_params['component_name'] = comp_names
    image_params['comp_ra'] = comp_ra
    image_params['comp_dec'] = comp_dec
    image_params['beam_nums'] = included_beam_nums
    image_params['beam_interleaves'] = included_beam_interleaves
    image_params['beam_ids'] = included_beam_ids
    image_params['beam_sep'] = included_beam_sep

    image_params_vot = votable.from_table(image_params)
    filename = "{}/sb{}_srcs_image_params.vot".format(dest_folder, sbid)
    votable.writeto(image_params_vot, filename)

    print("Produced VO table file {} with {} target beam combos.".format(
        filename, len(image_params)))

    return image_params
Example 11
def output_field_catalogue(fields, used_fields, output_folder):
    """
    Write out a catalogue of the fields observed under the MAGMO project
    with some basic stats for each field.

    :param fields: The fields to be written.
    :param used_fields: An array of field ids which had spectra which were used.
    :param output_folder: The folder in which the catalogue file will be written.
    :return: None
    """
    rows = len(fields)
    days = np.zeros(rows, dtype=int)
    field_names = np.empty(rows, dtype=object)
    longitudes = np.zeros(rows)
    latitudes = np.zeros(rows)
    max_fluxes = np.zeros(rows)
    sn_ratios = np.zeros(rows)
    strong = np.empty(rows, dtype=bool)
    used = np.empty(rows, dtype=bool)
    duplicate = np.empty(rows, dtype=bool)

    i = 0
    for field in fields:
        days[i] = int(field.day)
        field_names[i] = field.name
        longitudes[i] = field.longitude
        latitudes[i] = field.latitude
        max_fluxes[i] = field.max_flux
        sn_ratios[i] = field.sn_ratio
        strong[i] = True if field.used == 'Y' else False
        used[i] = field.get_field_id() in used_fields
        duplicate[i] = field.duplicate
        i += 1

    coords = SkyCoord(longitudes, latitudes, frame='galactic', unit="deg")

    fields_table = Table([
        days, field_names, longitudes, latitudes, max_fluxes, sn_ratios,
        strong, used, duplicate, coords.icrs.ra.degree, coords.icrs.dec.degree
    ],
                         names=[
                             'Day', 'Field', 'Longitude', 'Latitude',
                             'Max_Flux', 'SN_Ratio', 'Strong', 'Used',
                             'Duplicate', 'ra', 'dec'
                         ],
                         meta={
                             'ID': 'thor_fields',
                             'name':
                             'THOR Fields ' + str(datetime.date.today())
                         })
    votable = from_table(fields_table)
    filename = output_folder + '/thor-fields.vot'
    writeto(votable, filename)

    print("Wrote out", i, "fields to", filename)
Example 12
def make_votable_given_source_ids(source_ids, outpath=None):

    t = Table()
    t['source_id'] = source_ids

    votable = from_table(t)

    writeto(votable, outpath)
    print('made {}'.format(outpath))

    return outpath
Example 13
def make_votable_given_ids(gaiaids,
                           outpath='../data/rms_vs_mag/proj1301_gaiaids.xml'):

    t = Table()
    t['source_id'] = gaiaids.astype(int)

    votable = from_table(t)

    writeto(votable, outpath)
    print('made {}'.format(outpath))

    return outpath
Example 14
def output_emission_spectrum(source, comp_name, velocity, em_mean, em_std, filename):
    title = 'Primary beam emission for source #{} {}'.format(source['id'], comp_name)
    em_out_tab = Table(meta={'name': title})
    em_out_tab.add_column(Column(name='velocity', data=velocity, unit='m/s', description='LSRK velocity'))
    em_out_tab.add_column(Column(name='pb_em_mean', data=em_mean, unit='K', description='Mean brightness temperature in the primary beam'))
    em_out_tab.add_column(Column(name='pb_em_std', data=em_std, unit='K', description='Noise level in the brightness temperature in the primary beam'))
    
    votable = from_table(em_out_tab)
    votable.params.append(Param(votable, id='id', value=source['id'], datatype='int'))
    votable.params.append(Param(votable, id='comp_name', value=comp_name, datatype='char', arraysize='*'))
    votable.params.append(Param(votable, id='ra', value=source['ra'], unit='deg', datatype='double'))
    votable.params.append(Param(votable, id='dec', value=source['dec'], unit='deg', datatype='double'))
    writeto(votable, filename)
Example 15
def compare_strasser_2007(magmo_gas):
    print("## Comparing with Strasser et al 2007 ##")

    # Read in s07 data
    s07_files = glob.glob('../strasser2007/*.dat')
    s07_data = None
    for file_name in s07_files:
        data = ascii.read(file_name)
        spin_temp = np.array(data['Ts'])
        subset = data[spin_temp > 0]
        if s07_data is None:
            s07_data = subset
        else:
            s07_data = vstack([s07_data, subset])

    s07_spin_temp = s07_data['Ts']
    s07_spin_temp = s07_spin_temp[s07_spin_temp<1E4]
    print("Sample had {} values, mean {:0.3f}, median {:0.3f}, sd {:0.3f}".format(len(s07_spin_temp),
                                                                                  np.mean(s07_spin_temp),
                                                                                  np.median(s07_spin_temp),
                                                                                  np.std(s07_spin_temp)))
    votable = from_table(s07_data)
    writeto(votable, 'strasser_2007_spin.vot')

    fig = plt.figure(figsize=(7.5, 3))
    gs = matplotlib.gridspec.GridSpec(1, 2)
    gas_array = magmo_gas

    # Spin Temperature
    ax1 = fig.add_subplot(gs[0, 0])
    sample = np.ma.array(gas_array['temp_spin']).compressed()
    bins = np.linspace(0,450,19)
    hist, edges = build_hist_fraction(sample, bins, 450)
    ax1.step(edges, hist)

    s07_sample = np.array(s07_data['Ts'])
    s07_sample = s07_sample[s07_sample<1E4]
    hist, edges = build_hist_fraction(s07_sample, bins, 450)
    ax1.step(edges, hist, color='black', ls='--')

    ax1.set_xlabel('Spin Temperature (K)')
    ax1.set_ylabel('Fraction of components')

    statistic, p_value = stats.ks_2samp(np.ma.filled(sample), np.ma.filled(s07_sample))
    print ('Spin temp population similarity p_value={}'.format(p_value))

    gs.update(wspace=0.5, hspace=0.5)
    filename = 'magmo-strasser_2007_comp.pdf'
    plt.savefig(filename, bbox_inches="tight")
    plt.close()
    return
Example 16
def output_single_phase_catalogue(spectra):
    """
    Create a catalogue of the spin temperature of each channel of each spectrum based on a naive single phase
    assumption.
    :param spectra: The list of all spectra
    :return: None
    """
    spectra_by_long = sorted(spectra, key=lambda spectrum: spectrum.longitude)

    longitudes = []
    latitudes = []
    velocities = []
    emission_temps = []
    opacities = []
    spin_temperatures = []

    for spectrum in spectra_by_long:
        for i in range(len(spectrum.velocity)):
            if spectrum.em_temps[i] > 0:
                longitudes.append(spectrum.longitude)
                latitudes.append(spectrum.latitude)
                velocities.append(spectrum.velocity[i])
                emission_bright_temp = spectrum.em_temps[i]
                emission_temps.append(emission_bright_temp)
                opacities.append(spectrum.opacities[i])
                spin_t = None
                if emission_bright_temp:
                    spin_t = emission_bright_temp / spectrum.opacities[i]
                spin_temperatures.append(spin_t)

    temp_table = Table(
        [
            longitudes, latitudes, velocities, spin_temperatures,
            emission_temps, opacities
        ],
        names=[
            'Longitude', 'Latitude', 'Velocity', 'Spin_Temp',
            'Emission_Bright_Temp', 'Opacity'
        ],
        meta={
            'ID': 'thor_single_phase_spin_temp',
            'name': 'THOR 1P Spin Temp ' + str(datetime.date.today())
        })
    votable = from_table(temp_table)
    filename = "thor-1p-temp.vot"
    writeto(votable, filename)

    print("Wrote out", len(spin_temperatures), "channel temperatures to",
          filename)
Example 17
def output_j19_comparison(sbid, gaskap_table, j19_table, idx_j19, d2d_j19, j19_match, j19_unmatched, title, filename, match_cat=None): 
    print (title, filename)

    gaskap_targets = gaskap_table[j19_match]
    j19_targets = j19_table[idx_j19]
    j19_targets = j19_targets[j19_match]
    sort_order = gaskap_targets.argsort(['comp_name'])
    #comp_names = sorted(targets['comp_name'])
    gaskap_tgt_ordered = gaskap_targets[sort_order]
    j19_tgt_ordered = j19_targets[sort_order]

    with open(filename, 'w') as f:
        output_header(f, title)

        for rating in 'ABCDEF':
            mask = gaskap_tgt_ordered['rating']==rating
            subset = gaskap_tgt_ordered[mask]
            j19_subset = j19_tgt_ordered[mask]
            print('Rating {} has {} spectra'.format(rating, len(subset)))

            output_block_title(f, rating, rating=='A', len(subset))

            for idx, gaskap_src in enumerate(subset):
                gaskap_name  = gaskap_src['comp_name']
                j19_name = j19_subset[idx]['Source']
                output_j19_img(f, gaskap_name, j19_name, rating)

        # Add a section for missed spectra
        output_block_title(f, None, False, len(j19_unmatched))
        for row in j19_unmatched:
            gaskap_name  = row['closest_gaskap']
            j19_name = row['Source']
            output_j19_img(f, gaskap_name, j19_name, rating, sep=row['gaskap_sep'])

        output_footer(f)

    if match_cat:
        augmented_table = Table(gaskap_tgt_ordered)
        close_j19_comp_names = j19_tgt_ordered['Source']
        col_closest = Column(name='closest_j19', data=close_j19_comp_names)
        col_gaskap_ra = Column(name='j19_ra', data=j19_tgt_ordered['ra']*u.deg)
        col_gaskap_dec = Column(name='j19_dec', data=j19_tgt_ordered['dec']*u.deg)
        sep_vals = d2d_j19[j19_match]
        sep_vals_sorted = sep_vals[sort_order]
        col_sep = Column(name='j19_sep', data=sep_vals_sorted.to(u.arcsec))
        augmented_table.add_columns([col_closest, col_gaskap_ra, col_gaskap_dec, col_sep])
        #print (augmented_table)
        j19_match_vo_table = from_table(augmented_table)
        writeto(j19_match_vo_table, match_cat)
Example 18
def make_votable_given_cols(name, assoc, RA, dec, pm_RA, pm_dec, outpath=None):

    t = Table()
    t['name'] = name.astype(str)
    t['assoc'] = assoc.astype(str)
    t['ra'] = RA*u.deg
    t['dec'] = dec*u.deg
    t['pm_ra'] = pm_RA*(u.mas/u.year)
    t['pm_dec'] = pm_dec*(u.mas/u.year)

    votable = from_table(t)

    writeto(votable, outpath)
    print('made {}'.format(outpath))

    return outpath
Example 19
 def format_votable(self):
     """
     Returns
     -------
     Data in VOTable format (an XML string).
     """
     names_column = ['obj','ra','dec','mjd','mag','magerr','filter','catalog']
     descriptions_column = ['Id of object in the original catalog',
                             'Right ascension of source','Declination of source',
                             'Modified Julian Day','Magnitude','Magnitude Error',
                             'Filter code','Original Catalog']
     #dtype_column = [] # dtype=dtype_column
     t = Table(rows=self.data_np, names=names_column, descriptions=descriptions_column)
     votable = VOTableFile.from_table(t)
     buf = io.BytesIO()
     writeto(votable, buf)
     return buf.getvalue().decode("utf-8")
Example 20
def outputSpectra(spectrum, opacity, filename, longitude):
    """
    Write the spectrum (velocity, flux and opacity) to a votable format file.

    :param spectrum: The spectrum to be output.
    :param opacity:  The opacity to be output.
    :param filename:  The filename to be created
    :param longitude: The galactic longitude of the target object
    """
    table = Table([spectrum.plane, spectrum.velocity, opacity, spectrum.flux],
                  names=('plane', 'velocity', 'opacity', 'flux'),
                  meta={'name': 'Opacity', 'id' : 'opacity'})
    votable = from_table(table)
    votable.infos.append(Info('longitude', 'longitude', longitude))
    #votable.params.append(Param(votable, 'longitude', 'longitude', longitude,
    #                          datatype='char', arraysize=str(len(str(longitude)))))
    writeto(votable, filename)
Example 21
def output_decomposition_catalogue(folder, spectra, data, data_decomposed, alpha1, alpha2):
    names = []
    days = []
    field_names = []
    sources = []
    longitudes = []
    latitudes = []
    cont_sd = []
    residual_rms = []
    ratings = []
    num_comps = []

    for i in range(len(data_decomposed['fwhms_fit'])):
        spectrum = spectra[data['spectrum_idx'][i]]

        names.append(spectrum['Name'])
        days.append(int(spectrum['Day']))
        field_names.append(spectrum['Field'])
        sources.append(spectrum['Source'])
        longitudes.append(spectrum['Longitude'])
        latitudes.append(spectrum['Latitude'])
        ratings.append(spectrum['Rating'])
        cont_sd.append(spectrum['Continuum_SD'])

        fit_fwhms = data_decomposed['fwhms_fit'][i]
        fit_means = data_decomposed['means_fit'][i]
        fit_amps = data_decomposed['amplitudes_fit'][i]
        num_comps.append(len(fit_fwhms))

        velo = data['x_values'][i]
        # opacity = convert_to_ratio(data['data_list'][i])
        residual = calc_residual(velo, data['data_list'][i], fit_amps, fit_fwhms, fit_means)
        residual_rms.append(np.sqrt(np.mean(np.square(residual))))

    temp_table = Table(
        [names, days, field_names, sources, longitudes, latitudes, residual_rms, ratings, num_comps, cont_sd],
        names=['Spectra_Name', 'Day', 'Field', 'Source', 'Longitude', 'Latitude', 'Residual_RMS', 'Rating', 'Num_Comp',
               'Continuum_SD'],
        meta={'ID': 'magmo_decomposition',
              'name': 'MAGMO Decomposition ' + str(datetime.date.today()),
              'alpha1' : alpha1,
              'alpha2' : alpha2})
    votable = from_table(temp_table)
    filename = folder + "/magmo-decomposition.vot"
    writeto(votable, filename)
Example 22
def write_array_to_vot(array, outputFile, isTable=False):
    """
    Writes an array or an astropy table into a .vot file.
    
    Input
    -----
    array : numpy array, astropy table
        The array to write into the file
    outputFile : string
        The file to write the array into
    isTable : boolean
        Whether the array is an astropy table or not.
    """

    #If it is an array it creates an astropy table
    if not isTable:
        array = Table(data=array)

    writeto(array, outputFile)
    return
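A usage sketch for the helper above; the table contents and output file name are made up:

from astropy.table import Table

# Hypothetical source list; a plain structured numpy array with isTable=False would also work.
sources = Table({'ra': [10.68, 83.82], 'dec': [41.27, -5.39]})
write_array_to_vot(sources, 'example_sources.vot', isTable=True)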
Example 23
def output_metrics_xml(reporter, dest_folder):
    titles = []
    descs = []
    values = []
    statuses = []

    for metric in reporter.metrics:
        # title, description, value, status
        titles.append(metric.title)
        descs.append(metric.description)
        values.append(metric.value)
        statuses.append(metric.status)

    temp_table = Table([titles, descs, values, statuses],
                       names=[
                           'metric_name', 'metric_description', 'metric_value',
                           'metric_status'
                       ],
                       dtype=['str', 'str', 'double', 'int'])
    votable = from_table(temp_table)
    table = votable.get_first_table()
    if reporter.project:
        table.params.append(
            Param(votable,
                  name="project",
                  datatype="char",
                  arraysize="*",
                  value=reporter.project))
    if reporter.sbid:
        table.params.append(
            Param(votable,
                  name="sbid",
                  datatype="char",
                  arraysize="*",
                  value=reporter.sbid))
    table.get_field_by_id('metric_name').datatype = 'char'
    table.get_field_by_id('metric_description').datatype = 'char'
    table.get_field_by_id('metric_status').datatype = 'int'
    filename = dest_folder + '/gaskap-metrics.xml'
    writeto(votable, filename)
    return
Example 24
def output_spectra(spectrum, opacity, filename, longitude, latitude, em_mean,
                   em_std, temp_bright, beam_area, sigma_tau, src_id):
    """
    Write the spectrum (velocity, flux and opacity) to a votable format file.

    :param spectrum: The spectrum to be output.
    :param opacity:  The opacity to be output.
    :param filename:  The filename to be created
    :param longitude: The galactic longitude of the target object
    :param latitude: The galactic latitude of the target object
    """
    table = Table(meta={'name': filename, 'id': 'opacity'})
    table.add_column(Column(name='plane', data=spectrum.plane))
    table.add_column(
        Column(name='velocity', data=spectrum.velocity, unit='m/s'))
    table.add_column(Column(name='opacity', data=opacity))
    table.add_column(
        Column(name='flux',
               data=spectrum.flux,
               unit='Jy',
               description='Flux per beam'))
    table.add_column(Column(name='temp_brightness', data=temp_bright,
                            unit='K'))
    table.add_column(
        Column(name='sigma_tau',
               data=sigma_tau,
               description='Noise in the absorption profile'))
    if len(em_mean) > 0:
        # The emission may not be available, so don't include it if not
        table.add_column(Column(name='em_mean', data=em_mean, unit='K'))
        table.add_column(Column(name='em_std', data=em_std, unit='K'))

    votable = from_table(table)
    votable.infos.append(Info('longitude', 'longitude', longitude.value))
    votable.infos.append(Info('latitude', 'latitude', latitude.value))
    votable.infos.append(Info('beam_area', 'beam_area', beam_area))
    votable.infos.append(Info('src_gname', 'gname', src_id))
    writeto(votable, filename)
Example 25
def sample_sky(ra, dec):

	"""
	Returns a list of the counts of viable sources per square degree
	for the input lists of RA and Dec.
	"""

	assert len(ra) == len(dec)
	deg_sq_radius = np.sqrt(1/np.pi)
	counts = []

	# if previous runs exist, increment outdir number by one
	outdir = 'sample_sky1'
	previous = sorted(x for x in os.listdir('.') if outdir[:-1] in x)
	if previous:
		spl = previous[-1].split(outdir[:-1])
		outdir = outdir[:-1] + str(int(spl[1]) + 1)
	os.mkdir(outdir)

	for i in range(len(ra)):
		try:
			vot = get_votable(ra[i], dec[i], deg_sq_radius, 10.0, 15.0)
		except:
			log(outdir, ra[i], dec[i], 'error on call to get_votable')
			continue
		df = cull_dataset(outdir, ra[i], dec[i], vot.get_first_table())
		if df is None:
			log(outdir, ra[i], dec[i], 'cull_dataset returned None')
			continue
		name = str(ra[i])+'_'+str(dec[i])
		vot_path = outdir+'/'+name+'_VOTable.xml'
		writeto(vot,vot_path)
		csv_path = outdir+'/'+name+'_culled.csv'
		df.to_csv(csv_path, index=False)
		counts.append(df.shape[0])
	return counts
Example 26
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 28 12:29:24 2020

Convert textfile to votable.

@author: mbattley
"""

from astropy.table import Table
from astropy.io.votable import from_table, writeto
import numpy as np
from astropy.io import ascii
import pandas as pd

save_path = "/Users/mbattley/Documents/PhD/Young Star Lists/"

#filename = "BANYAN_XI-III_members_with_TIC.csv"
filename = save_path + "Bell17_simplified.txt"

#data - pd.read_table(filename)
data = Table.read(filename, format='ascii')
#data = np.fromfile(filename, dtype=float)

votable = from_table(data)

writeto(votable, save_path + "Bell17.xml")
Example 27
def csv_to_votable(filename, save_filename):
    data = Table.read(filename, format='ascii.csv')

    votable = from_table(data)

    writeto(votable, save_filename)
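A minimal round trip with this helper, with hypothetical file names:

from astropy.io.votable import parse_single_table

csv_to_votable('targets.csv', 'targets.vot')
table = parse_single_table('targets.vot').to_table()
print(len(table), table.colnames)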
Example 28
"""
Created on Thu Jan 10 16:36:48 2019

This short script can be used to convert standard csv files to astropy tables

@author: MatthewTemp
"""
from astropy.table import Table
from astropy.io.votable import from_table, writeto

#filename = "BANYAN_XI-III_members_with_TIC.csv"
#filename = "/Users/mbattley/Documents/PhD/Papers/Detrending of young stars - TESS 1-5/Paper corrections/Grid_sensitivity_analysis/Full Sensitivity Analysis Table.csv"
filename = '/Users/mbattley/Documents/PhD/young_star_lists/Final_young_star_list_MB_EG_20201228_TICv8_matched_unordered.csv'


def csv_to_votable(filename, save_filename):
    data = Table.read(filename, format='ascii.csv')

    votable = from_table(data)

    writeto(votable, save_filename)


data = Table.read(filename, format='ascii.csv')

votable = from_table(data)

writeto(
    votable,
    "/Users/mbattley/Documents/PhD/Young_Star_Lists/Final_young_star_list_MB_EG_20201228_TICv8_matched_unordered.xml"
)
import astropy
from astropy.io import fits, ascii

import numpy as np

from astroquery.sdss import SDSS
from astropy import coordinates as coords

data = astropy.io.ascii.read('/Users/users/mulder/astrods/projectgit/sample_trainingsetwithclass.csv',format='csv', fast_reader=False)
print(data)

from astropy.io.votable import from_table, writeto
votable = from_table(data)
writeto(votable, 'o3726.xml')

import pandas as pd
df = pd.read_csv('/Users/users/mulder/astrods/projectgit/sample_trainingsetwithclass.csv')
print(df)


def convert_info(resource_name, table_name, description):
	return """ <?xml version="1.0"?>
  <VOTABLE version="1.3" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xmlns="http://www.ivoa.net/xml/VOTable/v1.3"
  xmlns:stc="http://www.ivoa.net/xml/STC/v1.30" >
    <RESOURCE name="%s">
      <TABLE name="%s">
        <DESCRIPTION>"%s"</DESCRIPTION>
        <GROUP utype="stc:CatalogEntryLocation">
          <PARAM name="href" datatype="char" arraysize="*"
            utype="stc:AstroCoordSystem.href" value="ivo://STClib/CoordSys#UTC-ICRS-TOPO"/>
Example 30
 def saveFromTable(table, outputfile):
     votable = from_table(table)
     writeto(votable, outputfile)
Example 31
def export_data(datatable, filename):
    votable = from_table(datatable)
    writeto(votable, filename)
Example 32
            print('not A and B not C not D')
            query = "SELECT source_id, ra, dec, l, b, parallax, pmra, pmdec, phot_g_mean_mag, phot_bp_mean_mag, phot_rp_mean_mag, ra_error, dec_error, parallax_error, pmra_error, pmdec_error, bp_g, bp_rp, parallax_pmra_corr, parallax_pmdec_corr, pmra_pmdec_corr, radial_velocity, radial_velocity_error, ruwe FROM gaiadr2.gaia_source as gaia WHERE 1=CONTAINS(POINT(%s, gaia.ra, gaia.dec), CIRCLE(%s, %f, %f, %f )) AND gaia.pmdec < %f  AND gaia.pmdec > %f" % (
                icrs_string, icrs_string, RA_cluster, DEC_cluster, search_rad,
                pmDEC_cluster + sigma_num * epmDEC_cluster,
                pmDEC_cluster - sigma_num * epmDEC_cluster)

        elif A == False and B == False and C == True and D == False:
            print('not A not B and C not D')
            query = "SELECT source_id, ra, dec, l, b, parallax, pmra, pmdec, phot_g_mean_mag, phot_bp_mean_mag, phot_rp_mean_mag, ra_error, dec_error, parallax_error, pmra_error, pmdec_error, bp_g, bp_rp, parallax_pmra_corr, parallax_pmdec_corr, pmra_pmdec_corr, radial_velocity, radial_velocity_error, ruwe FROM gaiadr2.gaia_source as gaia WHERE 1=CONTAINS(POINT(%s, gaia.ra, gaia.dec), CIRCLE(%s, %f, %f, %f )) AND gaia.parallax < %f  AND gaia.parallax > %f" % (
                icrs_string, icrs_string, RA_cluster, DEC_cluster, search_rad,
                parallax_cluster + sigma_num * eparallax_cluster,
                parallax_cluster - sigma_num * eparallax_cluster)

            tap_service = vo.dal.TAPService(tap_service_url)
            tap_result = tap_service.run_async(query)
            writeto(tap_result.table, file_name)

        elif A == False and B == False and C == False and D == True:
            print('not A not B not C and D')
            query = "SELECT source_id, ra, dec, l, b, parallax, pmra, pmdec, phot_g_mean_mag, phot_bp_mean_mag, phot_rp_mean_mag, ra_error, dec_error, parallax_error, pmra_error, pmdec_error, bp_g, bp_rp, parallax_pmra_corr, parallax_pmdec_corr, pmra_pmdec_corr, radial_velocity, radial_velocity_error, ruwe FROM gaiadr2.gaia_source as gaia WHERE 1=CONTAINS(POINT(%s, gaia.ra, gaia.dec), CIRCLE(%s, %f, %f, %f )) AND gaia.radial_velocity < %f  AND gaia.radial_velocity > %f" % (
                icrs_string, icrs_string, RA_cluster, DEC_cluster, search_rad,
                RV_cluster + sigma_num * eRV_cluster,
                RV_cluster - sigma_num * eRV_cluster)

        else:  # everything == 0
            print('not A not B not C not D')
            query = "SELECT source_id, ra, dec, l, b, parallax, pmra, pmdec, phot_g_mean_mag, phot_bp_mean_mag, phot_rp_mean_mag, ra_error, dec_error, parallax_error, pmra_error, pmdec_error, bp_g, bp_rp, parallax_pmra_corr, parallax_pmdec_corr, pmra_pmdec_corr, radial_velocity, radial_velocity_error, ruwe FROM gaiadr2.gaia_source as gaia WHERE 1=CONTAINS(POINT(%s, gaia.ra, gaia.dec), CIRCLE(%s, %f, %f, %f ))" % (
                icrs_string, icrs_string, RA_cluster, DEC_cluster, search_rad)

        print(query)
        tap_service = vo.dal.TAPService(tap_service_url)
Example 33
mytable

## The column 'radial_velocity' is c*z but doesn't include the unit; it is km/s
## Get the speed of light from astropy.constants and express in km/s
c = const.c.to(u.km/u.s).value
redshifts = mytable['radial_velocity']/c
mytable['redshift'] = redshifts
physdist = 0.05*u.Mpc # 50 kpc physical distance

angDdist = Planck15.angular_diameter_distance(mytable['redshift'])
angDrad = np.arctan(physdist/angDdist)
mytable['angDdeg'] = angDrad.to(u.deg)
mytable

## In memory only, use an IO stream.
vot_obj=io.BytesIO()
apvot.writeto(apvot.from_table(mytable),vot_obj)
## (Reset the "file-like" object to the beginning.)
vot_obj.seek(0)

query="""SELECT mt.ra, mt.dec, cat.ra, cat.dec, cat.Radial_Velocity, cat.morph_type, cat.bmag 
    FROM zcat cat, tap_upload.mytable mt 
    WHERE
    contains(point('ICRS',cat.ra,cat.dec),circle('ICRS',mt.ra,mt.dec,mt.angDdeg))=1
    and cat.Radial_Velocity > 0 and cat.radial_velocity != mt.radial_velocity
    ORDER by cat.ra"""
#  Currently broken due to a bug.
#mytable2 = heasarc_tap_services[0].service.run_async(query, uploads={'mytable':vot_obj})
mytable2 = heasarc_tap_services[0].search(query, uploads={'mytable':vot_obj})
vot_obj.close()
mytable2.to_table()
Example 34
def output_gas_catalogue(all_gas):
    num_gas = len(all_gas)
    names = []
    days = []
    field_names = []
    sources = []
    longitudes = []
    latitudes = []
    ras = []
    decs = []
    velocities = np.zeros(num_gas)
    em_velocities = np.ma.array(np.zeros(num_gas))
    optical_depths = np.zeros(num_gas)
    comp_widths = np.zeros(num_gas)
    temps_off = np.ma.array(np.zeros(num_gas))
    delta_temps_off = np.ma.array(np.zeros(num_gas))
    temps_spin = np.ma.array(np.zeros(num_gas))
    delta_temps_spin = np.ma.array(np.zeros(num_gas))
    temps_kmax = np.zeros(num_gas)
    tau = np.zeros(num_gas)
    continuum_sd = np.zeros(num_gas)
    maser_region = np.empty(num_gas, dtype=bool)
    ratings = np.empty(num_gas, dtype=object)
    filenames = np.empty(num_gas, dtype=object)
    local_paths = np.empty(num_gas, dtype=object)
    local_emission_paths = np.empty(num_gas, dtype=object)
    local_spectra_paths = np.empty(num_gas, dtype=object)

    base_path = os.path.realpath('.')

    for i in range(len(all_gas)):
        gas = all_gas[i]
        names.append(gas.name)
        days.append(gas.day)
        field_names.append(gas.field)
        sources.append(gas.src)
        longitudes.append(gas.longitude)
        latitudes.append(gas.latitude)
        ras.append(gas.ra)
        decs.append(gas.dec)
        velocities[i] = gas.comp_vel
        em_velocities[i] = gas.em_vel / 1000 if gas.em_vel else np.ma.masked
        optical_depths[i] = gas.optical_depth
        comp_widths[i] = gas.comp_width
        temps_kmax[i] = gas.t_kmax
        if gas.t_off is None:
            temps_off[i] = np.ma.masked
            delta_temps_off[i] = np.ma.masked
        else:
            temps_off[i] = gas.t_off
            delta_temps_off[i] = gas.delta_t_off
        if gas.t_s is None:
            temps_spin[i] = np.ma.masked
            delta_temps_spin[i] = np.ma.masked
        else:
            temps_spin[i] = gas.t_s
            delta_temps_spin[i] = gas.delta_t_s
        tau[i] = gas.tau
        maser_region[i] = is_gas_near_maser(gas)
        ratings[i] = gas.rating
        continuum_sd[i] = gas.continuum_sd
        # Need to read in spectra to get rating and include it in the catalogue and
        # link to the fit preview: e.g. plots/A/012.909-0.260_19_src4-1_fit
        prefix = 'day' + str(gas.day) + '/' + gas.field + \
                 "_src" + gas.src
        filenames[i] = prefix + "_plot.png"
        em_filename = prefix + "_emission.png"
        spectra_path = 'run2/plots/{}/{}_{}_src{}_fit.png'.format(gas.rating, gas.field, gas.day, gas.src)
        local_paths[i] = base_path + '/' + filenames[i]
        local_emission_paths[i] = base_path + '/' + em_filename
        local_spectra_paths[i] = base_path + '/' + spectra_path

    # bulk calc fields
    vel_diff = np.abs(velocities - em_velocities)
    equiv_width = np.abs((1-optical_depths) * comp_widths)
    fwhm = np.abs(comp_widths)
    column_density = tau * fwhm * temps_spin * 1.823E18 * 1.064
    sigma = fwhm / (2 * math.sqrt(2 * math.log(2)))
    mach_num = np.sqrt(4.2*((temps_kmax/temps_spin)-1))
    n_wnm = ((column_density * temps_spin) / 50 ) - column_density

    temp_table = Table(
        [names, days, field_names, sources, velocities, em_velocities, optical_depths, temps_off, temps_spin, temps_kmax,
         longitudes, latitudes, ras, decs, fwhm, sigma, vel_diff, equiv_width, tau, maser_region, column_density,
         mach_num, n_wnm, ratings, delta_temps_spin, delta_temps_off, continuum_sd,
         filenames, local_paths, local_emission_paths, local_spectra_paths],
        names=['Comp_Name', 'Day', 'Field', 'Source', 'Velocity', 'em_velocity', 'optical_depth', 'temp_off',
               'temp_spin', 'temp_kmax', 'longitude', 'latitude', 'ra', 'dec', 'fwhm', 'sigma', 'vel_diff',
               'equiv_width', 'tau', 'near_maser', 'column_density', 'mach', 'n_wnm', 'Rating',
               'delta_temp_spin', 'delta_temp_off', 'delta_optical_depth',
               'Filename', 'Local_Path', 'Local_Emission_Path', 'Local_Spectrum_Path'],
        meta={'ID': 'magmo_gas',
              'name': 'MAGMO Gas ' + str(datetime.date.today())})
    votable = from_table(temp_table)
    table = votable.get_first_table()
    set_field_metadata(table.get_field_by_id('longitude'), 'pos.galactic.lon', 'deg',
                       'Galactic longitude of the background source')
    set_field_metadata(table.get_field_by_id('latitude'), 'pos.galactic.lat', 'deg',
                       'Galactic latitude of the background source')
    set_field_metadata(table.get_field_by_id('ra'), 'pos.eq.ra;meta.main', 'deg',
                       'Right ascension of the background source (J2000)')
    set_field_metadata(table.get_field_by_id('dec'), 'pos.eq.dec;meta.main', 'deg',
                       'Declination of the background source (J2000)')
    set_field_metadata(table.get_field_by_id('fwhm'), '', 'km/s', 'Full width at half maximum of the Gaussian component')
    set_field_metadata(table.get_field_by_id('sigma'), '', 'km/s', 'Sigma value of the Gaussian component')
    set_field_metadata(table.get_field_by_id('temp_off'), 'phys.temperature;stat.mean', 'K',
                       'The mean temperature for the gas immediately adjacent to the source')
    set_field_metadata(table.get_field_by_id('temp_spin'), 'phys.temperature;stat.mean', 'K',
                       'The excitation or spin temperature of the gas')
    set_field_metadata(table.get_field_by_id('optical_depth'), '', '',
                       'The peak optical depth of the component ($e^(-\\tau)$)')
    set_field_metadata(table.get_field_by_id('column_density'), '', 'cm-2',
                       'The density of Cold HI gas in the Gaussian component')
    set_field_metadata(table.get_field_by_id('n_wnm'), '', 'cm-2',
                       'The density of Warm HI gas in the Gaussian component')
    set_field_metadata(table.get_field_by_id('mach'), '', '',
                       'The turbulent mach number of the gas in the Gaussian component')
    filename = "magmo-gas.vot"
    writeto(votable, filename)
    return table
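The set_field_metadata helper is not shown in this example. A minimal sketch consistent with how it is called here (an assumption, not the original implementation) simply sets the UCD, unit and description of a VOTable field:

def set_field_metadata(field, ucd, unit, description):
    # field is an astropy.io.votable.tree.Field from votable.get_first_table()
    field.ucd = ucd
    if unit:
        field.unit = unit
    field.description = description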
Example 35
    def download(self,jobid=None,filename=None,format=None):
        """
        A public function to download data from a job with COMPLETED phase.

        Keyword Args
        ------------
        jobid :
            Completed jobid to be downloaded
        filename : string
            If left blank, downloaded to the terminal. If specified, data is written out to file (directory can be included here).

        Returns
        -------
        headers, data : list, list
        """

        if jobid is None:
            try:
                jobid = self.current_job
            except:
                raise
                   
        self.check_all_jobs()
        completed_job_responses = self.completed_job_info(jobid)
        soup = BeautifulSoup(completed_job_responses[0].content)
        tableurl = soup.find("uws:result").get("xlink:href")
        
        # This is where the request.content parsing happens
        raw_table_data = self.session.get(tableurl,auth=(self.username,self.password))
        raw_headers = raw_table_data.content.split('\n')[0]
        num_cols = len(raw_headers.split(','))
        num_rows = len(raw_table_data.content.split('\n'))-2
        headers = [raw_headers.split(',')[i].strip('"') for i in range(num_cols)]
        raw_data = [raw_table_data.content.split('\n')[i+1].split(",") for i in range(num_rows)]
        data = [map(eval,raw_data[i]) for i in range(num_rows)]

        if format is not None:
            tbl = Table(data=map(list, zip(*data)),names=headers)
            if format in ['VOTable','votable']:
                votbl = votable.from_table(tbl)
                if filename is None:
                    return votbl
                else:
                    if '.xml' in filename:
                        filename = filename.split('.')[0]
                    votable.writeto(votbl, "{}.xml".format(filename))
                    print("Data written to file: {}.xml".format(filename))
            elif format in ['FITS','fits']:
                print("Need to implement...")
        else:
            if filename is None:
                return headers, data
            else:
                with open(filename, 'wb') as fh:
                    raw_table_data = self.session.get(tableurl,auth=(self.username,self.password),stream=True)
                    for block in raw_table_data.iter_content(1024):
                        if not block:
                            break
                        fh.write(block)
                    print("Data written to file: {}".format(filename))
                return headers, data
Example 36
def compare_heiles_2003(magmo_gas):
    print("## Comparing with Heiles & Troland 2003 ##")

    # Read in ht03 data
    rdr = ascii.get_reader(Reader=ascii.Csv)
    ht03_table = rdr.read('../Millennium_data.csv')
    # filter for just the CNM data |b| < 10
    cnm = np.array(ht03_table['CNM'])
    sample = ht03_table[cnm >= '0']
    abs_lat = np.absolute(np.array(sample['GLAT']))
    ht03_low_cnm = sample[abs_lat <= 10]
    spin_temp = np.array(ht03_low_cnm['Ts'])
    print("Sample had {} values, mean {:0.3f}, median {:0.3f}, sd {:0.3f}".format(len(spin_temp),
                                                                                  np.mean(spin_temp),
                                                                                  np.median(spin_temp),
                                                                                  np.std(spin_temp)))

    votable = from_table(ht03_low_cnm)
    writeto(votable, 'millenium_spin.vot')

    # comparative histogram of the two CNM spin temp sets
    fig = plt.figure(figsize=(7.5, 3))
    gs = matplotlib.gridspec.GridSpec(1, 2)
    gas_array = magmo_gas

    # Spin Temperature
    ax1 = fig.add_subplot(gs[0, 0])
    sample = np.ma.array(gas_array['temp_spin']).compressed()
    bins = np.linspace(0,450,19)
    hist, edges = build_hist_fraction(sample, bins, 450)
    ax1.step(edges, hist)

    ht03_sample = np.array(ht03_low_cnm['Ts'])
    hist, edges = build_hist_fraction(ht03_sample, bins, 450)
    ax1.step(edges, hist, color='black', ls='--')

    ax1.set_xlabel('Spin Temperature (K)')
    ax1.set_ylabel('Fraction of components')

    statistic, p_value = stats.ks_2samp(np.ma.filled(sample), np.ma.filled(ht03_sample))
    print ('Spin temp population similarity p_value={}'.format(p_value))


    # Column Density
    ax2 = fig.add_subplot(gs[0, 1])
    sample = np.ma.array(gas_array['column_density']).compressed()
    sample = np.log10(sample)
    bins = np.linspace(19, 24, 21)
    hist, edges = build_hist_fraction(sample, bins, 24)

    sample = np.array(ht03_low_cnm['NHI']) * 1E20
    sample = np.log10(sample[sample > 0])
    ht03_hist, edges = build_hist_fraction(sample, bins, 24)

    ax2.step(edges, hist) #, width=edges[1]-edges[0])
    ax2.step(edges, ht03_hist, color='black', ls=':') # , width=edges[1]-edges[0]
    label = 'Column Density $\\log_{10}(N_{H}$) (cm$^{-2}$)'
    ax2.set_xlabel(label)
    ax2.set_ylabel('Fraction of components')

    statistic, p_value = stats.ks_2samp(np.ma.filled(gas_array['column_density']), np.ma.filled(np.array(ht03_low_cnm['NHI']) * 1E20))
    print ('Column density population similarity p_value={}'.format(p_value))

    gs.update(wspace=0.5, hspace=0.5)
    filename = 'magmo-heiles_2003_comp.pdf'
    plt.savefig(filename, bbox_inches="tight")
    plt.close()

    return
Example 37
def output_component_catalogue(spectra, data, data_decomposed, folder):
    names = []
    comp_names = []
    days = []
    field_names = []
    sources = []
    longitudes = []
    latitudes = []
    amps = []
    fwhms = []
    means = []
    best_fit_rchi2s = []
    amps_fit_errs = []
    fwhms_fit_errs = []
    means_fit_errs = []

    num_no_comps = {}

    for i in range(len(data_decomposed['fwhms_fit'])):
        if i >= len(data['spectrum_idx']):
            print("Error: data index of %d is invalid for data array of len %d" % (
                i, len(data['spectrum_idx'])))
        spectrum_idx = data['spectrum_idx'][i]
        if spectrum_idx >= len(spectra):
            print("Error: spectra index of %d at row %d is invalid for spectra array of len %d" % (
                spectrum_idx, i, len(spectra)))
        spectrum = spectra[spectrum_idx]
        fit_fwhms = data_decomposed['fwhms_fit'][i]
        fit_means = data_decomposed['means_fit'][i]
        fit_amps = data_decomposed['amplitudes_fit'][i]
        best_fit_rchi2 = data_decomposed['best_fit_rchi2'][i]
        means_fit_err = data_decomposed['means_fit_err'][i]
        fwhms_fit_err = data_decomposed['fwhms_fit_err'][i]
        amplitudes_fit_err = data_decomposed['amplitudes_fit_err'][i]

        if len(fit_amps) > 0.:
            for j in range(len(fit_amps)):
                days.append(int(spectrum['Day']))
                field_names.append(spectrum['Field'])
                sources.append(spectrum['Source'])
                longitudes.append(spectrum['Longitude'])
                latitudes.append(spectrum['Latitude'])
                amps.append(fit_amps[j])
                fwhms.append(fit_fwhms[j])
                means.append(fit_means[j])
                best_fit_rchi2s.append(best_fit_rchi2[0])
                amps_fit_errs.append(amplitudes_fit_err[j])
                fwhms_fit_errs.append(fwhms_fit_err[j])
                means_fit_errs.append(means_fit_err[j])
                names.append(spectrum['Name'])
                suffix = chr(ord('A') + j)
                comp_names.append(spectrum['Name']+suffix)
        else:
            rating = spectrum['Rating']
            num_no_comps[rating] = num_no_comps.get(rating, 0) + 1
            print ("Unable to find components for ")

    temp_table = Table(
        [comp_names, names, days, field_names, sources, longitudes, latitudes, amps, fwhms, means, best_fit_rchi2s,
         amps_fit_errs, fwhms_fit_errs, means_fit_errs],
        names=['Comp_Name', 'Spectra_Name', 'Day', 'Field', 'Source', 'Longitude', 'Latitude', 'Amplitude', 'FWHM',
               'Mean', 'Best_Fit_Rchi2', 'Amplitude_Fit_Err', 'FWHM_Fit_Err', 'Mean_Fit_Err'],
        meta={'ID': 'magmo_components',
              'name': 'MAGMO Components ' + str(datetime.date.today())})
    votable = from_table(temp_table)
    filename = "magmo-components.vot"
    writeto(votable, filename)
    filename = folder + "/magmo-components.vot"
    writeto(votable, filename)

    total_nc = 0
    for rating, count in num_no_comps.items():
        total_nc += count

    print("Wrote out", len(fwhms), "components to", filename, "No components generated for", total_nc)
    for rating in sorted(num_no_comps.keys()):
        print("%s: %3d" % (rating, num_no_comps[rating]))
Example 38
def append_data(data, template_path, output_path):
    old_table = parse_single_table(template_path).to_table()
    new_table = vstack([old_table, data])
    votable = from_table(new_table)

    writeto(votable, output_path)
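A usage sketch for append_data, with hypothetical file names; the new rows must share the template's column names so that vstack can combine them:

from astropy.table import Table

new_rows = Table({'ra': [150.1], 'dec': [2.2]})
append_data(new_rows, 'template.vot', 'combined.vot')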
Example 39
def savevot( table, output_name ):
	writeto(from_table(table), str(output_name))
	print(str(output_name) + " saved in votable successfully!")
	return
Example 40
def output_spectra_catalogue(spectra, output_folder):
    """
    Output the list of spectrum stats to a VOTable file thor-hi-spectra.vot

    :param spectra: The list of Spectrum objects
    :return: None
    """
    rows = len(spectra)
    ids = np.empty(rows, dtype=object)
    days = np.zeros(rows, dtype=int)
    fields = np.empty(rows, dtype=object)
    sources = np.empty(rows, dtype=object)
    longitudes = np.zeros(rows)
    latitudes = np.zeros(rows)
    eq_ras = np.zeros(rows)
    eq_decs = np.zeros(rows)
    max_flux = np.zeros(rows)
    max_opacity = np.zeros(rows)
    min_opacity = np.zeros(rows)
    max_velocity = np.zeros(rows)
    min_velocity = np.zeros(rows)
    rms_opacity = np.zeros(rows)
    opacity_range = np.zeros(rows)
    continuum_sd = np.zeros(rows)
    continuum_temp = np.zeros(rows)
    max_s_max_n = np.zeros(rows)
    max_em_std = np.zeros(rows)
    max_emission = np.zeros(rows)
    min_emission = np.zeros(rows)
    rating = np.empty(rows, dtype=object)
    used = np.empty(rows, dtype=bool)
    resolved = np.empty(rows, dtype=bool)
    duplicate = np.empty(rows, dtype=bool)
    has_emission = np.empty(rows, dtype=bool)
    filenames = np.empty(rows, dtype=object)
    local_paths = np.empty(rows, dtype=object)
    local_emission_paths = np.empty(rows, dtype=object)

    base_path = os.path.realpath('.')
    i = 0
    for spectrum in spectra:
        ids[i] = spectrum.name
        days[i] = int(spectrum.day)
        fields[i] = spectrum.field_name
        sources[i] = spectrum.src_id
        longitudes[i] = spectrum.longitude
        latitudes[i] = spectrum.latitude
        eq_ras[i] = spectrum.ra
        eq_decs[i] = spectrum.dec
        max_flux[i] = np.max(spectrum.flux)
        min_opacity[i] = np.min(spectrum.opacities)
        max_opacity[i] = np.max(spectrum.opacities)
        rms_opacity[i] = np.sqrt(np.mean(np.square(spectrum.opacities)))
        min_velocity[i] = np.min(spectrum.velocity)
        max_velocity[i] = np.max(spectrum.velocity)
        max_em_std[i] = np.max(spectrum.em_std)
        if spectrum.has_emission:
            max_emission[i] = np.max(spectrum.em_temps)
            min_emission[i] = np.min(spectrum.em_temps)

        opacity_range[i] = spectrum.opacity_range
        max_s_max_n[i] = spectrum.max_s_max_n
        continuum_sd[i] = spectrum.continuum_sd
        continuum_temp[i] = spectrum.continuum_temp
        rating[i] = spectrum.rating
        src_parts = spectrum.src_id.split('-')
        resolved[i] = False  # is_resolved(spectrum.day, spectrum.field_name, src_parts[0], src_parts[1], spectrum.beam_area, None)

        duplicate[i] = spectrum.duplicate
        has_emission[i] = spectrum.has_emission
        used[i] = not spectrum.low_sn
        prefix = 'spectra/' + spectrum.field_name + '/' + spectrum.field_name + "_src" + spectrum.src_id
        filenames[i] = prefix + "_plot.png"
        em_filename = prefix + "_emission.png"
        local_paths[i] = base_path + '/' + filenames[i]
        local_emission_paths[i] = base_path + '/' + em_filename
        i += 1

    spectra_table = Table(
        [
            ids, days, fields, sources, longitudes, latitudes, eq_ras, eq_decs,
            max_flux, min_opacity, max_opacity, rms_opacity, min_emission,
            max_emission, min_velocity, max_velocity, used, continuum_temp,
            opacity_range, max_s_max_n, continuum_sd, max_em_std, rating,
            resolved, duplicate, has_emission, filenames, local_paths,
            local_emission_paths
        ],
        names=[
            'Name', 'Day', 'Field', 'Source', 'Longitude', 'Latitude', 'RA',
            'Dec', 'Max_Flux', 'Min_Opacity', 'Max_Opacity', 'RMS_Opacity',
            'Min_Emission', 'Max_Emission', 'Min_Velocity', 'Max_Velocity',
            'Used', 'Continuum_Temp', 'Opacity_Range', 'Max_S_Max_N',
            'Continuum_SD', 'max_em_std', 'Rating', 'Resolved', 'Duplicate',
            'Has_Emission', 'Filename', 'Local_Path', 'Local_Emission_Path'
        ],
        meta={
            'ID': 'thor_hi_spectra',
            'name': 'THOR HI Spectra ' + str(datetime.date.today())
        })
    votable = from_table(spectra_table)
    table = votable.get_first_table()
    set_field_metadata(table.get_field_by_id('Min_Emission'), 'stat.min', 'K',
                       'Minimum average emission')
    set_field_metadata(table.get_field_by_id('Max_Emission'), 'stat.max', 'K',
                       'Maximum average emission')

    filename = output_folder + "/thor-hi-spectra.vot"
    writeto(votable, filename)
    print("Wrote out", i, "spectra to", filename)
    for grade in "ABCDEF":
        num_rating = len(np.where(rating == grade)[0])
        print("%s: %3d" % (grade, num_rating))
    print("Mean continuum sd %f" % np.mean(continuum_sd))