Example #1
def load_tyc_hd() -> Table:
    """Load the Tycho-HD cross index."""
    print('Loading TYC-HD cross index')
    with tarfile.open(os.path.join('vizier', 'tyc2hd.tar.gz'), 'r:gz') as tf:
        with tf.extractfile('./ReadMe') as readme:
            col_names = ['TYC1', 'TYC2', 'TYC3', 'HD']
            reader = io_ascii.get_reader(io_ascii.Cds,
                                         readme=readme,
                                         include_names=col_names)
            reader.data.table_name = 'tyc2_hd.dat'
            with tf.extractfile('./tyc2_hd.dat.gz') as gzf, gzip.open(gzf, 'rb') as f:
                data = reader.read(f)

    parse_tyc_cols(data)

    err_del = np.array(TYC_HD_ERRATA['delete'] + [a[1] for a in TYC_HD_ERRATA['add']])
    data = data[np.logical_not(np.isin(data['HD'], err_del))]

    err_add = Table(np.array(TYC_HD_ERRATA['add']),
                    names=['TYC', 'HD'],
                    dtype=[np.int64, np.int64])

    data = vstack([data, err_add], join_type='exact')

    data = unique(data.group_by('HD'), keys='TYC')
    data = unique(data.group_by('TYC'), keys='HD')

    return data
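For context, TYC_HD_ERRATA and parse_tyc_cols are module-level helpers defined elsewhere in this project. A hedged sketch of the errata dict's shape as implied by the code above, with hypothetical values: 'delete' lists HD numbers to drop, and 'add' lists (TYC, HD) pairs whose HD entries are deleted and then re-added.

# Hypothetical values for illustration only; the real errata live elsewhere.
TYC_HD_ERRATA = {
    'delete': [12345],              # HD numbers to remove outright
    'add': [(1000013000601, 258)],  # (combined TYC id, HD number) pairs
}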
Example #2
def test_col_dtype_in_custom_class():
    """Test code in BaseOutputter._convert_vals to handle Column.dtype
    attribute. See discussion in #11895."""
    dtypes = [np.float32, np.int8, np.int16]

    class TestDtypeHeader(ascii.BasicHeader):
        def get_cols(self, lines):
            super().get_cols(lines)
            for col, dtype in zip(self.cols, dtypes):
                col.dtype = dtype

    class TestDtype(ascii.Basic):
        """
        Basic table data reader with column dtypes float32, int8, int16
        """
        header_class = TestDtypeHeader

    txt = """
    a b c
    1 2 3
    """
    reader = ascii.get_reader(TestDtype)
    t = reader.read(txt)
    for col, dtype in zip(t.itercols(), dtypes):
        assert col.dtype.type is dtype
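For comparison, the same dtype control is usually achieved without a custom header class by passing converters to ascii.read. A minimal sketch, assuming the same three columns as the test above:

import numpy as np
from astropy.io import ascii

# convert_numpy builds a (converter function, numpy type) pair per column.
t = ascii.read("a b c\n1 2 3",
               converters={'a': [ascii.convert_numpy(np.float32)],
                           'b': [ascii.convert_numpy(np.int8)],
                           'c': [ascii.convert_numpy(np.int16)]})
assert t['a'].dtype.type is np.float32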
Example #3
def parse_ssois_return(ssois_return,
                       camera_filter='r.MP9601',
                       telescope_instrument='CFHT/MegaCam'):
    assert camera_filter in ['r.MP9601', 'u.MP9301']
    ret_table = []

    table_reader = ascii.get_reader(Reader=ascii.Basic)
    table_reader.inconsistent_handler = _skip_missing_data
    table_reader.header.splitter.delimiter = '\t'
    table_reader.data.splitter.delimiter = '\t'
    table = table_reader.read(ssois_return)

    for row in table:
        # check if a dbimages object exists
        ccd = int(row['Ext']) - 1
        expnum = row['Image'].rstrip('p')
        X = row['X']
        Y = row['Y']
        mjd = row['MJD']

        # Excludes the OSSOS wallpaper.
        # note: 'Telescope_Insturment' is a typo in SSOIS's return format
        if (row['Telescope_Insturment'] == telescope_instrument) and (row['Filter'] == camera_filter) \
                and not row['Image_target'].startswith('WP'):
            ret_table.append(row)

    return ret_table
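The _skip_missing_data handler wired in above is not shown in this example. astropy calls inconsistent_handler(str_vals, ncols) for any row whose cell count differs from the header, and a None return drops that row; a minimal sketch of such a handler:

def _skip_missing_data(str_vals, ncols):
    # Keep rows that happen to match the header; silently drop the rest.
    if len(str_vals) == ncols:
        return str_vals
    return None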
Example #4
    def extract(self, ra, dec, width, height, keep=0):
        if not self.valid:
            return
        if keep == 0:
            self.data = Table()

        polygon = SkyCoord([ra - width/2., ra + width/2., ra + width/2., ra - width/2.],
                           [dec - height/2., dec - height/2., dec + height/2., dec + height/2.],
                           unit='deg').cartesian.get_xyz().T
        pix = hp.query_polygon(self.NSIDE, polygon, inclusive=True, nest=True)
        rangelist = self._get_tgasptyc_zone_file(pix)

        with open(self.datafile, "r") as f:
            lines = []
            for r in rangelist:
                f.seek(r[0] * self.linelength)
                lines.append(f.read((r[1] - r[0]) * self.linelength))
        content = ''.join(lines)

        reader = ascii.get_reader(Reader=ascii.Cds,
                                  fill_values=[('', 0)],
                                  readme=self.readmefile)
        reader.data.table_name = "tgasptyc.dat"
        catalog = reader.read(content)
        p = catalog[np.where((catalog['RAdeg'] > ra - width/2.)
                             & (catalog['RAdeg'] < ra + width/2.)
                             & (catalog['DEdeg'] > dec - height/2.)
                             & (catalog['DEdeg'] < dec + height/2.))]
        self.data = astropy.table.vstack([self.data, p])

        return len(self.data)
Example #5
def parse_ssois_return(ssois_return, object_name, imagetype, camera_filter='r.MP9601',
                       telescope_instrument='CFHT/MegaCam'):
    """
    Parse through objects in ssois query and filter out images of desired filter, type, exposure time, and instrument
    """

    assert camera_filter in ['r.MP9601', 'u.MP9301']

    ret_table = []
    good_table = 0

    table_reader = ascii.get_reader(Reader=ascii.Basic)
    table_reader.inconsistent_handler = _skip_missing_data
    table_reader.header.splitter.delimiter = '\t'
    table_reader.data.splitter.delimiter = '\t'
    table = table_reader.read(ssois_return)

    for row in table:
        # Excludes the OSSOS wallpaper.
        # note: 'Telescope_Insturment' is a typo in SSOIS's return format
        if 'MegaCam' not in row['Telescope_Insturment']:
            continue
        # Check if image of object exists in OSSOS observations
        if not storage.exists(storage.get_uri(row['Image'][:-1])):
            continue
        if not str(row['Image_target']).startswith('WP'):
            good_table += 1
            ret_table.append(row)

    if good_table > 0:
        print((" %d images found" % good_table))

    return ret_table
Example #6
def test_write_table(fast_writer):
    table = ascii.get_reader(Reader=ascii.Daophot)
    data = table.read('t/daophot.dat')

    for test_def in test_defs:
        check_write_table(test_def, data, fast_writer)
        check_write_table_via_table(test_def, data, fast_writer)
Example #7
def test_write_table(fast_writer):
    table = ascii.get_reader(Reader=ascii.Daophot)
    data = table.read('data/daophot.dat')

    for test_def in test_defs:
        check_write_table(test_def, data, fast_writer)
        check_write_table_via_table(test_def, data, fast_writer)
Example #8
def parse_ssois_return(ssois_return, object_name, imagetype, camera_filter='r.MP9601',
                       telescope_instrument='CFHT/MegaCam'):
    """
    Parse through objects in ssois query and filter out images of desired filter, type, exposure time, and instrument
    """

    assert camera_filter in ['r.MP9601', 'u.MP9301']

    ret_table = []
    good_table = 0

    table_reader = ascii.get_reader(Reader=ascii.Basic)
    table_reader.inconsistent_handler = _skip_missing_data
    table_reader.header.splitter.delimiter = '\t'
    table_reader.data.splitter.delimiter = '\t'
    table = table_reader.read(ssois_return)

    for row in table:
        # Excludes the OSSOS wallpaper.
        # note: 'Telescope_Insturment' is a typo in SSOIS's return format
        if 'MegaCam' not in row['Telescope_Insturment']:
            continue
        # Check if image of object exists in OSSOS observations
        if not storage.exists(storage.get_uri(row['Image'][:-1])):
            continue
        if not str(row['Image_target']).startswith('WP'):
            good_table += 1
            ret_table.append(row)

    if good_table > 0:
        print " %d images found" % good_table

    return ret_table
Example #9
def test_write_valid_meta_ipac():
    """Write an IPAC table that contains no data and has *correctly* specified
    metadata.  No warnings should be issued"""
    table = ascii.get_reader(Reader=ascii.Ipac)
    data = table.read('data/no_data_ipac.dat')
    data.meta['keywords']['blah'] = {'value': 'invalid'}
    out = StringIO()
    data.write(out, format='ascii.ipac')
Example #10
    def _create_reader(cls, readme: IO, table: str, names: List[str],
                       **kwargs) -> io_ascii.Cds:
        reader = io_ascii.get_reader(io_ascii.Cds,
                                     readme=readme,
                                     include_names=names,
                                     **kwargs)
        reader.data.table_name = table
        return reader
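Hypothetical usage of this helper (the enclosing class, file names, and column names are illustrative, not from the original source):

# with tf.extractfile('./ReadMe') as readme:
#     reader = CatalogLoader._create_reader(readme, 'catalog.dat',
#                                           ['RAdeg', 'DEdeg'])
#     data = reader.read(tf.extractfile('./catalog.dat'))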
Example #11
def test_write_no_data_ipac(fast_writer):
    """Write an IPAC table that contains no data."""
    table = ascii.get_reader(Reader=ascii.Ipac)
    data = table.read('t/no_data_ipac.dat')

    for test_def in test_defs_no_data:
        check_write_table(test_def, data, fast_writer)
        check_write_table_via_table(test_def, data, fast_writer)
Example #12
def test_cds_units():
    from astropy import units
    data_and_readme = 'data/cds.dat'
    reader = ascii.get_reader(ascii.Cds)
    table = reader.read(data_and_readme)
    # column unit is GMsun (giga solar masses)
    # make sure this is parsed correctly, not as a "string" unit
    assert table['Fit'].to(units.solMass).unit == units.solMass
Example #13
def test_cds_units():
    from astropy import units
    data_and_readme = 'data/cds.dat'
    reader = ascii.get_reader(ascii.Cds)
    table = reader.read(data_and_readme)
    # column unit is GMsun (giga solar masses)
    # make sure this is parsed correctly, not as a "string" unit
    assert table['Fit'].to(units.solMass).unit == units.solMass
Example #14
def test_write_no_data_ipac(fast_writer):
    """Write an IPAC table that contains no data."""
    table = ascii.get_reader(Reader=ascii.Ipac)
    data = table.read('data/no_data_ipac.dat')

    for test_def in test_defs_no_data:
        check_write_table(test_def, data, fast_writer)
        check_write_table_via_table(test_def, data, fast_writer)
Example #15
def test_cds_function_units(reader_cls):
    data_and_readme = 'data/cdsFunctional.dat'
    reader = ascii.get_reader(reader_cls)
    table = reader.read(data_and_readme)
    assert table['logg'].unit == u.dex(u.cm / u.s**2)
    assert table['logTe'].unit == u.dex(u.K)
    assert table['Mass'].unit == u.Msun
    assert table['e_Mass'].unit == u.Msun
    assert table['Age'].unit == u.Myr
    assert table['e_Age'].unit == u.Myr
Example #16
def test_cds_function_units():
    from astropy.units import dex
    data_and_readme = 'data/cdsFunctional.dat'
    reader = ascii.get_reader(ascii.Cds)
    table = reader.read(data_and_readme)
    assert table['logg'].unit == u.dex(u.cm / u.s**2)
    assert table['logTe'].unit == u.dex(u.K)
    assert table['Mass'].unit == u.Msun
    assert table['e_Mass'].unit == u.Msun
    assert table['Age'].unit == u.Myr
    assert table['e_Age'].unit == u.Myr
Example #17
def test_write_valid_meta_ipac():
    """Write an IPAC table that contains no data and has *correctly* specified
    metadata.  No warnings should be issued"""
    table = ascii.get_reader(Reader=ascii.Ipac)
    data = table.read('t/no_data_ipac.dat')
    data.meta['keywords']['blah'] = {'value': 'invalid'}

    with catch_warnings(AstropyWarning) as ASwarn:
        out = StringIO()
        data.write(out, format='ascii.ipac')
    assert len(ASwarn) == 0
Example #18
def convert_to_fits(msss_cat):
    # For a .txt input: comment lines at the start of the file must be
    # stripped, which the comment patterns set below handle.
    rdr = ascii.get_reader(Reader=ascii.Basic)
    rdr.header.splitter.delimiter = ' '
    rdr.data.splitter.delimiter = ' '
    rdr.header.start_line = 0
    rdr.data.start_line = 1
    rdr.data.end_line = None
    rdr.header.comment = r'\s*#'
    rdr.data.comment = r'\s*#'
    data = rdr.read(msss_cat + '.txt')
    data.write(msss_cat + '.fits', format='fits')
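Hypothetical usage (the catalogue name is illustrative):

# convert_to_fits('msss_field1')  # reads msss_field1.txt, writes msss_field1.fits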
Example #19
def test_write_invalid_toplevel_meta_ipac():
    """Write an IPAC table that contains no data but has invalid (incorrectly
    specified) metadata stored in the top-level metadata and therefore should
    raise a warning, and check that the warning has been raised"""
    table = ascii.get_reader(Reader=ascii.Ipac)
    data = table.read('data/no_data_ipac.dat')
    data.meta['blah'] = 'extra'
    out = StringIO()

    with pytest.warns(AstropyWarning, match=r'.*were not written.*') as warn:
        data.write(out, format='ascii.ipac')
    assert len(warn) == 1
Example #20
    def parse(self, ssos_result_filename_or_lines):
        """
        given the result table create 'source' objects.

        :param ssos_result_filename_or_lines:
        :rtype Table
        """
        table_reader = ascii.get_reader(Reader=ascii.Basic)
        table_reader.inconsistent_handler = self._skip_missing_data
        table_reader.header.splitter.delimiter = '\t'
        table_reader.data.splitter.delimiter = '\t'
        return table_reader.read(ssos_result_filename_or_lines)
Example #21
def test_cds_function_units2(reader_cls):
    # This one includes some dimensionless dex.
    data_and_readme = 'data/cdsFunctional2.dat'
    reader = ascii.get_reader(reader_cls)
    table = reader.read(data_and_readme)
    assert table['Teff'].unit == u.K
    assert table['logg'].unit == u.dex(u.cm / u.s**2)
    assert table['vturb'].unit == u.km / u.s
    assert table['[Fe/H]'].unit == u.dex(u.one)
    assert table['e_[Fe/H]'].unit == u.dex(u.one)
    assert_almost_equal(table['[Fe/H]'].to(u.one),
                        10.**(np.array([-2.07, -1.50, -2.11, -1.64])))
Example #22
def test_read_normal_names():
    """Nice, typical fixed format table with col names provided"""
    table = """
# comment (with blank line above)
|  Col1  |  Col2   |
|  1.2   | "hello" |
|  2.4   |'s worlds|
"""
    reader = ascii.get_reader(Reader=ascii.FixedWidth,
                              names=('name1', 'name2'))
    dat = reader.read(table)
    assert_equal(dat.colnames, ['name1', 'name2'])
    assert_almost_equal(dat[1][0], 2.4)
Example #23
def test_read_normal_names():
    """Nice, typical fixed format table with col names provided"""
    table = """
# comment (with blank line above)
|  Col1  |  Col2   |
|  1.2   | "hello" |
|  2.4   |'s worlds|
"""
    reader = ascii.get_reader(Reader=ascii.FixedWidth,
                              names=('name1', 'name2'))
    dat = reader.read(table)
    assert_equal(dat.colnames, ['name1', 'name2'])
    assert_almost_equal(dat[1][0], 2.4)
Example #24
def test_read_weird():
    """Weird input table with data values chopped by col extent """
    table = """
  Col1  |  Col2 |
  1.2       "hello"
  2.4   sdf's worlds
"""
    reader = ascii.get_reader(Reader=ascii.FixedWidth)
    dat = reader.read(table)
    assert_equal(dat.colnames, ['Col1', 'Col2'])
    assert_almost_equal(dat[1][0], 2.4)
    assert_equal(dat[0][1], '"hel')
    assert_equal(dat[1][1], "df's wo")
Example #25
def test_read_normal_exclude():
    """Nice, typical fixed format table with col name excluded"""
    table = """
# comment (with blank line above)
|  Col1  |  Col2   |
|  1.2   | "hello" |
|  2.4   |'s worlds|
"""
    reader = ascii.get_reader(Reader=ascii.FixedWidth,
                              exclude_names=('Col1', ))
    dat = reader.read(table)
    assert_equal(dat.colnames, ['Col2'])
    assert_equal(dat[1][0], "'s worlds")
Example #26
def test_write_invalid_toplevel_meta_ipac():
    """Write an IPAC table that contains no data but has invalid (incorrectly
    specified) metadata stored in the top-level metadata and therefore should
    raise a warning, and check that the warning has been raised"""
    table = ascii.get_reader(Reader=ascii.Ipac)
    data = table.read('t/no_data_ipac.dat')
    data.meta['blah'] = 'extra'

    with catch_warnings(AstropyWarning) as ASwarn:
        out = StringIO()
        data.write(out, format='ascii.ipac')
    assert len(ASwarn) == 1
    assert "were not written" in str(ASwarn[0].message)
Example #27
def test_read_normal_exclude():
    """Nice, typical fixed format table with col name excluded"""
    table = """
# comment (with blank line above)
|  Col1  |  Col2   |
|  1.2   | "hello" |
|  2.4   |'s worlds|
"""
    reader = ascii.get_reader(Reader=ascii.FixedWidth,
                              exclude_names=('Col1',))
    dat = reader.read(table)
    assert_equal(dat.colnames, ['Col2'])
    assert_equal(dat[1][0], "'s worlds")
Example #28
def test_read_weird():
    """Weird input table with data values chopped by col extent """
    table = """
  Col1  |  Col2 |
  1.2       "hello"
  2.4   sdf's worlds
"""
    reader = ascii.get_reader(Reader=ascii.FixedWidth)
    dat = reader.read(table)
    assert_equal(dat.colnames, ['Col1', 'Col2'])
    assert_almost_equal(dat[1][0], 2.4)
    assert_equal(dat[0][1], '"hel')
    assert_equal(dat[1][1], "df's wo")
Example #29
    def parse(self, ssos_result_filename_or_lines):
        """
        Given the result table, create 'source' objects.

        :param ssos_result_filename_or_lines:
        :rtype: Table
        """
        table_reader = ascii.get_reader(Reader=ascii.Basic)
        table_reader.inconsistent_handler = self._skip_missing_data
        table_reader.header.splitter.delimiter = '\t'
        table_reader.data.splitter.delimiter = '\t'
        return table_reader.read(ssos_result_filename_or_lines)
Example #30
def test_write_invalid_toplevel_meta_ipac():
    """Write an IPAC table that contains no data but has invalid (incorrectly
    specified) metadata stored in the top-level metadata and therefore should
    raise a warning, and check that the warning has been raised"""
    table = ascii.get_reader(Reader=ascii.Ipac)
    data = table.read('t/no_data_ipac.dat')
    data.meta['blah'] = 'extra'

    with catch_warnings(AstropyWarning) as ASwarn:
        out = StringIO()
        data.write(out, format='ascii.ipac')
    assert len(ASwarn) == 1
    assert "were not written" in str(ASwarn[0].message)
Example #31
def test_write_invalid_keyword_meta_ipac():
    """Write an IPAC table that contains no data but has invalid (incorrectly
    specified) metadata stored appropriately in the ``keywords`` section
    of the metadata but with invalid format and therefore should raise a
    warning, and check that the warning has been raised"""
    table = ascii.get_reader(Reader=ascii.Ipac)
    data = table.read('data/no_data_ipac.dat')
    data.meta['keywords']['blah'] = 'invalid'
    out = StringIO()

    with pytest.warns(AstropyWarning, match=r'.*has been skipped.*') as warn:
        data.write(out, format='ascii.ipac')
    assert len(warn) == 1
Example #32
def test_read_unbounded_right_column_header():
    """The right hand column should be allowed to overflow"""
    table = """
# comment (with blank line above)
===== ===== ====
 Col1  Col2 Col3Long
===== ===== ====
 1.2    2    Hello
 2.4     4   Worlds
===== ===== ====
"""
    reader = ascii.get_reader(Reader=ascii.RST)
    dat = reader.read(table)
    assert_equal(dat.colnames[-1], "Col3Long")
Example #33
def cone_search(ra,
                dec,
                dra=0.01,
                ddec=0.01,
                mjdate=None,
                calibration_level=2):
    """Do a QUERY on the TAP service for all observations that are part of OSSOS (*P05/*P016)
    where taken after mjd and have calibration 'observable'.

    :param ra: float degrees
    :param dec: float degrees
    :param dra: float degrees
    :param ddec: float degrees
    """

    data = dict(QUERY=(" SELECT Observation.observationID as collectionID, "
                       " Plane.time_bounds_cval1 AS mjdate "
                       " FROM caom2.Observation AS Observation "
                       " JOIN caom2.Plane AS Plane "
                       " ON Observation.obsID = Plane.obsID "
                       " WHERE  ( Observation.collection = 'CFHT' ) "
                       " AND Plane.calibrationLevel={} "
                       " AND ( Observation.proposal_id LIKE '%P05' or Observation.proposal_id LIKE '%P06' )"),
                REQUEST="doQuery",
                LANG="ADQL",
                FORMAT="tsv")

    data["QUERY"] = data["QUERY"].format(calibration_level)
    data["QUERY"] += (" AND  "
                      " INTERSECTS( BOX('ICRS', {}, {}, {}, {}), "
                      " Plane.position_bounds ) = 1 ").format(
                          ra, dec, dra, ddec)
    if mjdate is not None:
        data["QUERY"] += (" AND Plane.time_bounds_cval1 < {} AND "
                          "Plane.time_bounds_cval2 > {} ").format(
                              mjdate + 1.0 / 24.0, mjdate - 1.0 / 24.0)

    result = requests.get(TAP_WEB_SERVICE, params=data, verify=False)
    assert isinstance(result, requests.Response)
    logger.debug("Doing TAP Query using url: %s" % (str(result.url)))

    table_reader = ascii.get_reader(Reader=ascii.Basic)
    table_reader.header.splitter.delimiter = '\t'
    table_reader.data.splitter.delimiter = '\t'
    table = table_reader.read(result.text)

    logger.debug(str(table))
    return table
Example #34
def test_read_unbounded_right_column_header():
    """The right hand column should be allowed to overflow"""
    table = """
# comment (with blank line above)
===== ===== ====
 Col1  Col2 Col3Long
===== ===== ====
 1.2    2    Hello
 2.4     4   Worlds
===== ===== ====
"""
    reader = ascii.get_reader(Reader=ascii.RST)
    dat = reader.read(table)
    assert_equal(dat.colnames[-1], "Col3Long")
Example #35
def test_read_normal():
    """Nice, typical fixed format table"""
    table = """
# comment (with blank line above)
|  Col1  |  Col2   |
|  1.2   | "hello" |
|  2.4   |'s worlds|
"""
    reader = ascii.get_reader(Reader=ascii.FixedWidth)
    dat = reader.read(table)
    assert_equal(dat.colnames, ['Col1', 'Col2'])
    assert_almost_equal(dat[1][0], 2.4)
    assert_equal(dat[0][1], '"hello"')
    assert_equal(dat[1][1], "'s worlds")
Example #36
def test_read_normal():
    """Nice, typical fixed format table"""
    table = """
# comment (with blank line above)
|  Col1  |  Col2   |
|  1.2   | "hello" |
|  2.4   |'s worlds|
"""
    reader = ascii.get_reader(Reader=ascii.FixedWidth)
    dat = reader.read(table)
    assert_equal(dat.colnames, ['Col1', 'Col2'])
    assert_almost_equal(dat[1][0], 2.4)
    assert_equal(dat[0][1], '"hello"')
    assert_equal(dat[1][1], "'s worlds")
Example #37
def test_read_normal_exclude():
    """Nice, typical SimpleRST table with col name excluded"""
    table = """
======= ==========
  Col1     Col2
======= ==========
  1.2     "hello"
  2.4    's worlds
======= ==========
"""
    reader = ascii.get_reader(Reader=ascii.RST, exclude_names=('Col1', ))
    dat = reader.read(table)
    assert_equal(dat.colnames, ['Col2'])
    assert_equal(dat[1][0], "'s worlds")
Example #38
def test_write_invalid_keyword_meta_ipac():
    """Write an IPAC table that contains no data but has invalid (incorrectly
    specified) metadata stored appropriately in the ``keywords`` section
    of the metadata but with invalid format and therefore should raise a
    warning, and check that the warning has been raised"""
    table = ascii.get_reader(Reader=ascii.Ipac)
    data = table.read('t/no_data_ipac.dat')
    data.meta['keywords']['blah'] = 'invalid'

    with catch_warnings(AstropyWarning) as ASwarn:
        out = StringIO()
        data.write(out, format='ascii.ipac')
    assert len(ASwarn) == 1
    assert "has been skipped" in str(ASwarn[0].message)
Example #39
def cone_search(ra, dec, dra=0.01, ddec=0.01, runids=('13AP05','13AP06','13BP05', '14AP05')):
    """Do a QUERY on the TAP service for all observations that are part of runid,
    where taken after mjd and have calibration 'observable'.

    :param runids:
    :param ra:
    :param dec:
    :param dra: degrees
    :param ddec: degrees
    mjd : float
    observable: str ( CAL or RAW)
    runid: tuple eg. ('13AP05', '13AP06')
    ra: float right ascension
    dec: float declination

    """

    data = {
        "QUERY": (" SELECT Observation.collectionID as dataset_name "
                  " FROM caom.Observation AS Observation "
                  " JOIN caom.Plane AS Plane "
                  " ON Observation.obsID = Plane.obsID "
                  " WHERE  ( Observation.collection = 'CFHT' ) "
                  " AND Plane.observable_ctype='CAL' "
                  " AND Observation.proposal_id IN %s ") % str(runids),
        "REQUEST": "doQuery",
        "LANG": "ADQL",
        "FORMAT": "tsv"}

    data["QUERY"] += (" AND  "
                      " CONTAINS( BOX('ICRS', {}, {}, {}, {}), "
                      " Plane.position_bounds ) = 1 ").format(ra, dec, dra, ddec)

    result = requests.get(TAP_WEB_SERVICE, params=data)
    assert isinstance(result, requests.Response)
    logger.debug("Doing TAP Query using url: %s" % str(result.url))
    #data = StringIO(result.text)

    table_reader = ascii.get_reader(Reader=ascii.Basic)
    table_reader.header.splitter.delimiter = '\t'
    table_reader.data.splitter.delimiter = '\t'
    table = table_reader.read(result.text)

    #vot = votable.parse_single_table(data)
    #vot.array.sort(order='dataset_name')
    #t = vot.array
    logger.debug(type(table))
    logger.debug(str(table))
    return table
Example #40
def test_read_normal_names():
    """Normal SimpleRST Table with provided column names"""
    table = """
# comment (with blank line above)
======= =========
   Col1      Col2
======= =========
   1.2    "hello"
   2.4  's worlds
======= =========
"""
    reader = ascii.get_reader(Reader=ascii.RST, names=('name1', 'name2'))
    dat = reader.read(table)
    assert_equal(dat.colnames, ['name1', 'name2'])
    assert_almost_equal(dat[1][0], 2.4)
Example #41
def test_read_normal_exclude():
    """Nice, typical SimpleRST table with col name excluded"""
    table = """
======= ==========
  Col1     Col2
======= ==========
  1.2     "hello"
  2.4    's worlds
======= ==========
"""
    reader = ascii.get_reader(Reader=ascii.RST,
                              exclude_names=('Col1',))
    dat = reader.read(table)
    assert_equal(dat.colnames, ['Col2'])
    assert_equal(dat[1][0], "'s worlds")
Example #42
def test_read_unbounded_right_column():
    """The right hand column should be allowed to overflow"""
    table = """
# comment (with blank line above)
===== ===== ====
 Col1  Col2 Col3
===== ===== ====
 1.2    2    Hello
 2.4     4   Worlds
===== ===== ====
"""
    reader = ascii.get_reader(Reader=ascii.RST)
    dat = reader.read(table)
    assert_equal(dat[0][2], "Hello")
    assert_equal(dat[1][2], "Worlds")
Example #43
def test_read_unbounded_right_column():
    """The right hand column should be allowed to overflow"""
    table = """
# comment (with blank line above)
===== ===== ====
 Col1  Col2 Col3
===== ===== ====
 1.2    2    Hello
 2.4     4   Worlds
===== ===== ====
"""
    reader = ascii.get_reader(Reader=ascii.RST)
    dat = reader.read(table)
    assert_equal(dat[0][2], "Hello")
    assert_equal(dat[1][2], "Worlds")
Example #44
def load_tyc2specnew() -> Table:
    """Load revised spectral types."""
    print("Loading revised TYC2 spectral types")
    with tarfile.open(os.path.join('vizier', 'tyc2specnew.tar.gz')) as tf:
        with tf.extractfile('./ReadMe') as readme:
            reader = io_ascii.get_reader(io_ascii.Cds,
                                         readme=readme,
                                         include_names=['HIP', 'SpType1'])
            reader.data.table_name = 'table2.dat'
            with tf.extractfile('./table2.dat') as f:
                # Suppress a warning because reader does not handle logarithmic units
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore", UnitsWarning)
                    data = reader.read(f)
                    return data[data['SpType1'] != '']
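Both this loader and the UBVRI loader in the next example reference UnitsWarning without showing its import; it comes from astropy.units:

from astropy.units import UnitsWarning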
Example #45
def load_ubvri() -> Table:
    """Load UBVRI Teff calibration from VizieR archive."""
    print('Loading UBVRI calibration')
    with tarfile.open(os.path.join('vizier', 'ubvriteff.tar.gz'), 'r:gz') as tf:
        with tf.extractfile('./ReadMe') as readme:
            col_names = ['V-K', 'B-V', 'V-I', 'J-K', 'H-K', 'Teff']
            reader = io_ascii.get_reader(io_ascii.Cds,
                                         readme=readme,
                                         include_names=col_names)
            reader.data.table_name = 'table3.dat'
            with tf.extractfile('./table3.dat.gz') as gzf, gzip.open(gzf, 'rb') as f:
                # Suppress a warning generated because the reader does not handle logarithmic units
                with warnings.catch_warnings():
                    warnings.simplefilter('ignore', UnitsWarning)
                    return reader.read(f)
Example #46
def test_read_right_indented_table():
    """We should be able to read right indented tables correctly"""
    table = """
# comment (with blank line above)
   ==== ==== ====
   Col1 Col2 Col3
   ==== ==== ====
    3    3.4  foo
    1    4.5  bar
   ==== ==== ====
"""
    reader = ascii.get_reader(Reader=ascii.RST)
    dat = reader.read(table)
    assert_equal(dat.colnames, ["Col1", "Col2", "Col3"])
    assert_equal(dat[0][2], "foo")
    assert_equal(dat[1][0], 1)
Example #47
def test_read_normal_names():
    """Normal SimpleRST Table with provided column names"""
    table = """
# comment (with blank line above)
======= =========
   Col1      Col2
======= =========
   1.2    "hello"
   2.4  's worlds
======= =========
"""
    reader = ascii.get_reader(Reader=ascii.RST,
                              names=('name1', 'name2'))
    dat = reader.read(table)
    assert_equal(dat.colnames, ['name1', 'name2'])
    assert_almost_equal(dat[1][0], 2.4)
Example #48
def test_ipac_read_types():
    table = r"""\
|     ra   |    dec   |   sai   |-----v2---|    sptype        |
|    real  |   float  |   l     |    real  |     char         |
|    unit  |   unit   |   unit  |    unit  |     ergs         |
|    null  |   null   |   null  |    null  |     -999         |
   2.09708   2956        73765    2.06000   B8IVpMnHg
"""
    reader = ascii.get_reader(Reader=ascii.Ipac)
    dat = reader.read(table)
    types = [
        ascii.FloatType, ascii.FloatType, ascii.IntType, ascii.FloatType,
        ascii.StrType
    ]
    for (col, expected_type) in zip(reader.cols, types):
        assert_equal(col.type, expected_type)
Example #49
def test_ipac_read_types():
    table = r"""\
|     ra   |    dec   |   sai   |-----v2---|    sptype        |
|    real  |   float  |   l     |    real  |     char         |
|    unit  |   unit   |   unit  |    unit  |     ergs         |
|    null  |   null   |   null  |    null  |     -999         |
   2.09708   2956        73765    2.06000   B8IVpMnHg
"""
    reader = ascii.get_reader(Reader=ascii.Ipac)
    dat = reader.read(table)
    types = [ascii.FloatType,
             ascii.FloatType,
             ascii.IntType,
             ascii.FloatType,
             ascii.StrType]
    for (col, expected_type) in zip(reader.cols, types):
        assert_equal(col.type, expected_type)
Example #50
def test_read_normal():
    """Normal SimpleRST Table"""
    table = """
# comment (with blank line above)
======= =========
   Col1      Col2
======= =========
   1.2    "hello"
   2.4  's worlds
======= =========
"""
    reader = ascii.get_reader(Reader=ascii.RST)
    dat = reader.read(table)
    assert_equal(dat.colnames, ['Col1', 'Col2'])
    assert_almost_equal(dat[1][0], 2.4)
    assert_equal(dat[0][1], '"hello"')
    assert_equal(dat[1][1], "'s worlds")
Example #51
def csv_to_table(handle, 
                 delimiter='|', 
                 comments=r'\s*(#|//|--)',
                 header_search=r'(?P<keyword>\w+)\s*=\s*(?P<value>\w+.*?)(/(?P<comment>.*))?$'):
    '''Produce an astropy table, and related keyword/value pairs from a CSV file.

    Parameters
    ----------
    handle: File handle
            The file object being read from.

    delimiter: str
               The single-character delimiter that separates columns.

    comments: str
              A regular expression for determining whether a line
              is a comment or not.

    header_search: str
                   A regular expression that finds and parses
                   keyword/value pairs from comment lines.
                   The regex must contain two named groups:
                   'keyword' for the keyword match and 'value'
                   for the value. An optional 'comment'
                   group may also be present to attach a comment to
                   the keyword/value pair, in standard FITS fashion.

    Returns
    -------
    astropy.table.Table:
        The table read from the handle. Of note, the table's 'meta'
        attribute carries the dict of header keywords found.
    '''

    # Setup the astropy reader.
    reader = ascii.get_reader(Reader=CSVKeywords)
    reader.header.start_line = 0
    reader.header.splitter.delimiter = delimiter
    reader.data.splitter.delimiter = reader.header.splitter.delimiter
    reader.header.comment = comments
    reader.data.comment = reader.header.comment
    reader.header.keywords = header_search

    # All setup, return the table
    return reader.read(handle)
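To illustrate the default header_search pattern, a small standalone check (the sample line is hypothetical):

import re

pattern = r'(?P<keyword>\w+)\s*=\s*(?P<value>\w+.*?)(/(?P<comment>.*))?$'
m = re.search(pattern, '# DATE = 2020-01-01 / observation date')
# Roughly: m['keyword'] == 'DATE', m['value'] == '2020-01-01 ' (note the
# trailing space), and m['comment'] == ' observation date'.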
Example #52
def test_trailing_spaces_in_row_definition():
    """ Trailing spaces in the row definition column shouldn't matter"""
    table = (
        "\n"
        "# comment (with blank line above)\n"
        "   ==== ==== ====    \n"
        "   Col1 Col2 Col3\n"
        "   ==== ==== ====  \n"
        "    3    3.4  foo\n"
        "    1    4.5  bar\n"
        "   ==== ==== ====  \n"
    )
    # make sure no one accidentally deletes the trailing whitespaces in the
    # table.
    assert len(table) == 151

    reader = ascii.get_reader(Reader=ascii.RST)
    dat = reader.read(table)
    assert_equal(dat.colnames, ["Col1", "Col2", "Col3"])
    assert_equal(dat[0][2], "foo")
    assert_equal(dat[1][0], 1)
Example #53
File: ssos.py Project: OSSOS/MOP
    def parse(self, ssos_result_filename_or_lines, mpc_observations=None):
        """
        Given the result table, create 'source' objects.

        :param ssos_result_filename_or_lines:
        :param mpc_observations: a list of mpc.Observation objects used to retrieve the SSOS observations
        """
        table_reader = ascii.get_reader(Reader=ascii.Basic)
        table_reader.inconsistent_handler = self._skip_missing_data
        table_reader.header.splitter.delimiter = '\t'
        table_reader.data.splitter.delimiter = '\t'
        ssos_table = table_reader.read(ssos_result_filename_or_lines)

        dbimage_list = storage.list_dbimages(dbimages=storage.DBIMAGES)
        logger.debug("Comparing to {} observations in dbimages: {}".format(len(dbimage_list), storage.DBIMAGES))
        sources = []
        observations = []
        source_readings = []

        if mpc_observations is not None and isinstance(mpc_observations[0], mpc.Observation):
            orbit = Orbfit(mpc_observations)
        else:
            from mp_ephem import horizons
            start_time = Time(min(ssos_table['MJD']), format='mjd')
            stop_time = Time(max(ssos_table['MJD']), format='mjd')
            step_size = 5.0 * units.hour
            orbit = horizons.Body(self.provisional_name, start_time, stop_time, step_size)

        warnings.filterwarnings('ignore')
        logger.info("Loading {} observations\n".format(len(ssos_table)))
        expnums_examined = []
        for row in ssos_table:
            # Trim down to OSSOS-specific images

            logger.debug("Checking row: {}".format(row))
            if (row['Filter'] not in parameters.OSSOS_FILTERS) or row['Image_target'].startswith('WP'):
                logger.debug("Failed filter / target name check")
                continue

            # check if a dbimages object exists
            # For CFHT/MegaCam strip off the trailing character to get the exposure number.
            ftype = row['Image'][-1]
            expnum = row['Image'][:-1]
            if str(expnum) not in dbimage_list:
                logger.debug("Expnum: {} Failed dbimage list check".format(expnum))
                continue
            logger.debug("Expnum: {} Passed dbimage list check".format(expnum))
            # The file extension is the ccd number + 1 , or the first extension.
            ccd = int(row['Ext']) - 1
            if ccd > 39 or ccd < 0:
                ccd = None
            x = row['X'] * units.pix
            y = row['Y'] * units.pix
            ra = row['Object_RA'] * units.degree
            dec = row['Object_Dec'] * units.degree
            ssois_coordinate = SkyCoord(ra, dec)
            mjd = row['MJD'] * units.day

            # if not 0 < x.value < 2060 or not 0 < y.value < 4700:
            #    continue

            obs_date = Time(mjd, format='mjd', scale='utc')
            logger.info("Calling predict")
            orbit.predict(obs_date)
            logger.info("Done calling predict")
            if orbit.dra > 4 * units.arcminute or orbit.ddec > 4.0 * units.arcminute:
                print "Skipping entry as orbit uncertainty at date {} is large.".format(obs_date)
                continue
            if expnum in expnums_examined:
                logger.debug("Already checked this exposure.")
                continue
            expnums_examined.append(expnum)

            logger.debug(("SSOIS Prediction: exposure:{} ext:{} "
                          "ra:{} dec:{} x:{} y:{}").format(expnum, ccd, ra, dec, x, y))

            logger.debug(("Orbfit Prediction: "
                          "ra:{} dec:{} ").format(orbit.coordinate.ra.to(units.degree),
                                                  orbit.coordinate.dec.to(units.degree)))
            logger.info("Building Observation")
            observation = SSOSParser.build_source_reading(expnum, ccd, ftype=ftype)
            observation.mjd = mjd
            from_input_file = observation.rawname in self.input_rawnames
            # compare to input observation list.
            previous = False
            mpc_observation = None
            if from_input_file:
                for mpc_observation in mpc_observations:
                    try:
                        if mpc_observation.comment.frame.strip() == observation.rawname:
                            # only skip previous observations if not discovery.
                            previous = not mpc_observation.discovery
                            break
                    except Exception as e:
                        logger.debug(str(e))
                        pass
                    mpc_observation = None

            # skip previously measured observations if requested.
            if self.skip_previous and (previous or observation.rawname in self.null_observations):
                continue

            logger.info('built observation {}'.format(observation))
            observations.append(observation)
            null_observation = observation.rawname in self.null_observations

            ddec = orbit.ddec + abs(orbit.coordinate.dec - ssois_coordinate.dec)
            dra = orbit.dra + abs(orbit.coordinate.ra - ssois_coordinate.ra)

            logger.info(" Building SourceReading .... \n")
            source_reading = astrom.SourceReading(x=x, y=y, x0=x, y0=y,
                                                  ra=orbit.coordinate.ra.to(units.degree).value,
                                                  dec=orbit.coordinate.dec.to(units.degree).value,
                                                  xref=x, yref=y, obs=observation,
                                                  ssos=True, from_input_file=from_input_file,
                                                  dx=dra, dy=ddec, pa=orbit.pa,
                                                  null_observation=null_observation)
            source_reading.mpc_observation = mpc_observation
            source_readings.append(source_reading)
            logger.info("Source Reading Built")

        # build our array of SourceReading objects
        sources.append(source_readings)

        warnings.filterwarnings('once')

        return SSOSData(observations, sources, self.provisional_name)
Example #54
def compare_heiles_2003(magmo_gas):
    print("## Comparing with Heiles & Troland 2003 ##")

    # Read in ht03 data
    rdr = ascii.get_reader(Reader=ascii.Csv)
    ht03_table = rdr.read('../Millennium_data.csv')
    # filter for just the CNM data |b| < 10
    cnm = np.array(ht03_table['CNM'])
    sample = ht03_table[cnm >= '0']
    abs_lat = np.absolute(np.array(sample['GLAT']))
    ht03_low_cnm = sample[abs_lat <= 10]
    spin_temp = np.array(ht03_low_cnm['Ts'])
    print("Sample had {} values, mean {:0.3f}, median {:0.3f}, sd {:0.3f}".format(len(spin_temp),
                                                                                  np.mean(spin_temp),
                                                                                  np.median(spin_temp),
                                                                                  np.std(spin_temp)))

    votable = from_table(ht03_low_cnm)
    writeto(votable, 'millenium_spin.vot')

    # comparative histogram of the two CNM spin temp sets
    fig = plt.figure(figsize=(7.5, 3))
    gs = matplotlib.gridspec.GridSpec(1, 2)
    gas_array = magmo_gas

    # Spin Temperature
    ax1 = fig.add_subplot(gs[0, 0])
    sample = np.ma.array(gas_array['temp_spin']).compressed()
    bins = np.linspace(0,450,19)
    hist, edges = build_hist_fraction(sample, bins, 450)
    ax1.step(edges, hist)

    ht03_sample = np.array(ht03_low_cnm['Ts'])
    hist, edges = build_hist_fraction(ht03_sample, bins, 450)
    ax1.step(edges, hist, color='black', ls='--')

    ax1.set_xlabel('Spin Temperature (K)')
    ax1.set_ylabel('Fraction of components')

    statistic, p_value = stats.ks_2samp(np.ma.filled(sample), np.ma.filled(ht03_sample))
    print('Spin temp population similarity p_value={}'.format(p_value))

    # Column Density
    ax2 = fig.add_subplot(gs[0, 1])
    sample = np.ma.array(gas_array['column_density']).compressed()
    sample = np.log10(sample)
    bins = np.linspace(19, 24, 21)
    hist, edges = build_hist_fraction(sample, bins, 24)

    sample = np.array(ht03_low_cnm['NHI']) * 1E20
    sample = np.log10(sample[sample > 0])
    ht03_hist, edges = build_hist_fraction(sample, bins, 24)

    ax2.step(edges, hist) #, width=edges[1]-edges[0])
    ax2.step(edges, ht03_hist, color='black', ls=':') # , width=edges[1]-edges[0]
    label = 'Column Density $\\log_{10}(N_{H}$) (cm$^{-2}$)'
    ax2.set_xlabel(label)
    ax2.set_ylabel('Fraction of components')

    statistic, p_value = stats.ks_2samp(np.ma.filled(gas_array['column_density']), np.ma.filled(np.array(ht03_low_cnm['NHI']) * 1E20))
    print('Column density population similarity p_value={}'.format(p_value))

    gs.update(wspace=0.5, hspace=0.5)
    filename = 'magmo-heiles_2003_comp.pdf'
    plt.savefig(filename, bbox_inches="tight")
    plt.close()

    return
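build_hist_fraction is an undefined helper in this snippet. A hedged sketch of what it appears to do, given that its output is passed straight to ax.step alongside the bin edges: clip to a cap, histogram, normalise to fractions, and pad to match len(edges).

def build_hist_fraction(sample, bins, cap):
    clipped = np.clip(sample, bins[0], cap)        # fold overflow into the last bin
    hist, edges = np.histogram(clipped, bins=bins)
    hist = hist / len(sample)                      # fraction of components per bin
    return np.append(hist, 0.0), edges             # pad so len(hist) == len(edges)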
Example #55
def dat2hdf5(table_dir):
    """
    Convert the Marshall et al. (2006) map from \*.dat.gz to \*.hdf5.
    """

    import astropy.io.ascii as ascii
    import gzip
    from contextlib import closing

    readme_fname = os.path.join(table_dir, 'ReadMe')
    table_fname = os.path.join(table_dir, 'table1.dat.gz')
    h5_fname = os.path.join(table_dir, 'marshall.h5')

    # Extract the gzipped table
    with gzip.open(table_fname, 'rb') as f:
        # Read in the table using astropy's CDS table reader
        r = ascii.get_reader(ascii.Cds, readme=readme_fname)
        r.data.table_name = 'table1.dat' # Hack to deal with bug in CDS reader.
        table = r.read(f)
        print(table)

    # Reorder table entries according to Galactic (l, b)
    l = coordinates.Longitude(
        table['GLON'][:],
        wrap_angle=180.*units.deg)
    b = table['GLAT'][:]

    sort_idx = np.lexsort((b, l))

    l = l[sort_idx].astype('f4')
    b = b[sort_idx].astype('f4')
    l.shape = (801, 81)
    b.shape = (801, 81)

    # Extract arrays from the table
    chi2_all = np.reshape((table['x2all'][sort_idx]).astype('f4'), (801,81))
    chi2_giants = np.reshape((table['x2gts'][sort_idx]).astype('f4'), (801,81))

    A = np.empty((801*81,33), dtype='f4')
    sigma_A = np.empty((801*81,33), dtype='f4')
    dist = np.empty((801*81,33), dtype='f4')
    sigma_dist = np.empty((801*81,33), dtype='f4')

    for k in range(33):
        A[:,k] = table['ext{:d}'.format(k+1)][sort_idx]
        sigma_A[:,k] = table['e_ext{:d}'.format(k+1)][sort_idx]
        dist[:,k] = table['r{:d}'.format(k+1)][sort_idx]
        sigma_dist[:,k] = table['e_r{:d}'.format(k+1)][sort_idx]

    A.shape = (801,81,33)
    sigma_A.shape = (801,81,33)
    dist.shape = (801,81,33)
    sigma_dist.shape = (801,81,33)

    # Construct the HDF5 file
    h5_fname = os.path.join(table_dir, 'marshall.h5')
    filter_kwargs = dict(
        chunks=True,
        compression='gzip',
        compression_opts=3,
        # scaleoffset=4
    )

    with h5py.File(h5_fname, 'w') as f:
        dset = f.create_dataset('A', data=A, **filter_kwargs)
        dset.attrs['description'] = 'Extinction of each bin'
        dset.attrs['band'] = 'Ks (2MASS)'
        dset.attrs['units'] = 'mag'

        dset = f.create_dataset('sigma_A', data=sigma_A, **filter_kwargs)
        dset.attrs['description'] = 'Extinction uncertainty of each bin'
        dset.attrs['band'] = 'Ks (2MASS)'
        dset.attrs['units'] = 'mag'

        dset = f.create_dataset('dist', data=dist, **filter_kwargs)
        dset.attrs['description'] = 'Distance of each bin'
        dset.attrs['units'] = 'kpc'

        dset = f.create_dataset('sigma_dist', data=sigma_dist, **filter_kwargs)
        dset.attrs['description'] = 'Distance uncertainty of each bin'
        dset.attrs['units'] = 'kpc'

        dset = f.create_dataset('chi2_all', data=chi2_all, **filter_kwargs)
        dset.attrs['description'] = 'Chi^2, based on all the stars'
        dset.attrs['units'] = 'unitless'

        dset = f.create_dataset('chi2_giants', data=chi2_giants, **filter_kwargs)
        dset.attrs['description'] = 'Chi^2, based on giants only'
        dset.attrs['units'] = 'unitless'

        # filter_kwargs.pop('scaleoffset')

        dset = f.create_dataset('l', data=l, **filter_kwargs)
        dset.attrs['description'] = 'Galactic longitude'
        dset.attrs['units'] = 'deg'

        dset = f.create_dataset('b', data=b, **filter_kwargs)
        dset.attrs['description'] = 'Galactic latitude'
        dset.attrs['units'] = 'deg'
Example #56
def read_table2(readme, data):
    reader = ascii.get_reader(Reader=ascii.Cds, readme=readme)
    reader.outputter = ascii.TableOutputter()
    return reader.read(data)
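Note that TableOutputter is already the default outputter for astropy's ASCII readers, so the explicit assignment above is redundant but harmless. Hypothetical usage with a CDS-style ReadMe/data pair (paths illustrative):

# tab = read_table2('data/ReadMe', 'data/table1.dat')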
Example #57
    def parse(self, ssos_result_filename_or_lines):
        """
        given the result table create 'source' objects.

        :param ssos_result_filename_or_lines:
        """


        table_reader = ascii.get_reader(Reader=ascii.Basic)
        table_reader.inconsistent_handler = self._skip_missing_data
        table_reader.header.splitter.delimiter = '\t'
        table_reader.data.splitter.delimiter = '\t'
        table = table_reader.read(ssos_result_filename_or_lines)

        sources = []
        observations = []
        source_readings = []

        warnings.filterwarnings('ignore')
        ref_x = None
        ref_y = None
        ref_mjd = None
        ref_expnum = None
        ref_ccd = None
        for row in table:
            # check if a dbimages object exists
            ccd = int(row['Ext']) - 1
            expnum = row['Image'].rstrip('p')
            X = row['X']
            Y = row['Y']

            # ADDING THIS TEMPORARILY TO GET THE NON-OSSOS DATA OUT OF THE WAY WHILE DEBUGGING
            if (row['Telescope_Insturment'] != 'CFHT/MegaCam') or (row['Filter'] != 'r.MP9601'):
                continue

            # Build astrom.SourceReading
            observation = self.build_source_reading(expnum, ccd, X, Y)
            if observation is None:
                continue
            observations.append(observation)

            from_input_file = observation.rawname in self.input_rawnames
            null_observation = observation.rawname in self.null_observations
            mjd = Time(observation.header['MJD_OBS_CENTER'], 
                       format='mpc', 
                       scale='utc').jd
            if ref_x is None or mjd - ref_mjd > 0.5:
                ref_x = X
                ref_y = Y
                ref_expnum = expnum
                ref_ccd = ccd
                ref_mjd = mjd
                x0 = X
                y0 = Y
            else:
                (x0, y0) = self.get_coord_offset(expnum, 
                                                 ccd, 
                                                 X, 
                                                 Y, 
                                                 ref_expnum, 
                                                 ref_ccd)

            # Also reset the reference point if the x/y shift is large.
            if x0 - X > 250 or y0 - Y > 250:
                ref_x = X
                ref_y = Y
                ref_expnum = expnum
                ref_ccd = ccd
                ref_mjd = mjd
                x0 = X
                y0 = Y

            source_reading = astrom.SourceReading(
                x=row['X'], y=row['Y'],
                xref=ref_x, yref=ref_y,
                x0=x0, y0=y0,
                ra=row['Object_RA'], dec=row['Object_Dec'],
                obs=observation,
                ssos=True,
                from_input_file=from_input_file,
                null_observation=null_observation)

            source_readings.append(source_reading)

        # build our array of SourceReading objects
        sources.append(source_readings)

        warnings.filterwarnings('once')

        return SSOSData(observations, sources, self.provisional_name)
Example #58
File: ssos.py Project: drusk/MOP
    def parse(self, ssos_result_filename_or_lines):
        """
        given the result table create 'source' objects.

        :type ssos_result_table: Table
        :param ssos_result_table:
        """
        table_reader = ascii.get_reader(Reader=ascii.Basic)
        table_reader.inconsistent_handler = self._skip_missing_data
        table_reader.header.splitter.delimiter = '\t'
        table_reader.data.splitter.delimiter = '\t'
        table = table_reader.read(ssos_result_filename_or_lines)

        sources = []
        observations = []
        source_readings = []

        ref_pvwcs = None
        downloader = Downloader()
        warnings.filterwarnings('ignore')

        for row in table:
            # check if a dbimages object exists
            ccd = int(row['Ext']) - 1
            expnum = row['Image'].rstrip('p')

            # ADDING THIS TEMPORARILY TO GET THE NON-OSSOS DATA OUT OF THE WAY WHILE DEBUGGING
            if (row['Telescope_Insturment'] != 'CFHT/MegaCam') or (row['Filter'] != 'r.MP9601'):
                continue

            # it's fine for OSSOS, go get the image
            image_uri = storage.dbimages_uri(expnum=expnum,
                                             ccd=None,
                                             version='p',
                                             ext='.fits',
                                             subdir=None)
            logger.info('Trying to access %s\n%s' % (row.data, image_uri))

            if not storage.exists(image_uri, force=False):
                logger.warning('Image not in dbimages? Trying subdir.')
                image_uri = storage.dbimages_uri(expnum=expnum,
                                                 ccd=ccd,
                                                 version='p')

                if not storage.exists(image_uri, force=False):
                    logger.warning("Image doesn't exist in ccd subdir. %s" % image_uri)
                    continue

            if row['X'] == -9999 or row['Y'] == -9999:
                logger.warning("Skipping %s as x/y not resolved." % row['Image'])
                continue

            mopheader_uri = storage.dbimages_uri(expnum=expnum,
                                                 ccd=ccd,
                                                 version='p',
                                                 ext='.mopheader')

            if mopheader_uri not in mopheaders:
                if not storage.exists(mopheader_uri, force=False):
                    logger.warning('mopheader missing, but images exists')
                    continue

                # raise flag if no MOPHEADER
                mopheader_fpt = cStringIO.StringIO(storage.open_vos_or_local(mopheader_uri).read())
                mopheader = fits.open(mopheader_fpt)
                mopheaders[mopheader_uri] = mopheader
            mopheader = mopheaders[mopheader_uri]
            
            # Build astrom.Observation
            observation = astrom.Observation(expnum=str(expnum),
                                             ftype='p',
                                             ccdnum=str(ccd),
                                             fk="")

            observation.rawname = os.path.splitext(os.path.basename(image_uri))[0]+str(ccd).zfill(2)

            observation.header = mopheader[0].header
            MJD_OBS_CENTER = mpc.Time(observation.header['MJD-OBSC'],
                                      format='mjd',
                                      scale='utc', precision=5).replicate(format='mpc')
            observation.header['MJD_OBS_CENTER'] = str(MJD_OBS_CENTER)
            observation.header['MAXCOUNT'] = MAXCOUNT
            observation.header['SCALE'] = observation.header['PIXSCALE']
            #observation.header['CHIP'] = str(observation.header['CHIPNUM']).zfill(2)
            observation.header['NAX1'] = observation.header['NAXIS1']
            observation.header['NAX2'] = observation.header['NAXIS2']
            observation.header['MOPversion'] = observation.header['MOP_VER']
            observation.header['FWHM'] = 4

            # Download a single-pixel cutout of this image to compute offsets with.
            x_cen = int(min(max(1, row['X']), observation.header['NAX1']))
            y_cen = int(min(max(1, row['Y']), observation.header['NAX2']))
            if image_uri not in astheaders:
                hdulist = downloader.download_hdulist(
                    uri=image_uri,
                    view='cutout',
                    cutout='[{}][{}:{},{}:{}]'.format(ccd + 1, x_cen, x_cen, y_cen, y_cen))
                astheaders[image_uri] = hdulist
            hdulist = astheaders[image_uri]

            pvwcs = wcs.WCS(hdulist[0].header)
            (ra, dec) = pvwcs.xy2sky(x_cen, y_cen)
            if ref_pvwcs is None:
                ref_pvwcs = pvwcs
                xref = row['X']
                yref = row['Y']
            (x0, y0) = ref_pvwcs.sky2xy(ra, dec)
            x0 += row['X'] - x_cen
            y0 += row['Y'] - y_cen

            # Build astrom.SourceReading
            observations.append(observation)

            from_input_file = observation.rawname in self.input_rawnames
            null_observation = observation.rawname in self.null_observations

            print(observation.rawname, observation.header['MJD_OBS_CENTER'], null_observation, from_input_file)

            source_reading = astrom.SourceReading(x=row['X'], y=row['Y'],
                                                  xref=xref, yref=yref,
                                                  x0=x0, y0=y0,
                                                  ra=row['Object_RA'], dec=row['Object_Dec'],
                                                  obs=observation,
                                                  ssos=True,
                                                  from_input_file=from_input_file,
                                                  null_observation=null_observation)
            #if observation.rawname in  self.input_rawnames:
            #    source_readings.insert(0, source_reading)
            #else:
            source_readings.append(source_reading)
        # build our array of SourceReading objects
        sources.append(source_readings)

        warnings.filterwarnings('once')

        return SSOSData(observations, sources, self.provisional_name)