Example #1
    def __init__(self, name, od, bn, ds=None, pol=None, polp=None,
                 choice='directionless', s_i=0., s_v=0., cri=None, **kwargs):
        self.nm = name
        self.od = od
        self.bn = bn
        self.ch = choice
        self.cri = cri
        self.__dict__.update(kwargs)

        # default ds and po to size-0 arrays when no file is given
        if ds:
            with get_readable_fileobj(ds, cache=True) as f:
                self.fitsfile = fits.open(f)
                self.ds       = self.fitsfile[0].data
                self.dshd     = self.fitsfile[0].header
        else:
            self.ds = np.zeros([0])
        self.ds[self.ds == 0] = np.nan  # blank zero-valued (irregular) points

        if pol:
            self.pol = pol  # FITS file with the polarization data
            with get_readable_fileobj(pol, cache=True) as e:
                self.fitsfile = fits.open(e)
                self.po       = self.fitsfile[0].data
            hdd = fits.open(pol)
            self.header = hdd[0].header
        else:
            self.po = np.zeros([0])
        self.po[self.po == 0] = np.nan

        if ds:
            # intensity rms: the smaller of the two edge-channel rms values
            self.s_i = min(np.sqrt(np.nanmean(self.ds[0]**2)),
                           np.sqrt(np.nanmean(self.ds[-1]**2)))
            self.s_v = self.dshd["cdelt3"] / 1000.     # channel width, in km/s
            self.ds[self.ds < 2. * self.s_i] = np.nan  # blank low-S/N points
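The pattern above (fetch or cache with get_readable_fileobj, then hand the open file to fits.open) recurs throughout these examples. A minimal standalone sketch; the function name and the zero-blanking step are illustrative, not part of the original class:

import numpy as np
from astropy.io import fits
from astropy.utils.data import get_readable_fileobj

def load_primary_hdu(path_or_url):
    # works for local paths and remote URLs; cache=True keeps downloads
    with get_readable_fileobj(path_or_url, cache=True) as f:
        hdulist = fits.open(f, memmap=False)   # read fully before f closes
        data = hdulist[0].data
        header = hdulist[0].header
    data = np.asarray(data, dtype=float)
    data[data == 0] = np.nan   # blank exact zeros, as Example 1 does
    return data, header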
Example #2
def getObjectByCoord(ra, dec, service="ned"):

    if service not in ["simbad", "ned"]:
        raise ValueError("service must be one of 'ned' or 'simbad'")

    data = {}
    if service == "ned":
        try:
            with get_readable_fileobj(NED_COORD_QUERY % (ra, dec, "xml_main")) as f:
                table = Table.read(f, format="votable")
        except Exception:
            return None
        data["RA"] = float(table["main_col3"][0])
        data["DEC"] = float(table["main_col4"][0])
        data["objtype"] = table["main_col5"][0]
        data["descr"] = "Results of NED query"
        data["name"] = table["main_col2"][0]
        data["Mv"] = table["main_col9"][0]
        data["rating"] = 0

        with get_readable_fileobj(NED_NAME_QUERY % (urllib.quote(data["name"]), "xml_basic")) as f:
            table = Table.read(f, format="votable")
        try:
            data["size"] = "%.1f arc-min" % (float(table["basic_col10"]))
        except Exception:
            data["size"] = "..."
    elif service == "simbad":
        coord = "query coo %f %f radius=1m" % (ra, dec)
        try:
            with get_readable_fileobj(SIMBAD_URL + urllib.quote(SIMBAD_QUERY % coord)) as f:
                table = Table.read(f, format="votable")
        except Exception:
            return None
        data["RA"] = table["RA_d_ICRS_2000_0_2000_0"][0]
        data["DEC"] = table["DEC_d_ICRS_2000_0_2000_0"][0]
        data["objtype"] = table["OTYPE_S"][0] or "--"
        data["objtype"] = table["OTYPE_S"][0] or "--"
        data["Mv"] = str((table["FLUX_V"][0] or "--"))
        data["descr"] = "Results from SIMBAD query"
        data["name"] = table["MAIN_ID"][0]
        data["rating"] = 0
        if table["GALDIM_MAJAXIS"][0]:
            data["size"] = "%.1f arc-min" % table["GALDIM_MAJAXIS"][0]
        else:
            data["size"] = "--"
    data["distance"] = -1.0
    data["dark"] = False
    data["comments"] = "None"
    return data
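Both branches share one shape: build a query URL, open it with get_readable_fileobj, parse the payload as a VOTable. A stripped-down sketch with a hypothetical endpoint:

from astropy.table import Table
from astropy.utils.data import get_readable_fileobj

QUERY_URL = "https://example.org/objsearch?ra=%f&dec=%f&of=xml_main"  # hypothetical

def query_votable(ra, dec):
    try:
        with get_readable_fileobj(QUERY_URL % (ra, dec)) as f:
            return Table.read(f, format="votable")
    except Exception:
        return None   # mirror the example: swallow failures, return None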
Example #3
def read_remote_spec(filename, encoding='binary', cache=True,
                     show_progress=True, **kwargs):
    """Read FITS or ASCII spectrum from a remote location.

    Parameters
    ----------
    filename : str
        Spectrum filename.

    encoding, cache, show_progress
        See :func:`~astropy.utils.data.get_readable_fileobj`.

    kwargs : dict
        Keywords acceptable by :func:`read_fits_spec` (if FITS) or
        :func:`read_ascii_spec` (if ASCII).

    Returns
    -------
    header : dict
        Metadata.

    wavelengths, fluxes : `~astropy.units.quantity.Quantity`
        Wavelength and flux of the spectrum.

    """
    with get_readable_fileobj(filename, encoding=encoding, cache=cache,
                              show_progress=show_progress) as fd:
        header, wavelengths, fluxes = read_spec(fd, fname=filename, **kwargs)

    return header, wavelengths, fluxes
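A hypothetical call (URL illustrative); extra keywords would be forwarded to read_fits_spec or read_ascii_spec depending on the file type:

header, wavelengths, fluxes = read_remote_spec(
    'https://example.org/spectra/alpha_lyr.fits')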
Example #4
def get_remote_catalog_db(dbname, cache=True):
    """
    Get a database of VO services (which is a JSON file) from a remote
    location.

    Parameters
    ----------
    dbname : str
        Prefix of JSON file to download from `astropy.vo.client.vos_baseurl`.

    cache : bool
        Use caching for VO Service database. Access to actual VO
        websites referenced by the database still needs internet
        connection.

    Returns
    -------
    obj : `VOSDatabase` object

    """
    with get_readable_fileobj(BASEURL() + dbname + '.json',
                              encoding='utf8', cache=cache) as fd:
        tree = json.load(fd)

    return VOSDatabase(tree)
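Usage is then a one-liner; 'conesearch_good' follows the astropy.vo naming convention and is shown here as an assumed example:

db = get_remote_catalog_db('conesearch_good')   # fetches conesearch_good.json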
Example #5
    def get_spectra_async(self, matches, plate=None, fiberID=None, mjd=None):
        """
        Download spectrum from SDSS.

        Parameters
        ----------
        matches : astropy.table.Table instance (result of query_region).

        Returns
        -------
        A list of context-managers that yield readable file-like objects
        """

        if not isinstance(matches, Table):
            raise ValueError("matches must be an astropy.table.Table "
                             "(the result of query_region)")

        results = []
        for row in matches:
            plate = str(row['plate']).zfill(4)
            fiber = str(row['fiberID']).zfill(3)
            mjd = str(row['mjd'])
            link = '%s/%s/1d/spSpec-%s-%s-%s.fit' % (SDSS.SPECTRO_1D, plate,
                                                     mjd, plate, fiber)

            results.append(aud.get_readable_fileobj(link))

        return results
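Note that the entries returned are unopened context managers rather than file handles; the caller triggers each download by entering them, e.g.:

spectra = SDSS.get_spectra_async(matches)
for cm in spectra:
    with cm as f:   # the download happens on __enter__
        raw = f.read()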
Example #6
def getfile(url,fn=None):
    if fn is None:
        fn = os.path.split(url)[-1]
    if not os.path.exists(fn):
        with aud.get_readable_fileobj(url) as f:
            with open(fn,'w') as of:
                of.write(f.read())
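As written, the download is decoded as text (the default encoding=None returns str) and rewritten with open(fn, 'w'), which can corrupt binary payloads such as FITS. A binary-safe variant sketch:

import os
from astropy.utils import data as aud

def getfile_binary(url, fn=None):
    if fn is None:
        fn = os.path.split(url)[-1]
    if not os.path.exists(fn):
        # encoding='binary' yields bytes; 'wb' writes them verbatim
        with aud.get_readable_fileobj(url, encoding='binary') as f:
            with open(fn, 'wb') as of:
                of.write(f.read())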
Example #7
def test_compressed_stream():
    import base64

    gzipped_data = (b"H4sICIxwG1AAA2xvY2FsLmRhdAALycgsVkjLzElVANKlxakpCpl5CiUZqQ"
                    b"olqcUl8Tn5yYk58SmJJYnxWmCRzLx0hbTSvOSSzPy8Yi5nf78QV78QLgAlLytnRQAAAA==")
    gzipped_data = base64.b64decode(gzipped_data)
    assert isinstance(gzipped_data, bytes)

    class FakeStream:
        """
        A fake stream that has `read`, but no `seek`.
        """

        def __init__(self, data):
            self.data = data

        def read(self, nbytes=None):
            if nbytes is None:
                result = self.data
                self.data = b''
            else:
                result = self.data[:nbytes]
                self.data = self.data[nbytes:]
            return result

    stream = FakeStream(gzipped_data)
    with get_readable_fileobj(stream, encoding='binary') as f:
        f.readline()
        assert f.read().rstrip() == b'CONTENT'
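The base64 blob is just gzipped text; a fixture like it can be rebuilt along these lines (the exact first line of the original payload is assumed; the test only checks the trailing 'CONTENT'):

import base64
import gzip
import io

buf = io.BytesIO()
with gzip.GzipFile(fileobj=buf, mode='wb', filename='local.dat') as gz:
    gz.write(b'some first line, skipped by readline()\nCONTENT\n')
print(base64.b64encode(buf.getvalue()).decode('ascii'))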
Example #8
    def parse(cls, filename, date=None, _parser=_LCHParser, **kwargs):
        """Parse a closure file or stream into regions.

        Closure files contain starlist lines followed by listings
        of opening blocks when laser propagation is permitted. This
        object parses these into individual :class:`Region` objects
        which contain a list of :class:`Opening` periods.

        Parameters
        ----------
        filename : str or fileobj
            Filename or readable file object for the parser.
        date : :class:`~astropy.time.Time`
            String, or Time value which will be used to set the
            date for the closure file.

        Returns
        -------
        regions : :class:`Regions`
            The parsed regions.

        """
        date = Time.now() if date is None else date
        with get_readable_fileobj(filename) as stream:
            parser = _parser(stream.name, date, regioncls=cls)
            return parser(stream, **kwargs)
Example #9
    def _parse_result(self, response, verbose=False, retrieve_file=True):
        """
        retrieve_file : bool
            If True, will try to retrieve the file every 30s until it shows up.
            Otherwise, just returns the filename (the job is still executed on
            the remote server, though)
        """

        if verbose:
            print("Loading request from Besancon server ...")

        # keep the text stored for possible later use
        with aud.get_readable_fileobj(response.raw) as f:
            text = f.read()
        try:
            filename = self.result_re.search(text).group()
        except AttributeError:  # if there are no matches
            errors = parse_errors(text)
            raise ValueError("Errors: "+"\n".join(errors))

        if verbose:
            print("File is %s and can be aquired from %s" % (filename, self.url_download+'/'+filename))

        if retrieve_file:
            return self.get_besancon_model_file(filename)
        else:
            return filename
Example #10
def _convert_to_fd_or_read_function(fd):
    """
    Returns a function suitable for streaming input, or a file object.

    This function is only useful if passing off to C code where:

       - If it's a real file object, we want to use it as a real
         C file object to avoid the Python overhead.

       - If it's not a real file object, it's much handier to just
         have a Python function to call.

    This is somewhat quirky behavior, of course, which is why it is
    private.  For a more useful version of similar behavior, see
    `astropy.utils.data.get_readable_fileobj`.

    Parameters
    ----------
    fd : object
        May be:

            - a file object.  If the file is uncompressed, this raw
              file object is returned verbatim.  Otherwise, the read
              method is returned.

            - a function that reads from a stream, in which case it is
              returned verbatim.

            - a file path, in which case it is opened.  Again, like a
              file object, if it's uncompressed, a raw file object is
              returned, otherwise its read method.

            - an object with a :meth:`read` method, in which case that
              method is returned.

    Returns
    -------
    fd : context-dependent
        See above.
    """
    if is_callable(fd):
        yield fd
        return

    from astropy.utils.data import get_readable_fileobj

    with get_readable_fileobj(fd, encoding='binary') as new_fd:
        if sys.platform.startswith('win'):
            yield new_fd.read
        else:
            if IS_PY3K:
                if isinstance(new_fd, io.FileIO):
                    yield new_fd
                else:
                    yield new_fd.read
            else:
                if isinstance(new_fd, file):
                    yield new_fd
                else:
                    yield new_fd.read
Example #11
    def get_extinction_table_async(
            self, coordinate, radius=None, timeout=TIMEOUT):
        """
        A query function similar to `astroquery.irsa_dust.IrsaDust.get_extinction_table`
        but returns a file handle to the remote file rather than downloading it.
        Useful for asynchronous queries, so that the actual download may be performed later.

        Parameters
        ----------
        coordinate : str
            Can be either the name of an object or a coordinate string.
            If a name, it must be resolvable by NED, SIMBAD, 2MASS, or SWAS.
            Examples of acceptable coordinate strings can be found here:
            http://irsa.ipac.caltech.edu/applications/DUST/docs/coordinate.html
        radius : str, optional
            The size of the region to include in the dust query, in radians, degrees
            or hours, in any format accepted by `astropy.coordinates.Angle`. Defaults
            to 5 degrees.
        timeout : int, optional
            Time limit for establishing successful connection with remote server.
            Defaults to `astroquery.irsa_dust.IrsaDust.TIMEOUT`

        Returns
        -------
        A context manager that yields a file like readable object
        """
        url = IrsaDust.DUST_SERVICE_URL
        request_payload = self._args_to_payload(coordinate, radius=radius)
        response = commons.send_request(url, request_payload, timeout)
        xml_tree = utils.xml(response.text)
        result = SingleDustResult(xml_tree, coordinate)
        return aud.get_readable_fileobj(result.ext_detail_table())
Example #12
File: losc.py Project: stefco/gwpy
def fetch_json(url, verbose=False):
    """Fetch JSON data from a remote URL

    Parameters
    ----------
    url : `str`
        the remote URL to fetch

    verbose : `bool`, optional
        display verbose download progress, default: `False`

    Returns
    -------
    json : `object`
        the data fetched from ``url`` as parsed by :func:`json.loads`

    See also
    --------
    json.loads
        for details of the JSON parsing

    Examples
    --------
    >>> from gwpy.io.losc import fetch_json
    >>> fetch_json('https://losc.ligo.org/archive/1126257414/1126261510/json/')
    """
    with get_readable_fileobj(url, show_progress=verbose) as response:
        data = response.read()
        try:
            return json.loads(data)
        except ValueError as exc:
            exc.args = ("Failed to parse LOSC JSON from %r: %s"
                        % (url, str(exc)),)
            raise
Example #13
    def search(cls, gravityspy_id, howmany=10,
               era='ALL', ifos='H1L1', remote_timeout=20):
        """perform restful API version of search available here:
        https://gravityspytools.ciera.northwestern.edu/search/

        Parameters
        ----------
        gravityspy_id : `str`
            This is the unique 10 character hash that identifies
            a Gravity Spy Image

        howmany : `int`, optional, default: 10
            number of similar images you would like

        Returns
        -------
        `GravitySpyTable` containing similar events based on
        an evaluation of the Euclidean distance of the input image
        to all other images in some Feature Space
        """
        from astropy.utils.data import get_readable_fileobj
        import json
        from six.moves.urllib.error import HTTPError
        from six.moves import urllib

        # Need to build the url call for the restful API
        base = 'https://gravityspytools.ciera.northwestern.edu' + \
            '/search/similarity_search_restful_API'

        map_era_to_url = {
            'ALL': "event_time BETWEEN 1126400000 AND 1229176818",
            'O1': "event_time BETWEEN 1126400000 AND 1137250000",
            'ER10': "event_time BETWEEN 1161907217 AND 1164499217",
            'O2a': "event_time BETWEEN 1164499217 AND 1219276818",
            'ER13': "event_time BETWEEN 1228838418 AND 1229176818",
        }

        parts = {
            'howmany': howmany,
            'imageid': gravityspy_id,
            'era': map_era_to_url[era],
            'ifo': "{}".format(", ".join(
                map(repr, [ifos[i:i+2] for i in range(0, len(ifos), 2)]),
             )),
            'database': 'updated_similarity_index_v2d0',
        }

        search = urllib.parse.urlencode(parts)

        url = '{}/?{}'.format(base, search)

        try:
            with get_readable_fileobj(url, remote_timeout=remote_timeout) as f:
                return GravitySpyTable(json.load(f))
        except HTTPError as exc:
            if exc.code == 500:
                exc.msg += ', confirm the gravityspy_id is valid'
            raise
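The ifos handling splits the concatenated detector string into two-character codes and quotes each one; in isolation:

ifos = 'H1L1'
pairs = [ifos[i:i + 2] for i in range(0, len(ifos), 2)]
print(", ".join(map(repr, pairs)))   # prints: 'H1', 'L1'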
Example #14
def parse_cs(ivoid, cap_index=1):
    """Return test query pars as dict for given IVO ID and capability index."""
    if isinstance(ivoid, bytes):  # pragma: py3
        ivoid = ivoid.decode('ascii')

    # Production server.
    url = ("http://vao.stsci.edu/regtap/tapservice.aspx/sync?lang=adql&"
           "query=select%20detail_xpath%2Cdetail_value%20from%20"
           "rr.res_detail%20where%20"
           "ivoid%3D%27{0}%27%20and%20cap_index={1}%20and%20"
           "detail_xpath%20in%20%28%27/capability/testQuery/ra%27%2C"
           "%27/capability/testQuery/dec%27%2C%27/capability/testQuery/sr%27"
           "%29".format(ivoid, cap_index))

    urls_failed = False
    default_sr = 0.1

    try:
        with get_readable_fileobj(url, encoding='binary',
                                  show_progress=False) as fd:
            t_query = Table.read(fd, format='votable')
    except Exception as e:  # pragma: no cover
        urls_failed = True
        urls_errmsg = '{0} raised {1}, using default'.format(
            url, str(e))

    if not urls_failed:
        try:
            xpath = t_query['detail_xpath']
            ra = float(
                t_query[xpath == b'/capability/testQuery/ra']['detail_value'])
            dec = float(
                t_query[xpath == b'/capability/testQuery/dec']['detail_value'])
            sr = float(
                t_query[xpath == b'/capability/testQuery/sr']['detail_value'])

            # Handle big SR returning too big a table for some queries, causing
            # tests to fail due to timeout.
            if sr > default_sr:
                warnings.warn(
                    'SR={0} is too large, using SR={1} for {2},{3}'.format(
                        sr, default_sr, ivoid, cap_index), AstropyUserWarning)
                sr = default_sr

            d = OrderedDict({'RA': ra, 'DEC': dec, 'SR': sr})

        except Exception as e:  # pragma: no cover
            urls_failed = True
            urls_errmsg = ('Failed to retrieve test query parameters for '
                           '{0},{1}, using default'.format(ivoid, cap_index))

    # If no test query found, use default
    if urls_failed:  # pragma: no cover
        d = OrderedDict({'RA': 0, 'DEC': 0, 'SR': default_sr})
        warnings.warn(urls_errmsg, AstropyUserWarning)

    return d
Example #15
    def test_web_profile(self):

        # Check some additional queries to the server

        with get_readable_fileobj('http://localhost:{0}/crossdomain.xml'.format(self.hub._web_port)) as f:
            assert f.read() == CROSS_DOMAIN

        with get_readable_fileobj('http://localhost:{0}/clientaccesspolicy.xml'.format(self.hub._web_port)) as f:
            assert f.read() == CLIENT_ACCESS_POLICY

        # Check headers

        req = Request('http://localhost:{0}/crossdomain.xml'.format(self.hub._web_port))
        req.add_header('Origin', 'test_web_profile')
        resp = urlopen(req)

        assert resp.getheader('Access-Control-Allow-Origin') == 'test_web_profile'
        assert resp.getheader('Access-Control-Allow-Headers') == 'Content-Type'
        assert resp.getheader('Access-Control-Allow-Credentials') == 'true'
Example #16
def get_besancon_model_file(filename, verbose=True, save=True, savename=None, overwrite=True):
    """
    Download a Besancon model from the website

    Parameters
    ----------
    filename : string
        The besancon filename, with format ##########.######.resu
    verbose : bool
        Print details about the download process
    save : bool
        Save the table after acquiring it?
    savename : None or string
        If not specified, defaults to the .resu table name
    overwrite : bool
        Overwrite the file if it exists?  Defaults to True because the .resu
        tables should have unique names by default, so there's little risk of
        accidentally overwriting important information
    """

    url = url_download+filename

    elapsed_time = 0
    t0 = time.time()

    sys.stdout.write("\n")
    while True:
        sys.stdout.write(u"\r")
        try:
            U = urllib2.urlopen(url,timeout=5)
            with aud.get_readable_fileobj(U, cache=True) as f:
                results = f.read()
            break
        except (urllib2.URLError, socket.timeout):
            sys.stdout.write(u"Waiting 30s for model to finish (elapsed wait time %is, total %is)\r" % (elapsed_time, time.time()-t0))
            time.sleep(30)
            elapsed_time += 30
            continue


    if save:
        if savename is None:
            savename = filename
        if not overwrite and os.path.exists(savename):
            raise IOError("File %s already exists." % savename)
        outf = open(savename,'w')
        print >>outf,results
        outf.close()

    return parse_besancon_model_string(results)
Example #17
def test_local_data_obj_invalid(bad_compressed):

    is_bz2 = bad_compressed.endswith('.bz2')
    is_xz = bad_compressed.endswith('.xz')

    # Note, since these invalid files are created on the fly in order to avoid
    # problems with detection by antivirus software
    # (see https://github.com/astropy/astropy/issues/6520), it is no longer
    # possible to use ``get_pkg_data_fileobj`` to read the files. Technically,
    # they're not local anymore: they just live in a temporary directory
    # created by pytest. However, we can still use get_readable_fileobj for the
    # test.
    if (not HAS_BZ2 and is_bz2) or (not HAS_XZ and is_xz):
        with pytest.raises(ValueError) as e:
            with get_readable_fileobj(bad_compressed, encoding='binary') as f:
                f.read()
        assert ' format files are not supported' in str(e)
    else:
        with get_readable_fileobj(bad_compressed, encoding='binary') as f:
            assert f.read().rstrip().endswith(b'invalid')
Example #18
    def get_images_async(self, coordinates, waveband='all', frame_type='stack',
                         image_width=1* u.arcmin, image_height=None, radius=None,
                         database='UKIDSSDR7PLUS', programme_id='all',
                         verbose=True, get_query_payload=False):
        """
        Serves the same purpose as :meth:`~astroquery.ukidss.core.Ukidss.get_images` but
        returns a list of file handlers to remote files

        Parameters
        ----------
        coordinates : str or `astropy.coordinates` object
            The target around which to search. It may be specified as a string
            in which case it is resolved using online services or as the appropriate
            `astropy.coordinates` object. ICRS coordinates may also be entered as strings
            as specified in the `astropy.coordinates` module.
        waveband : str
            The color filter to download. Must be one of ['all','J','H','K','H2','Z','Y','Br'].
        frame_type : str
            The type of image. Must be one of
            ['stack','normal','interleave','deep_stack','confidence','difference', 'leavstack', 'all']
        image_width : str or `astropy.units.Quantity` object, optional
            The image size (along X). Cannot exceed 15 arcmin. If missing, defaults to 1 arcmin.
        image_height : str or `astropy.units.Quantity` object, optional
            The image size (along Y). Cannot exceed 90 arcmin. If missing, same as image_width.
        radius : str or `astropy.units.Quantity` object, optional
            The string must be parsable by `astropy.coordinates.Angle`. The appropriate
            `Quantity` object from `astropy.units` may also be used. When missing, only the
            image around the given position is retrieved rather than multiframes.
        programme_id : str
            The survey or programme in which to search.
        database : str
            The UKIDSS database to use.
        verbose : bool
            Defaults to `True`. When `True` prints additional messages.
        get_query_payload : bool, optional
            if set to `True` then returns the dictionary sent as the HTTP request.
            Defaults to `False`

        Returns
        -------
        A list of context-managers that yield readable file-like objects
        """

        image_urls = self.get_image_list(coordinates, waveband=waveband, frame_type=frame_type,
                                        image_width=image_width, image_height=image_height,
                                        database=database, programme_id=programme_id,
                                        radius=radius, get_query_payload=get_query_payload)
        if get_query_payload:
            return image_urls

        if verbose:
            print("Found {num} targets".format(num=len(image_urls)))

        return [aud.get_readable_fileobj(U) for U in image_urls]
Example #19
def read(cls, *args, format=None, **kwargs):
    """
    Read in data.

    The arguments passed to this method depend on the format.
    """

    ctx = None
    try:
        if format is None:
            path = None
            fileobj = None

            if len(args):
                if isinstance(args[0], PATH_TYPES):
                    from astropy.utils.data import get_readable_fileobj
                    # path might be a pathlib.Path object
                    if isinstance(args[0], pathlib.Path):
                        args = (str(args[0]),) + args[1:]
                    path = args[0]
                    try:
                        ctx = get_readable_fileobj(args[0], encoding='binary')
                        fileobj = ctx.__enter__()
                    except OSError:
                        raise
                    except Exception:
                        fileobj = None
                    else:
                        args = [fileobj] + list(args[1:])
                elif hasattr(args[0], 'read'):
                    path = None
                    fileobj = args[0]

            format = _get_valid_format(
                'read', cls, path, fileobj, args, kwargs)

        reader = get_reader(format, cls)
        data = reader(*args, **kwargs)

        if not isinstance(data, cls):
            # User has read with a subclass where only the parent class is
            # registered.  This returns the parent class, so try coercing
            # to desired subclass.
            try:
                data = cls(data)
            except Exception:
                raise TypeError('could not convert reader output to {0} '
                                'class.'.format(cls.__name__))
    finally:
        if ctx is not None:
            ctx.__exit__(*sys.exc_info())

    return data
Example #20
    def __fet(self):
        from astropy.utils.data import get_readable_fileobj
        from astropy.io import fits

        with get_readable_fileobj(self.dcube_path, cache=True) as f:
            fitsfile = fits.open(f)
            gd       = fitsfile[0].data[0][0]
            dshd     = fitsfile[0].header

        with get_readable_fileobj(self.poldt_path, cache=True) as e:
            fitsfile = fits.open(e)
            po       = fitsfile[0].data[0][0] + 90. # to B-field
            pshd     = fitsfile[0].header

        tx = 'gd-pol'
        fig = Figure()
        self.fig_dict[tx] = fig
        self.fg_dict[tx] = [gd,po]
        self.mplfigs.addItem(tx)
        axf = fig.add_subplot(111)
        self.__quiver(gd,po,axf,tx)
Example #21
def read_lockfile(lockfilename):
    """
    Read in the lockfile given by ``lockfilename`` into a dictionary.
    """
    # lockfilename may be a local file or a remote URL, but
    # get_readable_fileobj takes care of this.
    lockfiledict = {}
    with get_readable_fileobj(lockfilename) as f:
        for line in f:
            line = line.strip()
            if line and not line.startswith("#"):
                kw, val = line.split("=", 1)  # tolerate '=' inside the value
                lockfiledict[kw.strip()] = val.strip()
    return lockfiledict
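A sketch of the input it accepts and how it is called (contents hypothetical):

# telescope.lock (hypothetical contents):
#   # camera settings
#   filter = r
#   exptime = 30
settings = read_lockfile('telescope.lock')
print(settings['exptime'])   # '30'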
Example #22
def _query_gator(options, debug=False):

    # Construct query URL
    url = GATOR_URL + "?" + \
          string.join(["%s=%s" % (x, urllib.quote_plus(str(options[x]))) for x in options], "&")
    if debug:
        print(url)

    # Request page
    req = urllib2.Request(url)
    response = urllib2.urlopen(req)
    with aud.get_readable_fileobj(response, cache=True) as f:
        result = f.read()

    # Check if results were returned
    if 'The catalog is not on the list' in result:
        raise Exception("Catalog not found")

    # Check that object name was not malformed
    if 'Either wrong or missing coordinate/object name' in result:
        raise Exception("Malformed coordinate/object name")

    # Check that the results are not of length zero
    if len(result) == 0:
        raise Exception("The IRSA server sent back an empty reply")

    # Write table to temporary file
    output = tempfile.NamedTemporaryFile()
    output.write(result)
    output.flush()

    # Read it in using the astropy VO table reader
    try:
        firsttable = votable.parse(output.name, pedantic=False).get_first_table()
        array = firsttable.array
    except Exception as ex:
        print("Failed to parse votable!  Returning output file instead.")
        print(ex)
        return open(output.name,'r')

    # Convert to astropy.table.Table instance
    table = array.to_table()

    # Check if table is empty
    if len(table) == 0:
        warnings.warn("Query returned no results, so the table will be empty")

    # Remove temporary file
    output.close()

    return table
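A Python 3 sketch of the same flow that keeps get_readable_fileobj and feeds the VOTable parser a BytesIO buffer instead of a temporary file (GATOR_URL is the constant assumed from the example above):

import io
import urllib.parse
from astropy.io import votable
from astropy.utils.data import get_readable_fileobj

def query_gator_py3(options):
    url = GATOR_URL + "?" + urllib.parse.urlencode(options)
    with get_readable_fileobj(url, encoding='binary') as f:
        content = f.read()
    first = votable.parse(io.BytesIO(content), pedantic=False).get_first_table()
    return first.to_table()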
Example #23
def _vo_service_request(url, pedantic, kwargs):
    if len(kwargs) and not (url.endswith('?') or url.endswith('&')):
        raise VOSError("url should already end with '?' or '&'")

    query = []
    for key, value in kwargs.iteritems():
        query.append('{}={}'.format(
            urllib.quote(key), urllib.quote_plus(str(value))))

    parsed_url = url + '&'.join(query)
    with get_readable_fileobj(parsed_url) as req:
        tab = table.parse(req, filename=parsed_url, pedantic=pedantic)

    return vo_tab_parse(tab, url, kwargs)
Example #24
    def get_besancon_model_file(self, filename, verbose=True, timeout=5.0):
        """
        Download a Besancon model from the website

        Parameters
        ----------
        filename : string
            The besancon filename, with format ##########.######.resu
        verbose : bool
            Print details about the download process
        timeout : float
            Amount of time to wait after pinging the server to see if a file is
            present.  Default 5s, which is probably reasonable.
        """

        url = self.url_download+"/"+filename

        elapsed_time = 0
        t0 = time.time()

        if verbose:
            sys.stdout.write("Awaiting Besancon file...\n")
        while True:
            if verbose:
                sys.stdout.write(u"\r")
                sys.stdout.flush()
            try:
                # U = requests.get(url,timeout=timeout,stream=True)
                # TODO: add timeout= keyword to get_readable_fileobj (when PR https://github.com/astropy/astropy/pull/1258 is merged)
                with aud.get_readable_fileobj(url, cache=True) as f:
                    results = f.read()
                break
            except (urllib2.URLError, socket.timeout):
                if verbose:
                    sys.stdout.write(u"Waiting %0.1fs for model to finish (elapsed wait time %0.1fs, total wait time %0.1fs)\r" % (self.ping_delay, elapsed_time, time.time()-t0))
                    sys.stdout.flush()
                time.sleep(self.ping_delay)
                elapsed_time += self.ping_delay
                continue

        return parse_besancon_model_string(results)
Example #25
def test_find_by_hash():

    from astropy.utils.data import clear_download_cache

    with get_readable_fileobj(TESTURL, encoding="binary", cache=True) as page:
        hash = hashlib.md5(page.read())

    hashstr = 'hash/' + hash.hexdigest()

    fnout = get_pkg_data_filename(hashstr)
    assert os.path.isfile(fnout)
    clear_download_cache(hashstr[5:])
    assert not os.path.isfile(fnout)

    lockdir = os.path.join(_get_download_cache_locs()[0], 'lock')
    assert not os.path.isdir(lockdir), 'Cache dir lock was not released!'
Example #26
    def __init__(self, dataset, center=[0., 0.], pa=0., length=10.,
                 vrange=None, rms=None, gray=True, level=[1., 10.]):
        self.ds  = dataset
        self.cr  = center
        self.pa  = pa
        self.ln  = length
        self.vr  = vrange
        self.rms = rms
        self.gs  = gray
        self.lv  = level

        with get_readable_fileobj(dataset, cache=True) as f:
            self.fitsfile = fits.open(f)
            self.data     = self.fitsfile[0].data
            self.header   = self.fitsfile[0].header
        w = wcs.WCS(self.header)
Example #27
    def get_images_async(self, coordinates, radius=0.25 * u.arcmin, max_rms=10000,
                         band="all", get_uvfits=False, verbose=True, get_query_payload=False):
        """
        Serves the same purpose as :meth:`~astroquery.nvas.core.Nvas.get_images` but
        returns a list of file handlers to remote files

        Parameters
        ----------
        coordinates : str or `astropy.coordinates` object
            The target around which to search. It may be specified as a string
            in which case it is resolved using online services or as the appropriate
            `astropy.coordinates` object. ICRS coordinates may also be entered as strings
            as specified in the `astropy.coordinates` module.
        radius : str or `astropy.units.Quantity` object, optional
            The string must be parsable by `astropy.coordinates.Angle`. The appropriate
            `Quantity` object from `astropy.units` may also be used. Defaults to 0.25 arcmin.
        max_rms : float, optional
            Maximum allowable noise level in the image (mJy). Defaults to 10000 mJy.
        band : str, optional
            The band of the image to fetch. Valid bands must be from
            ["all","L","C","X","U","K","Q"]. Defaults to 'all'
        get_uvfits : bool, optional
            Gets the UVfits files instead of the IMfits files when set to `True`.
            Defaults to `False`.
        verbose : bool, optional
            When `True`, print out additional messages. Defaults to `True`.
        get_query_payload : bool, optional
            if set to `True` then returns the dictionary sent as the HTTP request.
            Defaults to `False`.

        Returns
        -------
        A list of context-managers that yield readable file-like objects
        """

        image_urls = self.get_image_list(coordinates, radius=radius, max_rms=max_rms,
                                         band=band, get_uvfits=get_uvfits,
                                         get_query_payload=get_query_payload)
        if get_query_payload:
            return image_urls

        if verbose:
            print("{num} images found.".format(num=len(image_urls)))

        return [aud.get_readable_fileobj(U) for U in image_urls]
Example #28
    def search(cls, uniqueID, howmany=10):
        """perform restful API version of search available here:
        https://gravityspytools.ciera.northwestern.edu/search/

        Parameters
        ----------
        uniqueID : `str`
            This is the unique 10 character hash that identifies
            a Gravity Spy Image

        howmany : `int`, optional, default: 10
            number of similar images you would like

        Returns
        -------
        `GravitySpyTable` containing similar events based on
        an evaluation of the Euclidean distance of the input image
        to all other images in some Feature Space
        """
        from astropy.utils.data import get_readable_fileobj
        import json
        from six.moves.urllib.error import HTTPError

        # Need to build the url call for the restful API
        base = 'https://gravityspytools.ciera.northwestern.edu' + \
            '/search/similarity_search_restful_API'

        parts = {
            'howmany': howmany,
            'imageid': uniqueID,
        }

        search = '&'.join('{}={}'.format(key, value) for
                          key, value in parts.items())

        url = '{}/?{}'.format(base, search)

        try:
            with get_readable_fileobj(url) as f:
                return GravitySpyTable(json.load(f))
        except HTTPError as exc:
            if exc.code == 500:
                exc.msg = exc.msg + ', please confirm the uniqueID is valid'
            raise
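Unlike Example 13, the query string here is joined by hand, so values are not percent-escaped; the urlencode form used there is the safer choice:

from six.moves import urllib
search = urllib.parse.urlencode(parts)  # also percent-escapes values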
Example #29
    def get_spectral_template_async(self, kind='qso'):
        """
        Download spectral templates from SDSS DR-2, which are located here:

            http://www.sdss.org/dr5/algorithms/spectemplates/

        There are 32 spectral templates available from DR-2, ranging from
        stellar spectra to galaxies and quasars. To see the available templates, do:

            from astroquery.sdss import SDSS
            print sdss.AVAILABLE_TEMPLATES

        Parameters
        ----------
        kind : str, list
            Which spectral template to download? Options are stored in the
            dictionary astroquery.sdss.SDSS.AVAILABLE_TEMPLATES

        Examples
        --------
        >>> qso = SDSS.get_spectral_template(kind='qso')
        >>> Astar = SDSS.get_spectral_template(kind='star_A')
        >>> Fstar = SDSS.get_spectral_template(kind='star_F')

        Returns
        -------
        List of PyFITS HDUList objects.
        """

        if kind == 'all':
            indices = list(np.arange(33))
        else:
            indices = spec_templates[kind]
            if type(indices) is not list:
                indices = [indices]

        results = []
        for index in indices:
            name = str(index).zfill(3)
            link = '%s-%s.fit' % (SDSS.TEMPLATES, name)
            results.append(aud.get_readable_fileobj(link))

        return results
Example #30
    def _parse_result(self, response, verbose=False):
        """
        Parses the raw HTTP response and returns it as an `astropy.table.Table`.

        Parameters
        ----------
        response : `requests.Response`
            The HTTP response object
        verbose : bool, optional
            Defaults to `False`. When `True`, displays warnings whenever the VOTable
            returned from the service doesn't conform to the standard.

        Returns
        -------
        table : `astropy.table.Table`
        """
        table_links = self.extract_urls(response.content)
        # keep only one link that is not a webstart
        if len(table_links) == 0:
            raise Exception("No VOTable found on returned webpage!")
        table_link = [link for link in table_links if "8080" not in link][0]
        with aud.get_readable_fileobj(table_link) as f:
            content = f.read()

        if not verbose:
            commons.suppress_vo_warnings()

        try:
            tf = tempfile.NamedTemporaryFile()
            tf.write(content.encode("utf-8"))
            tf.flush()
            first_table = votable.parse(tf.name, pedantic=False).get_first_table()
            table = first_table.to_table()
            if len(table) == 0:
                warnings.warn("Query returned no results, so the table will be empty")
            return table
        except Exception as ex:
            self.response = content
            self.table_parse_error = ex
            raise TableParseError(
                "Failed to parse UKIDSS votable! The raw response can be found "
                "in self.response, and the error in self.table_parse_error."
            )
Example #31
    def from_json(cls, filename, **kwargs):
        """
        Create a database of VO services from a JSON file.

        Example JSON format for Cone Search::

            {
                "__version__": 1,
                "catalogs" : {
                    "My Cone Search": {
                        "capabilityClass": "ConeSearch",
                        "title": "My Cone Search",
                        "url": "http://foo/cgi-bin/search?CAT=bar&",
                        ...
                    },
                    "Another Cone Search": {
                        ...
                    }
                }
            }

        Parameters
        ----------
        filename : str
            JSON file.

        kwargs : dict
            Keywords accepted by
            :func:`~astropy.utils.data.get_readable_fileobj`.

        Returns
        -------
        db : `VOSDatabase`
            Database from given file.

        """
        with get_readable_fileobj(filename, **kwargs) as fd:
            tree = json.load(fd)

        return cls(tree)
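Loading the JSON shown in the docstring is then a one-liner (filename hypothetical):

db = VOSDatabase.from_json('my_conesearch_db.json', encoding='utf8')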
Example #32
def vospace_readable_fileobj(name_or_obj, token=None, **kwargs):
    """Read data from VOSpace or some other place.

    Notes
    -----
    Most of the heavy lifting is done with
    :func:`~astropy.utils.data.get_readable_fileobj`.  Any additional keywords
    passed to this function will get passed directly to that function.

    Parameters
    ----------
    name_or_obj : :class:`str` or file-like object
        The filename of the file to access (if given as a string), or
        the file-like object to access.

        If a file-like object, it must be opened in binary mode.

    token : :class:`str`
        A token granting access to VOSpace.

    Returns
    -------
    file
        A readable file-like object.
    """
    fileobj = name_or_obj
    close_fileobj = False
    if (isinstance(name_or_obj, str) and name_or_obj.find('://') > 0):
        uri = name_or_obj[:name_or_obj.find('://')]
        if authClient.isValidUser(uri):
            # VOSpace call
            fileobj = BytesIO(storeClient.get(name_or_obj, mode='binary'))
            close_fileobj = True

    with get_readable_fileobj(fileobj, **kwargs) as f:
        try:
            yield f
        finally:
            if close_fileobj:
                fileobj.close()
Example #33
def xml_readlines(source):
    """
    Get the lines from a given XML file.  Correctly determines the
    encoding and always returns unicode.

    Parameters
    ----------
    source : path-like, readable file-like, or callable
        Handle that contains the data or function that reads it.
        If a function or callable object, it must directly read from a stream.
        Non-callable objects must define a ``read`` method.

    Returns
    -------
    lines : list of unicode
    """
    encoding = get_xml_encoding(source)

    with data.get_readable_fileobj(source, encoding=encoding) as fileobj:
        fileobj.seek(0)
        xml_lines = fileobj.readlines()

    return xml_lines
Example #34
def request_besancon(email,
                     glon,
                     glat,
                     smallfield=True,
                     extinction=0.7,
                     area=0.0001,
                     verbose=True,
                     clouds=None,
                     absmag_limits=(-7, 15),
                     mag_limits=copy.copy(mag_limits),
                     colors_limits=copy.copy(colors_limits),
                     retrieve_file=True,
                     **kwargs):
    """
    Perform a query on the Besancon model of the galaxy
    http://model.obs-besancon.fr/

    Parameters
    ----------
    email : string
        A valid e-mail address to send the report of completion to
    glon : float
    glat : float
        Galactic latitude and longitude at the center
    smallfield : bool
        Small field (True) or Large Field (False)
        LARGE FIELD NOT SUPPORTED YET
    extinction : float
        Extinction per kpc in A_V
    area : float
        Area in square degrees
    absmag_limits : (float, float)
        Absolute magnitude lower, upper limits
    colors_limits : dict of (float, float)
        Should contain 4 elements listing color differences in the valid bands, e.g.:
            {"J-H":(99,-99),"H-K":(99,-99),"J-K":(99,-99),"V-K":(99,-99)}
    mag_limits : dict of (float, float)
        Lower and upper magnitude difference limits for each magnitude band
        U B V R I J H K L
    clouds : list of 2-tuples
        Up to 25 line-of-sight clouds can be specified in pairs of (A_V,
        distance in pc)
    verbose : bool
        Print out extra error messages?
    retrieve_file : bool
        If True, will try to retrieve the file every 30s until it shows up.
        Otherwise, just returns the filename (the job is still executed on
        the remote server, though)
    kwargs : dict
        Can override any argument in the request if you know the name of the
        POST keyword.

    Returns
    -------
    Either the filename or the table depending on whether 'retrieve file' is
    specified

    """

    # create a new keyword dict based on inputs + defaults
    kwd = copy.copy(keyword_defaults)
    for key, val in kwargs.iteritems():
        if key in keyword_defaults:
            kwd[key] = val
        elif verbose:
            print "Skipped invalid key %s" % key

    kwd['kleg'] = 1 if smallfield else 2
    if not smallfield:
        raise NotImplementedError

    kwd['adif'] = extinction
    kwd['soli'] = area
    kwd['oo'][0] = absmag_limits[0]
    kwd['ff'][0] = absmag_limits[1]

    for ii, (key, val) in enumerate(colors_limits.items()):
        if key[0] in mag_order and key[1] == '-' and key[2] in mag_order:
            kwd['colind'][ii] = key
            kwd['oo'][ii + 9] = val[0]
            kwd['ff'][ii + 9] = val[1]
        else:
            raise ValueError('Invalid color %s' % key)

    for (key, val) in mag_limits.iteritems():
        if key in mag_order:
            kwd['band0'][mag_order.index(key)] = val[0]
            kwd['bandf'][mag_order.index(key)] = val[1]
        else:
            raise ValueError('Invalid band %s' % key)

    if clouds is not None:
        for ii, (AV, di) in enumerate(clouds):
            # NOTE: the original indexed kwd by the loop *values* (kwd[AV]);
            # the string keys used here are assumed from the parameter names.
            kwd['AV'][ii] = AV
            kwd['di'][ii] = di

    # parse the default dictionary
    request = parse_besancon_dict(keyword_defaults)

    # an e-mail address is required
    request.append(('email', email))
    request = urllib.urlencode(request)
    # load the URL as text
    U = urllib.urlopen(url_request, request)
    # keep the text stored for possible later use
    with aud.get_readable_fileobj(U) as f:
        text = f.read()
    try:
        filename = result_re.search(text).group()
    except AttributeError:  # if there are no matches
        errors = parse_errors(text)
        raise ValueError("Errors: " + "\n".join(errors))

    if verbose:
        print "File is %s and can be acquired from %s" % (
            filename, url_download + filename)

    if retrieve_file:
        return get_besancon_model_file(filename)
    else:
        return filename
Example #35
def test_url_nocache():
    with get_readable_fileobj(TESTURL, cache=False, encoding='utf-8') as page:
        assert page.read().find('Astropy') > -1
Example #36
def validate(source, output=None, xmllint=False, filename=None):
    """
    Prints a validation report for the given file.

    Parameters
    ----------
    source : str or readable file-like object
        Path to a VOTABLE_ xml file, or a pathlib.Path
        object pointing to one.

    output : writable file-like object, optional
        Where to output the report.  If `None` (the default),
        the report is returned as a string.

    xmllint : bool, optional
        When `True`, also send the file to ``xmllint`` for schema and
        DTD validation.  Requires that ``xmllint`` is installed.  The
        default is `False`.  ``source`` must be a file on the local
        filesystem in order for ``xmllint`` to work.

    filename : str, optional
        A filename to use in the error messages.  If not provided, one
        will be automatically determined from ``source``.

    Returns
    -------
    is_valid : bool or str
        Returns `True` if no warnings were found.  If ``output`` is
        `None`, the return value will be a string.
    """

    from astropy.utils.console import print_code_line, color_print

    return_as_str = False
    if output is None:
        output = io.StringIO()
        return_as_str = True

    lines = []
    votable = None

    reset_vo_warnings()

    with data.get_readable_fileobj(source, encoding='binary') as fd:
        content = fd.read()
    content_buffer = io.BytesIO(content)
    content_buffer.seek(0)

    if filename is None:
        if isinstance(source, str):
            filename = source
        elif hasattr(source, 'name'):
            filename = source.name
        elif hasattr(source, 'url'):
            filename = source.url
        else:
            filename = "<unknown>"

    with warnings.catch_warnings(record=True) as warning_lines:
        warnings.resetwarnings()
        warnings.simplefilter("always", exceptions.VOWarning, append=True)
        try:
            votable = parse(content_buffer, verify='warn', filename=filename)
        except ValueError as e:
            lines.append(str(e))

    lines = [
        str(x.message)
        for x in warning_lines if issubclass(x.category, exceptions.VOWarning)
    ] + lines

    content_buffer.seek(0)
    output.write("Validation report for {0}\n\n".format(filename))

    if len(lines):
        xml_lines = iterparser.xml_readlines(content_buffer)

        for warning in lines:
            w = exceptions.parse_vowarning(warning)

            if not w['is_something']:
                output.write(w['message'])
                output.write('\n\n')
            else:
                line = xml_lines[w['nline'] - 1]
                warning = w['warning']
                if w['is_warning']:
                    color = 'yellow'
                else:
                    color = 'red'
                color_print('{0:d}: '.format(w['nline']),
                            '',
                            warning or 'EXC',
                            color,
                            ': ',
                            '',
                            textwrap.fill(w['message'],
                                          initial_indent='          ',
                                          subsequent_indent='  ').lstrip(),
                            file=output)
                print_code_line(line, w['nchar'], file=output)
            output.write('\n')
    else:
        output.write('astropy.io.votable found no violations.\n\n')

    success = 0
    if xmllint and os.path.exists(filename):
        from . import xmlutil

        if votable is None:
            version = "1.1"
        else:
            version = votable.version
        success, stdout, stderr = xmlutil.validate_schema(filename, version)

        if success != 0:
            output.write('xmllint schema violations:\n\n')
            output.write(stderr.decode('utf-8'))
        else:
            output.write('xmllint passed\n')

    if return_as_str:
        return output.getvalue()
    return len(lines) == 0 and success == 0
Example #37
def read(table, guess=None, **kwargs):
    # Docstring defined below
    del _read_trace[:]

    # Downstream readers might munge kwargs
    kwargs = copy.deepcopy(kwargs)

    # Convert 'fast_reader' key in kwargs into a dict if not already and make sure
    # 'enable' key is available.
    fast_reader = _get_fast_reader_dict(kwargs)
    kwargs['fast_reader'] = fast_reader

    if fast_reader['enable'] and fast_reader.get('chunk_size'):
        return _read_in_chunks(table, **kwargs)

    if 'fill_values' not in kwargs:
        kwargs['fill_values'] = [('', '0')]

    # If an Outputter is supplied in kwargs that will take precedence.
    if 'Outputter' in kwargs:  # user specified Outputter, not supported for fast reading
        fast_reader['enable'] = False

    format = kwargs.get('format')
    # Dictionary arguments are passed by reference by default and thus need
    # special protection:
    new_kwargs = copy.deepcopy(kwargs)
    kwargs['fast_reader'] = copy.deepcopy(fast_reader)

    # Get the Reader class based on possible format and Reader kwarg inputs.
    Reader = _get_format_class(format, kwargs.get('Reader'), 'Reader')
    if Reader is not None:
        new_kwargs['Reader'] = Reader
        format = Reader._format_name

    # Remove format keyword if there, this is only allowed in read() not get_reader()
    if 'format' in new_kwargs:
        del new_kwargs['format']

    if guess is None:
        guess = _GUESS

    if guess:
        # If ``table`` is probably an HTML file then tell guess function to add
        # the HTML reader at the top of the guess list.  This is in response to
        # issue #3691 (and others) where libxml can segfault on a long non-HTML
        # file, thus prompting removal of the HTML reader from the default
        # guess list.
        new_kwargs['guess_html'] = _probably_html(table)

        # If `table` is a filename or readable file object then read in the
        # file now.  This prevents problems in Python 3 with the file object
        # getting closed or left at the file end.  See #3132, #3013, #3109,
        # #2001.  If a `readme` arg was passed that implies CDS format, in
        # which case the original `table` as the data filename must be left
        # intact.
        if 'readme' not in new_kwargs:
            encoding = kwargs.get('encoding')
            try:
                with get_readable_fileobj(table, encoding=encoding) as fileobj:
                    table = fileobj.read()
            except ValueError:  # unreadable or invalid binary file
                raise
            except Exception:
                pass
            else:
                # Ensure that `table` has at least one \r or \n in it
                # so that the core.BaseInputter test of
                # ('\n' not in table and '\r' not in table)
                # will fail and so `table` cannot be interpreted there
                # as a filename.  See #4160.
                if not re.search(r'[\r\n]', table):
                    table = table + os.linesep

                # If the table got successfully read then look at the content
                # to see if is probably HTML, but only if it wasn't already
                # identified as HTML based on the filename.
                if not new_kwargs['guess_html']:
                    new_kwargs['guess_html'] = _probably_html(table)

        # Get the table from guess in ``dat``.  If ``dat`` comes back as None
        # then there was just one set of kwargs in the guess list so fall
        # through below to the non-guess way so that any problems result in a
        # more useful traceback.
        dat = _guess(table, new_kwargs, format, fast_reader)
        if dat is None:
            guess = False

    if not guess:
        if format is None:
            reader = get_reader(**new_kwargs)
            format = reader._format_name

        # Try the fast reader version of `format` first if applicable.  Note that
        # if user specified a fast format (e.g. format='fast_basic') this test
        # will fail and the else-clause below will be used.
        if fast_reader['enable'] and 'fast_{0}'.format(
                format) in core.FAST_CLASSES:
            fast_kwargs = copy.deepcopy(new_kwargs)
            fast_kwargs['Reader'] = core.FAST_CLASSES['fast_{0}'.format(
                format)]
            fast_reader_rdr = get_reader(**fast_kwargs)
            try:
                dat = fast_reader_rdr.read(table)
                _read_trace.append({
                    'kwargs':
                    copy.deepcopy(fast_kwargs),
                    'Reader':
                    fast_reader_rdr.__class__,
                    'status':
                    'Success with fast reader (no guessing)'
                })
            except (core.ParameterError, cparser.CParserError,
                    UnicodeEncodeError) as err:
                # special testing value to avoid falling back on the slow reader
                if fast_reader['enable'] == 'force':
                    raise core.InconsistentTableError(
                        'fast reader {} exception: {}'.format(
                            fast_reader_rdr.__class__, err))
                # If the fast reader doesn't work, try the slow version
                reader = get_reader(**new_kwargs)
                dat = reader.read(table)
                _read_trace.append({
                    'kwargs': copy.deepcopy(new_kwargs),
                    'Reader': reader.__class__,
                    'status': ('Success with slow reader after failing'
                               ' with fast (no guessing)'),
                })
        else:
            reader = get_reader(**new_kwargs)
            dat = reader.read(table)
            _read_trace.append({
                'kwargs': copy.deepcopy(new_kwargs),
                'Reader': reader.__class__,
                'status': 'Success with specified Reader class (no guessing)',
            })

    return dat
Ejemplo n.º 38
0
Archivo: mp.py Proyecto: jumbokh/gwpy
def read_multi(flatten, cls, source, *args, **kwargs):
    """Read sources into a `cls` with multiprocessing

    This method should be called by `cls.read` and uses the `nproc`
    keyword to enable and handle pool-based multiprocessing of
    multiple source files, using `flatten` to combine the
    chunked data into a single object of the correct type.

    Parameters
    ----------
    flatten : `callable`
        a method to take a list of ``cls`` instances, and combine them
        into a single ``cls`` instance

    cls : `type`
        the object type to read

    source : `str`, `list` of `str`, ...
        the input data source; it can take many different forms

    *args
        positional arguments to pass to the reader

    **kwargs
        keyword arguments to pass to the reader
    """
    # parse input as a list of files
    try:  # try and map to a list of file-like objects
        files = file_list(source)
    except ValueError:  # otherwise treat as single file
        files = [source]

    # determine input format (so we don't have to do it multiple times)
    # -- this is basically harvested from astropy.io.registry.read()
    if kwargs.get('format', None) is None:
        ctx = None
        if isinstance(source, FILE_LIKE):
            fileobj = source
        elif isinstance(source, string_types):
            try:
                ctx = get_readable_fileobj(files[0], encoding='binary')
                fileobj = ctx.__enter__()
            except IOError:
                raise
            except Exception:
                fileobj = None
        kwargs['format'] = get_format('read', cls, files[0], fileobj, args,
                                      kwargs)
        if ctx is not None:
            ctx.__exit__(*sys.exc_info())

    # calculate maximum number of processes
    nproc = min(kwargs.pop('nproc', 1), len(files))

    # define multiprocessing method
    def _read_single_file(f):
        try:
            return f, io_read(cls, f, *args, **kwargs)
        except Exception as e:
            if nproc == 1:
                raise
            elif isinstance(e, SAXException):  # SAXExceptions don't pickle
                return f, e.getException()
            else:
                return f, e

    # read files
    output = mp_utils.multiprocess_with_queues(nproc,
                                               _read_single_file,
                                               files,
                                               raise_exceptions=False)

    # raise exceptions (from multiprocessing, single process raises inline)
    for f, x in output:
        if isinstance(x, Exception):
            x.args = ('Failed to read %s: %s' % (f, str(x)), )
            raise x

    # return combined object
    _, out = zip(*output)
    return flatten(out)
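
# --- Usage sketch (editor's illustration, not from the source project) ---
# Assuming a reader class such as `gwpy.timeseries.TimeSeries`, whose
# `.read` classmethod delegates to `read_multi`, the `nproc` keyword fans
# the file list out over worker processes and `flatten` stitches the
# per-file results back together.  The file and channel names below are
# hypothetical.
from gwpy.timeseries import TimeSeries

data = TimeSeries.read(['chunk-0.gwf', 'chunk-1.gwf'],  # hypothetical files
                       'L1:GDS-CALIB_STRAIN',           # hypothetical channel
                       nproc=2)                         # two parallel readers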
Ejemplo n.º 39
0
__author__ = "Alex Urban <*****@*****.**>"
__currentmodule__ = 'gwpy.timeseries'

# First, we prepare one second of Gaussian noise:

from numpy import random
from gwpy.timeseries import TimeSeries
noise = TimeSeries(random.normal(scale=.1, size=16384), sample_rate=16384)

# Then we can download a simulation of the GW150914 signal from LOSC:

from astropy.utils.data import get_readable_fileobj
source = 'https://losc.ligo.org/s/events/GW150914/P150914/'
url = '%s/fig2-unfiltered-waveform-H.txt' % source
with get_readable_fileobj(url) as f:
    signal = TimeSeries.read(f, format='txt')
signal.t0 = .5  # make sure this intersects with noise time samples

# Note, since this simulation cuts off before a certain time, it is
# important to taper its ends to zero to avoid ringing artifacts.
# We can accomplish this using the
# :meth:`~gwpy.timeseries.TimeSeries.taper` method.

signal = signal.taper()

# Since the time samples overlap, we can inject this into our noise data
# using :meth:`~gwpy.types.series.Series.inject`:

data = noise.inject(signal)
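
# As a quick visual check (editor's sketch, not part of the original
# example), we can compare the injected data with the pure noise using the
# standard :meth:`~gwpy.timeseries.TimeSeries.plot` method:

plot = data.plot(label='noise + signal')
ax = plot.gca()
ax.plot(noise, label='noise only')
ax.legend()
plot.show()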
Ejemplo n.º 40
0
    def from_veto_definer_file(cls, fp, start=None, end=None, ifo=None,
                               format='ligolw'):
        """Read a `DataQualityDict` from a LIGO_LW XML VetoDefinerTable.

        Parameters
        ----------
        fp : `str`
            path of veto definer file to read
        start : `~gwpy.time.LIGOTimeGPS`, `int`, optional
            GPS start time at which to restrict returned flags
        end : `~gwpy.time.LIGOTimeGPS`, `int`, optional
            GPS end time at which to restrict returned flags
        ifo : `str`, optional
            interferometer prefix whose flags you want to read
        format : `str`, optional
            format of file to read, currently only 'ligolw' is supported

        Returns
        -------
        flags : `DataQualityDict`
            a `DataQualityDict` of flags parsed from the `veto_def_table`
            of the input file.

        Notes
        -----
        This method does not automatically `~DataQualityDict.populate`
        the `active` segment list of any flags; a separate call should
        be made for that, as follows

        >>> flags = DataQualityDict.from_veto_definer_file('/path/to/file.xml')
        >>> flags.populate()

        """
        from ..io.ligolw import table_from_file

        if format != 'ligolw':
            raise NotImplementedError("Reading veto definer from non-ligolw "
                                      "format file is not currently "
                                      "supported")

        if start is not None:
            start = to_gps(start)
        if end is not None:
            end = to_gps(end)

        # read veto definer file
        with get_readable_fileobj(fp, show_progress=False) as f:
            veto_def_table = table_from_file(f, 'veto_definer')

        # parse flag definitions
        out = cls()
        for row in veto_def_table:
            if ifo and row.ifo != ifo:
                continue
            if start and 0 < row.end_time <= start:
                continue
            elif start:
                row.start_time = max(row.start_time, start)
            if end and row.start_time >= end:
                continue
            elif end and not row.end_time:
                row.end_time = end
            elif end:
                row.end_time = min(row.end_time, end)
            flag = DataQualityFlag.from_veto_def(row)
            if flag.name in out:
                out[flag.name].known.extend(flag.known)
                out[flag.name].known.coalesce()
            else:
                out[flag.name] = flag
        return out
Ejemplo n.º 41
0
    def get_catalog_gal(self, glon, glat, directory=None, radius=1, save=False,
            verbose=True, savename=None, overwrite=False):
        """
        Get all sources in the catalog within some radius

        Parameters
        ----------
        glon : float
        glat : float
            Galactic latitude and longitude at the center
        directory : None or string
            Directory to download files into.  Defaults to self.directory
        radius : float
            Radius in which to search for catalog entries in arcminutes
        savename : string or None
            The file name to save the catalog to.  If unspecified, will save as
            UKIDSS_catalog_G###.###-###.###_r###.fits.gz, where the #'s indicate
            galactic lon/lat and radius

        Returns
        -------
        List of fits.primaryHDU instances containing FITS tables

        Example
        -------
        >>> R = UKIDSSQuery()
        >>> data = R.get_catalog_gal(10.625,-0.38,radius=0.1)
        >>> bintable = data[0][1]
        """

        # Construct request
        self.request = {}
        self.request['database'] = self.database
        self.request['programmeID'] = verify_programme_id(self.programmeID,querytype='catalog')
        self.request['from'] = 'source'
        self.request['formaction'] = 'region'
        self.request['ra'] = glon
        self.request['dec'] = glat
        self.request['sys'] = 'G'
        self.request['radius'] = radius
        self.request['xSize'] = ''
        self.request['ySize'] = ''
        self.request['boxAlignment'] = 'RADec'
        self.request['emailAddress'] = ''
        self.request['format'] = 'FITS'
        self.request['compress'] = 'GZIP'
        self.request['rows'] = 1
        self.request['select'] = '*'
        self.request['where'] = ''
        self.query_str = url_getcatalog +"?"+ urlencode(self.request)

        if directory is None:
            directory = self.directory

        # Retrieve page
        page = self.opener.open(url_getcatalog, urlencode(self.request))
        with aud.get_readable_fileobj(page) as f:
            results = f.read()

        # Parse results for links
        format = formatter.NullFormatter()           # create default formatter
        htmlparser = LinksExtractor(format)        # create new parser object
        htmlparser.feed(results)
        htmlparser.close()
        links = list(set(htmlparser.get_links()))

        # Loop through links and retrieve FITS tables
        c = 0
        data = []
        for link in links:
            if not "8080" in link:
                c = c + 1

                if not os.path.exists(directory):
                    os.mkdir(directory)

                if save:
                    if savename is None:
                        savename = "UKIDSS_catalog_G%07.3f%+08.3f_r%03i.fits.gz" % (glon,glat,radius)
                    filename = directory + "/" + savename
                
                U = self.opener.open(link)
                with aud.get_readable_fileobj(U, cache=True) as f:
                    results = f.read()

                S = BytesIO(results)
                try: 
                    fitsfile = fits.open(S,ignore_missing_end=True)
                except IOError:
                    S.seek(0)
                    G = gzip.GzipFile(fileobj=S)
                    fitsfile = fits.open(G,ignore_missing_end=True)


                data.append(fitsfile)
                if save: 
                    fitsfile.writeto(filename.rstrip(".gz"), clobber=overwrite)

        return data
Ejemplo n.º 42
0
    def search(cls,
               gravityspy_id,
               howmany=10,
               era='ALL',
               ifos='H1L1',
               remote_timeout=20):
        """perform restful API version of search available here:
        https://gravityspytools.ciera.northwestern.edu/search/

        Parameters
        ----------
        gravityspy_id : `str`,
            This is the unique 10 character hash that identifies
            a Gravity Spy Image

        howmany : `int`, optional, default: 10
            number of similar images you would like

        era : `str`, optional, default: 'ALL'
            observing era to restrict results to; one of 'ALL', 'O1',
            'ER10', 'O2a', 'ER13'

        ifos : `str`, optional, default: 'H1L1'
            concatenated two-character interferometer prefixes to search

        remote_timeout : `float`, optional, default: 20
            seconds to wait before timing out the remote query

        Returns
        -------
        `GravitySpyTable` containing similar events based on
        an evaluation of the Euclidean distance of the input image
        to all other images in some Feature Space
        """
        # Need to build the url call for the restful API
        base = 'https://gravityspytools.ciera.northwestern.edu' + \
            '/search/similarity_search_restful_API'

        map_era_to_url = {
            'ALL': "event_time BETWEEN 1126400000 AND 1584057618",
            'O1': "event_time BETWEEN 1126400000 AND 1137250000",
            'ER10': "event_time BETWEEN 1161907217 AND 1164499217",
            'O2a': "event_time BETWEEN 1164499217 AND 1219276818",
            'ER13': "event_time BETWEEN 1228838418 AND 1229176818",
        }

        parts = {
            'howmany': howmany,
            'imageid': gravityspy_id,
            'era': map_era_to_url[era],
            'ifo': ", ".join(repr(ifos[i:i + 2])
                             for i in range(0, len(ifos), 2)),
            'database': 'similarity_index_o3',
        }

        search = urlencode(parts)

        url = '{}/?{}'.format(base, search)

        try:
            with get_readable_fileobj(url, remote_timeout=remote_timeout) as f:
                return GravitySpyTable(json.load(f))
        except HTTPError as exc:
            if exc.code == 500:
                exc.msg += ', confirm the gravityspy_id is valid'
                raise
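
# Usage sketch (editor's illustration): fetch the five most similar O1
# glitches for a Gravity Spy image.  The import path is assumed and the
# 10-character hash below is hypothetical.
from gwpy.table import GravitySpyTable

similar = GravitySpyTable.search('8FHydzpmfC', howmany=5, era='O1')
print(similar)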
Ejemplo n.º 43
0
def download_list_of_fitsfiles(linklist,
                               output_directory=None,
                               output_prefix=None,
                               save=False,
                               overwrite=False,
                               verbose=False,
                               output_coord_format=None,
                               filename_header_keywords=None,
                               include_input_filename=True):
    """
    Given a list of file URLs, download them and (optionally) rename them

    example:
    download_list_of_fitsfiles(
        ['http://fermi.gsfc.nasa.gov/FTP/fermi/data/lat/queries/L130413170713F15B52BC06_PH00.fits',
         'http://fermi.gsfc.nasa.gov/FTP/fermi/data/lat/queries/L130413170713F15B52BC06_PH01.fits',
         'http://fermi.gsfc.nasa.gov/FTP/fermi/data/lat/queries/L130413170713F15B52BC06_SC00.fits'],
         output_directory='fermi_m31',
         output_prefix='FermiLAT',
         save=True,
         overwrite=False,
         verbose=True,
         output_coord_format=None, # FITS tables don't have crval/crpix, good one is: "%08.3g%+08.3g",
         filename_header_keywords=None, # couldn't find any useful ones
         include_input_filename=True)

    """
    # Loop through links and retrieve FITS images
    images = {}
    for link in linklist:

        if output_directory is None:
            output_directory = ""
        elif output_directory[-1] != "/":
            output_directory += "/"
        # create the output directory whether or not it ended with "/"
        if output_directory and not os.path.exists(output_directory):
            os.mkdir(output_directory)

        with aud.get_readable_fileobj(link, cache=True) as f:
            results = f.read()
        S = StringIO.StringIO(results)

        try:
            # try to open as a fits file
            fitsfile = fits.open(S, ignore_missing_end=True)
        except IOError:
            # if that fails, try to open as a gzip'd fits file
            # have to rewind to the start
            S.seek(0)
            G = gzip.GzipFile(fileobj=S)
            fitsfile = fits.open(G, ignore_missing_end=True)

        # Get Multiframe ID from the header
        images[link] = fitsfile

        if save:
            h0 = fitsfile[0].header

            if filename_header_keywords:  # is not None or empty
                nametxt = "_".join([
                    validify_filename(str(h0[key]))
                    for key in filename_header_keywords
                ])
            else:
                nametxt = ""

            if output_coord_format:
                lon = h0['CRVAL1']
                lat = h0['CRVAL2']

                # this part will eventually be handled by astropy.coordinates directly
                # ctype = h0['CTYPE1']
                # if 'RA' in ctype:
                #     coordinate = coord.ICRSCoordinates(lon,lat,unit=('deg','deg'))
                # elif 'GLON' in ctype:
                #     coordinate = coord.GalacticCoordinates(lon,lat,unit=('deg','deg'))
                # else:
                #     raise TypeError("Don't recognize ctype %s" % ctype)
                # coordstr = coordinate.format(output_coord_format)
                try:
                    coordstr = output_coord_format.format(lon, lat)
                except TypeError:
                    coordstr = output_coord_format % (lon, lat)
                nametxt += "_" + coordstr

            if include_input_filename:
                filename_root = os.path.split(link)[1]
            else:
                filename_root = ""

            savename = output_prefix if output_prefix else ""
            savename += nametxt
            savename += "_" + filename_root

            # Set final directory and file names
            final_file = output_directory + savename

            if verbose:
                print("Saving file %s" % final_file)

            try:
                fitsfile.writeto(final_file, clobber=overwrite)
            except IOError:
                print("Skipped writing file %s because it exists "
                      "and overwrite=False" % final_file)

    return images
Ejemplo n.º 44
0
def get_nrao_image(lon,
                   lat,
                   system='galactic',
                   epoch='J2000',
                   size=1.0,
                   max_rms=1e4,
                   band="",
                   verbose=True,
                   savename=None,
                   save=True,
                   overwrite=False,
                   directory='./',
                   get_uvfits=False):
    """
    Search the NRAO image archive for VLA images at the given position and
    download any matches.

    Parameters
    ----------
    lon : float
    lat : float
        Right ascension and declination or glon/glat
    system : ['celestial','galactic']
        System of lon/lat.  Can be any valid coordinate system supported by the
        astropy.coordinates package
    epoch : string
        Epoch of the coordinate system (e.g., B1950, J2000)
    savename : None or string
        filename to save fits file as.  If None, will become G###.###p###.###_(survey).fits
    size : float
        Size of search radius (arcminutes)
    max_rms : float
        Maximum allowable noise level in the image (mJy)
    verbose : bool
        Print out extra error messages?
    save : bool
        Save FITS file?
    overwrite : bool
        Overwrite if file already exists?
    directory : string
        Directory to store file in.  Defaults to './'.  
    get_uvfits : bool
        Get the UVfits files instead of the IMfits files?

    Examples
    --------
    >>> fitsfile = get_nrao_image(49.489,-0.37)
    """

    if band not in valid_bands:
        raise ValueError("Invalid band.  Valid bands are: %s" % valid_bands)

    if system == 'celestial':
        radec = coord.FK5Coordinates(lon, lat, unit=('deg', 'deg'))
        galactic = radec.galactic
    elif system == 'galactic':
        galactic = coord.GalacticCoordinates(lon, lat, unit=('deg', 'deg'))
        radec = galactic.fk5

    radecstr = radec.ra.format(sep=' ') + ' ' + radec.dec.format(sep=' ')
    glon, glat = galactic.lonangle.degrees, galactic.latangle.degrees

    # Construct request
    request = {}
    request["nvas_pos"] = radecstr
    request["nvas_rad"] = size
    request["nvas_rms"] = max_rms
    request["nvas_scl"] = size
    request["submit"] = "Search"
    request["nvas_bnd"] = band

    # create the request header data
    request = urllib.urlencode(request)
    # load the URL as text
    U = urllib.urlopen(request_URL, request)
    # read results with progressbar
    results = progressbar.chunk_read(U, report_hook=progressbar.chunk_report)

    if get_uvfits:
        links = uvfits_re.findall(results)
    else:
        links = imfits_re.findall(results)
    configurations = config_re.findall(results)

    if len(links) == 0:
        if verbose:
            print("No matches found at ra,dec = %s." % radecstr)
        return []

    if verbose > 1:
        print("Configurations: ")
        print("\n".join(
            "%40s: %20s" % (L, C) for L, C in zip(links, configurations)))

    if save and not os.path.exists(directory):
        os.mkdir(directory)

    # build the opener unconditionally: it is used below to fetch every
    # link, whether or not the files are saved
    opener = urllib2.build_opener()

    if verbose:
        print("Found %i imfits files" % len(links))

    images = []

    for link, config in zip(links, configurations):

        # Get the file
        U = opener.open(link)
        with aud.get_readable_fileobj(U) as f:
            results = f.read()
        S = StringIO.StringIO(results)
        try:
            fitsfile = fits.open(S, ignore_missing_end=True)
        except IOError:
            S.seek(0)
            G = gzip.GzipFile(fileobj=S)
            fitsfile = fits.open(G, ignore_missing_end=True)

        # Get Multiframe ID from the header
        images.append(fitsfile)

        if save:
            h0 = fitsfile[0].header
            freq_ghz = h0['CRVAL3'] / 1e9
            for bn, bandlimits in band_freqs.items():
                if bandlimits[0] < freq_ghz < bandlimits[1]:
                    bandname = bn
            obj = str(h0['OBJECT']).strip()
            program = h0['OBSERVER'].strip()
            h0['CONFIG'] = config

            if savename is None:
                if get_uvfits:
                    filename = "VLA_%s_G%07.3f%+08.3f_%s_%s.uvfits" % (
                        bandname, glon, glat, obj, program)
                else:
                    filename = "VLA_%s_G%07.3f%+08.3f_%s_%s.fits" % (
                        bandname, glon, glat, obj, program)
            else:
                filename = savename

            # Set final directory and file names
            final_file = directory + '/' + filename

            if verbose:
                print("Saving file %s" % final_file)

            fitsfile.writeto(final_file, clobber=overwrite)

    return images
Ejemplo n.º 45
0
def parse_cs(rid):
    """Return ``<testQuery>`` pars as dict for given Resource ID."""
    if isinstance(rid, bytes):  # pragma: py3
        rid = rid.decode('ascii')

    # Production server.
    url = ('http://vao.stsci.edu/directory/getRecord.aspx?'
           'id={0}&format=xml'.format(rid))

    # Test server (in case production server fails).
    backup_url = ('http://vaotest.stsci.edu/directory/getRecord.aspx?'
                  'id={0}&format=xml'.format(rid))

    tqp = ['ra', 'dec', 'sr']
    d = OrderedDict()
    urls_failed = False
    urls_errmsg = ''

    try:
        with get_readable_fileobj(url, encoding='binary',
                                  show_progress=False) as fd:
            dom = minidom.parse(fd)
    except Exception as e:  # pragma: no cover
        try:
            warnings.warn(
                '{0} raised {1}, trying {2}'.format(url, str(e), backup_url),
                AstropyUserWarning)
            with get_readable_fileobj(backup_url,
                                      encoding='binary',
                                      show_progress=False) as fd:
                dom = minidom.parse(fd)
        except Exception as e:
            urls_failed = True
            urls_errmsg = '{0} raised {1}, using default'.format(
                backup_url, str(e))

    if not urls_failed:
        tq = dom.getElementsByTagName('testQuery')
        if tq:
            for key in tqp:
                try:
                    d[key.upper()] = tq[0].getElementsByTagName(
                        key)[0].firstChild.nodeValue.strip()
                except Exception:  # pragma: no cover
                    urls_failed = True
                    urls_errmsg = ('Incomplete testQuery for {0}, '
                                   'using default'.format(rid))
        else:  # pragma: no cover
            urls_failed = True
            urls_errmsg = 'No testQuery found for {0}, using default'.format(
                rid)

    # Handle big SR returning too big a table for some queries, causing
    # tests to fail due to timeout.
    default_sr = '0.1'

    # If no testQuery found, use default
    if urls_failed:  # pragma: no cover
        d = OrderedDict({'RA': '0', 'DEC': '0', 'SR': default_sr})
        warnings.warn(urls_errmsg, AstropyUserWarning)
    # Force SR to be reasonably small (the values parsed from the XML are
    # strings, so compare numerically)
    elif float(d['SR']) > float(default_sr):
        warnings.warn(
            'SR={0} is too large, using SR={1} for {2}'.format(
                d['SR'], default_sr, rid), AstropyUserWarning)
        d['SR'] = default_sr

    return d
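
# Usage sketch (editor's illustration): look up the test-query parameters
# for a registered cone-search resource; the resource ID below is
# hypothetical.
pars = parse_cs('ivo://example.org/conesearch')
print(pars)  # OrderedDict with 'RA', 'DEC' and 'SR' keys (as strings)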
Ejemplo n.º 46
0
    def get_image_gal(self, glon, glat, filter='all', frametype='stack',
            directory=None, size=1.0, verbose=True, save=True, savename=None,
            overwrite=False):
        """
        Get an image at a specified glon/glat.  Size can be specified

        Parameters
        ----------
        glon : float
        glat : float
            Galactic latitude and longitude at the center
        filter : ['all','J','H','K','H2','Z','Y','Br']
            The color filter to download.
        frametype : ['stack','normal','interleave','deep%stack','confidence','difference','leavstack','all']
            The type of image
        directory : None or string
            Directory to download files into.  Defaults to self.directory
        size : float
            Size of cutout (symmetric) in arcminutes
        verbose : bool
            Print out extra error messages?
        save : bool
            Save FITS file?
        savename : string or None
            The file name to save the catalog to.  If unspecified, will save as
            UKIDSS_[band]_G###.###-###.###_[obj].fits.gz, where the #'s
            indicate galactic lon/lat and [band] and [obj] refer to the filter
            and the object name
        overwrite : bool
            Overwrite if file already exists?

        Examples
        --------
        >>> R = UKIDSSQuery()
        >>> fitsfile = R.get_image_gal(10.5,0.0)
        
        # get UWISH2 data (as per http://astro.kent.ac.uk/uwish2/main.html)
        >>> R.database='U09B8v20120403'
        >>> R.login(username='******',password='******',community='nonSurvey')
        >>> R.get_image_gal(49.489,-0.27,frametype='leavstack',size=20,filter='H2')
        """

        # Check for validity of requested frame_type
        if frametype not in frame_types:
            raise ValueError("Invalide frame type.  Valid frame types are: %s"
                    % frame_types)
        if filter not in self.filters:
            raise ValueError("Invalide filter.  Valid filters are: %s"
                    % self.filters.keys())

        # Construct request
        self.request = {}
        self.request['database']    = self.database
        self.request['programmeID'] = verify_programme_id(self.programmeID,querytype='image')
        self.request['ra']          = glon
        self.request['dec']         = glat
        self.request['sys']         = 'G'
        self.request['filterID']    = self.filters[filter]
        self.request['xsize']       = size
        self.request['ysize']       = size
        self.request['obsType']     = 'object'
        self.request['frameType']   = frametype
        self.request['mfid']        = ''
        self.query_str = url_getimage +"?"+ urlencode(self.request)

        if directory is None:
            directory = self.directory

        # Retrieve page
        page = self.opener.open(url_getimage, urlencode(self.request))
        with aud.get_readable_fileobj(page) as f:
            results = f.read()

        # Parse results for links
        format = formatter.NullFormatter()
        htmlparser = LinksExtractor(format)
        htmlparser.feed(results)
        htmlparser.close()
        links = htmlparser.get_links()

        if verbose:
            print("Found %i targets" % (len(links)))

        # Loop through links and retrieve FITS images
        images = []
        for link in links:

            if not os.path.exists(directory):
                os.mkdir(directory)

            # Get the file
            U = self.opener.open(link.replace("getImage", "getFImage"))
            with aud.get_readable_fileobj(U, cache=True) as f:
                results = f.read()
            S = BytesIO(results)

            try: 
                # try to open as a fits file
                fitsfile = fits.open(S,ignore_missing_end=True)
            except IOError:
                # if that fails, try to open as a gzip'd fits file
                # have to rewind to the start
                S.seek(0)
                G = gzip.GzipFile(fileobj=S)
                fitsfile = fits.open(G,ignore_missing_end=True)

            # Get Multiframe ID from the header
            images.append(fitsfile)

            if save:
                h0 = fitsfile[0].header
                filt = str(h0['FILTER']).strip()
                obj = filt + "_" + str(h0['OBJECT']).strip().replace(":", ".")

                if savename is None:
                    filename = "UKIDSS_%s_G%07.3f%+08.3f_%s.fits" % (filt,glon,glat,obj)
                else:
                    filename = savename

                # Set final directory and file names
                final_file = directory + '/' + filename

                if verbose:
                    print("Saving file %s" % final_file)

                fitsfile.writeto(final_file, clobber=overwrite)

        return images
Ejemplo n.º 47
0
def test_path_objects_get_readable_fileobj():
    fpath = pathlib.Path(TESTLOCAL)
    with get_readable_fileobj(fpath) as f:
        assert f.read().rstrip() == ('This file is used in the test_local_data_* '
                                     'testing functions\nCONTENT')
Ejemplo n.º 48
0
    def read(self, cls, *args, format=None, cache=False, **kwargs):
        """
        Read in data.

        Parameters
        ----------
        cls : class
        *args
            The arguments passed to this method depend on the format.
        format : str or None
        cache : bool
            Whether to cache the results of reading in the data.
        **kwargs
            The arguments passed to this method depend on the format.

        Returns
        -------
        object or None
            The output of the registered reader.
        """
        ctx = None
        try:
            # Expand a tilde-prefixed path if present in args[0]
            args = _expand_user_in_args(args)

            if format is None:
                path = None
                fileobj = None

                if len(args):
                    if isinstance(args[0],
                                  PATH_TYPES) and not os.path.isdir(args[0]):
                        from astropy.utils.data import get_readable_fileobj

                        # path might be a os.PathLike object
                        if isinstance(args[0], os.PathLike):
                            args = (os.fspath(args[0]), ) + args[1:]
                        path = args[0]
                        try:
                            ctx = get_readable_fileobj(args[0],
                                                       encoding='binary',
                                                       cache=cache)
                            fileobj = ctx.__enter__()
                        except OSError:
                            raise
                        except Exception:
                            fileobj = None
                        else:
                            args = [fileobj] + list(args[1:])
                    elif hasattr(args[0], 'read'):
                        path = None
                        fileobj = args[0]

                format = self._get_valid_format('read', cls, path, fileobj,
                                                args, kwargs)

            reader = self.get_reader(format, cls)
            data = reader(*args, **kwargs)

            if not isinstance(data, cls):
                # User has read with a subclass where only the parent class is
                # registered.  This returns the parent class, so try coercing
                # to desired subclass.
                try:
                    data = cls(data)
                except Exception:
                    raise TypeError('could not convert reader output to {} '
                                    'class.'.format(cls.__name__))
        finally:
            if ctx is not None:
                ctx.__exit__(*sys.exc_info())

        return data
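
# Usage sketch (editor's illustration): this registry method is what backs
# calls like ``Table.read``; with ``format=None`` the path is opened via
# get_readable_fileobj and handed to the format-identification machinery
# before the matching reader runs.  The file name below is hypothetical.
from astropy.table import Table

t = Table.read('catalog.ecsv')  # format identified automatically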
Ejemplo n.º 49
0
def find_api_page(obj, version=None, openinbrowser=True, timeout=None):
    """
    Determines the URL of the API page for the specified object, and
    optionally open that page in a web browser.

    .. note::
        You must be connected to the internet for this to function even if
        ``openinbrowser`` is `False`, unless you provide a local version of
        the documentation to ``version`` (e.g., ``file:///path/to/docs``).

    Parameters
    ----------
    obj
        The object to open the docs for or its fully-qualified name
        (as a str).
    version : str
        The doc version - either a version number like '0.1', 'dev' for
        the development/latest docs, or a URL to point to a specific
        location that should be the *base* of the documentation. Defaults
        to the version you are running if it is a release, otherwise to
        'dev' (the latest docs).
    openinbrowser : bool
        If `True`, the `webbrowser` package will be used to open the doc
        page in a new web browser window.
    timeout : number, optional
        The number of seconds to wait before timing-out the query to
        the astropy documentation.  If not given, the default python
        stdlib timeout will be used.

    Returns
    -------
    url : str
        The loaded URL

    Raises
    ------
    ValueError
        If the documentation can't be found

    """
    import webbrowser
    from zlib import decompress
    from astropy.utils.data import get_readable_fileobj

    if (not isinstance(obj, str) and hasattr(obj, '__module__')
            and hasattr(obj, '__name__')):
        obj = obj.__module__ + '.' + obj.__name__
    elif inspect.ismodule(obj):
        obj = obj.__name__

    if version is None:
        from astropy import version

        if version.release:
            version = 'v' + version.version
        else:
            version = 'dev'

    if '://' in version:
        if version.endswith('index.html'):
            baseurl = version[:-10]
        elif version.endswith('/'):
            baseurl = version
        else:
            baseurl = version + '/'
    elif version == 'dev' or version == 'latest':
        baseurl = 'http://devdocs.astropy.org/'
    else:
        baseurl = f'https://docs.astropy.org/en/{version}/'

    # Custom request headers; see
    # https://github.com/astropy/astropy/issues/8990
    url = baseurl + 'objects.inv'
    headers = {'User-Agent': f'Astropy/{version}'}
    with get_readable_fileobj(url,
                              encoding='binary',
                              remote_timeout=timeout,
                              http_headers=headers) as uf:
        oiread = uf.read()

        # need to first read/remove the first four lines, which have info before
        # the compressed section with the actual object inventory
        idx = -1
        headerlines = []
        for _ in range(4):
            oldidx = idx
            idx = oiread.index(b'\n', oldidx + 1)
            headerlines.append(oiread[(oldidx + 1):idx].decode('utf-8'))

        # intersphinx version line, project name, and project version
        ivers, proj, vers, compr = headerlines
        if 'The remainder of this file is compressed using zlib' not in compr:
            raise ValueError('The file downloaded from {} does not seem to '
                             'be the usual Sphinx objects.inv format.  Maybe '
                             'it has changed?'.format(baseurl + 'objects.inv'))

        compressed = oiread[(idx + 1):]

    decompressed = decompress(compressed).decode('utf-8')

    resurl = None

    for line in decompressed.strip().splitlines():
        ls = line.split()
        name = ls[0]
        loc = ls[3]
        if loc.endswith('$'):
            loc = loc[:-1] + name

        if name == obj:
            resurl = baseurl + loc
            break

    if resurl is None:
        raise ValueError(f'Could not find the docs for the object {obj}')
    elif openinbrowser:
        webbrowser.open(resurl)

    return resurl
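
# Usage sketch (editor's illustration): resolve the documentation URL for
# an object without opening a browser window (requires internet access):
url = find_api_page('astropy.table.Table', openinbrowser=False)
print(url)  # e.g. https://docs.astropy.org/en/<version>/api/astropy.table.Table.html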
Ejemplo n.º 50
0
    def get_images_radius(self, ra, dec, radius, filter='all',
            frametype='stack', directory=None, n_concurrent=1, save=True,
            verbose=True, overwrite=False):
        """
        Get all images within some radius of a specified RA/Dec

        Parameters
        ----------
        ra  : float
        dec : float
            ra/dec center to search around
        radius : float
            Radius of circle to search within
        filter : ['all','J','H','K']
            The color filter to download.
        frametype : ['stack', 'normal', 'interleave', 'deep%stack',
            'confidence', 'difference', 'leavstack', 'all']
            The type of image
        directory : None or string
            Directory to download files into.  Defaults to self.directory
        verbose : bool
            Print out extra error messages?
        save : bool
            Save FITS file?
        overwrite : bool
            Overwrite if file already exists?
        n_concurrent : int
            Number of concurrent download threads to start

        Examples
        --------
        >>> R = UKIDSSQuery()
        >>> fitsfile = R.get_image_gal(10.5,0.0)
        """

        # Check for validity of requested frame_type
        if frametype not in frame_types:
            raise ValueError("Invalide frame type.  Valid frame types are: %s"
                    % frame_types)
        if filter not in self.filters:
            raise ValueError("Invalide filter.  Valid filters are: %s"
                    % self.filters.keys())

        if directory is None:
            directory = self.directory

        # Construct self.request
        self.request = {}

        self.request['database']    = self.database
        self.request['programmeID'] = verify_programme_id(self.programmeID,querytype='image')
        self.request['userSelect']  = 'default'

        self.request['obsType']     = 'object'
        self.request['frameType']   = frametype
        self.request['filterID']    = self.filters[filter]

        self.request['minRA']       = str(round(ra - radius / cos(radians(dec)),2))
        self.request['maxRA']       = str(round(ra + radius / cos(radians(dec)),2))
        self.request['formatRA']    = 'degrees'

        self.request['minDec']       = str(dec - radius)
        self.request['maxDec']       = str(dec + radius)
        self.request['formatDec']    = 'degrees'

        self.request['startDay'] = 0
        self.request['startMonth'] = 0
        self.request['startYear'] = 0

        self.request['endDay'] = 0
        self.request['endMonth'] = 0
        self.request['endYear'] = 0

        self.request['dep'] = 0

        self.request['mfid'] = ''
        self.request['lmfid'] = ''
        self.request['fsid'] = ''

        self.request['rows'] = 1000
        self.query_str = url_getimages + "?" + urlencode(self.request)

        # Retrieve page
        page = self.opener.open(url_getimages, urlencode(self.request))
        with aud.get_readable_fileobj(page) as f:
            results = f.read()

        # Parse results for links
        format = formatter.NullFormatter()
        htmlparser = LinksExtractor(format)
        htmlparser.feed(results)
        htmlparser.close()
        links = htmlparser.get_links()

        # Loop through links and retrieve FITS images
        for link in links:

            if not os.path.exists(directory):
                os.mkdir(directory)
            if not os.path.exists(directory + '/' + frametype):
                os.mkdir(directory + '/' + frametype)

            if ('fits_download' in link and '_cat.fits' not in link and
                    '_two.fit' not in link):

                # Get image filename
                basename = os.path.basename(link.split("&")[0])
                temp_file = directory + '/' + frametype + '/' + basename

                if verbose:
                    print("Downloading %s..." % basename)
                    p = mp.Process(
                        target=progressbar.retrieve,
                        args=(link, temp_file, self.opener))
                else:
                    p = mp.Process(
                        target=urllib.urlretrieve, args=(link, temp_file))
                p.start()

                # throttle: wait until a download slot frees up before
                # starting the next process
                while len(mp.active_children()) >= n_concurrent:
                    time.sleep(0.1)
Ejemplo n.º 51
0
def parse_cs(ivoid, cap_index=1):
    """Return test query pars as dict for given IVO ID and capability index."""
    if isinstance(ivoid, bytes):  # ASTROPY_LT_4_1
        ivoid = ivoid.decode('ascii')

    # Production server.
    url = ("https://vao.stsci.edu/regtap/tapservice.aspx/sync?lang=adql&"
           "query=select%20detail_xpath%2Cdetail_value%20from%20"
           "rr.res_detail%20where%20"
           "ivoid%3D%27{0}%27%20and%20cap_index={1}%20and%20"
           "detail_xpath%20in%20%28%27/capability/testQuery/ra%27%2C"
           "%27/capability/testQuery/dec%27%2C%27/capability/testQuery/sr%27"
           "%29".format(ivoid, cap_index))

    urls_failed = False
    default_sr = 0.1

    try:
        with get_readable_fileobj(url, encoding='binary',
                                  show_progress=False) as fd:
            t_query = Table.read(fd, format='votable')
    except Exception as e:  # pragma: no cover
        urls_failed = True
        urls_errmsg = '{0} raised {1}, using default'.format(
            url, str(e))

    if not urls_failed:
        try:
            xpath = t_query['detail_xpath']
            if ASTROPY_LT_4_1:
                ra = float(
                    t_query[xpath == b'/capability/testQuery/ra']['detail_value'])
                dec = float(
                    t_query[xpath == b'/capability/testQuery/dec']['detail_value'])
                sr = float(
                    t_query[xpath == b'/capability/testQuery/sr']['detail_value'])
            else:
                ra = float(
                    t_query[xpath == '/capability/testQuery/ra']['detail_value'])
                dec = float(
                    t_query[xpath == '/capability/testQuery/dec']['detail_value'])
                sr = float(
                    t_query[xpath == '/capability/testQuery/sr']['detail_value'])

            # Handle big SR returning too big a table for some queries, causing
            # tests to fail due to timeout.
            if sr > default_sr:
                warnings.warn(
                    'SR={0} is too large, using SR={1} for {2},{3}'.format(
                        sr, default_sr, ivoid, cap_index), AstropyUserWarning)
                sr = default_sr

            d = OrderedDict({'RA': ra, 'DEC': dec, 'SR': sr})

        except Exception as e:  # pragma: no cover
            urls_failed = True
            urls_errmsg = ('Failed to retrieve test query parameters for '
                           '{0},{1}, using default: {2}'.format(ivoid, cap_index, str(e)))

    # If no test query found, use default
    if urls_failed:  # pragma: no cover
        d = OrderedDict({'RA': 0, 'DEC': 0, 'SR': default_sr})
        warnings.warn(urls_errmsg, AstropyUserWarning)

    return d
Ejemplo n.º 52
0
def _download_file(url, cache=None, verbose=False):
    if cache is None:
        cache = bool_env('GWPY_CACHE', False)
    return get_readable_fileobj(url, cache=cache, show_progress=verbose)
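
# Usage sketch (editor's illustration): the helper just returns the context
# manager from get_readable_fileobj, so it is consumed with ``with``; the
# URL below is hypothetical.
with _download_file('https://example.org/data.txt', cache=True) as remote:
    text = remote.read()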
Ejemplo n.º 53
0
def get_readable_fileobj(*args, **kwargs):
    """
    Overload astropy's get_readable_fileobj so that we can safely monkeypatch
    it in astroquery without affecting astropy core functionality
    """
    return aud.get_readable_fileobj(*args, **kwargs)
Ejemplo n.º 54
0
def get_besancon_model_file(filename,
                            verbose=True,
                            save=True,
                            savename=None,
                            overwrite=True):
    """
    Download a Besancon model from the website

    Parameters
    ----------
    filename : string
        The besancon filename, with format ##########.######.resu
    verbose : bool
        Print details about the download process
    save : bool
        Save the table after acquiring it?
    savename : None or string
        If not specified, defaults to the .resu table name
    overwrite : bool
        Overwrite the file if it exists?  Defaults to True because the .resu
        tables should have unique names by default, so there's little risk of
        accidentally overwriting important information
    """

    url = url_download + filename

    elapsed_time = 0
    t0 = time.time()

    sys.stdout.write("\n")
    while True:
        sys.stdout.write(u"\r")
        try:
            U = urllib2.urlopen(url, timeout=5)
            with aud.get_readable_fileobj(U, cache=True) as f:
                results = f.read()
            break
        except (urllib2.URLError, socket.timeout):
            sys.stdout.write(
                u"Waiting 30s for model to finish (elapsed wait time %is, total %i)\r"
                % (elapsed_time, time.time() - t0))
            time.sleep(30)
            elapsed_time += 30
            continue

    if save:
        if savename is None:
            savename = filename
        if not overwrite and os.path.exists(savename):
            raise IOError("File %s already exists." % savename)
        with open(savename, 'w') as outf:
            outf.write(results)

    return parse_besancon_model_string(results)
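
# Usage sketch (editor's illustration): poll the server until the model is
# ready, saving the table under its own name; the .resu filename below is
# hypothetical.
model = get_besancon_model_file('1407161234.567890.resu', verbose=False)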
Ejemplo n.º 55
0
    def from_registry(cls, registry_url, timeout=60, **kwargs):
        """
        Create a database of VO services from VO registry URL.

        This is described in detail in :ref:`vo-sec-validator-build-db`,
        except for the ``validate_xxx`` keys that are added by the
        validator itself.

        Parameters
        ----------
        registry_url : str
            URL of VO registry that returns a VO Table.
            For example, see
            ``astroquery.vo_conesearch.validator.conf.cs_mstr_list``.
            Pedantic is automatically set to `False` for parsing.

        timeout : number
            Temporarily set ``astropy.utils.data.conf.remote_timeout``
            to this value to avoid time out error while reading the
            entire registry.

        kwargs : dict
            Keywords accepted by
            :func:`~astropy.utils.data.get_readable_fileobj`.

        Returns
        -------
        db : `VOSDatabase`
            Database from given registry.

        Raises
        ------
        VOSError
            Invalid VO registry.

        """
        # Download registry as VO table
        with data_conf.set_temp('remote_timeout', timeout):
            with get_readable_fileobj(registry_url, **kwargs) as fd:
                tab_all = parse_single_table(fd, pedantic=False)

        # Registry must have these fields
        compulsory_fields = ['res_title', 'access_url']
        cat_fields = tab_all.array.dtype.names
        for field in compulsory_fields:
            if field not in cat_fields:  # pragma: no cover
                raise VOSError('"{0}" is missing from registry.'.format(field))

        title_counter = defaultdict(int)
        title_fmt = '{0} {1}'
        db = cls.create_empty()

        # Each row in the table becomes a catalog
        for arr in tab_all.array.data:
            cur_cat = {}
            cur_key = ''

            # Process each field and build the catalog.
            # Catalog is completely built before being thrown out
            # because codes need less changes should we decide to
            # allow duplicate URLs in the future.
            for field in cat_fields:

                # For primary key, a number needs to be appended to the title
                # because registry can have multiple entries with the same
                # title but different URLs.
                if field == 'res_title':
                    cur_title = arr['res_title']
                    title_counter[cur_title] += 1  # Starts with 1

                    if isinstance(cur_title, bytes):  # pragma: py3
                        cur_key = title_fmt.format(cur_title.decode('utf-8'),
                                                   title_counter[cur_title])
                    else:  # pragma: py2
                        cur_key = title_fmt.format(cur_title,
                                                   title_counter[cur_title])

                # Special handling of title and access URL,
                # otherwise no change.
                if field == 'access_url':
                    s = unescape_all(arr['access_url'])
                    if isinstance(s, six.binary_type):
                        s = s.decode('utf-8')
                    cur_cat['url'] = s
                elif field == 'res_title':
                    cur_cat['title'] = arr[field]
                else:
                    cur_cat[field] = arr[field]

            # New field to track duplicate access URLs.
            cur_cat['duplicatesIgnored'] = 0

            # Add catalog to database, unless duplicate access URL exists.
            # In that case, the entry is thrown out and the associated
            # counter is updated.
            dup_keys = db._url_keys[cur_cat['url']]
            if len(dup_keys) < 1:
                db.add_catalog(cur_key,
                               VOSCatalog(cur_cat),
                               allow_duplicate_url=False)
            else:
                db._catalogs[dup_keys[0]]['duplicatesIgnored'] += 1
                warnings.warn(
                    '{0} is thrown out because it has same access URL as '
                    '{1}.'.format(cur_key, dup_keys[0]), AstropyUserWarning)

        return db
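
# Usage sketch (editor's illustration), using the registry URL referenced
# in the docstring above; extra keyword arguments are forwarded to
# get_readable_fileobj:
from astroquery.vo_conesearch.validator import conf

db = VOSDatabase.from_registry(conf.cs_mstr_list, timeout=120,
                               encoding='binary', show_progress=False)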
Ejemplo n.º 56
0
def read(table, guess=None, **kwargs):
    """
    Read the input ``table`` and return the table.  Most of
    the default behavior for various parameters is determined by the Reader
    class.

    See also:

    - http://docs.astropy.org/en/stable/io/ascii/
    - http://docs.astropy.org/en/stable/io/ascii/read.html

    Parameters
    ----------
    table : str, file-like, list, pathlib.Path object
        Input table as a file name, file-like object, list of strings,
        single newline-separated string, or pathlib.Path object.
    guess : bool
        Try to guess the table format. Defaults to None.
    format : str, `~astropy.io.ascii.BaseReader`
        Input table format
    Inputter : `~astropy.io.ascii.BaseInputter`
        Inputter class
    Outputter : `~astropy.io.ascii.BaseOutputter`
        Outputter class
    delimiter : str
        Column delimiter string
    comment : str
        Regular expression defining a comment line in table
    quotechar : str
        One-character string to quote fields containing special characters
    header_start : int
        Line index for the header line not counting comment or blank lines.
        A line with only whitespace is considered blank.
    data_start : int
        Line index for the start of data not counting comment or blank lines.
        A line with only whitespace is considered blank.
    data_end : int
        Line index for the end of data not counting comment or blank lines.
        This value can be negative to count from the end.
    converters : dict
        Dictionary of converters
    data_Splitter : `~astropy.io.ascii.BaseSplitter`
        Splitter class to split data columns
    header_Splitter : `~astropy.io.ascii.BaseSplitter`
        Splitter class to split header columns
    names : list
        List of names corresponding to each data column
    include_names : list
        List of names to include in output.
    exclude_names : list
        List of names to exclude from output (applied after ``include_names``)
    fill_values : dict
        specification of fill values for bad or missing table values
    fill_include_names : list
        List of names to include in fill_values.
    fill_exclude_names : list
        List of names to exclude from fill_values (applied after ``fill_include_names``)
    fast_reader : bool or dict
        Whether to use the C engine, can also be a dict with options which
        defaults to `False`; parameters for options dict:

        use_fast_converter: bool
            enable faster but slightly imprecise floating point conversion method
        parallel: bool or int
            multiprocessing conversion using ``cpu_count()`` or ``'number'`` processes
        exponent_style: str
            One-character string defining the exponent or ``'Fortran'`` to auto-detect
            Fortran-style scientific notation like ``'3.14159D+00'`` (``'E'``, ``'D'``, ``'Q'``),
            all case-insensitive; default ``'E'``; all others imply ``use_fast_converter``
        chunk_size : int
            If supplied with a value > 0 then read the table in chunks of
            approximately ``chunk_size`` bytes. Default is reading table in one pass.
        chunk_generator : bool
            If True and ``chunk_size > 0`` then return an iterator that returns a
            table for each chunk.  The default is to return a single stacked table
            for all the chunks.

    Reader : `~astropy.io.ascii.BaseReader`
        Reader class (DEPRECATED)
    encoding : str
        Allow to specify encoding to read the file (default= ``None``).

    Returns
    -------
    dat : `~astropy.table.Table` OR <generator>
        Output table

    """
    del _read_trace[:]

    # Downstream readers might munge kwargs
    kwargs = copy.deepcopy(kwargs)

    # Convert 'fast_reader' key in kwargs into a dict if not already and make sure
    # 'enable' key is available.
    fast_reader = _get_fast_reader_dict(kwargs)
    kwargs['fast_reader'] = fast_reader

    if fast_reader['enable'] and fast_reader.get('chunk_size'):
        return _read_in_chunks(table, **kwargs)

    if 'fill_values' not in kwargs:
        kwargs['fill_values'] = [('', '0')]

    # If an Outputter is supplied in kwargs that will take precedence.
    if 'Outputter' in kwargs:  # user specified Outputter, not supported for fast reading
        fast_reader['enable'] = False

    format = kwargs.get('format')
    # Dictionary arguments are passed by reference per default and thus need
    # special protection:
    new_kwargs = copy.deepcopy(kwargs)
    kwargs['fast_reader'] = copy.deepcopy(fast_reader)

    # Get the Reader class based on possible format and Reader kwarg inputs.
    Reader = _get_format_class(format, kwargs.get('Reader'), 'Reader')
    if Reader is not None:
        new_kwargs['Reader'] = Reader
        format = Reader._format_name

    # Remove format keyword if there, this is only allowed in read() not get_reader()
    if 'format' in new_kwargs:
        del new_kwargs['format']

    if guess is None:
        guess = _GUESS

    if guess:
        # If ``table`` is probably an HTML file then tell guess function to add
        # the HTML reader at the top of the guess list.  This is in response to
        # issue #3691 (and others) where libxml can segfault on a long non-HTML
        # file, thus prompting removal of the HTML reader from the default
        # guess list.
        new_kwargs['guess_html'] = _probably_html(table)

        # If `table` is a filename or readable file object then read in the
        # file now.  This prevents problems in Python 3 with the file object
        # getting closed or left at the file end.  See #3132, #3013, #3109,
        # #2001.  If a `readme` arg was passed that implies CDS format, in
        # which case the original `table` as the data filename must be left
        # intact.
        if 'readme' not in new_kwargs:
            encoding = kwargs.get('encoding')
            try:
                with get_readable_fileobj(table, encoding=encoding) as fileobj:
                    table = fileobj.read()
            except ValueError:  # unreadable or invalid binary file
                raise
            except Exception:
                pass
            else:
                # Ensure that `table` has at least one \r or \n in it
                # so that the core.BaseInputter test of
                # ('\n' not in table and '\r' not in table)
                # will fail and so `table` cannot be interpreted there
                # as a filename.  See #4160.
                if not re.search(r'[\r\n]', table):
                    table = table + os.linesep

                # If the table got successfully read then look at the content
                # to see if is probably HTML, but only if it wasn't already
                # identified as HTML based on the filename.
                if not new_kwargs['guess_html']:
                    new_kwargs['guess_html'] = _probably_html(table)

        # Get the table from guess in ``dat``.  If ``dat`` comes back as None
        # then there was just one set of kwargs in the guess list so fall
        # through below to the non-guess way so that any problems result in a
        # more useful traceback.
        dat = _guess(table, new_kwargs, format, fast_reader)
        if dat is None:
            guess = False

    if not guess:
        if format is None:
            reader = get_reader(**new_kwargs)
            format = reader._format_name

        # Try the fast reader version of `format` first if applicable.  Note that
        # if user specified a fast format (e.g. format='fast_basic') this test
        # will fail and the else-clause below will be used.
        fast_format = 'fast_{0}'.format(format)
        if fast_reader['enable'] and fast_format in core.FAST_CLASSES:
            fast_kwargs = copy.deepcopy(new_kwargs)
            fast_kwargs['Reader'] = core.FAST_CLASSES[fast_format]
            fast_reader_rdr = get_reader(**fast_kwargs)
            try:
                dat = fast_reader_rdr.read(table)
                _read_trace.append({
                    'kwargs': copy.deepcopy(fast_kwargs),
                    'Reader': fast_reader_rdr.__class__,
                    'status': 'Success with fast reader (no guessing)',
                })
            except (core.ParameterError, cparser.CParserError,
                    UnicodeEncodeError) as err:
                # special testing value to avoid falling back on the slow reader
                if fast_reader['enable'] == 'force':
                    raise core.InconsistentTableError(
                        'fast reader {} exception: {}'.format(
                            fast_reader_rdr.__class__, err))
                # If the fast reader doesn't work, try the slow version
                reader = get_reader(**new_kwargs)
                dat = reader.read(table)
                _read_trace.append({
                    'kwargs': copy.deepcopy(new_kwargs),
                    'Reader': reader.__class__,
                    'status': ('Success with slow reader after failing'
                               ' with fast (no guessing)'),
                })
        else:
            reader = get_reader(**new_kwargs)
            dat = reader.read(table)
            _read_trace.append({
                'kwargs': copy.deepcopy(new_kwargs),
                'Reader': reader.__class__,
                'status': 'Success with specified Reader class (no guessing)',
            })

    return dat
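
# Usage sketch (editor's illustration): ``read`` accepts a newline-separated
# string directly and guesses the format unless told otherwise:
from astropy.io import ascii

dat = ascii.read("a b c\n1 2 3\n4 5 6")      # format guessed (basic)
csv = ascii.read("a,b,c\n1,2,3", format='csv',
                 fast_reader=False)          # skip the C engine explicitly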