Example #1
    def fov_shape(self):
        """Choosing the FoV shape: box (1) or circle (2)."""

        valid_fov_shape = False
        while not valid_fov_shape:
            try:
                print('--> FoV shape: type (1) for a box and (2) for a circle')
                shape = input(' : ')

                # load the footprint template from GitHub --> box or circle
                if shape == 1:
                    url_id = "https://raw.githubusercontent.com/ggreco77/GWsky/master/footprint_box"
                    template_fov_footprint = download_file(url_id, cache=True, timeout=300)
                    self._fov_box()
                    self._make_fov_footprint(template_fov_footprint)
                elif shape == 2:
                    url_id = "https://raw.githubusercontent.com/ggreco77/GWsky/master/footprint_circle"
                    template_fov_footprint = download_file(url_id, cache=True, timeout=300)
                    self._fov_circle()
                    self._make_fov_footprint(template_fov_footprint)
                else:
                    continue
            except SyntaxError as io_error:
                print(io_error)
            except NameError as value_error:
                print(value_error)
            else:
                valid_fov_shape = True
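The SyntaxError/NameError handlers above only make sense with Python 2's input(), which evaluates the typed text to an integer. A minimal Python 3-safe sketch of the same prompt loop, assuming the same _fov_box/_fov_circle helpers and footprint URLs, might look like:

        valid_fov_shape = False
        while not valid_fov_shape:
            shape = input('--> FoV shape: type (1) for a box and (2) for a circle : ').strip()
            if shape == '1':
                url_id = "https://raw.githubusercontent.com/ggreco77/GWsky/master/footprint_box"
                template_fov_footprint = download_file(url_id, cache=True, timeout=300)
                self._fov_box()
                self._make_fov_footprint(template_fov_footprint)
            elif shape == '2':
                url_id = "https://raw.githubusercontent.com/ggreco77/GWsky/master/footprint_circle"
                template_fov_footprint = download_file(url_id, cache=True, timeout=300)
                self._fov_circle()
                self._make_fov_footprint(template_fov_footprint)
            else:
                continue  # unrecognised answer: ask again
            valid_fov_shape = True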
Example #2
File: test_data.py  Project: Cadair/astropy
def test_download_cache():

    from astropy.utils.data import download_file, clear_download_cache

    download_dir = _get_download_cache_locs()[0]

    # Download the test URL and make sure it exists, then clear just that
    # URL and make sure it got deleted.
    fnout = download_file(TESTURL, cache=True)
    assert os.path.isdir(download_dir)
    assert os.path.isfile(fnout)
    clear_download_cache(TESTURL)
    assert not os.path.exists(fnout)

    # Test issues raised in #4427 with clear_download_cache() without a URL,
    # followed by subsequent download.
    fnout = download_file(TESTURL, cache=True)
    assert os.path.isfile(fnout)
    clear_download_cache()
    assert not os.path.exists(fnout)
    assert not os.path.exists(download_dir)
    fnout = download_file(TESTURL, cache=True)
    assert os.path.isfile(fnout)

    # Clearing download cache succeeds even if the URL does not exist.
    clear_download_cache('http://this_was_never_downloaded_before.com')

    # Make sure lockdir was released
    lockdir = os.path.join(download_dir, 'lock')
    assert not os.path.isdir(lockdir), 'Cache dir lock was not released!'
Example #3
File: test_data.py  Project: Cadair/astropy
def test_is_url_in_cache():
    from astropy.utils.data import download_file, is_url_in_cache

    assert not is_url_in_cache('http://astropy.org/nonexistentfile')

    download_file(TESTURL, cache=True, show_progress=False)
    assert is_url_in_cache(TESTURL)
Example #4
def setup_module(module):
    try:
        db_path = download_file("http://github.com/BDNYC/BDNYCdb/raw/master/bdnyc_database.db")
    except:
        db_path = download_file("http://github.com/BDNYC/BDNYCdb/raw/master/BDNYCv1.0.db")
    module.bdnyc_db = astrodb.Database(db_path)
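    # NOTE: `filename` below is defined at module level in the original test
    # file; it is the path used to create the empty test database.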
    astrodb.create_database(filename)
    module.empty_db = astrodb.Database(filename)
Example #5
File: test_data.py  Project: Cadair/astropy
def test_invalid_location_download_noconnect():
    """
    checks that download_file gives an OSError if the socket is blocked
    """
    from astropy.utils.data import download_file

    # This should invoke socket's monkeypatched failure
    with pytest.raises(OSError):
        download_file('http://astropy.org/nonexistentfile')
Example #6
File: test_data.py  Project: Cadair/astropy
def test_invalid_location_download():
    """
    checks that download_file gives a URLError and not an AttributeError,
    as its code pathway involves some fiddling with the exception.
    """
    from astropy.utils.data import download_file

    with pytest.raises(urllib.error.URLError):
        download_file('http://www.astropy.org/nonexistentfile')
Example #7
def get_path(filename, location='local', cache=True, show_progress=False):
    """
    Get path (location on your disk) for a given file.

    Parameters
    ----------
    filename : str
        File name in the local or remote data folder.
    location : {'local', 'remote', 'photutils-datasets'}
        File location.  ``'local'`` means bundled with ``photutils``.
        ``'remote'`` means the astropy data server (or the
        photutils-datasets repo as a backup) or the Astropy cache on
        your machine. ``'photutils-datasets'`` means the
        photutils-datasets repo or the Astropy cache on your machine.
    cache : bool, optional
        Whether to cache the contents of remote URLs.  Default is
        `True`.
    show_progress : bool, optional
        Whether to display a progress bar during the download (default
        is `False`).

    Returns
    -------
    path : str
        Path (location on your disk) of the file.

    Examples
    --------
    >>> from astropy.io import fits
    >>> from photutils import datasets
    >>> hdulist = fits.open(datasets.get_path('fermi_counts.fits.gz'))
    """

    datasets_url = ('https://github.com/astropy/photutils-datasets/raw/'
                    'master/data/{0}'.format(filename))

    if location == 'local':
        path = get_pkg_data_filename('data/' + filename)
    elif location == 'remote':    # pragma: no cover
        try:
            url = 'https://data.astropy.org/photometry/{0}'.format(filename)
            path = download_file(url, cache=cache,
                                 show_progress=show_progress)
        except (URLError, HTTPError):   # timeout or not found
            path = download_file(datasets_url, cache=cache,
                                 show_progress=show_progress)
    elif location == 'photutils-datasets':    # pragma: no cover
        path = download_file(datasets_url, cache=cache,
                             show_progress=show_progress)
    else:
        raise ValueError('Invalid location: {0}'.format(location))

    return path
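A hedged usage sketch for the function above (the file name comes from the docstring example; nothing else is assumed):

# Resolve a remote dataset path through the Astropy download cache; later
# calls with cache=True reuse the cached copy instead of re-downloading.
path = get_path('fermi_counts.fits.gz', location='remote', cache=True)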
Example #8
def download_radex(redownload=True,
                   url='http://www.sron.rug.nl/~vdtak/radex/radex_public.tar.gz'):

    filename = 'radex_public.tar.gz'

    if os.path.isfile(filename) and not redownload:
        return filename

    print("Downloading RADEX")

    try:
        filename = download_file(url, cache=True)
    except:
        import requests
        r = requests.get(url,
                         #data={'filename':filename},
                         stream=True,
                         verify=False)
        with open(filename,'wb') as f:
            for chunk in r.iter_content(1024):
                f.write(chunk)

    print("Download succeeded, or at least didn't obviously fail.")

    return filename
Example #9
def get_path(filename, location='local'):
    """Get path (location on your disk) for a given file.

    Parameters
    ----------
    filename : str
        File name in the local or remote data folder
    location : {'local', 'remote'}
        File location.
        ``'local'`` means bundled with ``photutils``.
        ``'remote'`` means a server or the Astropy cache on your machine.

    Returns
    -------
    path : str
        Path (location on your disk) of the file.

    Examples
    --------
    >>> from astropy.io import fits
    >>> from photutils import datasets
    >>> hdu_list = fits.open(datasets.get_path('fermi_counts.fits.gz'))
    """
    if location == 'local':
        path = get_pkg_data_filename('data/' + filename)
    elif location == 'remote':    # pragma: no cover
        url = 'https://github.com/astropy/photutils-datasets/blob/master/data/{0}?raw=true'.format(filename)
        path = download_file(url, cache=True)
    else:
        raise ValueError('Invalid location: {0}'.format(location))

    return path
Example #10
def get_path(filename, location='local'):
    """Get path (location on your disk) for a given file.

    Parameters
    ----------
    filename : str
        File name in the local or remote data folder
    location : {'local', 'remote'}
        File location.
        ``'local'`` means bundled with ``gammapy``.
        ``'remote'`` means in the ``gammapy-extra`` repo in the ``datasets`` folder.

    Returns
    -------
    path : str
        Path (location on your disk) of the file.

    Examples
    --------
    >>> from gammapy import datasets
    >>> datasets.get_path('fermi/fermi_counts.fits.gz')
    '/Users/deil/code/gammapy/gammapy/datasets/data/fermi/fermi_counts.fits.gz'
    >>> datasets.get_path('vela_region/counts_vela.fits', location='remote')
    '/Users/deil/.astropy/cache/download/ce2456b0c9d1476bfd342eb4148144dd'
    """
    if location == 'local':
        path = get_pkg_data_filename('data/' + filename)
    elif location == 'remote':
        url = ('https://github.com/gammapy/gammapy-extra/blob/master/datasets/'
               '{0}?raw=true'.format(filename))
        path = download_file(url, cache=True)
    else:
        raise ValueError('Invalid location: {0}'.format(location))

    return path
Example #11
def _get_IERS_A_table(warn_update=14*u.day):
    """
    Grab the locally cached copy of the IERS Bulletin A table. Check to see
    if it's up to date, and warn the user if it is not.

    This will fail and raise OSError if the file is not in the cache.
    """
    if IERS_A_in_cache():
        path = download_file(iers.IERS_A_URL, cache=True, show_progress=True)
        table = iers.IERS_A.open(path)
        # Use polar motion flag to identify last observation before predictions
        index_of_last_observation = ''.join(table['PolPMFlag_A']).index('IP')
        time_of_last_observation = Time(table['MJD'][index_of_last_observation],
                                        format='mjd')
        time_since_last_update = Time.now() - time_of_last_observation

        # If the IERS bulletin is more than `warn_update` days old, warn user
        if warn_update < time_since_last_update:
            warnmsg = ("Your version of the IERS Bulletin A is {:.1f} days "
                       "old. ".format(time_since_last_update.to(u.day).value) +
                       IERS_A_WARNING)
            warnings.warn(warnmsg, OldEarthOrientationDataWarning)
        return table
    else:
        raise OSError("No IERS A table has been downloaded.")
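A hedged usage sketch, assuming the same `u` (astropy.units) import as the surrounding module:

# Warn if the cached IERS Bulletin A table is more than 7 days old; raises
# OSError if no table has been downloaded yet.
table = _get_IERS_A_table(warn_update=7 * u.day)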
Example #12
def test_subimage_integ_header():
    # getting a dummy .fits file
    if not os.path.exists('foo.fits'):
        from astropy.utils.data import download_file
        tmp_path = download_file('https://db.tt/oleS9xD6')
        try:
            os.rename(tmp_path, 'foo.fits')
        except OSError:
            # os.rename doesn't like cross-device links
            import shutil
            shutil.move(tmp_path, 'foo.fits')

    cube = fits.getdata('foo.fits')
    header = fits.getheader('foo.fits')

    xcen, ycen = 4.5, 4.5
    xwidth, ywidth = 2.5, 2.5

    # saving results from subimage_integ:
    cutData, cutHead = cubes.subimage_integ(cube, xcen, xwidth, ycen, ywidth,
                                            vrange=(0,9), zunits='pixels',
                                            units='pixels', header=header)

    assert cutHead['CRPIX1'] == 7.0
    assert cutHead['CRPIX2'] == -2.0

    w1 = wcs.WCS(header)
    w2 = wcs.WCS(cutHead)

    # pixel 2,2 in the original image should be pixel 0,0 in the new one
    x1,y1,z1 = w1.wcs_pix2world(2,2,0,0)
    x2,y2 = w2.wcs_pix2world(0,0,0)

    np.testing.assert_almost_equal(x1,x2)
    np.testing.assert_almost_equal(y1,y2)
Example #13
    def strain(self, ifo, duration=32, sample_rate=4096):
        """ Return strain around the event

        Currently this will return the strain around the event in the smallest
        format available. Selection of other data is not yet available.

        Parameters
        ----------
        ifo: str
            The name of the observatory you want strain for. Ex. H1, L1, V1

        Returns
        -------
        strain: pycbc.types.TimeSeries
            Strain around the event.
        """
        from astropy.utils.data import download_file
        from pycbc.frame import read_frame

        # Information is currently wrong on GWOSC!
        # channels = self.data['files']['FrameChannels']
        # for channel in channels:
        #    if ifo in channel:
        #        break

        length = "{}sec".format(duration)
        if sample_rate == 4096:
            sampling = "4KHz"
        elif sample_rate == 16384:
            sampling = "16KHz"

        channel = "{}:GWOSC-{}_R1_STRAIN".format(ifo, sampling.upper())
        url = self.data['files'][ifo][length][sampling]['GWF']
        filename = download_file(url, cache=True)
        return read_frame(str(filename), str(channel))
Example #14
def _get_kernel(value):
    """
    Try importing jplephem, download/retrieve from cache the Satellite Planet
    Kernel corresponding to the given ephemeris.
    """
    if value is None or value.lower() == 'builtin':
        return None

    if value.lower() == 'jpl':
        value = DEFAULT_JPL_EPHEMERIS

    if value.lower() in ('de430', 'de432s'):
        value = ('http://naif.jpl.nasa.gov/pub/naif/generic_kernels'
                 '/spk/planets/{:s}.bsp'.format(value.lower()))
    else:
        try:
            urlparse(value)
        except Exception:
            raise ValueError('{} was not one of the standard strings and '
                             'could not be parsed as a URL'.format(value))

    try:
        from jplephem.spk import SPK
    except ImportError:
        raise ImportError("Solar system JPL ephemeris calculations require "
                          "the jplephem package "
                          "(https://pypi.python.org/pypi/jplephem)")

    return SPK.open(download_file(value, cache=True))
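A hypothetical call into the helper above ('de430' is one of the standard kernel names it recognises):

# Downloads the DE430 SPK kernel on first use (cached afterwards) and opens it
# with jplephem; passing 'builtin' or None would return None instead.
kernel = _get_kernel('de430')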
Example #15
def get_test_filename(filename, cache=True, timeout=30):
    '''Get a test filename from the package directory
    '''
    # this stopped working when we went to testing files from the web
    #url = os.path.join(ROOT_URL, filename)
    url = "{}/{}".format(ROOT_URL, filename)
    return download_file(url, cache=cache, timeout=timeout)
Example #16
def get_lsm(field_of_view):
    catalog_file = download_file(
        'http://www.astrop.physics.usyd.edu.au/sumsscat/sumsscat.Mar-11-2008.gz',
        cache=True)
    full_cat = sumss_file_to_dataframe(catalog_file)
    lsm_cat = lsm_extract(field_of_view, full_cat)
    return lsm_cat
Example #17
def download_sample_data(progress=True):
    """
    Download the sample data.

    Parameters
    ----------
    progress: bool
        Show a progress bar during download

    Returns
    -------
    None
    """
    number_of_files_fetched = 0
    print("Downloading sample files to " + sampledata_dir)
    for base_url in _base_urls:
        for file_name in _files.itervalues():
            full_file_name = file_name[0] + file_name[1]
            if url_exists(os.path.join(base_url, full_file_name)):
                f = download_file(os.path.join(base_url, full_file_name))
                real_name, ext = os.path.splitext(full_file_name)

                if file_name[1] == '.zip':
                    print("Unpacking: %s" % real_name)
                    with ZipFile(f, 'r') as zip_file:
                        zip_file.extract(real_name, sampledata_dir)
                    remove(f)
                else:
                    # move files to the data directory
                    move(f, os.path.join(sampledata_dir, file_name[0]))
                # increment the number of files obtained to check later
                number_of_files_fetched += 1

    if number_of_files_fetched < len(_files.keys()):
        raise URLError("Could not download all sample files. Problem accessing sample data servers.")
Example #18
def load_salt2model(remote_url, topdir, name=None, version=None):
    fn = download_file(remote_url, cache=True, timeout=REMOTE_TIMEOUT())
    t = tarfile.open(fn, 'r:gz')

    errscalefn = join(topdir, 'salt2_spec_dispersion_scaling.dat')
    if errscalefn in t.getnames():
        errscalefile = t.extractfile(errscalefn)
    else:
        errscalefile = None

    model = SALT2Source(
        m0file=t.extractfile(join(topdir,'salt2_template_0.dat')),
        m1file=t.extractfile(join(topdir,'salt2_template_1.dat')),
        clfile=t.extractfile(join(topdir,'salt2_color_correction.dat')),
        cdfile=t.extractfile(join(topdir,'salt2_color_dispersion.dat')),
        errscalefile=t.extractfile(
            join(topdir,'salt2_lc_dispersion_scaling.dat')),
        lcrv00file=t.extractfile(
            join(topdir,'salt2_lc_relative_variance_0.dat')),
        lcrv11file=t.extractfile(
            join(topdir,'salt2_lc_relative_variance_1.dat')),
        lcrv01file=t.extractfile(
            join(topdir,'salt2_lc_relative_covariance_01.dat')),
        name=name,
        version=version)
    t.close()
    #set_bandfluxerror_sn1a(model)
    return model
Example #19
    def setup_class(self):

        # make a fake header to test the helper functions which access the header
        w = WCS(naxis=2)

        w.wcs.crpix = [crpix_val, crpix_val]
        w.wcs.cdelt = np.array([-cdelt_val, cdelt_val])
        w.wcs.crval = [cr1val_val, cr2val_val]
        w.wcs.ctype = [b"RA---TAN", b"DEC--TAN"]
        w.wcs.crota = [0, crota2_val]

        self.header = w.to_header()

        # make a temporary directory for the input and output
        self.tmpdir = tempfile.mkdtemp()

        # get the test data and copy it to the temp directory
        if os.path.exists('../data/testimgs'): # copy from ../data/testimgs if that exists 
            shutil.copytree('../data/testimgs',self.tmpdir+'/imagecubetest')
        else: # download and symlink to temp directory: NOT WORKING
            os.makedirs(self.tmpdir+'/imagecubetest/')
            for fname in test_data_files:
                tmpname = download_file(test_data_loc+fname)
                linked_name = self.tmpdir+'/imagecubetest/'+fname
                shutil.copy2(tmpname, linked_name)
Example #20
def example_dataset_rdi():
    """
    Download example FITS cube from github + prepare HCIDataset object.

    Returns
    -------
    dataset : HCIDataset

    Notes
    -----
    Astropy's ``download_file`` uses caching, so the file is downloaded at most
    once per test run.

    """
    print("downloading data...")

    url_prefix = "https://github.com/carlgogo/VIP_extras/raw/master/datasets"

    f1 = download_file("{}/naco_betapic_cube.fits".format(url_prefix),
                       cache=True)
    f2 = download_file("{}/naco_betapic_psf.fits".format(url_prefix),
                       cache=True)
    f3 = download_file("{}/naco_betapic_pa.fits".format(url_prefix),
                       cache=True)

    # load fits
    cube = vip.fits.open_fits(f1)
    angles = vip.fits.open_fits(f3).flatten()  # shape (61,1) -> (61,)
    psf = vip.fits.open_fits(f2)
    # creating a flux screen
    scr = vip.var.create_synth_psf('moff', (101, 101), fwhm=50)
    scrcu = np.array([scr * i for i in np.linspace(1e3, 2e3, num=31)])
    # upscaling (1.2) and taking half of the frames, reversing order
    cube_upsc = cube_px_resampling(cube[::-1], 1.2, verbose=False)[::2]
    # cropping and adding the flux screen
    cube_ref = cube_crop_frames(cube_upsc, 101, verbose=False) + scrcu

    # create dataset object
    dataset = vip.Dataset(cube, angles=angles, psf=psf, cuberef=cube_ref,
                          px_scale=vip.conf.VLT_NACO['plsc'])

    dataset.normalize_psf(size=20, force_odd=False)

    # overwrite PSF for easy access
    dataset.psf = dataset.psfn

    return dataset
Example #21
    def _initialise_ephemeris(self, earth_ephem, sun_ephem, time_corr):
        """
        Initialise the solar system ephemeris.
        """

        if earth_ephem is not None:
            earthfile = earth_ephem
        else:
            earthfile = self.__earthstr.format(self.ephem)

        if sun_ephem is not None:
            sunfile = sun_ephem
        else:
            sunfile = self.__sunstr.format(self.ephem)

        if time_corr is not None:
            timefile = time_corr
        else:
            timefile = self.__timecorrstr.format(self.__units_map[self.units])

        try:
            edat = lalpulsar.InitBarycenter(earthfile, sunfile)
        except RuntimeError:
            try:
                # try downloading the ephemeris files
                from astropy.utils.data import download_file

                efile = download_file(DOWNLOAD_URL.format(earthfile), cache=True)
                sfile = download_file(DOWNLOAD_URL.format(sunfile), cache=True)
                edat = lalpulsar.InitBarycenter(efile, sfile)
            except Exception as e:
                raise IOError("Could not read in ephemeris files: {}".format(e))

        try:
            tdat = lalpulsar.InitTimeCorrections(timefile)
        except RuntimeError:
            try:
                # try downloading the ephemeris files
                from astropy.utils.data import download_file

                tfile = download_file(DOWNLOAD_URL.format(timefile), cache=True)
                tdat = lalpulsar.InitTimeCorrections(tfile)
            except Exception as e:
                raise IOError("Could not read in time correction file: {}".format(e))

        return edat, tdat
Example #22
File: serv.py  Project: itoledoc/gWTO3
    def __init__(self):
        xmlrpc.XMLRPC.__init__(self)
        import DsaDataBase3 as Data
        from astropy.utils.data import download_file
        from astropy.utils import iers
        iers.IERS.iers_table = iers.IERS_A.open(
            download_file(iers.IERS_A_URL, cache=True))
        self.data = Data.DsaDatabase3(refresh_apdm=True, allc2=False, loadp1=False)
Example #23
def example_dataset_ifs():
    """
    Download example FITS cube from github + prepare HCIDataset object.

    Returns
    -------
    dataset : HCIDataset

    Notes
    -----
    Astropy's ``download_file`` uses caching, so the file is downloaded at most
    once per test run.

    """
    print("downloading data...")

    url_prefix = "https://github.com/carlgogo/VIP_extras/raw/master/datasets"

    f1 = download_file("{}/sphere_v471tau_cube.fits".format(url_prefix),
                       cache=True)
    f2 = download_file("{}/sphere_v471tau_psf.fits".format(url_prefix),
                       cache=True)
    f3 = download_file("{}/sphere_v471tau_pa.fits".format(url_prefix),
                       cache=True)
    f4 = download_file("{}/sphere_v471tau_wl.fits".format(url_prefix),
                       cache=True)

    # load fits
    cube = vip.fits.open_fits(f1)
    angles = vip.fits.open_fits(f3).flatten()
    psf = vip.fits.open_fits(f2)
    wl = vip.fits.open_fits(f4)

    # create dataset object
    dataset = vip.Dataset(cube, angles=angles, psf=psf,
                          px_scale=vip.conf.VLT_SPHERE_IFS['plsc'],
                          wavelengths=wl)

    # crop
    dataset.crop_frames(size=100, force=True)
    dataset.normalize_psf(size=None, force_odd=False)

    # overwrite PSF for easy access
    dataset.psf = dataset.psfn

    return dataset
Example #24
def download_dsn():
    """
    Download the DSN data from spaceprob.es
    """
    dsn_url = 'http://murmuring-anchorage-8062.herokuapp.com/dsn/probes.json'
    f = download_file(dsn_url, cache=False)
    dsn = json.loads(open(f).read())['dsn_by_probe']
    return dsn
Example #25
File: load.py  Project: ellisowen/gammapy
    def filenames():
        """Dictionary of available file names."""
        result = dict()

        BASE_URL = 'https://github.com/gammapy/gammapy-extra/raw/master/datasets/vela_region/'
        url_counts = BASE_URL + 'counts_vela.fits'
        url_exposure = BASE_URL + 'exposure_vela.fits'
        url_background = BASE_URL + 'background_vela.fits'
        url_diffuse = BASE_URL + 'gll_iem_v05_rev1_cutout.fit'
        url_psf = BASE_URL + 'psf_vela.fits'

        result['counts_cube'] = data.download_file(url_counts, cache=True)
        result['exposure_cube'] = data.download_file(url_exposure, cache=True)
        result['background_image'] = data.download_file(url_background, cache=True)
        result['diffuse_model'] = data.download_file(url_diffuse, cache=True)
        result['psf'] = data.download_file(url_psf, cache=True)
        return result
Example #26
def cli(ra, dec, radius, outfile, vcat, catalog_file):
    """
    Extracts sources from the catalog within a circular region

    \b
    Example usage:

        fastimg_extract_lsm -- 189.2 -45.6 1.5 lsm.csv

    or with separate variables catalog:

        fastimg_extract_lsm --vcat var.csv -- 189.2 -45.6 1.5 lsm.csv

    Note: arguments (RA, dec, radius, [outfile]) are separated from options by
    the separator `--`; this avoids mistakenly trying to parse a negative
    declination as an option flag.

    \b
    Args:
    - ra (float): RA of centre (decimal degrees, J2000)
    - dec (float): Dec of centre (decimal degrees, J2000)
    - radius (float): Cone-radius (decimal degrees)
    - outfile (filepath): Filename to write filtered catalog to.
        (Default is stdout)


    \b
    Options:
    - vcat (path): Path to output a separate catalog containing only
        variable sources.
    - catalog_file (path): Path to SUMSS catalog file (sumsscat.Mar-11-2008.gz).
        If unsupplied, will attempt to download / use cached version via the
        Astropy download cache.

    Outputs:
        A list of matching sources in GSM format.

    """

    field_of_view = SkyRegion(SkyCoord(ra * u.deg, dec * u.deg),
                              Angle(radius * u.deg))

    if catalog_file is None:
        catalog_file = download_file(
            'http://www.astrop.physics.usyd.edu.au/sumsscat/sumsscat.Mar-11-2008.gz',
            cache=True)

    full_cat = sumss_file_to_dataframe(catalog_file)
    lsm_cat = lsm_extract(field_of_view, full_cat)
    variable_cat = [s for s in lsm_cat if s.variable]

    write_catalog(lsm_cat, outfile)
    if vcat:
        write_catalog(variable_cat, vcat)

    click.echo("{} sources matched, of which {} variable".format(
        len(lsm_cat), len(variable_cat)),
        err=True)
Example #27
File: data.py  Project: panoptes/POCS
    def download_one_file(fn):
        dest = "{}/{}".format(data_folder, os.path.basename(fn))
        if not os.path.exists(dest):
            url = "http://data.astrometry.net/{}".format(fn)
            df = data.download_file(url)
            try:
                shutil.move(df, dest)
            except OSError as e:
                print("Problem saving. (Maybe permissions?): {}".format(e))
Example #28
    def __init__(self):
        xmlrpc.XMLRPC.__init__(self)

        iers.IERS.iers_table = iers.IERS_A.open(
            download_file(iers.IERS_A_URL, cache=True))
        self.data = Data.DsaDatabase3(
                path='/home/itoledo/sim/',
                refresh_apdm=True, loadp1=False)
        self.dsa = Dsa.DsaAlgorithm3(self.data)
Example #29
def get_source(source):
    """Get the source data for a particular GW catalog
    """
    if source == 'gwtc-1':
        fname = download_file(gwtc1_url, cache=True)
        data = json.load(open(fname, 'r'))
    else:
        raise ValueError('Unknown catalog source {}'.format(source))
    return data['data']
Example #30
def load_spectral_magsys_fits(remote_url, name=None):
    fn = download_file(remote_url, cache=True)
    hdulist = fits.open(fn)
    dispersion = hdulist[1].data['WAVELENGTH']
    flux_density = hdulist[1].data['FLUX']
    hdulist.close()
    refspectrum = Spectrum(dispersion, flux_density,
                           unit=(u.erg / u.s / u.cm**2 / u.AA), wave_unit=u.AA)
    return SpectralMagSystem(refspectrum, name=name)
Example #31
import pyspeckit
import os

if not os.path.exists('n2hp_cube.fit'):
    import astropy.utils.data as aud
    from astropy.io import fits
    f = aud.download_file('ftp://cdsarc.u-strasbg.fr/pub/cats/J/A%2BA/472/519/fits/opha_n2h.fit')
    with fits.open(f) as ff:
        ff[0].header['CUNIT3'] = 'm/s'
        for kw in ['CTYPE4','CRVAL4','CDELT4','CRPIX4']:
            del ff[0].header[kw]
        ff.writeto('n2hp_cube.fit')

# Load the spectral cube
spc = pyspeckit.Cube('n2hp_cube.fit')

# Register the fitter
# The N2H+ fitter is 'built-in' but is not registered by default; this example
# shows how to register a fitting procedure.
# 'multi' indicates that it is possible to fit multiple components and that a
# background will not automatically be fit. 4 is the number of parameters in
# the model (excitation temperature, optical depth, line center, and line width).
spc.Registry.add_fitter('n2hp_vtau', pyspeckit.models.n2hp.n2hp_vtau_fitter, 4,
                        multisingle='multi')

# Run the fitter
spc.fiteach(fittype='n2hp_vtau', multifit=True,
            guesses=[5,0.5,3,1], # Tex=5K, tau=0.5, v_center=3 km/s, width=1 km/s
            signal_cut=6, # minimize the # of pixels fit for the example
            )
# There are a huge number of parameters for the fiteach procedure.  See:
# http://pyspeckit.readthedocs.org/en/latest/example_nh3_cube.html
Example #32
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from astropy.utils.data import download_file
from astropy.io import fits

image_list = [ download_file('http://www.mistisoftware.com/astronomy/fits/'+n+'m_L.FIT', cache=True ) \
                for n in ['m27_050711_6i30', 'ngc7293_050906_8i40']]

image_concat = []
for image in image_list:
    plt.figure()
    hdu_list = fits.open(image)
    hdu_list.info()

    image_data = hdu_list[0].data
    hdu_list.close()
    plt.imshow(image_data, cmap='jet')
    plt.draw()
    image_concat.append(fits.getdata(image))
plt.figure()

final_image = np.sum(image_concat, axis=0)

plt.imshow(final_image, cmap='jet')
plt.colorbar()
plt.draw()

plt.show()

outfile = 'concat.fits'
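The script ends after defining outfile without writing anything to it; a minimal, hedged completion (assuming the intent is simply to save the summed image) would be:

# Save the stacked image to 'concat.fits', overwriting any previous run.
fits.writeto(outfile, final_image, overwrite=True)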
Example #33
def get_sample_file(filename,
                    url_list,
                    show_progress=True,
                    overwrite=False,
                    timeout=None):
    """
    Download a sample data file and move it to the sample data directory,
    uncompressing zip files if necessary. If the file already exists locally,
    its path is returned without re-downloading.

    Parameters
    ----------
    filename: `str`
        Name of the file
    url_list: `str` or `list`
        urls where to look for the file
    show_progress: `bool`
        Show a progress bar during download
    overwrite: `bool`
        If True download and overwrite an existing file.
    timeout: `float`
        The timeout in seconds. If `None` the default timeout is used from
        `astropy.utils.data.Conf.remote_timeout`.

    Returns
    -------
    result: `str`
        The local path of the file. None if it failed.
    """

    if filename[-3:] == 'zip':
        uncompressed_filename = filename[:-4]
    else:
        uncompressed_filename = filename
    # check if the (uncompressed) file exists
    if not overwrite and os.path.isfile(
            os.path.join(sampledata_dir, uncompressed_filename)):
        return os.path.join(sampledata_dir, uncompressed_filename)
    else:
        # check each provided url to find the file
        for base_url in url_list:
            online_filename = filename
            if base_url.count('github'):
                online_filename += '?raw=true'
            try:
                exists = url_exists(os.path.join(base_url, online_filename))
                if exists:
                    f = download_file(os.path.join(base_url, online_filename),
                                      show_progress=show_progress,
                                      timeout=timeout)
                    real_name, ext = os.path.splitext(f)

                    if ext == '.zip':
                        print("Unpacking: {}".format(real_name))
                        with ZipFile(f, 'r') as zip_file:
                            unzipped_f = zip_file.extract(
                                real_name, sampledata_dir)
                        os.remove(f)
                        move(
                            unzipped_f,
                            os.path.join(sampledata_dir,
                                         uncompressed_filename))
                        return os.path.join(sampledata_dir,
                                            uncompressed_filename)
                    else:
                        # move files to the data directory
                        move(
                            f,
                            os.path.join(sampledata_dir,
                                         uncompressed_filename))
                        return os.path.join(sampledata_dir,
                                            uncompressed_filename)
            except (socket.error, socket.timeout) as e:
                warnings.warn("Download failed with error {}. \n"
                              "Retrying with different mirror.".format(e))
        # if reach here then file has not been downloaded.
        warnings.warn("File {} not found.".format(filename))
        return None
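A hypothetical call to the function above; the file name and mirror URLs below are placeholders, not real sample-data locations:

# Try each mirror in turn and return the local path, or None if every
# download attempt fails.
path = get_sample_file('sample_image.fits',
                       ['https://example.org/sample-data/',
                        'https://mirror.example.org/sample-data/'])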
Example #34
def get_icrs_coordinates(name, parse=False, cache=False):
    """
    Retrieve an ICRS object by using an online name resolving service to
    retrieve coordinates for the specified name. By default, this will
    search all available databases until a match is found. If you would like
    to specify the database, use the science state
    ``astropy.coordinates.name_resolve.sesame_database``. You can also
    specify a list of servers to use for querying Sesame using the science
    state ``astropy.coordinates.name_resolve.sesame_url``. This will try
    each one in order until a valid response is returned. By default, this
    list includes the main Sesame host and a mirror at vizier.  The
    configuration item `astropy.utils.data.Conf.remote_timeout` controls the
    number of seconds to wait for a response from the server before giving
    up.

    Parameters
    ----------
    name : str
        The name of the object to get coordinates for, e.g. ``'M42'``.
    parse : bool
        Whether to attempt extracting the coordinates from the name by
        parsing with a regex. For catalog object names that have
        J-coordinates embedded in them, e.g.
        'CRTS SSS100805 J194428-420209', this may be much faster than a
        Sesame query for the same object name. The coordinates extracted
        in this way may differ from the database coordinates by a few
        deci-arcseconds, so only use this option if you do not need
        sub-arcsecond accuracy.
    cache : bool, str, optional
        Determines whether to cache the results or not. Passed through to
        `~astropy.utils.data.download_file`, so pass "update" to update the
        cached value.

    Returns
    -------
    coord : `astropy.coordinates.ICRS` object
        The object's coordinates in the ICRS frame.

    """

    # If requested, first try to extract coordinates embedded in the object name.
    # Do this first since it may be much faster than doing the Sesame query.
    if parse:
        from . import jparser
        if jparser.search(name):
            return jparser.to_skycoord(name)
        else:
            # if the parser failed, fall back to sesame query.
            pass
            # maybe emit a warning instead of silently falling back to sesame?

    database = sesame_database.get()
    # The web API just takes the first letter of the database name
    db = database.upper()[0]

    # Make sure we don't have duplicates in the url list
    urls = []
    domains = []
    for url in sesame_url.get():
        domain = urllib.parse.urlparse(url).netloc

        # Check for duplicates
        if domain not in domains:
            domains.append(domain)

            # Add the query to the end of the url, add to url list
            fmt_url = os.path.join(url, "{db}?{name}")
            fmt_url = fmt_url.format(name=urllib.parse.quote(name), db=db)
            urls.append(fmt_url)

    exceptions = []
    for url in urls:
        try:
            resp_data = get_file_contents(
                download_file(url, cache=cache, show_progress=False))
            break
        except urllib.error.URLError as e:
            exceptions.append(e)
            continue
        except socket.timeout as e:
            # There are some cases where urllib2 does not catch socket.timeout
            # especially while receiving response data on an already previously
            # working request
            e.reason = "Request took longer than the allowed {:.1f} " \
                       "seconds".format(data.conf.remote_timeout)
            exceptions.append(e)
            continue

    # All Sesame URL's failed...
    else:
        messages = [f"{url}: {e.reason}" for url, e in zip(urls, exceptions)]
        raise NameResolveError("All Sesame queries failed. Unable to "
                               "retrieve coordinates. See errors per URL "
                               "below: \n {}".format("\n".join(messages)))

    ra, dec = _parse_response(resp_data)

    if ra is None and dec is None:
        if db == "A":
            err = f"Unable to find coordinates for name '{name}'"
        else:
            err = "Unable to find coordinates for name '{}' in database {}"\
                  .format(name, database)

        raise NameResolveError(err)

    # Return SkyCoord object
    sc = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame='icrs')
    return sc
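A hedged usage sketch ('M42' is the identifier used in the docstring above):

# Resolve 'M42' through Sesame, caching the downloaded response, and read the
# resulting ICRS coordinates from the returned SkyCoord.
m42 = get_icrs_coordinates('M42', cache=True)
print(m42.ra.deg, m42.dec.deg)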
Example #35
def get_toi_list(use_cache=True):
    url = TOI_URL + "?sort=toi&output=csv&version={0}".format(__version__)
    if use_cache:
        return pd.read_csv(download_file(url, cache=True))
    return pd.read_csv(url)
Example #36
def test_aspcapStar_loader(tmpdir):
    aspcap_url = ("https://data.sdss.org/sas/dr16/apogee/spectro/aspcap/r12/"
                  "l33/apo25m/N7789/aspcapStar-r12-2M00005414+5522241.fits")
    with set_temp_cache(path=str(tmpdir)):
        filename = download_file(aspcap_url, cache=True)
        spectrum = aspcapStar_loader(filename)
Example #37
    raise KeyError(
        "Please work in a conda environment with lalsuite and cwinpy installed"
    )

execpath = os.path.join(execpath, "bin")

lppen = os.path.join(execpath, "lalapps_pulsar_parameter_estimation_nested")
n2p = os.path.join(execpath, "lalinference_nest2pos")

Nlive = 1024  # number of nested sampling live points
Nmcmcinitial = 0  # set to 0 so that prior samples are not resampled

outfile = os.path.join(outdir, "{}_nest.hdf".format(label))

# set ephemeris files
efile = download_file(DOWNLOAD_URL.format("earth00-40-DE405.dat.gz"),
                      cache=True)
sfile = download_file(DOWNLOAD_URL.format("sun00-40-DE405.dat.gz"), cache=True)
tfile = download_file(DOWNLOAD_URL.format("te405_2000-2040.dat.gz"),
                      cache=True)

# set the command line arguments
runcmd = " ".join([
    lppen,
    "--verbose",
    "--input-files",
    ",".join(hetfiles),
    "--detectors",
    detector,
    "--par-file",
    parfile,
    "--prior-file",
Example #38
    def setup(self, request, reference, data_path):
        """
        This method does initial setup of creating configuration and performing
        a single run of integration test.
        """
        # The last component in dirpath can be extracted as the name of the setup.
        self.name = data_path['setup_name']

        self.config_file = os.path.join(data_path['config_dirpath'], "config.yml")

        # A quick hack to use atom data per setup. Atom data is ingested from
        # local HDF or downloaded and cached from a url, depending on data_path
        # keys.
        atom_data_name = yaml.load(open(self.config_file))['atom_data']

        # Get the path to HDF file:
        if 'atom_data_url' in data_path:
            # If the atom data is to be ingested from url:
            atom_data_filepath = download_file(urlparse.urljoin(
                base=data_path['atom_data_url'], url=atom_data_name), cache=True
            )
        else:
            # If the atom data is to be ingested from local file:
            atom_data_filepath = os.path.join(
                data_path['atom_data_dirpath'], atom_data_name
            )

        # Load atom data file separately, pass it for forming tardis config.
        self.atom_data = AtomData.from_hdf5(atom_data_filepath)

        # Check whether the atom data file in current run and the atom data
        # file used in obtaining the reference data are same.
        # TODO: hard coded UUID for kurucz atom data file, generalize it later.
        # kurucz_data_file_uuid1 = "5ca3035ca8b311e3bb684437e69d75d7"
        # assert self.atom_data.uuid1 == kurucz_data_file_uuid1

        # Create a Configuration through yaml file and atom data.
        tardis_config = Configuration.from_yaml(
            self.config_file, atom_data=self.atom_data)

        # Check whether current run is with less packets.
        if request.config.getoption("--less-packets"):
            less_packets = request.config.integration_tests_config['less_packets']
            tardis_config['montecarlo']['no_of_packets'] = (
                less_packets['no_of_packets']
            )
            tardis_config['montecarlo']['last_no_of_packets'] = (
                less_packets['last_no_of_packets']
            )

        # We now do a run with prepared config and get radial1d model.
        self.result = Radial1DModel(tardis_config)

        # If current test run is just for collecting reference data, store the
        # output model to HDF file, save it at specified path. Skip all tests.
        # Else simply perform the run and move further for performing
        # assertions.
        if request.config.getoption("--generate-reference"):
            run_radial1d(self.result, hdf_path_or_buf=os.path.join(
                data_path['gen_ref_dirpath'], "{0}.h5".format(self.name)
            ))
            pytest.skip("Reference data saved at {0}".format(
                data_path['gen_ref_dirpath']
            ))
        else:
            run_radial1d(self.result)

        # Get the reference data through the fixture.
        self.reference = reference
Example #39
    parser.add_argument('-w',
                        '--noweb',
                        dest='noweb',
                        default=False,
                        action='store_true',
                        help='Does not build the web.')

    # We use parse_known_args because we want to leave the remaining args for distutils
    args = parser.parse_known_args()[0]

    if args.noweb:
        packages = find_packages(where='python', exclude=['marvin.web*'])
    else:
        packages = find_packages(where='python')

    data_files = get_data_files(with_web=not args.noweb)

    maskbits_path = download_file(
        'https://svn.sdss.org/public/repo/sdss/idlutils/'
        'trunk/data/sdss/sdssMaskbits.par')
    shutil.copy(
        maskbits_path,
        os.path.join(os.path.dirname(__file__), 'python/marvin/data/',
                     'sdssMaskbits.par'))

    # Now we remove all our custom arguments to make sure they don't interfere with distutils
    remove_args(parser)

    # Runs distutils
    run(data_files, packages)
Example #40
    def compute_TDBs(self):
        """Compute and add TDB and TDB long double columns to the TOA table.

        This routine creates new columns 'tdb' and 'tdbld' in a TOA table
        for TDB times, using the Observatory locations and IERS A Earth
        rotation corrections for UT1.
        """
        from astropy.utils.iers import IERS_A, IERS_A_URL
        from astropy.utils.data import download_file, clear_download_cache
        global iers_a_file, iers_a
        # If previous columns exist, delete them
        if 'tdb' in self.table.colnames:
            log.info('tdb column already exists. Deleting...')
            self.table.remove_column('tdb')
        if 'tdbld' in self.table.colnames:
            log.info('tdbld column already exists. Deleting...')
            self.table.remove_column('tdbld')

        # First make sure that we have already applied clock corrections
        ccs = False
        for tfs in self.table['flags']:
            if 'clkcorr' in tfs: ccs = True
        if ccs is False:
            log.warn(
                "No TOAs have clock corrections.  Use .apply_clock_corrections() first."
            )
        # These will be the new table columns
        col_tdb = numpy.zeros_like(self.table['mjd'])
        col_tdbld = numpy.zeros(self.ntoas, dtype=numpy.longdouble)
        # Read the IERS for ut1_utc corrections, if needed
        iers_a_file = download_file(IERS_A_URL, cache=True)
        # Check to see if the cached file is older than any of the TOAs
        iers_file_time = time.Time(os.path.getctime(iers_a_file),
                                   format="unix")
        if (iers_file_time.mjd < self.last_MJD.mjd):
            clear_download_cache(iers_a_file)
            try:
                log.warn("Cached IERS A file is out-of-date.  Re-downloading.")
                iers_a_file = download_file(IERS_A_URL, cache=True)
            except:
                pass
        iers_a = IERS_A.open(iers_a_file)
        # Now step through in observatory groups to compute TDBs
        for ii, key in enumerate(self.table.groups.keys):
            grp = self.table.groups[ii]
            obs = self.table.groups.keys[ii]['obs']
            loind, hiind = self.table.groups.indices[ii:ii + 2]
            # Make sure the string precisions are all set to 9 for all TOAs
            for t in grp['mjd']:
                t.precision = 9
            if key['obs'] in ["Barycenter", "Geocenter", "Spacecraft"]:
                # For these special cases, convert the times to TDB.
                # For Barycenter this will be a null conversion, but for
                # Geocenter the scale will likely be TT (if they came from a
                # spacecraft like Fermi, RXTE or NICER)
                tdbs = [t.tdb for t in grp['mjd']]
            elif key['obs'] in observatories:
                # For a normal observatory, convert to Time in UTC
                # with location specified as observatory,
                # and then convert to TDB
                utcs = time.Time([t.isot for t in grp['mjd']],
                                 format='isot',
                                 scale='utc',
                                 precision=9,
                                 location=observatories[obs].loc)
                utcs.delta_ut1_utc = utcs.get_delta_ut1_utc(iers_a)
                # Also save delta_ut1_utc for these TOAs for later use
                for toa, dut1 in zip(grp['mjd'], utcs.delta_ut1_utc):
                    toa.delta_ut1_utc = dut1
                # The actual conversion from UTC to TDB is done by astropy.Time
                # as described here <http://docs.astropy.org/en/stable/time/>,
                # with the real work done by the IAU SOFA library
                tdbs = utcs.tdb
            else:
                log.error("Unknown observatory ({0})".format(key['obs']))

            col_tdb[loind:hiind] = numpy.asarray([t for t in tdbs])
            col_tdbld[loind:hiind] = numpy.asarray(
                [utils.time_to_longdouble(t) for t in tdbs])
        # Now add the new columns to the table
        col_tdb = table.Column(name='tdb', data=col_tdb)
        col_tdbld = table.Column(name='tdbld', data=col_tdbld)
        self.table.add_columns([col_tdb, col_tdbld])
Example #41
"""
Plot the position of Venus as it transited in front
of the Sun as observed by SDO/AIA.
"""
import matplotlib.pyplot as plt

from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.coordinates import solar_system_ephemeris
from astropy.utils.data import download_file

import sunpy.map
from sunpy.coordinates import get_body_heliographic_stonyhurst

###############################################################################
# Let's download an image of the Venus transit.
f = download_file(
    'http://jsoc.stanford.edu/data/events/Venus_AIA24s_1600/Out/fits/20120606_040731_UTC.0041.fits'
)
aiamap = sunpy.map.Map(f)

###############################################################################
# For this example, we require high-precision ephemeris information. The built-in
# ephemeris provided by astropy is not accurate enough. This requires jplephem
# to be installed.
solar_system_ephemeris.set('jpl')

###############################################################################
# Now we get the position of Venus and convert it into the SDO/AIA coordinates.
venus = get_body_heliographic_stonyhurst('venus',
                                         aiamap.date,
                                         observer=aiamap.observer_coordinate)
venus_hpc = venus.transform_to(aiamap.coordinate_frame)
Example #42
file_best_fit_1c = './fits/NGC1333_NH3_MC_best_npeaks1.fits'
file_best_fit_2c = './fits/NGC1333_NH3_MC_best_npeaks2.fits'
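# NOTE: file_rms, file_cube and file_tp used below are defined earlier in the
# original script (not shown in this excerpt).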

download_data = False
do_1comp = False
do_2comp = False
do_inspect = True

if download_data:
    from astropy.utils.data import download_file
    import os, shutil
    if os.path.exists('data') == False:
        os.mkdir('data')
    # get rms file
    if os.path.isfile(file_rms) == False:
        tmp_rms = download_file(
            'https://dataverse.harvard.edu/api/access/datafile/3008213')
        shutil.move(tmp_rms, file_rms)
    # get cube
    if os.path.isfile(file_cube) == False:
        tmp_cube = download_file(
            'https://dataverse.harvard.edu/api/access/datafile/2902348')
        shutil.move(tmp_cube, file_cube)
    # create Peak temperature file
    if os.path.isfile(file_tp) == False:
        hd = fits.getheader(file_rms)
        hd['BUNIT'] = 'K'
        cube = fits.getdata(file_cube)
        tp_data = cube.max(axis=0)
        fits.writeto(file_tp, tp_data, hd)

hd = fits.getheader(file_rms)
"""
Quick-look to demonstrate that the off positions are relatively emission-free
"""
import aplpy
from astropy.utils.data import download_file
import paths
dame2001 = download_file('http://www.cfa.harvard.edu/mmw/Wco_DHT2001.fits.gz',
                         cache=True)
F = aplpy.FITSFigure(dame2001, convention='calabretta')
F.show_grayscale()
F.recenter(0, 0, width=5, height=3)  # center on the inner 5x3 degrees
F.show_regions(paths.rpath('target_fields_8x8.reg'))
F.show_regions(paths.rpath('off_positions_selectedfromDame2001.reg'))
F.save(paths.fpath('Dame2001_APEXCMZ_offpositions.png'))
F.save(paths.fpath('Dame2001_APEXCMZ_offpositions.pdf'))

F.hide_layer('region_set_1')
F.hide_layer('region_set_1_txt')
F.show_regions(paths.rpath('target_fields_8x8_coloredbyoffposition.reg'))
F.save(paths.fpath('Dame2001_APEXCMZ_offpositions_coloredbyoff.png'))
Example #44
def sdss_astrombad(run, camcol, field, photolog_version='dr10'):
    """For a list of RUN, CAMCOL, FIELD, return whether each field has bad
    astrometry.

    Parameters
    ----------
    run, camcol, field : :class:`int` or array of :class:`int`
        Run, camcol and field.  If arrays are passed,
        all must have the same length.
    photolog_version : :class:`str`, optional
        Use this version of photolog to obtain the opBadfields.par file,
        if :envvar:`PHOTOLOG_DIR` is not set.

    Returns
    -------
    :class:`numpy.ndarray` of :class:`bool`
        Array of bool.  ``True`` indicates the field is bad.

    Raises
    ------
    ValueError
        If the sizes of the arrays don't match or if the array values are out
        of bounds.

    Notes
    -----
    Reads data from ``$PHOTOLOG_DIR/opfiles/opBadFields.par``.

    If there is a problem with one camcol, we assume a
    problem with all camcols.

    """
    from astropy.utils.data import download_file
    from .yanny import yanny
    global opbadfields
    #
    # Check inputs
    #
    if isinstance(run, int):
        #
        # Assume all inputs are integers & promote to arrays.
        #
        run = np.array([run], dtype=np.int64)
        camcol = np.array([camcol], dtype=np.int64)
        field = np.array([field], dtype=np.int64)
    else:
        #
        # Check that all inputs have the same shape.
        #
        if run.shape != camcol.shape:
            raise ValueError("camcol.shape does not match run.shape!")
        if run.shape != field.shape:
            raise ValueError("field.shape does not match run.shape!")
    #
    # Check ranges of parameters
    #
    if ((run < 0) | (run >= 2**16)).any():
        raise ValueError("run values are out-of-bounds!")
    if ((camcol < 1) | (camcol > 6)).any():
        raise ValueError("camcol values are out-of-bounds!")
    if ((field < 0) | (field >= 2**12)).any():
        raise ValueError("camcol values are out-of-bounds!")
    #
    # Read the file
    #
    if opbadfields is None:  # pragma: no cover
        if os.getenv('PHOTOLOG_DIR') is None:
            if (photolog_version == 'trunk' or
                    photolog_version.startswith('branches/')):
                iversion = photolog_version
            else:
                iversion = 'tags/'+photolog_version
            baseurl = ('https://svn.sdss.org/public/data/sdss/photolog/' +
                        '{0}/opfiles/opBadfields.par').format(iversion)
            filename = download_file(baseurl, cache=True)
        else:
            filename = os.path.join(os.getenv('PHOTOLOG_DIR'), 'opfiles',
                            'opBadfields.par')
        astrombadfile = yanny(filename)
        w = ((astrombadfile['BADFIELDS']['problem'] == 'astrom'.encode()) |
            (astrombadfile['BADFIELDS']['problem'] == 'rotator'.encode()))
        opbadfields = astrombadfile['BADFIELDS'][w]
    #
    # opbadfields already has astrom problems selected at this point
    #
    bad = np.zeros(run.shape, dtype=bool)
    for row in opbadfields:
        w = ((run == row['run']) &
        (field >= row['firstfield']) & (field < row['lastfield']))
        if w.any():
            bad[w] = True
    return bad
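A hypothetical call to the function above; the run/camcol/field values are placeholders:

# Returns a boolean array with one entry per input field; True marks a field
# flagged as having bad astrometry in opBadfields.par.
is_bad = sdss_astrombad(94, 1, 131)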
Example #45
File: srtm.py  Project: srmchcy/pycraf
def _download(ilon, ilat):
    # download the tile to path

    srtm_dir = SrtmConf.srtm_dir
    server = SrtmConf.server

    tile_name = _hgt_filename(ilon, ilat)
    tile_path = os.path.join(srtm_dir, tile_name)

    # Unfortunately, each server has a different structure.
    # NASA stores them in sub-directories (by continents)
    # Panoramic-Viewfinders has a flat structure but has several hgt tiles
    # zipped in a file

    # Furthermore, we need to check against the available tiles
    # (ocean tiles and polar caps are not present); we also do this
    # in the _get_hgt_file function (because it's not only important
    # for downloading). However, we have to figure out in which
    # subdirectory/zip-file a tile is located.

    if server.startswith('nasa_v'):

        if server == 'nasa_v1.0':
            base_url = 'https://dds.cr.usgs.gov/srtm/version1/'
        elif server == 'nasa_v2.1':
            base_url = 'https://dds.cr.usgs.gov/srtm/version2_1/SRTM3/'

        continent = _check_availability(ilon, ilat)

        # downloading
        full_url = base_url + continent + '/' + tile_name + '.zip'
        tmp_path = download_file(full_url)

        # move to srtm_dir
        shutil.move(tmp_path, tile_path + '.zip')

        # unpacking
        with ZipFile(tile_path + '.zip', 'r') as zf:
            zf.extractall(srtm_dir)

        os.remove(tile_path + '.zip')

    elif server == 'viewpano':

        base_url = 'http://viewfinderpanoramas.org/dem3/'

        zipfile_name = _check_availability(ilon, ilat)
        super_tile_path = os.path.join(srtm_dir, zipfile_name)

        # downloading
        full_url = base_url + zipfile_name
        tmp_path = download_file(full_url)

        # move to srtm_dir
        shutil.move(tmp_path, super_tile_path)

        # unpacking
        with ZipFile(super_tile_path, 'r') as zf:
            zf.extractall(srtm_dir)

        os.remove(super_tile_path)
Example #46
File: irsa.py  Project: BrentLoving/SOUSA
def grabImage(ra, dec):
    imagelist = IrsaDust.get_image_list(SkyCoord(ra, dec).fk5, image_type="100um", radius=2*u.degree)
    image_file = download_file(imagelist[0], cache=True)
    image_data.append(fits.getdata(image_file, ext=0))  # gets image from IRSA database
Example #47
def test_make_kurucz_tlusty_spectral_grid():

    # download the needed files
    url_loc = 'http://www.stsci.edu/~kgordon/beast/'
    kurucz_fname_dld = download_file('%s%s' %
                                     (url_loc, 'kurucz2004.grid.fits'))
    tlusty_fname_dld = download_file('%s%s' %
                                     (url_loc, 'tlusty.lowres.grid.fits'))
    filter_fname_dld = download_file('%s%s' % (url_loc, 'filters.hd5'))
    iso_fname_dld = download_file('%s%s' %
                                  (url_loc, 'beast_example_phat_iso.csv'))

    # rename files to have the correct extensions
    kurucz_fname = '%s.fits' % (kurucz_fname_dld)
    os.rename(kurucz_fname_dld, kurucz_fname)
    tlusty_fname = '%s.fits' % (tlusty_fname_dld)
    os.rename(tlusty_fname_dld, tlusty_fname)
    filter_fname = '%s.hd5' % (filter_fname_dld)
    os.rename(filter_fname_dld, filter_fname)
    iso_fname = '%s.csv' % (iso_fname_dld)
    os.rename(iso_fname_dld, iso_fname)

    # download cached version of spectral grid
    filename = download_file('%s%s' %
                             (url_loc, 'beast_example_phat_spec_grid.hd5'))

    hdf_cache = h5py.File(filename, 'r')

    ################
    # generate the same spectral grid from the code

    # read in the cached isochrones
    oiso = ezIsoch(iso_fname)

    # define the distance
    distances = [24.47]
    distance_unit = units.mag

    velocity = -300 * units.km / units.s
    redshift = (velocity / const.c).decompose().value

    # define the spectral libraries to use
    osl = stellib.Tlusty(filename=tlusty_fname) \
        + stellib.Kurucz(filename=kurucz_fname)

    filters = [
        'HST_WFC3_F275W', 'HST_WFC3_F336W', 'HST_ACS_WFC_F475W',
        'HST_ACS_WFC_F814W', 'HST_WFC3_F110W', 'HST_WFC3_F160W'
    ]
    add_spectral_properties_kwargs = dict(filternames=filters)

    spec_fname = '/tmp/beast_example_phat_spec_grid.hd5'
    spec_fname, g = make_spectral_grid(
        'test',
        oiso,
        osl=osl,
        redshift=redshift,
        distance=distances,
        distance_unit=distance_unit,
        spec_fname=spec_fname,
        filterLib=filter_fname,
        add_spectral_properties_kwargs=add_spectral_properties_kwargs)

    # open the hdf file with the spectral grid
    hdf_new = h5py.File(spec_fname, 'r')

    # go through the file and check if it is exactly the same
    for sname in hdf_cache.keys():
        if isinstance(hdf_cache[sname], h5py.Dataset):
            cvalue = hdf_cache[sname]
            cvalue_new = hdf_new[sname]
            if cvalue.dtype.fields is None:
                np.testing.assert_equal(cvalue.value, cvalue_new.value,
                                        'testing %s' % (sname))
            else:
                for ckey in cvalue.dtype.fields.keys():
                    np.testing.assert_equal(cvalue.value[ckey],
                                            cvalue_new.value[ckey],
                                            'testing %s/%s' % (sname, ckey))
Example #48
0
try:
    from urllib.error import URLError
except ImportError:
    from urllib2 import URLError

# Science modules
from random import gauss
import numpy as np
from astropy import units as u
from astropy.time import Time
from astropy.table import Table, Column, vstack
from astropy.utils.iers import IERS_A, IERS_A_URL, IERS
from astropy.utils.data import download_file
from astropy.coordinates import EarthLocation
try:
    iers_a_file = download_file(IERS_A_URL, cache=True)
    iers_a = IERS_A.open(iers_a_file)
    IERS.iers_table = iers_a
except URLError:
    print('IERS_A_URL is temporarily unavailable')

# Custom modules
import dfn_utils
from orbital_utilities import OrbitObject, \
    random_compute_orbit_integration_EOE, \
    random_compute_orbit_ceplecha, random_compute_orbit_integration_posvel, \
    compute_cartesian_velocities_from_radiant#, compute_infinity_radiant

# Define some constants
mu_e = 3.986005000e14  # Earth's standard gravitational parameter (m^3/s^2); precise value 3.986004418e14
Example #49
0
    def setup_class(cls):
        # create dummy frame cache files
        cls.dummydir = "testing_frame_cache"
        os.makedirs(cls.dummydir, exist_ok=True)
        cls.dummy_cache_files = []
        for i in range(0, 5):
            dummyfile = os.path.join(cls.dummydir,
                                     "frame_cache_{0:01d}.cache".format(i))
            cls.dummy_cache_files.append(dummyfile)
            with open(dummyfile, "w") as fp:
                fp.write("blah\n")

        # create some fake data frames using lalapps_Makefakedata_v5
        mfd = shutil.which("lalapps_Makefakedata_v5")

        cls.fakedatadir = "testing_fake_frame_cache"
        cls.fakedatadetectors = ["H1", "L1"]
        cls.fakedatachannels = [
            "{}:FAKE_DATA".format(det) for det in cls.fakedatadetectors
        ]
        cls.fakedatastarts = [1000000000, 1000000000 + 86400 * 2]
        cls.fakedataduration = 86400

        os.makedirs(cls.fakedatadir, exist_ok=True)

        cls.fakedatabandwidth = 8  # Hz
        sqrtSn = 1e-29  # noise amplitude spectral density
        cls.fakedataname = "FAKEDATA"

        # Create two pulsars to inject: one isolated and one binary
        cls.fakepulsarpar = []

        # requirements for Makefakedata pulsar input files
        isolatedstr = """\
Alpha = {alpha}
Delta = {delta}
Freq = {f0}
f1dot = {f1}
f2dot = {f2}
refTime = {pepoch}
h0 = {h0}
cosi = {cosi}
psi = {psi}
phi0 = {phi0}
"""

        binarystr = """\
orbitasini = {asini}
orbitPeriod = {period}
orbitTp = {Tp}
orbitArgp = {argp}
orbitEcc = {ecc}
"""

        transientstr = """\
transientWindowType = {wintype}
transientStartTime = {tstart}
transientTau = {tau}
"""

        # FIRST PULSAR (ISOLATED)
        f0 = 6.9456 / 2.0  # source rotation frequency (Hz)
        f1 = -9.87654e-11 / 2.0  # source rotational frequency derivative (Hz/s)
        f2 = 2.34134e-18 / 2.0  # second frequency derivative (Hz/s^2)
        alpha = 0.0  # source right ascension (rads)
        delta = 0.5  # source declination (rads)
        pepoch = 1000000000  # frequency epoch (GPS)

        # GW parameters
        h0 = 3.0e-24  # GW amplitude
        phi0 = 1.0  # GW initial phase (rads)
        cosiota = 0.1  # cosine of inclination angle
        psi = 0.5  # GW polarisation angle (rads)

        mfddic = {
            "alpha": alpha,
            "delta": delta,
            "f0": 2 * f0,
            "f1": 2 * f1,
            "f2": 2 * f2,
            "pepoch": pepoch,
            "h0": h0,
            "cosi": cosiota,
            "psi": psi,
            "phi0": phi0,
        }

        cls.fakepulsarpar.append(PulsarParametersPy())
        cls.fakepulsarpar[0]["PSRJ"] = "J0000+0000"
        cls.fakepulsarpar[0]["H0"] = h0
        cls.fakepulsarpar[0]["PHI0"] = phi0 / 2.0
        cls.fakepulsarpar[0]["PSI"] = psi
        cls.fakepulsarpar[0]["COSIOTA"] = cosiota
        cls.fakepulsarpar[0]["F"] = [f0, f1, f2]
        cls.fakepulsarpar[0]["RAJ"] = alpha
        cls.fakepulsarpar[0]["DECJ"] = delta
        cls.fakepulsarpar[0]["PEPOCH"] = pepoch
        cls.fakepulsarpar[0]["EPHEM"] = "DE405"
        cls.fakepulsarpar[0]["UNITS"] = "TDB"

        cls.fakepardir = "testing_fake_par_dir"
        os.makedirs(cls.fakepardir, exist_ok=True)
        cls.fakeparfile = []
        cls.fakeparfile.append(os.path.join(cls.fakepardir, "J0000+0000.par"))
        cls.fakepulsarpar[0].pp_to_par(cls.fakeparfile[-1])

        injfile = os.path.join(cls.fakepardir, "inj.dat")
        with open(injfile, "w") as fp:
            fp.write("[Pulsar 1]\n")
            fp.write(isolatedstr.format(**mfddic))
            fp.write("\n")

        # SECOND PULSAR (BINARY SYSTEM)
        f0 = 3.8654321 / 2.0  # source rotation frequency (Hz)
        f1 = 9.87654e-13 / 2.0  # source rotational frequency derivative (Hz/s)
        f2 = -1.34134e-20 / 2.0  # second frequency derivative (Hz/s^2)
        alpha = 1.3  # source right ascension (rads)
        delta = -0.4  # source declination (rads)
        pepoch = 1000086400  # frequency epoch (GPS)

        # GW parameters
        h0 = 7.5e-25  # GW amplitude
        phi0 = 0.7  # GW initial phase (rads)
        cosiota = 0.6  # cosine of inclination angle
        psi = 1.1  # GW polarisation angle (rads)

        # binary parameters
        asini = 1.4  # projected semi-major axis (ls)
        period = 0.1 * 86400  # orbital period (s)
        Tp = 999992083  # time of periastron (GPS)
        argp = 0.0  # argument of periastron (rad)
        ecc = 0.09  # the orbital eccentricity

        mfddic = {
            "alpha": alpha,
            "delta": delta,
            "f0": 2 * f0,
            "f1": 2 * f1,
            "f2": 2 * f2,
            "pepoch": pepoch,
            "h0": h0,
            "cosi": cosiota,
            "psi": psi,
            "phi0": phi0,
        }

        mfdbindic = {
            "asini": asini,
            "Tp": Tp,
            "period": period,
            "argp": argp,
            "ecc": ecc,
        }

        cls.fakepulsarpar.append(PulsarParametersPy())
        cls.fakepulsarpar[1]["PSRJ"] = "J1111+1111"
        cls.fakepulsarpar[1]["H0"] = h0
        cls.fakepulsarpar[1]["PHI0"] = phi0 / 2.0
        cls.fakepulsarpar[1]["PSI"] = psi
        cls.fakepulsarpar[1]["COSIOTA"] = cosiota
        cls.fakepulsarpar[1]["F"] = [f0, f1, f2]
        cls.fakepulsarpar[1]["RAJ"] = alpha
        cls.fakepulsarpar[1]["DECJ"] = delta
        cls.fakepulsarpar[1]["PEPOCH"] = pepoch
        cls.fakepulsarpar[1]["BINARY"] = "BT"
        cls.fakepulsarpar[1]["E"] = ecc
        cls.fakepulsarpar[1]["A1"] = asini
        cls.fakepulsarpar[1]["T0"] = Tp
        cls.fakepulsarpar[1]["OM"] = argp
        cls.fakepulsarpar[1]["PB"] = period
        cls.fakepulsarpar[1]["EPHEM"] = "DE405"
        cls.fakepulsarpar[1]["UNITS"] = "TDB"

        cls.fakeparfile.append(os.path.join(cls.fakepardir, "J1111+1111.par"))
        cls.fakepulsarpar[1].pp_to_par(cls.fakeparfile[-1])

        with open(injfile, "a") as fp:
            fp.write("[Pulsar 2]\n")
            fp.write(isolatedstr.format(**mfddic))
            fp.write(binarystr.format(**mfdbindic))
            fp.write("\n")

        # THIRD PULSAR (GLITCHING PULSAR)
        f0 = 5.3654321 / 2.0  # source rotation frequency (Hz)
        f1 = -4.57654e-10 / 2.0  # source rotational frequency derivative (Hz/s)
        f2 = 1.34134e-18 / 2.0  # second frequency derivative (Hz/s^2)
        alpha = 4.6  # source right ascension (rads)
        delta = -0.9  # source declination (rads)
        pepoch = 1000000000 + 1.5 * 86400  # frequency epoch (GPS)

        # glitch parameters
        df0 = 0.0001  # EM glitch frequency jump
        df1 = 1.2e-11  # EM glitch frequency derivative jump
        df2 = -4.5e-19  # EM glitch frequency second derivative jump
        dphi = 1.1  # EM glitch phase offset
        glepoch = pepoch  # glitch epoch

        # GW parameters
        h0 = 8.7e-25  # GW amplitude
        phi0 = 0.142  # GW initial phase (rads)
        cosiota = -0.3  # cosine of inclination angle
        psi = 0.52  # GW polarisation angle (rads)

        # binary parameters
        asini = 2.9  # projected semi-major axis (ls)
        period = 0.3 * 86400  # orbital period (s)
        Tp = 999995083  # time of periastron (GPS)
        argp = 0.5  # argument of periastron (rad)
        ecc = 0.09  # the orbital eccentricity

        # for MFD I need to create this as two transient pulsars using a
        # rectangular window cutting before and after the glitch
        mfddic = {
            "alpha": alpha,
            "delta": delta,
            "f0": 2 * f0,
            "f1": 2 * f1,
            "f2": 2 * f2,
            "pepoch": pepoch,
            "h0": h0,
            "cosi": cosiota,
            "psi": psi,
            "phi0": phi0,
        }

        mfdbindic = {
            "asini": asini,
            "Tp": Tp,
            "period": period,
            "argp": argp,
            "ecc": ecc,
        }

        mfdtransientdic = {
            "wintype": "rect",
            "tstart": cls.fakedatastarts[0],
            "tau": 86400,
        }

        # signal before the glitch
        with open(injfile, "a") as fp:
            fp.write("[Pulsar 3]\n")
            fp.write(isolatedstr.format(**mfddic))
            fp.write(binarystr.format(**mfdbindic))
            fp.write(transientstr.format(**mfdtransientdic))
            fp.write("\n")

        mfddic["f0"] = 2 * (f0 + df0)
        mfddic["f1"] = 2 * (f1 + df1)
        mfddic["f2"] = 2 * (f2 + df2)
        mfddic["phi0"] = phi0 + 2 * dphi

        mfdtransientdic["tstart"] = cls.fakedatastarts[1]

        # signal after the glitch
        with open(injfile, "a") as fp:
            fp.write("[Pulsar 4]\n")
            fp.write(isolatedstr.format(**mfddic))
            fp.write(binarystr.format(**mfdbindic))
            fp.write(transientstr.format(**mfdtransientdic))

        cls.fakepulsarpar.append(PulsarParametersPy())
        cls.fakepulsarpar[2]["PSRJ"] = "J2222+2222"
        cls.fakepulsarpar[2]["H0"] = h0
        cls.fakepulsarpar[2]["PHI0"] = phi0 / 2.0
        cls.fakepulsarpar[2]["PSI"] = psi
        cls.fakepulsarpar[2]["COSIOTA"] = cosiota
        cls.fakepulsarpar[2]["F"] = [f0, f1, f2]
        cls.fakepulsarpar[2]["RAJ"] = alpha
        cls.fakepulsarpar[2]["DECJ"] = delta
        cls.fakepulsarpar[2]["PEPOCH"] = pepoch
        cls.fakepulsarpar[2]["BINARY"] = "BT"
        cls.fakepulsarpar[2]["E"] = ecc
        cls.fakepulsarpar[2]["A1"] = asini
        cls.fakepulsarpar[2]["T0"] = Tp
        cls.fakepulsarpar[2]["OM"] = argp
        cls.fakepulsarpar[2]["PB"] = period
        cls.fakepulsarpar[2]["EPHEM"] = "DE405"
        cls.fakepulsarpar[2]["UNITS"] = "TDB"
        cls.fakepulsarpar[2]["GLEP"] = [glepoch]
        cls.fakepulsarpar[2]["GLF0"] = [df0]
        cls.fakepulsarpar[2]["GLF1"] = [df1]
        cls.fakepulsarpar[2]["GLF2"] = [df2]
        cls.fakepulsarpar[2]["GLPH"] = [dphi / (2 * np.pi)]

        cls.fakeparfile.append(os.path.join(cls.fakepardir, "J2222+2222.par"))
        cls.fakepulsarpar[2].pp_to_par(cls.fakeparfile[-1])

        # set ephemeris files
        efile = download_file(DOWNLOAD_URL.format("earth00-40-DE405.dat.gz"),
                              cache=True)
        sfile = download_file(DOWNLOAD_URL.format("sun00-40-DE405.dat.gz"),
                              cache=True)

        for datastart in cls.fakedatastarts:
            for i in range(len(cls.fakedatachannels)):
                cmds = [
                    "-F",
                    cls.fakedatadir,
                    "--outFrChannels={}".format(cls.fakedatachannels[i]),
                    "-I",
                    cls.fakedatadetectors[i],
                    "--sqrtSX={0:.1e}".format(sqrtSn),
                    "-G",
                    str(datastart),
                    "--duration={}".format(cls.fakedataduration),
                    "--Band={}".format(cls.fakedatabandwidth),
                    "--fmin",
                    "0",
                    '--injectionSources="{}"'.format(injfile),
                    "--outLabel={}".format(cls.fakedataname),
                    '--ephemEarth="{}"'.format(efile),
                    '--ephemSun="{}"'.format(sfile),
                ]

                # run makefakedata
                sp.run([mfd] + cmds)

        # create dummy segment file
        cls.dummysegments = [(1000000000, 1000000600),
                             (1000000800, 1000000900)]
        cls.dummysegmentfile = os.path.join(cls.fakedatadir,
                                            "fakesegments.txt")
        with open(cls.dummysegmentfile, "w") as fp:
            for segs in cls.dummysegments:
                fp.write("{} {}\n".format(segs[0], segs[1]))
Example #50
0
In this example we load the Daily Synoptic Maps produced by the HMI team. This
data is an interesting demonstration of SunPy's Map class because it is not in
the more common Helioprojective coordinate system: it is in Heliographic
Carrington coordinates and uses a non-trivial Cylindrical Equal Area projection.
"""
import matplotlib.pyplot as plt

from astropy.utils.data import download_file

import sunpy.map

###############################################################################
# Let's download the file and read it into a Map
filename = download_file(
    'http://jsoc.stanford.edu/data/hmi/synoptic/hmi.Synoptic_Mr.2191.fits',
    cache=True)
syn_map = sunpy.map.Map(filename)

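###############################################################################
# A quick sanity check of the projection described above (not part of the
# original example): the CTYPE header keywords should report Carrington
# longitude/latitude in a Cylindrical Equal Area (CEA) projection.
print(syn_map.meta['ctype1'], syn_map.meta['ctype2'])
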
###############################################################################
# Let's fix the plot settings
syn_map.plot_settings['cmap'] = 'hmimag'
syn_map.plot_settings['norm'] = plt.Normalize(-1500, 1500)

###############################################################################
# Let's plot the results
fig = plt.figure(figsize=(12, 5))
axes = plt.subplot(projection=syn_map)
im = syn_map.plot()

# Set up the Sine Latitude Grid
Example #51
0
def query_flag(ifo,
               name,
               start_time,
               end_time,
               source='any',
               server="segments.ligo.org",
               veto_definer=None,
               cache=False):
    """Return the times where the flag is active

    Parameters
    ----------
    ifo: string
        The interferometer to query (H1, L1).
    name: string
        The status flag to query from LOSC.
    start_time: int
        The starting gps time to begin querying from LOSC
    end_time: int
        The end gps time of the query
    source: str, Optional
        Choice between "GWOSC" or "dqsegdb". If dqsegdb, the server option may
        also be given. The default is to try GWOSC first then try dqsegdb.
    server: str, Optional
        The server path. Only used with dqsegdb atm.
    veto_definer: str, Optional
        The path to a veto definer to define groups of flags which
        themselves define a set of segments.
    cache: bool
        If true cache the query. Default is not to cache

    Returns
    ---------
    segments: glue.segments.segmentlist
        List of segments
    """
    info = name.split(':')
    if len(info) == 2:
        segment_name, version = info
    elif len(info) == 1:
        segment_name = info[0]
        version = 1

    flag_segments = segmentlist([])

    if source in ['GWOSC', 'any']:
        # Special cases as the LOSC convention is backwards from normal
        # LIGO / Virgo operation!!!!
        if (('_HW_INJ' in segment_name and 'NO' not in segment_name)
                or 'VETO' in segment_name):
            data = query_flag(ifo, 'DATA', start_time, end_time)

            if '_HW_INJ' in segment_name:
                name = 'NO_' + segment_name
            else:
                name = segment_name.replace('_VETO', '')

            negate = query_flag(ifo, name, start_time, end_time, cache=cache)
            return (data - negate).coalesce()

        duration = end_time - start_time
        url = GWOSC_URL.format(get_run(start_time + duration / 2), ifo,
                               segment_name, int(start_time), int(duration))

        try:
            fname = download_file(url, cache=cache)
            data = json.load(open(fname, 'r'))
            if 'segments' in data:
                flag_segments = data['segments']

        except Exception as e:
            msg = "Unable to find segments in GWOSC, check flag name or times"
            print(e)
            if source != 'any':
                raise ValueError(msg)
            else:
                print("Tried and failed GWOSC {}, trying dqsegdb".format(name))

            return query_flag(ifo,
                              segment_name,
                              start_time,
                              end_time,
                              source='dqsegdb',
                              server=server,
                              veto_definer=veto_definer)

    elif source == 'dqsegdb':
        # Let's not hard require dqsegdb to be installed if we never get here.
        try:
            from dqsegdb.apicalls import dqsegdbQueryTimes as query
        except ImportError:
            raise ValueError("Could not query flag. Install dqsegdb"
                             ":'pip install dqsegdb'")

        # The veto definer will allow the use of MACRO names
        # These directly correspond to the names defined in the veto definer file.
        if veto_definer is not None:
            veto_def = parse_veto_definer(veto_definer)

        # We treat the veto definer name as if it were its own flag and
        # process the flags in the veto definer
        if veto_definer is not None and segment_name in veto_def[ifo]:
            for flag in veto_def[ifo][segment_name]:
                segs = query("https", server, ifo, flag['name'],
                             flag['version'], 'active', int(start_time),
                             int(end_time))[0]['active']

                # Apply padding to each segment
                for rseg in segs:
                    seg_start = rseg[0] + flag['start_pad']
                    seg_end = rseg[1] + flag['end_pad']
                    flag_segments.append(segment(seg_start, seg_end))

            # Apply start / end of the veto definer segment
            send = segmentlist([segment([veto_def['start'], veto_def['end']])])
            flag_segments = (flag_segments.coalesce() & send)

        else:  # Standard case just query directly.
            try:
                segs = query("https", server, ifo, name, version, 'active',
                             int(start_time), int(end_time))[0]['active']
                for rseg in segs:
                    flag_segments.append(segment(rseg[0], rseg[1]))
            except Exception as e:
                print("Could not query flag, check name "
                      " (%s) or times" % segment_name)
                raise e

    else:
        raise ValueError("Source must be dqsegdb or GWOSC."
                         " Got {}".format(source))

    return segmentlist(flag_segments).coalesce()
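
# A minimal usage sketch for the GWOSC path of the function above (illustrative
# GPS times; GWOSC_URL and get_run are module-level helpers defined elsewhere
# in the original module and are not shown in this snippet):
science_segments = query_flag('H1', 'DATA', 1126257414, 1126261510,
                              source='GWOSC')
print("{} science segments found".format(len(science_segments)))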
Example #52
0
File: plot_adr.py Project: dmargala/tpcorr
# Plots differential refraction relative to 4000A and 5400A (adr_plot.pdf)

import numpy as np
import matplotlib as mpl
mpl.use('Agg')
mpl.rcParams.update({'font.size': 18})
mpl.rcParams.update({'savefig.dpi': 200})
mpl.rcParams.update({'savefig.bbox': 'tight'})

import matplotlib.pyplot as plt

from astropy.utils.data import download_file
from astropy.utils import iers
iers.IERS.iers_table = iers.IERS_A.open(
    download_file(iers.IERS_A_URL, cache=True))

import astropy.units as u
import astropy.coordinates
import astropy.time


def main():
    # observatory
    apo = astropy.coordinates.EarthLocation.of_site('apo')
    pressure = 72.80555 * u.kPa
    temperature = 15 * u.deg_C
    when = astropy.time.Time.now()

    # pointings
    num_alt_steps = 5
Example #53
0
*By: Adrian Price-Whelan*

*License: BSD*

-------------------

"""

from astropy.io import fits

##############################################################################
# Download a FITS file:

from astropy.utils.data import download_file
fits_file = download_file(
    'http://data.astropy.org/tutorials/FITS-Header/input_file.fits',
    cache=True)

##############################################################################
# Look at contents of the FITS file

fits.info(fits_file)

##############################################################################
# Look at the headers of the two extensions:

print("Before modifications:")
print()
print("Extension 0:")
print(repr(fits.getheader(fits_file, 0)))
print()
Example #54
0
    def _pvobs(self):
        '''calculates position and velocity of the observatory
           returns position/velocity in AU and AU/d in GCRS reference frame
        '''

        # convert obs position from WGS84 (lat long) to ITRF geocentric coords in AU
        xyz = self.location.to(u.AU).value

        # now we need to convert this position to Celestial Coords
        # specifically, the GCRS coords.
        # conversion from celestial to terrestrial coords given by
        # [TRS] = RPOM * R_3(ERA) * RC2I * [CRS]
        # where:
        # [CRS] is vector in GCRS (geocentric celestial system)
        # [TRS] is vector in ITRS (International Terrestrial Ref System)
        # ERA is earth rotation angle
        # RPOM = polar motion matrix

        tt = self.tt
        mjd = self.utc.mjd

        # we need the IERS values to correct for the precession/nutation of the Earth
        iers_tab = IERS.open()

        # Find UT1, which is needed to calculate ERA
        # uses IERS_B by default , for more recent times use IERS_A download
        try:
            ut1 = self.ut1
        except:
            try:
                iers_a_file = download_file(IERS_A_URL, cache=True)
                iers_a = IERS_A.open(iers_a_file)
                self.delta_ut1_utc = self.get_delta_ut1_utc(iers_a)
                ut1 = self.ut1
            except:
                # fall back to UTC with degraded accuracy
                warnings.warn(
                    'Cannot calculate UT1: using UTC with degraded accuracy')
                ut1 = self.utc

        # Gets x,y coords of Celestial Intermediate Pole (CIP) and CIO locator s
        # CIO = Celestial Intermediate Origin
        # Both in GCRS
        X, Y, S = erfa.xys00a(tt.jd1, tt.jd2)

        # Get dX and dY from IERS B
        dX = np.interp(mjd, iers_tab['MJD'], iers_tab['dX_2000A']) * u.arcsec
        dY = np.interp(mjd, iers_tab['MJD'], iers_tab['dY_2000A']) * u.arcsec

        # Get GCRS to CIRS matrix
        # can be used to convert to Celestial Intermediate Ref Sys
        # from GCRS.
        rc2i = erfa.c2ixys(X + dX.to(u.rad).value, Y + dY.to(u.rad).value, S)

        # Gets the Terrestrial Intermediate Origin (TIO) locator s'
        # Terrestrial Intermediate Ref Sys (TIRS) defined by TIO and CIP.
        # TIRS related to to CIRS by Earth Rotation Angle
        sp = erfa.sp00(tt.jd1, tt.jd2)

        # Get the polar motion coordinates xp, yp from IERS B
        xp = np.interp(mjd, iers_tab['MJD'], iers_tab['PM_x']) * u.arcsec
        yp = np.interp(mjd, iers_tab['MJD'], iers_tab['PM_y']) * u.arcsec

        # Get the polar motion matrix. Relates ITRF to TIRS.
        rpm = erfa.pom00(xp.to(u.rad).value, yp.to(u.rad).value, sp)

        # multiply ITRF position of obs by transpose of polar motion matrix
        # Gives Intermediate Ref Frame position of obs
        x, y, z = np.array([rpmMat.T.dot(xyz) for rpmMat in rpm]).T

        # Functions of Earth Rotation Angle, theta
        # Theta is the angle between TIO and CIO (along CIP)
        # USE UT1 here.
        theta = erfa.era00(ut1.jd1, ut1.jd2)
        S, C = np.sin(theta), np.cos(theta)

        # Position
        pos = np.asarray([C * x - S * y, S * x + C * y, z]).T

        # multiply by inverse of GCRS to CIRS matrix
        # different methods for scalar times vs arrays
        if pos.ndim > 1:
            pos = np.array(
                [np.dot(rc2i[j].T, pos[j]) for j in range(len(pos))])
        else:
            pos = np.dot(rc2i.T, pos)

        # Velocity
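        # SR is assumed to be the Earth rotation rate in radians per day,
        # defined elsewhere in the original class; it is not set in this snippet.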
        vel = np.asarray(
            [SR * (-S * x - C * y), SR * (C * x - S * y),
             np.zeros_like(x)]).T
        # multiply by inverse of GCRS to CIRS matrix
        if vel.ndim > 1:
            vel = np.array(
                [np.dot(rc2i[j].T, vel[j]) for j in range(len(pos))])
        else:
            vel = np.dot(rc2i.T, vel)

        #return position and velocity
        return pos, vel
Example #55
0
File: snrcat.py Project: tredsvc/gammapy
def _fetch_catalog_snrcat_snr_table(cache):
    url = "http://www.physics.umanitoba.ca/snr/SNRcat/SNRdownload.php?table=SNR"
    filename = download_file(url, cache=cache)

    # Note: currently the first line contains this comment, which we skip via `header_start=1`
    table = Table.read(filename, format="ascii.csv", header_start=1, delimiter=";")
    table.meta["url"] = url
    table.meta["version"] = _snrcat_parse_download_date(filename)

    # TODO: doesn't work properly ... don't call for now.
    # _snrcat_fix_na(table)

    table.rename_column("G", "Source_Name")

    table.rename_column("J2000_ra (hh:mm:ss)", "RAJ2000_str")
    table.rename_column("J2000_dec (dd:mm:ss)", "DEJ2000_str")

    data = Angle(table["RAJ2000_str"], unit="hour").deg
    index = table.index_column("RAJ2000_str") + 1
    table.add_column(Column(data=data, name="RAJ2000", unit="deg"), index=index)

    data = Angle(table["DEJ2000_str"], unit="deg").deg
    index = table.index_column("DEJ2000_str") + 1
    table.add_column(Column(data=data, name="DEJ2000", unit="deg"), index=index)

    radec = SkyCoord(table["RAJ2000"], table["DEJ2000"], unit="deg")
    galactic = radec.galactic
    table.add_column(Column(data=galactic.l.deg, name="GLON", unit="deg"))
    table.add_column(Column(data=galactic.b.deg, name="GLAT", unit="deg"))

    table.rename_column("age_min (yr)", "age_min")
    table["age_min"].unit = "year"
    table.rename_column("age_max (yr)", "age_max")
    table["age_max"].unit = "year"
    age = np.mean([table["age_min"], table["age_max"]], axis=0)
    index = table.index_column("age_max") + 1
    table.add_column(Column(age, name="age", unit="year"), index=index)

    table.rename_column("distance_min (kpc)", "distance_min")
    table["distance_min"].unit = "kpc"
    table.rename_column("distance_max (kpc)", "distance_max")
    table["distance_max"].unit = "kpc"
    distance = np.mean([table["distance_min"], table["distance_max"]], axis=0)
    index = table.index_column("distance_max") + 1
    table.add_column(Column(distance, name="distance", unit="kpc"), index=index)

    table.rename_column("size_radio", "diameter_radio_str")
    diameter_radio_mean = _snrcat_parse_diameter(table["diameter_radio_str"])
    index = table.index_column("diameter_radio_str") + 1
    table.add_column(
        Column(diameter_radio_mean, name="diameter_radio_mean", unit="arcmin"),
        index=index,
    )

    table.rename_column("size_X", "diameter_xray_str")
    diameter_xray_mean = _snrcat_parse_diameter(table["diameter_xray_str"])
    index = table.index_column("diameter_xray_str") + 1
    table.add_column(
        Column(diameter_xray_mean, name="diameter_xray_mean", unit="arcmin"),
        index=index,
    )

    table.rename_column("size_coarse (arcmin)", "diameter_mean")
    table["diameter_mean"].unit = "arcmin"

    table.rename_column("size_imprecise", "diameter_mean_is_imprecise")

    return table
Example #56
0
File: dq.py Project: veronica-villa/pycbc
def query_flag(ifo,
               segment_name,
               start_time,
               end_time,
               source='any',
               server="https://segments.ligo.org",
               veto_definer=None,
               cache=False):
    """Return the times where the flag is active

    Parameters
    ----------
    ifo: string
        The interferometer to query (H1, L1).
    segment_name: string
        The status flag to query from LOSC.
    start_time: int
        The starting gps time to begin querying from LOSC
    end_time: int
        The end gps time of the query
    source: str, Optional
        Choice between "GWOSC" or "dqsegdb". If dqsegdb, the server option may
        also be given. The default is to try GWOSC first then try dqsegdb.
    server: str, Optional
        The server path. Only used with dqsegdb atm.
    veto_definer: str, Optional
        The path to a veto definer to define groups of flags which
        themselves define a set of segments.
    cache: bool
        If true cache the query. Default is not to cache

    Returns
    ---------
    segments: glue.segments.segmentlist
        List of segments
    """
    flag_segments = segmentlist([])

    if source in ['GWOSC', 'any']:
        # Special cases as the LOSC convention is backwards from normal
        # LIGO / Virgo operation!!!!
        if (('_HW_INJ' in segment_name and 'NO' not in segment_name)
                or 'VETO' in segment_name):
            data = query_flag(ifo, 'DATA', start_time, end_time)

            if '_HW_INJ' in segment_name:
                name = 'NO_' + segment_name
            else:
                name = segment_name.replace('_VETO', '')

            negate = query_flag(ifo, name, start_time, end_time, cache=cache)
            return (data - negate).coalesce()

        duration = end_time - start_time
        try:
            url = GWOSC_URL.format(get_run(start_time + duration / 2),
                                   ifo, segment_name, int(start_time),
                                   int(duration))

            fname = download_file(url, cache=cache)
            data = json.load(open(fname, 'r'))
            if 'segments' in data:
                flag_segments = data['segments']

        except Exception as e:
            if source != 'any':
                print(e)
                raise ValueError("Unable to find {} segments in GWOSC, check "
                                 "flag name or times".format(segment_name))
            else:
                print("Tried and failed to find {} in GWOSC, trying dqsegdb".
                      format(segment_name))

            return query_flag(ifo,
                              segment_name,
                              start_time,
                              end_time,
                              source='dqsegdb',
                              server=server,
                              veto_definer=veto_definer)

    elif source == 'dqsegdb':
        # Let's not hard require dqsegdb to be installed if we never get here.
        try:
            from dqsegdb2.query import query_segments as query
        except ImportError:
            raise ValueError("Could not query flag. Install dqsegdb2"
                             ":'pip install dqsegdb2'")

        # The veto definer will allow the use of MACRO names
        # These directly correspond to the name in the veto definer file
        if veto_definer is not None:
            veto_def = parse_veto_definer(veto_definer, [ifo])

        # We treat the veto definer name as if it were its own flag and
        # process the flags in the veto definer
        if veto_definer is not None and segment_name in veto_def[ifo]:
            for flag in veto_def[ifo][segment_name]:
                partial = segmentlist([])
                segs = query(ifo + ':' + flag['full_name'],
                             int(start_time),
                             int(end_time),
                             host=server)['active']

                # Apply padding to each segment
                for rseg in segs:
                    seg_start = rseg[0] + flag['start_pad']
                    seg_end = rseg[1] + flag['end_pad']
                    partial.append(segment(seg_start, seg_end))

                # Limit to the veto definer stated valid region of this flag
                flag_start = flag['start']
                flag_end = flag['end']
                # Corner case: if the flag end time is 0 it means 'no limit'
                # so use the query end time
                if flag_end == 0:
                    flag_end = int(end_time)
                send = segmentlist([segment(flag_start, flag_end)])
                flag_segments += (partial.coalesce() & send)

        else:  # Standard case just query directly
            try:
                segs = query(':'.join([ifo, segment_name]),
                             int(start_time),
                             int(end_time),
                             host=server)['active']
                for rseg in segs:
                    flag_segments.append(segment(rseg[0], rseg[1]))
            except Exception as e:
                print("Could not query flag, check name "
                      "(%s) or times" % segment_name)
                raise e

        # dqsegdb output is not guaranteed to lie entirely within start
        # and end times, hence restrict to this range
        flag_segments = flag_segments.coalesce() & \
            segmentlist([segment(int(start_time), int(end_time))])

    else:
        raise ValueError("Source must be dqsegdb or GWOSC."
                         " Got {}".format(source))

    return segmentlist(flag_segments).coalesce()
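
# A minimal usage sketch for the dqsegdb path of the function above (requires
# dqsegdb2 and network access; the flag name and GPS interval are illustrative
# only and are not taken from the original module):
ready_segments = query_flag('L1', 'DMT-ANALYSIS_READY:1', 1238166018,
                            1238169618, source='dqsegdb')
print("{} analysis-ready segments".format(len(ready_segments)))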
Example #57
0
"""

##############################################################################
# Use `astropy.utils.data` subpackage to download the FITS file used in this
# example. Also import `~astropy.table.Table` from the `astropy.table` subpackage
# and `astropy.io.fits`

from astropy.utils.data import download_file
from astropy.table import Table
from astropy.io import fits

##############################################################################
# Download a FITS file

event_filename = download_file('http://data.astropy.org/tutorials/FITS-tables/chandra_events.fits',
                               cache=True)

##############################################################################
# Display information about the contents of the FITS file.

fits.info(event_filename)

##############################################################################
# Extension 1, EVENTS, is a Table that contains information about each X-ray
# photon that hit Chandra's HETG-S detector.
#
# Use `~astropy.table.Table` to read the table

events = Table.read(event_filename, hdu=1)

##############################################################################
Example #58
0
def fetch_fermi_catalog(catalog, extension=None):
    """Fetch Fermi catalog data.

    Reference: http://fermi.gsfc.nasa.gov/ssc/data/access/lat/.

    The Fermi catalogs contain the following relevant catalog HDUs:

    * 3FGL Catalog : LAT 4-year Point Source Catalog
        * ``LAT_Point_Source_Catalog`` Point Source Catalog Table.
        * ``ExtendedSources`` Extended Source Catalog Table.
    * 2FGL Catalog : LAT 2-year Point Source Catalog
        * ``LAT_Point_Source_Catalog`` Point Source Catalog Table.
        * ``ExtendedSources`` Extended Source Catalog Table.
    * 1FGL Catalog : LAT 1-year Point Source Catalog
        * ``LAT_Point_Source_Catalog`` Point Source Catalog Table.
    * 2FHL Catalog : Second Fermi-LAT Catalog of High-Energy Sources
        * ``Count Map`` AIT projection 2D count image
        * ``2FHL Source Catalog`` Main catalog
        * ``Extended Sources`` Extended Source Catalog Table
        * ``ROIs`` Regions of interest
    * 1FHL Catalog : First Fermi-LAT Catalog of Sources above 10 GeV
        * ``LAT_Point_Source_Catalog`` Point Source Catalog Table.
        * ``ExtendedSources`` Extended Source Catalog Table.
    * 2PC Catalog : LAT Second Catalog of Gamma-ray Pulsars
        * ``PULSAR_CATALOG`` Pulsar Catalog Table.
        * ``SPECTRAL`` Table of Pulsar Spectra Parameters.
        * ``OFF_PEAK`` Table for further Spectral and Flux data for the Catalog.

    Parameters
    ----------
    catalog : {'3FGL', '2FGL', '1FGL', '1FHL', '2FHL', '2PC'}
       Specifies which catalog to display.
    extension : str
        Specifies which catalog HDU to provide as a table (optional).
        See list of catalog HDUs above.

    Returns
    -------
    hdu_list (Default) : `~astropy.io.fits.HDUList`
        Catalog FITS HDU list (for access to full catalog dataset).
    catalog_table : `~astropy.table.Table`
        Catalog table for a selected hdu extension.

    Examples
    --------
    >>> from gammapy.catalog import fetch_fermi_catalog
    >>> fetch_fermi_catalog('2FGL')
        [<astropy.io.fits.hdu.image.PrimaryHDU at 0x3330790>,
         <astropy.io.fits.hdu.table.BinTableHDU at 0x338b990>,
         <astropy.io.fits.hdu.table.BinTableHDU at 0x3396450>,
         <astropy.io.fits.hdu.table.BinTableHDU at 0x339af10>,
         <astropy.io.fits.hdu.table.BinTableHDU at 0x339ff10>]

    >>> from gammapy.catalog import fetch_fermi_catalog
    >>> fetch_fermi_catalog('2FGL', 'LAT_Point_Source_Catalog')
        <Table rows=1873 names= ... >
    """
    BASE_URL = 'http://fermi.gsfc.nasa.gov/ssc/data/access/lat/'

    if catalog == '3FGL':
        url = BASE_URL + '4yr_catalog/gll_psc_v16.fit'
    elif catalog == '2FGL':
        url = BASE_URL + '2yr_catalog/gll_psc_v08.fit'
    elif catalog == '1FGL':
        url = BASE_URL + '1yr_catalog/gll_psc_v03.fit'
    elif catalog == '1FHL':
        url = BASE_URL + '1FHL/gll_psch_v07.fit'
    elif catalog == '2FHL':
        url = 'https://github.com/gammapy/gammapy-extra/raw/master/datasets/catalogs/fermi/gll_psch_v08.fit.gz'
    elif catalog == '2PC':
        url = BASE_URL + '2nd_PSR_catalog/2PC_catalog_v03.fits'
    else:
        ss = 'Invalid catalog: {0}\n'.format(catalog)
        raise ValueError(ss)

    filename = download_file(url, cache=True)
    hdu_list = fits.open(filename)

    if extension is None:
        return hdu_list

    # TODO: 2FHL doesn't have a 'CLASS1' column, just 'CLASS'
    # It's probably better if we make a `SourceCatalog` class
    # and then sub-class `FermiSourceCatalog` and `Fermi2FHLSourceCatalog`
    # and handle catalog-specific stuff in these classes,
    # trying to provide an as-uniform as possible API to the common catalogs.
    table = Table(hdu_list[extension].data)
    table['IS_GALACTIC'] = [_is_galactic(_) for _ in table['CLASS1']]

    return table
Example #59
0
# Set up matplotlib and astropy
import matplotlib.pyplot as plt
from astropy.io import fits

# Download the fits file
from astropy.utils.data import download_file

file = 'http://data.astropy.org/tutorials/FITS-images/HorseHead.fits'

image = download_file(file, cache=True)

# simple image grabber
hdu_list = fits.open(image)
hdu_list.info()
new = hdu_list[0].data

# show the image
plt.imshow(new)
plt.colorbar()
plt.show()

from pathlib import Path
import tarfile

from astropy.utils.data import download_file

# Get single images
url = 'https://zenodo.org/record/3320113/files/combined_bias_100_images.fit.bz2?download=1'
download = download_file(url, show_progress=True)
p = Path(download)
p.rename('combined_bias_100_images.fit.bz2')

url = 'https://zenodo.org/record/3312535/files/dark-test-0002d1000.fit.bz2?download=1'
download = download_file(url, show_progress=True)
p = Path(download)
p.rename('dark-test-0002d1000.fit.bz2')

url = 'https://zenodo.org/record/3332818/files/combined_dark_300.000.fits.bz2?download=1'
download = download_file(url, show_progress=True)
p = Path(download)
p.rename('combined_dark_300.000.fits.bz2')

url = 'https://zenodo.org/record/4302262/files/combined_dark_exposure_1000.0.fit.bz2?download=1'
download = download_file(url, show_progress=True)
p = Path(download)
p.rename('combined_dark_exposure_1000.0.fit.bz2')

# Get the tarball for the smaller example and extract it
url = 'https://zenodo.org/record/3254683/files/example-cryo-LFC.tar.bz2?download=1'
download = download_file(url, show_progress=True, cache=True)
tarball = tarfile.open(download)
tarball.extractall('.')