Example #1
def push_to_url(local_file_path,
                remote_path,
                keep_original=True,
                extra_args=None):
    if sys.platform == "win32":
        if remote_path[1] == ':':
            remote_path = "file://" + remote_path
    remote_url = url_util.parse(remote_path)
    verify_ssl = spack.config.get('config:verify_ssl')

    if __UNABLE_TO_VERIFY_SSL and verify_ssl and uses_ssl(remote_url):
        warn_no_ssl_cert_checking()

    remote_file_path = url_util.local_file_path(remote_url)
    if remote_file_path is not None:
        mkdirp(os.path.dirname(remote_file_path))
        if keep_original:
            shutil.copy(local_file_path, remote_file_path)
        else:
            try:
                rename(local_file_path, remote_file_path)
            except OSError as e:
                if e.errno == errno.EXDEV:
                    # NOTE(opadron): The above move failed because it crosses
                    # filesystem boundaries.  Copy the file (plus original
                    # metadata), and then delete the original.  This operation
                    # needs to be done in separate steps.
                    shutil.copy2(local_file_path, remote_file_path)
                    os.remove(local_file_path)
                else:
                    raise

    elif remote_url.scheme == 's3':
        if extra_args is None:
            extra_args = {}

        remote_path = remote_url.path
        while remote_path.startswith('/'):
            remote_path = remote_path[1:]

        s3 = s3_util.create_s3_session(
            remote_url,
            connection=s3_util.get_mirror_connection(remote_url))  # noqa: E501
        s3.upload_file(local_file_path,
                       remote_url.netloc,
                       remote_path,
                       ExtraArgs=extra_args)

        if not keep_original:
            os.remove(local_file_path)

    elif remote_url.scheme == 'gs':
        gcs = gcs_util.GCSBlob(remote_url)
        gcs.upload_to_blob(local_file_path)
        if not keep_original:
            os.remove(local_file_path)

    else:
        raise NotImplementedError('Unrecognized URL scheme: {SCHEME}'.format(
            SCHEME=remote_url.scheme))
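A minimal usage sketch for orientation (hedged: it assumes the function is importable as spack.util.web.push_to_url, as in Example #3's call, and every path and bucket name below is a placeholder):

import spack.util.web as web_util

# Copy a local file into a file:// mirror, keeping the original.
web_util.push_to_url('/tmp/index.json', 'file:///tmp/mirror/index.json')

# Move the same file into a hypothetical S3 mirror, setting a content type
# via ExtraArgs (the same pattern Example #5 uses for index.json).
web_util.push_to_url('/tmp/index.json',
                     's3://my-mirror/build_cache/index.json',
                     keep_original=False,
                     extra_args={'ContentType': 'application/json'})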
Example #2
File: web.py Project: key4hep/spack
def url_exists(url):
    url = url_util.parse(url)
    local_path = url_util.local_file_path(url)
    if local_path:
        return os.path.exists(local_path)

    if url.scheme == 's3':
        s3 = s3_util.create_s3_session(url)
        try:
            s3.get_object(Bucket=url.netloc, Key=url.path.lstrip('/'))
            return True
        except s3.ClientError as err:
            if err.response['Error']['Code'] == 'NoSuchKey':
                return False
            raise err

    elif url.scheme == 'gs':
        gcs = gcs_util.GCSBlob(url)
        return gcs.exists()

    # otherwise, just try to "read" from the URL, and assume that *any*
    # non-throwing response contains the resource represented by the URL
    try:
        read_from_url(url)
        return True
    except (SpackWebError, URLError):
        return False
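A short sketch of how url_exists might be used (assuming it is exposed from the same spack.util.web module; all URLs below are placeholders):

import spack.util.web as web_util

candidates = ('file:///tmp/mirror/build_cache/index.json',
              's3://my-mirror/build_cache/index.json',
              'https://example.com/build_cache/index.json')
for url in candidates:
    if web_util.url_exists(url):
        print('found: ' + url)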
Example #3
def push_keys(*mirrors, **kwargs):
    """
    Upload pgp public keys to the given mirrors
    """
    keys = kwargs.get('keys')
    regenerate_index = kwargs.get('regenerate_index', False)
    tmpdir = kwargs.get('tmpdir')
    remove_tmpdir = False

    keys = spack.util.gpg.public_keys(*(keys or []))

    try:
        for mirror in mirrors:
            push_url = getattr(mirror, 'push_url', mirror)
            keys_url = url_util.join(push_url, _build_cache_relative_path,
                                     _build_cache_keys_relative_path)
            keys_local = url_util.local_file_path(keys_url)

            verb = 'Writing' if keys_local else 'Uploading'
            tty.debug('{0} public keys to {1}'.format(
                verb, url_util.format(push_url)))

            if keys_local:  # mirror is local, don't bother with the tmpdir
                prefix = keys_local
                mkdirp(keys_local)
            else:
                # A tmp dir is created for the first mirror that is non-local.
                # On the off chance that all of the mirrors are local, we
                # can avoid creating a tmp dir altogether.
                if tmpdir is None:
                    tmpdir = tempfile.mkdtemp()
                    remove_tmpdir = True
                prefix = tmpdir

            for fingerprint in keys:
                tty.debug('    ' + fingerprint)
                filename = fingerprint + '.pub'

                export_target = os.path.join(prefix, filename)
                spack.util.gpg.export_keys(export_target, fingerprint)

                # If mirror is local, the above export writes directly to the
                # mirror (export_target points directly to the mirror).
                #
                # If not, then export_target is a tmpfile that needs to be
                # uploaded to the mirror.
                if not keys_local:
                    spack.util.web.push_to_url(export_target,
                                               url_util.join(
                                                   keys_url, filename),
                                               keep_original=False)

            if regenerate_index:
                if keys_local:
                    generate_key_index(keys_url)
                else:
                    generate_key_index(keys_url, tmpdir)
    finally:
        if remove_tmpdir:
            shutil.rmtree(tmpdir)
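A hedged calling sketch: it assumes push_keys is exposed as spack.binary_distribution.push_keys (which this snippet resembles), and the fingerprint below is a placeholder.

import spack.mirror
import spack.binary_distribution as bindist

# Export one key to every configured mirror and rebuild each key index.
mirrors = list(spack.mirror.MirrorCollection().values())
bindist.push_keys(*mirrors, keys=['0123ABCD'], regenerate_index=True)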
Example #4
def get_spec(spec=None, force=False):
    """
    Check if spec.yaml exists on mirrors and return it if it does
    """
    global _cached_specs
    urls = set()
    if spec is None:
        return {}
    specfile_name = tarball_name(spec, '.spec.yaml')

    if not spack.mirror.MirrorCollection():
        tty.debug("No Spack mirrors are currently configured")
        return {}

    if _cached_specs and spec in _cached_specs:
        return _cached_specs

    for mirror in spack.mirror.MirrorCollection().values():
        fetch_url_build_cache = url_util.join(mirror.fetch_url,
                                              _build_cache_relative_path)

        mirror_dir = url_util.local_file_path(fetch_url_build_cache)
        if mirror_dir:
            tty.debug('Finding buildcaches in {0}'.format(mirror_dir))
            link = url_util.join(fetch_url_build_cache, specfile_name)
            urls.add(link)

        else:
            tty.debug('Finding buildcaches at {0}'.format(
                url_util.format(fetch_url_build_cache)))
            link = url_util.join(fetch_url_build_cache, specfile_name)
            urls.add(link)

    return try_download_specs(urls=urls, force=force)
Example #5
def generate_key_index(key_prefix, tmpdir=None):
    """Create the key index page.

    Creates (or replaces) the "index.json" page at the location given in
    key_prefix.  This page contains an entry for each key (.pub) under
    key_prefix.
    """

    tty.debug(' '.join(('Retrieving key.pub files from',
                        url_util.format(key_prefix), 'to build key index')))

    fingerprints = (entry[:-4]
                    for entry in web_util.list_url(key_prefix, recursive=False)
                    if entry.endswith('.pub'))

    keys_local = url_util.local_file_path(key_prefix)
    if keys_local:
        target = os.path.join(keys_local, 'index.json')
    else:
        target = os.path.join(tmpdir, 'index.json')

    index = {
        'keys':
        dict((fingerprint, {}) for fingerprint in sorted(set(fingerprints)))
    }
    with open(target, 'w') as f:
        sjson.dump(index, f)

    if not keys_local:
        web_util.push_to_url(target,
                             url_util.join(key_prefix, 'index.json'),
                             keep_original=False,
                             extra_args={'ContentType': 'application/json'})
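For completeness, a sketch of calling it directly; the key prefixes below (including the '_pgp' component) are placeholders for whatever the mirror layout uses, and the remote case assumes working S3 credentials:

import tempfile

# Local mirror: index.json is written straight into the key prefix.
generate_key_index('file:///tmp/mirror/build_cache/_pgp')

# Remote mirror: index.json is staged in a scratch directory, then pushed.
generate_key_index('s3://my-mirror/build_cache/_pgp', tmpdir=tempfile.mkdtemp())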
Example #6
def create(path, specs, skip_unstable_versions=False):
    """Create a directory to be used as a spack mirror, and fill it with
    package archives.

    Arguments:
        path: Path to create a mirror directory hierarchy in.
        specs: Any package versions matching these specs will be added \
            to the mirror.
        skip_unstable_versions: if true, this skips adding resources when
            they do not have a stable archive checksum (as determined by
            ``fetch_strategy.stable_target``)

    Return Value:
        Returns a tuple of lists: (present, mirrored, error)

        * present:  Package specs that were already present.
        * mirrored: Package specs that were successfully mirrored.
        * error:    Package specs that failed to mirror due to some error.

    This routine iterates through all known package versions, and
    it creates specs for those versions.  If the version satisfies any spec
    in the specs list, it is downloaded and added to the mirror.
    """
    parsed = url_util.parse(path)
    mirror_root = url_util.local_file_path(parsed)
    if not mirror_root:
        raise spack.error.SpackError(
            'MirrorCaches only work with file:// URLs')

    # automatically spec-ify anything in the specs array.
    specs = [
        s if isinstance(s, spack.spec.Spec) else spack.spec.Spec(s)
        for s in specs
    ]

    # Get the absolute path of the root before we start jumping around.
    if not os.path.isdir(mirror_root):
        try:
            mkdirp(mirror_root)
        except OSError as e:
            raise MirrorError("Cannot create directory '%s':" % mirror_root,
                              str(e))

    mirror_cache = spack.caches.MirrorCache(
        mirror_root, skip_unstable_versions=skip_unstable_versions)
    mirror_stats = MirrorStats()

    # Iterate through packages and download all safe tarballs for each
    for spec in specs:
        if spec.package.has_code:
            mirror_stats.next_spec(spec)
            _add_single_spec(spec, mirror_cache, mirror_stats)
        else:
            tty.msg("Skipping package {pkg} without code".format(
                pkg=spec.format("{name}{@version}")))

    return mirror_stats.stats()
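A usage sketch under stated assumptions: the spec names are illustrative, the target path is a placeholder, and the unpacking follows the (present, mirrored, error) tuple described in the docstring.

import spack.spec

specs = [spack.spec.Spec('zlib'), spack.spec.Spec('bzip2')]
present, mirrored, error = create('/tmp/my-mirror', specs,
                                  skip_unstable_versions=True)
print('already present:', len(present))
print('newly mirrored: ', len(mirrored))
print('failed:         ', len(error))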
Example #7
def get_specs(force=False):
    """
    Get the spec.yaml files for the build caches available on the mirrors
    """
    global _cached_specs

    if _cached_specs:
        tty.debug("Using previously-retrieved specs")
        return _cached_specs

    if not spack.mirror.MirrorCollection():
        tty.warn("No Spack mirrors are currently configured")
        return {}

    urls = set()
    for mirror in spack.mirror.MirrorCollection().values():
        fetch_url_build_cache = url_util.join(mirror.fetch_url,
                                              _build_cache_relative_path)

        mirror_dir = url_util.local_file_path(fetch_url_build_cache)
        if mirror_dir:
            tty.msg("Finding buildcaches in %s" % mirror_dir)
            if os.path.exists(mirror_dir):
                files = os.listdir(mirror_dir)
                for file in files:
                    if re.search('spec.yaml', file):
                        link = url_util.join(fetch_url_build_cache, file)
                        urls.add(link)
        else:
            tty.msg("Finding buildcaches at %s" %
                    url_util.format(fetch_url_build_cache))
            p, links = web_util.spider(
                url_util.join(fetch_url_build_cache, 'index.html'))
            for link in links:
                if re.search("spec.yaml", link):
                    urls.add(link)

    _cached_specs = []
    for link in urls:
        with Stage(link, name="build_cache", keep=True) as stage:
            if force and os.path.exists(stage.save_filename):
                os.remove(stage.save_filename)
            if not os.path.exists(stage.save_filename):
                try:
                    stage.fetch()
                except fs.FetchError:
                    continue
            with open(stage.save_filename, 'r') as f:
                # read the spec from the build cache file. All specs
                # in build caches are concrete (as they are built) so
                # we need to mark this spec concrete on read-in.
                spec = Spec.from_yaml(f)
                spec._mark_concrete()
                _cached_specs.append(spec)

    return _cached_specs
Example #8
def list_url(url):
    url = url_util.parse(url)

    local_path = url_util.local_file_path(url)
    if local_path:
        return os.listdir(local_path)

    if url.scheme == 's3':
        s3 = s3_util.create_s3_session(url)
        return list(
            set(key.split('/', 1)[0] for key in _iter_s3_prefix(s3, url)))
Example #9
def remove_url(url, recursive=False):
    url = url_util.parse(url)

    local_path = url_util.local_file_path(url)
    if local_path:
        if recursive:
            shutil.rmtree(local_path)
        else:
            os.remove(local_path)
        return

    if url.scheme == 's3':
        # Try to find a mirror for potential connection information
        s3 = s3_util.create_s3_session(
            url, connection=s3_util.get_mirror_connection(url))  # noqa: E501
        bucket = url.netloc
        if recursive:
            # Because list_objects_v2 can only return up to 1000 items
            # at a time, we have to paginate to make sure we get it all
            prefix = url.path.strip('/')
            paginator = s3.get_paginator('list_objects_v2')
            pages = paginator.paginate(Bucket=bucket, Prefix=prefix)

            delete_request = {'Objects': []}
            for item in pages.search('Contents'):
                if not item:
                    continue

                delete_request['Objects'].append({'Key': item['Key']})

                # Make sure we do not try to hit S3 with a list of more
                # than 1000 items
                if len(delete_request['Objects']) >= 1000:
                    r = s3.delete_objects(Bucket=bucket, Delete=delete_request)
                    _debug_print_delete_results(r)
                    delete_request = {'Objects': []}

            # Delete any items that remain
            if len(delete_request['Objects']):
                r = s3.delete_objects(Bucket=bucket, Delete=delete_request)
                _debug_print_delete_results(r)
        else:
            s3.delete_object(Bucket=bucket, Key=url.path.lstrip('/'))
        return

    elif url.scheme == 'gs':
        if recursive:
            bucket = gcs_util.GCSBucket(url)
            bucket.destroy(recursive=recursive)
        else:
            blob = gcs_util.GCSBlob(url)
            blob.delete_blob()
        return
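A quick sketch exercising both branches (the URLs are placeholders; the S3 call assumes credentials are configured):

# Delete a single object from a hypothetical S3 mirror.
remove_url('s3://my-mirror/build_cache/old.spec.yaml')

# Recursively remove a local build-cache directory.
remove_url('file:///tmp/mirror/build_cache', recursive=True)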
Example #10
File: web.py Project: eic/spack
def remove_url(url):
    url = url_util.parse(url)

    local_path = url_util.local_file_path(url)
    if local_path:
        os.remove(local_path)
        return

    if url.scheme == 's3':
        s3 = s3_util.create_s3_session(url)
        s3.delete_object(Bucket=url.netloc, Key=url.path)
        return
Example #11
def get_keys(install=False, trust=False, force=False):
    """
    Get the PGP public keys (files ending in .key or .pub) available on
    the configured mirrors
    """
    if not spack.mirror.MirrorCollection():
        tty.die("Please add a spack mirror to allow " +
                "download of build caches.")

    keys = set()

    for mirror in spack.mirror.MirrorCollection().values():
        fetch_url_build_cache = url_util.join(mirror.fetch_url,
                                              _build_cache_relative_path)

        mirror_dir = url_util.local_file_path(fetch_url_build_cache)
        if mirror_dir:
            tty.msg("Finding public keys in %s" % mirror_dir)
            files = os.listdir(str(mirror_dir))
            for file in files:
                if re.search(r'\.key', file) or re.search(r'\.pub', file):
                    link = url_util.join(fetch_url_build_cache, file)
                    keys.add(link)
        else:
            tty.msg("Finding public keys at %s" %
                    url_util.format(fetch_url_build_cache))
            # For an S3 mirror we need to request index.html directly
            p, links = web_util.spider(url_util.join(fetch_url_build_cache,
                                                     'index.html'),
                                       depth=1)

            for link in links:
                if re.search(r'\.key', link) or re.search(r'\.pub', link):
                    keys.add(link)

        for link in keys:
            with Stage(link, name="build_cache", keep=True) as stage:
                if os.path.exists(stage.save_filename) and force:
                    os.remove(stage.save_filename)
                if not os.path.exists(stage.save_filename):
                    try:
                        stage.fetch()
                    except fs.FetchError:
                        continue
            tty.msg('Found key %s' % link)
            if install:
                if trust:
                    Gpg.trust(stage.save_filename)
                    tty.msg('Added this key to trusted keys.')
                else:
                    tty.msg('Will not add this key to trusted keys. '
                            'Use -t to install all downloaded keys')
Example #12
def create(path, specs):
    """Create a directory to be used as a spack mirror, and fill it with
    package archives.

    Arguments:
        path: Path to create a mirror directory hierarchy in.
        specs: Any package versions matching these specs will be added \
            to the mirror.

    Return Value:
        Returns a tuple of lists: (present, mirrored, error)

        * present:  Package specs that were already present.
        * mirrored: Package specs that were successfully mirrored.
        * error:    Package specs that failed to mirror due to some error.

    This routine iterates through all known package versions, and
    it creates specs for those versions.  If the version satisfies any spec
    in the specs list, it is downloaded and added to the mirror.
    """
    parsed = url_util.parse(path)
    mirror_root = url_util.local_file_path(parsed)
    if not mirror_root:
        raise spack.error.SpackError(
            'MirrorCaches only work with file:// URLs')

    # automatically spec-ify anything in the specs array.
    specs = [
        s if isinstance(s, spack.spec.Spec) else spack.spec.Spec(s)
        for s in specs
    ]

    # Get the absolute path of the root before we start jumping around.
    if not os.path.isdir(mirror_root):
        try:
            mkdirp(mirror_root)
        except OSError as e:
            raise MirrorError("Cannot create directory '%s':" % mirror_root,
                              str(e))

    mirror_cache = spack.caches.MirrorCache(mirror_root)
    mirror_stats = MirrorStats()
    try:
        spack.caches.mirror_cache = mirror_cache
        # Iterate through packages and download all safe tarballs for each
        for spec in specs:
            mirror_stats.next_spec(spec)
            add_single_spec(spec, mirror_root, mirror_stats)
    finally:
        spack.caches.mirror_cache = None

    return mirror_stats.stats()
Example #13
def test_url_local_file_path():
    spack_root = spack.paths.spack_root
    sep = os.path.sep
    lfp = url_util.local_file_path('/a/b/c.txt')
    assert (lfp == sep + os.path.join('a', 'b', 'c.txt'))

    lfp = url_util.local_file_path('file:///a/b/c.txt')
    assert (lfp == sep + os.path.join('a', 'b', 'c.txt'))

    if is_windows:
        lfp = url_util.local_file_path('file://a/b/c.txt')
        expected = os.path.abspath(os.path.join('a', 'b', 'c.txt'))
        assert (lfp == expected)

    lfp = url_util.local_file_path('file://$spack/a/b/c.txt')
    expected = os.path.abspath(os.path.join(spack_root, 'a', 'b', 'c.txt'))
    assert (lfp == expected)

    if is_windows:
        lfp = url_util.local_file_path('file:///$spack/a/b/c.txt')
        expected = os.path.abspath(os.path.join(spack_root, 'a', 'b', 'c.txt'))
        assert (lfp == expected)

    lfp = url_util.local_file_path('file://$spack/a/b/c.txt')
    expected = os.path.abspath(os.path.join(spack_root, 'a', 'b', 'c.txt'))
    assert (lfp == expected)

    # not a file:// URL - so no local file path
    lfp = url_util.local_file_path('http:///a/b/c.txt')
    assert (lfp is None)

    lfp = url_util.local_file_path('http://a/b/c.txt')
    assert (lfp is None)

    lfp = url_util.local_file_path('http:///$spack/a/b/c.txt')
    assert (lfp is None)

    lfp = url_util.local_file_path('http://$spack/a/b/c.txt')
    assert (lfp is None)
Example #14
def test_url_local_file_path():
    spack_root = os.path.abspath(os.environ['SPACK_ROOT'])

    lfp = url_util.local_file_path('/a/b/c.txt')
    assert (lfp == '/a/b/c.txt')

    lfp = url_util.local_file_path('file:///a/b/c.txt')
    assert (lfp == '/a/b/c.txt')

    lfp = url_util.local_file_path('file://a/b/c.txt')
    expected = os.path.abspath(os.path.join('a', 'b', 'c.txt'))
    assert (lfp == expected)

    lfp = url_util.local_file_path('$spack/a/b/c.txt')
    expected = os.path.abspath(os.path.join(spack_root, 'a', 'b', 'c.txt'))
    assert (lfp == expected)

    lfp = url_util.local_file_path('file:///$spack/a/b/c.txt')
    expected = os.path.abspath(os.path.join(spack_root, 'a', 'b', 'c.txt'))
    assert (lfp == expected)

    lfp = url_util.local_file_path('file://$spack/a/b/c.txt')
    expected = os.path.abspath(os.path.join(spack_root, 'a', 'b', 'c.txt'))
    assert (lfp == expected)

    # not a file:// URL - so no local file path
    lfp = url_util.local_file_path('http:///a/b/c.txt')
    assert (lfp is None)

    lfp = url_util.local_file_path('http://a/b/c.txt')
    assert (lfp is None)

    lfp = url_util.local_file_path('http:///$spack/a/b/c.txt')
    assert (lfp is None)

    lfp = url_util.local_file_path('http://$spack/a/b/c.txt')
    assert (lfp is None)
Example #15
File: web.py Project: timkphd/spack
def push_to_url(local_file_path, remote_path, **kwargs):
    keep_original = kwargs.get('keep_original', True)

    remote_url = url_util.parse(remote_path)
    verify_ssl = spack.config.get('config:verify_ssl')

    if __UNABLE_TO_VERIFY_SSL and verify_ssl and uses_ssl(remote_url):
        warn_no_ssl_cert_checking()

    remote_file_path = url_util.local_file_path(remote_url)
    if remote_file_path is not None:
        mkdirp(os.path.dirname(remote_file_path))
        if keep_original:
            shutil.copy(local_file_path, remote_file_path)
        else:
            try:
                os.rename(local_file_path, remote_file_path)
            except OSError as e:
                if e.errno == errno.EXDEV:
                    # NOTE(opadron): The above move failed because it crosses
                    # filesystem boundaries.  Copy the file (plus original
                    # metadata), and then delete the original.  This operation
                    # needs to be done in separate steps.
                    shutil.copy2(local_file_path, remote_file_path)
                    shutil.copy2(local_file_path, remote_file_path)
                    os.remove(local_file_path)
                else:
                    raise

    elif remote_url.scheme == 's3':
        extra_args = kwargs.get('extra_args', {})

        remote_path = remote_url.path
        while remote_path.startswith('/'):
            remote_path = remote_path[1:]

        s3 = s3_util.create_s3_session(remote_url)
        s3.upload_file(local_file_path,
                       remote_url.netloc,
                       remote_path,
                       ExtraArgs=extra_args)

        if not keep_original:
            os.remove(local_file_path)

    else:
        raise NotImplementedError('Unrecognized URL scheme: {SCHEME}'.format(
            SCHEME=remote_url.scheme))
Example #16
def list_url(url, recursive=False):
    url = url_util.parse(url)

    local_path = url_util.local_file_path(url)
    if local_path:
        if recursive:
            return list(_iter_local_prefix(local_path))
        return [subpath for subpath in os.listdir(local_path)
                if os.path.isfile(os.path.join(local_path, subpath))]

    if url.scheme == 's3':
        s3 = s3_util.create_s3_session(url)
        if recursive:
            return list(_iter_s3_prefix(s3, url))

        return list(set(
            key.split('/', 1)[0]
            for key in _iter_s3_prefix(s3, url)))
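A small sketch contrasting the two modes (placeholder URLs; the S3 case assumes boto3 credentials are configured):

# Top-level file entries only.
print(list_url('file:///tmp/mirror/build_cache'))

# Every key under the prefix, however deeply nested.
print(list_url('s3://my-mirror/build_cache', recursive=True))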
Example #17
def get_specs(force=False, allarch=False):
    """
    Get the spec.yaml files for the build caches available on the mirrors
    """
    arch = architecture.Arch(architecture.platform(), 'default_os',
                             'default_target')
    arch_pattern = '([^-]*-[^-]*-[^-]*)'
    if not allarch:
        arch_pattern = '(%s-%s-[^-]*)' % (arch.platform, arch.os)

    regex_pattern = '%s(.*)(spec.yaml$)' % (arch_pattern)
    arch_re = re.compile(regex_pattern)

    if not spack.mirror.MirrorCollection():
        tty.debug("No Spack mirrors are currently configured")
        return {}

    urls = set()
    for mirror in spack.mirror.MirrorCollection().values():
        fetch_url_build_cache = url_util.join(mirror.fetch_url,
                                              _build_cache_relative_path)

        mirror_dir = url_util.local_file_path(fetch_url_build_cache)
        if mirror_dir:
            tty.msg("Finding buildcaches in %s" % mirror_dir)
            if os.path.exists(mirror_dir):
                files = os.listdir(mirror_dir)
                for file in files:
                    m = arch_re.search(file)
                    if m:
                        link = url_util.join(fetch_url_build_cache, file)
                        urls.add(link)
        else:
            tty.msg("Finding buildcaches at %s" %
                    url_util.format(fetch_url_build_cache))
            p, links = web_util.spider(
                url_util.join(fetch_url_build_cache, 'index.html'))
            for link in links:
                m = arch_re.search(link)
                if m:
                    urls.add(link)

    return try_download_specs(urls=urls, force=force)
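A brief usage sketch (assuming the module-level get_specs shown above and at least one configured mirror):

# Only specs built for the current platform and OS.
native_specs = get_specs()

# Re-download the metadata and include every architecture on the mirrors.
all_specs = get_specs(force=True, allarch=True)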
Example #18
File: web.py Project: mcuma/spack
def list_url(url, recursive=False):
    url = url_util.parse(url)

    local_path = url_util.local_file_path(url)
    if local_path:
        if recursive:
            return list(_iter_local_prefix(local_path))
        return [
            subpath for subpath in os.listdir(local_path)
            if os.path.isfile(os.path.join(local_path, subpath))
        ]

    if url.scheme == 's3':
        s3 = s3_util.create_s3_session(
            url, connection=s3_util.get_mirror_connection(url))  # noqa: E501
        if recursive:
            return list(_iter_s3_prefix(s3, url))

        return list(
            set(key.split('/', 1)[0] for key in _iter_s3_prefix(s3, url)))

    elif url.scheme == 'gs':
        gcs = gcs_util.GCSBucket(url)
        return gcs.get_all_blobs(recursive=recursive)
Example #19
def url_exists(url):
    url = url_util.parse(url)
    local_path = url_util.local_file_path(url)
    if local_path:
        return os.path.exists(local_path)

    if url.scheme == 's3':
        s3 = s3_util.create_s3_session(url)
        from botocore.exceptions import ClientError
        try:
            s3.get_object(Bucket=url.netloc, Key=url.path)
            return True
        except ClientError as err:
            if err.response['Error']['Code'] == 'NoSuchKey':
                return False
            raise err

    # otherwise, just try to "read" from the URL, and assume that *any*
    # non-throwing response contains the resource represented by the URL
    try:
        read_from_url(url)
        return True
    except URLError:
        return False