Example #1
def _urlopen(req, *args, **kwargs):
    """Wrapper for compatibility with old versions of Python."""
    url = req
    try:
        url = url.get_full_url()
    except AttributeError:
        pass

    # Note: the 'context' parameter was only introduced starting
    # with Python 2.7.9 and 3.4.3.
    if __UNABLE_TO_VERIFY_SSL:
        del kwargs['context']

    opener = urlopen
    if url_util.parse(url).scheme == 's3':
        import spack.s3_handler
        opener = spack.s3_handler.open
    elif url_util.parse(url).scheme == 'gs':
        import spack.gcs_handler
        opener = spack.gcs_handler.gcs_open

    try:
        return opener(req, *args, **kwargs)
    except TypeError as err:
        # If the above fails because of 'context', call without 'context'.
        if 'context' in kwargs and 'context' in str(err):
            del kwargs['context']
        return opener(req, *args, **kwargs)
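
A minimal, hypothetical call site for the wrapper above, assuming it lives in the same module: the request is routed through _urlopen so that s3:// and gs:// URLs reach their custom openers, and the 'context' keyword is dropped (or retried without) on interpreters that cannot verify SSL. The URL and timeout are placeholders.

import ssl
from urllib.request import Request

# Build a plain HTTPS request and let the wrapper decide how to open it.
req = Request('https://example.com/index.html')
resp = _urlopen(req, timeout=10, context=ssl.create_default_context())
print(resp.headers.get('Content-Type'))
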
Example #2
def createtarball(args):
    """create a binary package from an existing install"""

    # restrict matching to current environment if one is active
    env = ev.active_environment()

    output_location = None
    if args.directory:
        output_location = args.directory

        # User meant to provide a path to a local directory.
        # Ensure that they did not accidentally pass a URL.
        scheme = url_util.parse(output_location, scheme='<missing>').scheme
        if scheme != '<missing>':
            raise ValueError(
                '"--directory" expected a local path; got a URL, instead')

        # User meant to provide a path to a local directory.
        # Ensure that the mirror lookup does not mistake it for a named mirror.
        output_location = 'file://' + output_location

    elif args.mirror_name:
        output_location = args.mirror_name

        # User meant to provide the name of a preconfigured mirror.
        # Ensure that the mirror lookup actually returns a named mirror.
        result = spack.mirror.MirrorCollection().lookup(output_location)
        if result.name == "<unnamed>":
            raise ValueError('no configured mirror named "{name}"'.format(
                name=output_location))

    elif args.mirror_url:
        output_location = args.mirror_url

        # User meant to provide a URL for an anonymous mirror.
        # Ensure that they actually provided a URL.
        scheme = url_util.parse(output_location, scheme='<missing>').scheme
        if scheme == '<missing>':
            raise ValueError(
                '"{url}" is not a valid URL'.format(url=output_location))

    add_spec = ('package' in args.things_to_install)
    add_deps = ('dependencies' in args.things_to_install)

    _createtarball(env,
                   spec_yaml=args.spec_yaml,
                   packages=args.specs,
                   add_spec=add_spec,
                   add_deps=add_deps,
                   output_location=output_location,
                   signing_key=args.key,
                   force=args.force,
                   make_relative=args.rel,
                   unsigned=args.unsigned,
                   allow_root=args.allow_root,
                   rebuild_index=args.rebuild_index)
Example #3
def push_to_url(local_file_path,
                remote_path,
                keep_original=True,
                extra_args=None):
    if sys.platform == "win32":
        if remote_path[1] == ':':
            remote_path = "file://" + remote_path
    remote_url = url_util.parse(remote_path)
    verify_ssl = spack.config.get('config:verify_ssl')

    if __UNABLE_TO_VERIFY_SSL and verify_ssl and uses_ssl(remote_url):
        warn_no_ssl_cert_checking()

    remote_file_path = url_util.local_file_path(remote_url)
    if remote_file_path is not None:
        mkdirp(os.path.dirname(remote_file_path))
        if keep_original:
            shutil.copy(local_file_path, remote_file_path)
        else:
            try:
                rename(local_file_path, remote_file_path)
            except OSError as e:
                if e.errno == errno.EXDEV:
                    # NOTE(opadron): The above move failed because it crosses
                    # filesystem boundaries.  Copy the file (plus original
                    # metadata), and then delete the original.  This operation
                    # needs to be done in separate steps.
                    shutil.copy2(local_file_path, remote_file_path)
                    os.remove(local_file_path)
                else:
                    raise

    elif remote_url.scheme == 's3':
        if extra_args is None:
            extra_args = {}

        remote_path = remote_url.path
        while remote_path.startswith('/'):
            remote_path = remote_path[1:]

        s3 = s3_util.create_s3_session(
            remote_url,
            connection=s3_util.get_mirror_connection(remote_url))  # noqa: E501
        s3.upload_file(local_file_path,
                       remote_url.netloc,
                       remote_path,
                       ExtraArgs=extra_args)

        if not keep_original:
            os.remove(local_file_path)

    elif remote_url.scheme == 'gs':
        gcs = gcs_util.GCSBlob(remote_url)
        gcs.upload_to_blob(local_file_path)
        if not keep_original:
            os.remove(local_file_path)

    else:
        raise NotImplementedError('Unrecognized URL scheme: {SCHEME}'.format(
            SCHEME=remote_url.scheme))
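
A small, hypothetical call exercising the local-filesystem branch of push_to_url above; the paths are placeholders, and keep_original=True copies rather than moves the tarball.

# Copy a freshly built tarball into a local file:// mirror, keeping the
# original file in place.
push_to_url('/tmp/build_cache/zlib-1.2.11.spack',
            'file:///srv/spack-mirror/build_cache/zlib-1.2.11.spack',
            keep_original=True)
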
Example #4
def create_s3_session(url):
    url = url_util.parse(url)
    if url.scheme != 's3':
        raise ValueError(
            'Can not create S3 session from URL with scheme: {SCHEME}'.format(
                SCHEME=url.scheme))

    # NOTE(opadron): import boto and friends as late as possible.  We don't
    # want to require boto as a dependency unless the user actually wants to
    # access S3 mirrors.
    from boto3 import Session
    from botocore.exceptions import ClientError

    session = Session()

    s3_client_args = {"use_ssl": spack.config.get('config:verify_ssl')}

    endpoint_url = os.environ.get('S3_ENDPOINT_URL')
    if endpoint_url:
        s3_client_args['endpoint_url'] = _parse_s3_endpoint_url(endpoint_url)

    # if no access credentials provided above, then access anonymously
    if not session.get_credentials():
        from botocore import UNSIGNED
        from botocore.client import Config

        s3_client_args["config"] = Config(signature_version=UNSIGNED)

    client = session.client('s3', **s3_client_args)
    client.ClientError = ClientError
    return client
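
A hedged usage sketch for create_s3_session above: the returned boto3 client is used directly, as in the other examples on this page; the bucket name and key are placeholders.

# Open an (anonymous or credentialed) client for a mirror bucket and fetch
# its buildcache index.
s3 = create_s3_session('s3://my-spack-mirror')
obj = s3.get_object(Bucket='my-spack-mirror', Key='build_cache/index.json')
print(obj['ResponseMetadata']['HTTPHeaders'].get('content-type'))
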
Example #5
    def fetch(self):
        if self.archive_file:
            tty.msg("Already downloaded %s" % self.archive_file)
            return

        parsed_url = url_util.parse(self.url)
        if parsed_url.scheme != 's3':
            raise FetchError('S3FetchStrategy can only fetch from s3:// urls.')

        tty.msg("Fetching %s" % self.url)

        basename = os.path.basename(parsed_url.path)

        with working_dir(self.stage.path):
            _, headers, stream = web_util.read_from_url(self.url)

            with open(basename, 'wb') as f:
                shutil.copyfileobj(stream, f)

            content_type = headers['Content-type']

        if content_type == 'text/html':
            warn_content_type_mismatch(self.archive_file or "the archive")

        if self.stage.save_filename:
            os.rename(os.path.join(self.stage.path, basename),
                      self.stage.save_filename)

        if not self.archive_file:
            raise FailedDownloadError(self.url)
Example #6
def url_exists(url):
    url = url_util.parse(url)
    local_path = url_util.local_file_path(url)
    if local_path:
        return os.path.exists(local_path)

    if url.scheme == 's3':
        s3 = s3_util.create_s3_session(url)
        try:
            s3.get_object(Bucket=url.netloc, Key=url.path.lstrip('/'))
            return True
        except s3.ClientError as err:
            if err.response['Error']['Code'] == 'NoSuchKey':
                return False
            raise err

    elif url.scheme == 'gs':
        gcs = gcs_util.GCSBlob(url)
        return gcs.exists()

    # otherwise, just try to "read" from the URL, and assume that *any*
    # non-throwing response contains the resource represented by the URL
    try:
        read_from_url(url)
        return True
    except (SpackWebError, URLError):
        return False
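
A brief, hypothetical check built on url_exists above (the URL is a placeholder); tty is assumed to be llnl.util.tty, as in the other snippets.

# Probe for a remote index before attempting a download.
if not url_exists('s3://my-spack-mirror/build_cache/index.json'):
    tty.warn('remote index not found; skipping fetch')
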
Example #7
def read_from_url(url, accept_content_type=None):
    url = url_util.parse(url)
    context = None

    verify_ssl = spack.config.get('config:verify_ssl')

    # Don't even bother with a context unless the URL scheme is one that uses
    # SSL certs.
    if uses_ssl(url):
        if verify_ssl:
            if __UNABLE_TO_VERIFY_SSL:
                # User wants SSL verification, but it cannot be provided.
                warn_no_ssl_cert_checking()
            else:
                # User wants SSL verification, and it *can* be provided.
                context = ssl.create_default_context()  # novm
        else:
            # User has explicitly indicated that they do not want SSL
            # verification.
            if not __UNABLE_TO_VERIFY_SSL:
                context = ssl._create_unverified_context()

    req = Request(url_util.format(url))
    content_type = None
    is_web_url = url.scheme in ('http', 'https')
    if accept_content_type and is_web_url:
        # Make a HEAD request first to check the content type.  This lets
        # us ignore tarballs and gigantic files.
        # It would be nice to do this with the HTTP Accept header to avoid
        # one round-trip.  However, most servers seem to ignore the header
        # if you ask for a tarball with Accept: text/html.
        req.get_method = lambda: "HEAD"
        resp = _urlopen(req, timeout=_timeout, context=context)

        content_type = get_header(resp.headers, 'Content-type')

    # Do the real GET request when we know it's just HTML.
    req.get_method = lambda: "GET"

    try:
        response = _urlopen(req, timeout=_timeout, context=context)
    except URLError as err:
        raise SpackWebError('Download failed: {ERROR}'.format(ERROR=str(err)))

    if accept_content_type and not is_web_url:
        content_type = get_header(response.headers, 'Content-type')

    reject_content_type = (accept_content_type and
                           (content_type is None or
                            not content_type.startswith(accept_content_type)))

    if reject_content_type:
        tty.debug("ignoring page {0}{1}{2}".format(
            url_util.format(url),
            " with content type " if content_type is not None else "",
            content_type or ""))

        return None, None, None

    return response.geturl(), response.headers, response
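
A minimal sketch of how read_from_url above is consumed, assuming the same module context; it mirrors the way the spider in Example #30 reads the returned stream. The URL is a placeholder.

import codecs

# Fetch a page, skipping anything whose Content-type is not HTML.
response_url, headers, stream = read_from_url(
    'https://example.com/downloads/', accept_content_type='text/html')
if stream is not None:
    page = codecs.getreader('utf-8')(stream).read()
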
Example #8
def push_url_from_mirror_url(mirror_url):
    """Given a mirror URL, return the URL on which to push binary packages."""
    scheme = url_util.parse(mirror_url, scheme='<missing>').scheme
    if scheme == '<missing>':
        raise ValueError('"{0}" is not a valid URL'.format(mirror_url))
    mirror = spack.mirror.MirrorCollection().lookup(mirror_url)
    return url_util.format(mirror.push_url)
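
A one-line, hypothetical use of push_url_from_mirror_url above; the mirror URL is a placeholder and may or may not correspond to a configured mirror entry.

push_url = push_url_from_mirror_url('s3://my-spack-mirror')
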
Example #9
def create_s3_session(url, connection={}):
    url = url_util.parse(url)
    if url.scheme != 's3':
        raise ValueError(
            'Can not create S3 session from URL with scheme: {SCHEME}'.format(
                SCHEME=url.scheme))

    # NOTE(opadron): import boto and friends as late as possible.  We don't
    # want to require boto as a dependency unless the user actually wants to
    # access S3 mirrors.
    from boto3 import Session  # type: ignore[import]
    from botocore.exceptions import ClientError  # type: ignore[import]

    s3_connection, s3_client_args = get_mirror_s3_connection_info(connection)

    session = Session(**s3_connection)
    # if no access credentials provided above, then access anonymously
    if not session.get_credentials():
        from botocore import UNSIGNED  # type: ignore[import]
        from botocore.client import Config  # type: ignore[import]

        s3_client_args["config"] = Config(signature_version=UNSIGNED)

    client = session.client('s3', **s3_client_args)
    client.ClientError = ClientError
    return client
Example #10
def create(path, specs, skip_unstable_versions=False):
    """Create a directory to be used as a spack mirror, and fill it with
    package archives.

    Arguments:
        path: Path to create a mirror directory hierarchy in.
        specs: Any package versions matching these specs will be added \
            to the mirror.
        skip_unstable_versions: if true, this skips adding resources when
            they do not have a stable archive checksum (as determined by
            ``fetch_strategy.stable_target``)

    Return Value:
        Returns a tuple of lists: (present, mirrored, error)

        * present:  Package specs that were already present.
        * mirrored: Package specs that were successfully mirrored.
        * error:    Package specs that failed to mirror due to some error.

    This routine iterates through all known package versions, and
    it creates specs for those versions.  If the version satisfies any spec
    in the specs list, it is downloaded and added to the mirror.
    """
    parsed = url_util.parse(path)
    mirror_root = url_util.local_file_path(parsed)
    if not mirror_root:
        raise spack.error.SpackError(
            'MirrorCaches only work with file:// URLs')

    # automatically spec-ify anything in the specs array.
    specs = [
        s if isinstance(s, spack.spec.Spec) else spack.spec.Spec(s)
        for s in specs
    ]

    # Get the absolute path of the root before we start jumping around.
    if not os.path.isdir(mirror_root):
        try:
            mkdirp(mirror_root)
        except OSError as e:
            raise MirrorError("Cannot create directory '%s':" % mirror_root,
                              str(e))

    mirror_cache = spack.caches.MirrorCache(
        mirror_root, skip_unstable_versions=skip_unstable_versions)
    mirror_stats = MirrorStats()

    # Iterate through packages and download all safe tarballs for each
    for spec in specs:
        if spec.package.has_code:
            mirror_stats.next_spec(spec)
            _add_single_spec(spec, mirror_cache, mirror_stats)
        else:
            tty.msg("Skipping package {pkg} without code".format(
                pkg=spec.format("{name}{@version}")))

    return mirror_stats.stats()
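
A hedged usage sketch for create above, assuming spack.spec and tty are importable as in the rest of the module; the mirror path and spec names are placeholders.

import spack.spec

# Mirror all sources satisfying the given specs into a local directory.
present, mirrored, errors = create(
    '/srv/spack-source-mirror',
    [spack.spec.Spec('zlib'), spack.spec.Spec('bzip2')],
    skip_unstable_versions=True)
tty.msg('{0} already present, {1} mirrored, {2} failed'.format(
    len(present), len(mirrored), len(errors)))
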
Example #11
    def mirror_id(self):
        repo_ref = self.commit or self.tag or self.branch or 'HEAD'
        if self.submodules:
            repo_ref += '_submodules'
        if self.get_full_repo:
            repo_ref += '_full'
        repo_path = url_util.parse(self.url).path
        result = os.path.sep.join(['git', repo_path, repo_ref])
        return result
Example #12
def push_url_from_directory(output_directory):
    """Given a directory in the local filesystem, return the URL on
    which to push binary packages.
    """
    scheme = url_util.parse(output_directory, scheme='<missing>').scheme
    if scheme != '<missing>':
        raise ValueError('expected a local path, but got a URL instead')
    mirror_url = 'file://' + output_directory
    mirror = spack.mirror.MirrorCollection().lookup(mirror_url)
    return url_util.format(mirror.push_url)
Example #13
def list_url(url):
    url = url_util.parse(url)

    local_path = url_util.local_file_path(url)
    if local_path:
        return os.listdir(local_path)

    if url.scheme == 's3':
        s3 = s3_util.create_s3_session(url)
        return list(
            set(key.split('/', 1)[0] for key in _iter_s3_prefix(s3, url)))
Example #14
def remove_url(url, recursive=False):
    url = url_util.parse(url)

    local_path = url_util.local_file_path(url)
    if local_path:
        if recursive:
            shutil.rmtree(local_path)
        else:
            os.remove(local_path)
        return

    if url.scheme == 's3':
        # Try to find a mirror for potential connection information
        s3 = s3_util.create_s3_session(
            url, connection=s3_util.get_mirror_connection(url))  # noqa: E501
        bucket = url.netloc
        if recursive:
            # Because list_objects_v2 can only return up to 1000 items
            # at a time, we have to paginate to make sure we get it all
            prefix = url.path.strip('/')
            paginator = s3.get_paginator('list_objects_v2')
            pages = paginator.paginate(Bucket=bucket, Prefix=prefix)

            delete_request = {'Objects': []}
            for item in pages.search('Contents'):
                if not item:
                    continue

                delete_request['Objects'].append({'Key': item['Key']})

                # Make sure we do not try to hit S3 with a list of more
                # than 1000 items
                if len(delete_request['Objects']) >= 1000:
                    r = s3.delete_objects(Bucket=bucket, Delete=delete_request)
                    _debug_print_delete_results(r)
                    delete_request = {'Objects': []}

            # Delete any items that remain
            if len(delete_request['Objects']):
                r = s3.delete_objects(Bucket=bucket, Delete=delete_request)
                _debug_print_delete_results(r)
        else:
            s3.delete_object(Bucket=bucket, Key=url.path.lstrip('/'))
        return

    elif url.scheme == 'gs':
        if recursive:
            bucket = gcs_util.GCSBucket(url)
            bucket.destroy(recursive=recursive)
        else:
            blob = gcs_util.GCSBlob(url)
            blob.delete_blob()
        return
Example #15
File: web.py Project: eic/spack
def remove_url(url):
    url = url_util.parse(url)

    local_path = url_util.local_file_path(url)
    if local_path:
        os.remove(local_path)
        return

    if url.scheme == 's3':
        s3 = s3_util.create_s3_session(url)
        s3.delete_object(Bucket=url.netloc, Key=url.path)
        return
Example #16
def test_url_parse():

    parsed = url_util.parse('/path/to/resource', scheme='fake')
    assert(parsed.scheme == 'fake')
    assert(parsed.netloc == '')
    assert(parsed.path == '/path/to/resource')

    parsed = url_util.parse('file:///path/to/resource')
    assert(parsed.scheme == 'file')
    assert(parsed.netloc == '')
    assert(parsed.path == '/path/to/resource')

    parsed = url_util.parse('file:///path/to/resource', scheme='fake')
    assert(parsed.scheme == 'file')
    assert(parsed.netloc == '')
    assert(parsed.path == '/path/to/resource')

    parsed = url_util.parse('file://path/to/resource')
    assert(parsed.scheme == 'file')
    expected = convert_to_posix_path(
        os.path.abspath(
            posixpath.join('path', 'to', 'resource')))
    if is_windows:
        expected = expected.lstrip(drive)
    assert(parsed.path == expected)

    if is_windows:
        parsed = url_util.parse('file://%s\\path\\to\\resource' % drive)
        assert(parsed.scheme == 'file')
        expected = '/' + posixpath.join('path', 'to', 'resource')
        assert parsed.path == expected

    parsed = url_util.parse('https://path/to/resource')
    assert(parsed.scheme == 'https')
    assert(parsed.netloc == 'path')
    assert(parsed.path == '/to/resource')

    parsed = url_util.parse('gs://path/to/resource')
    assert(parsed.scheme == 'gs')
    assert(parsed.netloc == 'path')
    assert(parsed.path == '/to/resource')

    spack_root = spack.paths.spack_root
    parsed = url_util.parse('file://$spack')
    assert(parsed.scheme == 'file')

    if is_windows:
        spack_root = '/' + convert_to_posix_path(spack_root)

    assert(parsed.netloc + parsed.path == spack_root)
Example #17
def create(path, specs):
    """Create a directory to be used as a spack mirror, and fill it with
    package archives.

    Arguments:
        path: Path to create a mirror directory hierarchy in.
        specs: Any package versions matching these specs will be added \
            to the mirror.

    Return Value:
        Returns a tuple of lists: (present, mirrored, error)

        * present:  Package specs that were already present.
        * mirrored: Package specs that were successfully mirrored.
        * error:    Package specs that failed to mirror due to some error.

    This routine iterates through all known package versions, and
    it creates specs for those versions.  If the version satisfies any spec
    in the specs list, it is downloaded and added to the mirror.
    """
    parsed = url_util.parse(path)
    mirror_root = url_util.local_file_path(parsed)
    if not mirror_root:
        raise spack.error.SpackError(
            'MirrorCaches only work with file:// URLs')

    # automatically spec-ify anything in the specs array.
    specs = [
        s if isinstance(s, spack.spec.Spec) else spack.spec.Spec(s)
        for s in specs
    ]

    # Get the absolute path of the root before we start jumping around.
    if not os.path.isdir(mirror_root):
        try:
            mkdirp(mirror_root)
        except OSError as e:
            raise MirrorError("Cannot create directory '%s':" % mirror_root,
                              str(e))

    mirror_cache = spack.caches.MirrorCache(mirror_root)
    mirror_stats = MirrorStats()
    try:
        spack.caches.mirror_cache = mirror_cache
        # Iterate through packages and download all safe tarballs for each
        for spec in specs:
            mirror_stats.next_spec(spec)
            add_single_spec(spec, mirror_root, mirror_stats)
    finally:
        spack.caches.mirror_cache = None

    return mirror_stats.stats()
Example #18
File: web.py Project: eic/spack
def uses_ssl(parsed_url):
    if parsed_url.scheme == 'https':
        return True

    if parsed_url.scheme == 's3':
        endpoint_url = os.environ.get('S3_ENDPOINT_URL')
        if not endpoint_url:
            return True

        if url_util.parse(endpoint_url, scheme='https').scheme == 'https':
            return True

    return False
Example #19
def spider(root, depth=0):
    """Gets web pages from a root URL.

       If depth is specified (e.g., depth=2), then this will also follow
       up to <depth> levels of links from the root.

       This will spawn processes to fetch the children, for much improved
       performance over a sequential fetch.

    """
    root = url_util.parse(root)
    pages, links = _spider(root, set(), root, 0, depth, False)
    return pages, links
Example #20
def gcs_open(req, *args, **kwargs):
    """Open a reader stream to a blob object on GCS
    """
    import spack.util.gcs as gcs_util

    url = url_util.parse(req.get_full_url())
    gcsblob = gcs_util.GCSBlob(url)

    if not gcsblob.exists():
        raise web_util.SpackWebError('GCS blob {0} does not exist'.format(
            gcsblob.blob_path))
    stream = gcsblob.get_blob_byte_stream()
    headers = gcsblob.get_blob_headers()

    return urllib_response.addinfourl(stream, headers, url)
Example #21
def test_url_parse():
    parsed = url_util.parse('/path/to/resource')
    assert (parsed.scheme == 'file')
    assert (parsed.netloc == '')
    assert (parsed.path == '/path/to/resource')

    parsed = url_util.parse('/path/to/resource', scheme='fake')
    assert (parsed.scheme == 'fake')
    assert (parsed.netloc == '')
    assert (parsed.path == '/path/to/resource')

    parsed = url_util.parse('file:///path/to/resource')
    assert (parsed.scheme == 'file')
    assert (parsed.netloc == '')
    assert (parsed.path == '/path/to/resource')

    parsed = url_util.parse('file:///path/to/resource', scheme='fake')
    assert (parsed.scheme == 'file')
    assert (parsed.netloc == '')
    assert (parsed.path == '/path/to/resource')

    parsed = url_util.parse('file://path/to/resource')
    assert (parsed.scheme == 'file')
    assert (parsed.netloc == '')
    expected = os.path.abspath(os.path.join('path', 'to', 'resource'))
    assert (parsed.path == expected)

    parsed = url_util.parse('https://path/to/resource')
    assert (parsed.scheme == 'https')
    assert (parsed.netloc == 'path')
    assert (parsed.path == '/to/resource')

    spack_root = spack.paths.spack_root
    parsed = url_util.parse('$spack')
    assert (parsed.scheme == 'file')
    assert (parsed.netloc == '')
    assert (parsed.path == spack_root)

    parsed = url_util.parse('/a/b/c/$spack')
    assert (parsed.scheme == 'file')
    assert (parsed.netloc == '')
    expected = os.path.abspath(
        os.path.join('/', 'a', 'b', 'c', './' + spack_root))
    assert (parsed.path == expected)
Example #22
def _s3_open(url):
    parsed = url_util.parse(url)
    s3 = s3_util.create_s3_session(parsed)

    bucket = parsed.netloc
    key = parsed.path

    if key.startswith('/'):
        key = key[1:]

    obj = s3.get_object(Bucket=bucket, Key=key)

    # NOTE(opadron): Apply workaround here (see above)
    stream = WrapStream(obj['Body'])
    headers = obj['ResponseMetadata']['HTTPHeaders']

    return url, headers, stream
Example #23
def uses_ssl(parsed_url):
    if parsed_url.scheme == 'https':
        return True

    if parsed_url.scheme == 's3':
        endpoint_url = os.environ.get('S3_ENDPOINT_URL')
        if not endpoint_url:
            return True

        if url_util.parse(endpoint_url, scheme='https').scheme == 'https':
            return True

    elif parsed_url.scheme == 'gs':
        tty.debug("(uses_ssl) GCS Blob is https")
        return True

    return False
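
A minimal sketch showing how uses_ssl above combines with the module-level SSL flags, mirroring the check at the top of push_to_url in Example #3; the mirror URL is a placeholder.

# Warn when SSL verification is requested but cannot be provided.
parsed = url_util.parse('s3://my-spack-mirror/build_cache')
if __UNABLE_TO_VERIFY_SSL and spack.config.get('config:verify_ssl') \
        and uses_ssl(parsed):
    warn_no_ssl_cert_checking()
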
Example #24
def push_to_url(local_file_path, remote_path, **kwargs):
    keep_original = kwargs.get('keep_original', True)

    remote_url = url_util.parse(remote_path)
    verify_ssl = spack.config.get('config:verify_ssl')

    if __UNABLE_TO_VERIFY_SSL and verify_ssl and uses_ssl(remote_url):
        warn_no_ssl_cert_checking()

    remote_file_path = url_util.local_file_path(remote_url)
    if remote_file_path is not None:
        mkdirp(os.path.dirname(remote_file_path))
        if keep_original:
            shutil.copy(local_file_path, remote_file_path)
        else:
            try:
                os.rename(local_file_path, remote_file_path)
            except OSError as e:
                if e.errno == errno.EXDEV:
                    # NOTE(opadron): The above move failed because it crosses
                    # filesystem boundaries.  Copy the file (plus original
                    # metadata), and then delete the original.  This operation
                    # needs to be done in separate steps.
                    shutil.copy2(local_file_path, remote_file_path)
                    os.remove(local_file_path)
                else:
                    # Re-raise any other error instead of swallowing it
                    # (matches the newer push_to_url in Example #3).
                    raise

    elif remote_url.scheme == 's3':
        extra_args = kwargs.get('extra_args', {})

        remote_path = remote_url.path
        while remote_path.startswith('/'):
            remote_path = remote_path[1:]

        s3 = s3_util.create_s3_session(remote_url)
        s3.upload_file(local_file_path,
                       remote_url.netloc,
                       remote_path,
                       ExtraArgs=extra_args)

        if not keep_original:
            os.remove(local_file_path)

    else:
        raise NotImplementedError('Unrecognized URL scheme: {SCHEME}'.format(
            SCHEME=remote_url.scheme))
Example #25
def list_url(url, recursive=False):
    url = url_util.parse(url)

    local_path = url_util.local_file_path(url)
    if local_path:
        if recursive:
            return list(_iter_local_prefix(local_path))
        return [subpath for subpath in os.listdir(local_path)
                if os.path.isfile(os.path.join(local_path, subpath))]

    if url.scheme == 's3':
        s3 = s3_util.create_s3_session(url)
        if recursive:
            return list(_iter_s3_prefix(s3, url))

        return list(set(
            key.split('/', 1)[0]
            for key in _iter_s3_prefix(s3, url)))
Example #26
def create_s3_session(url, connection={}):
    url = url_util.parse(url)
    if url.scheme != 's3':
        raise ValueError(
            'Can not create S3 session from URL with scheme: {SCHEME}'.format(
                SCHEME=url.scheme))

    # NOTE(opadron): import boto and friends as late as possible.  We don't
    # want to require boto as a dependency unless the user actually wants to
    # access S3 mirrors.
    from boto3 import Session
    from botocore.exceptions import ClientError

    s3_connection = {}

    if connection:
        if connection['access_token']:
            s3_connection["aws_session_token"] = connection["access_token"]
        if connection["access_pair"][0]:
            s3_connection["aws_access_key_id"] = connection["access_pair"][0]
            s3_connection["aws_secret_access_key"] = connection["access_pair"][
                1]
        if connection["profile"]:
            s3_connection["profile_name"] = connection["profile"]

    session = Session(**s3_connection)
    s3_client_args = {"use_ssl": spack.config.get('config:verify_ssl')}

    endpoint_url = os.environ.get('S3_ENDPOINT_URL')
    if endpoint_url:
        s3_client_args['endpoint_url'] = _parse_s3_endpoint_url(endpoint_url)
    elif connection and 'endpoint_url' in connection:
        s3_client_args["endpoint_url"] = _parse_s3_endpoint_url(
            connection["endpoint_url"])  # noqa: E501
    # if no access credentials provided above, then access anonymously
    if not session.get_credentials():
        from botocore import UNSIGNED
        from botocore.client import Config

        s3_client_args["config"] = Config(signature_version=UNSIGNED)

    client = session.client('s3', **s3_client_args)
    client.ClientError = ClientError
    return client
Example #27
File: web.py Project: eic/spack
def _urlopen(req, *args, **kwargs):
    """Wrapper for compatibility with old versions of Python."""
    url = req
    try:
        url = url.get_full_url()
    except AttributeError:
        pass

    # We don't pass the 'context' parameter because it was only introduced
    # starting with Python 2.7.9 and 3.4.3.
    if 'context' in kwargs:
        del kwargs['context']

    opener = urlopen
    if url_util.parse(url).scheme == 's3':
        import spack.s3_handler
        opener = spack.s3_handler.open

    return opener(req, *args, **kwargs)
Example #28
def list_url(url, recursive=False):
    url = url_util.parse(url)

    local_path = url_util.local_file_path(url)
    if local_path:
        if recursive:
            return list(_iter_local_prefix(local_path))
        return [
            subpath for subpath in os.listdir(local_path)
            if os.path.isfile(os.path.join(local_path, subpath))
        ]

    if url.scheme == 's3':
        s3 = s3_util.create_s3_session(
            url, connection=s3_util.get_mirror_connection(url))  # noqa: E501
        if recursive:
            return list(_iter_s3_prefix(s3, url))

        return list(
            set(key.split('/', 1)[0] for key in _iter_s3_prefix(s3, url)))

    elif url.scheme == 'gs':
        gcs = gcs_util.GCSBucket(url)
        return gcs.get_all_blobs(recursive=recursive)
Example #29
def url_exists(url):
    url = url_util.parse(url)
    local_path = url_util.local_file_path(url)
    if local_path:
        return os.path.exists(local_path)

    if url.scheme == 's3':
        s3 = s3_util.create_s3_session(url)
        from botocore.exceptions import ClientError
        try:
            s3.get_object(Bucket=url.netloc, Key=url.path)
            return True
        except ClientError as err:
            if err.response['Error']['Code'] == 'NoSuchKey':
                return False
            raise err

    # otherwise, just try to "read" from the URL, and assume that *any*
    # non-throwing response contains the resource represented by the URL
    try:
        read_from_url(url)
        return True
    except URLError:
        return False
Example #30
File: web.py Project: eic/spack
def spider(root_urls, depth=0, concurrency=32):
    """Get web pages from root URLs.

    If depth is specified (e.g., depth=2), then this will also follow
    up to <depth> levels of links from each root.

    Args:
        root_urls (str or list of str): root urls used as a starting point
            for spidering
        depth (int): level of recursion into links
        concurrency (int): number of simultaneous requests that can be sent

    Returns:
        A dict of pages visited (URL) mapped to their full text and the
        set of visited links.
    """
    # Cache of visited links, meant to be captured by the closure below
    _visited = set()

    def _spider(url, collect_nested):
        """Fetches URL and any pages it links to.

        Prints out a warning only if the root can't be fetched; it ignores
        errors with pages that the root links to.

        Args:
            url (str): url being fetched and searched for links
            collect_nested (bool): whether we want to collect arguments
                for nested spidering on the links found in this url

        Returns:
            A tuple of:
            - pages: dict of pages visited (URL) mapped to their full text.
            - links: set of links encountered while visiting the pages.
            - spider_args: argument for subsequent call to spider
        """
        pages = {}  # dict from page URL -> text content.
        links = set()  # set of all links seen on visited pages.
        subcalls = []

        try:
            response_url, _, response = read_from_url(url, 'text/html')
            if not response_url or not response:
                return pages, links, subcalls

            page = codecs.getreader('utf-8')(response).read()
            pages[response_url] = page

            # Parse out the links in the page
            link_parser = LinkParser()
            link_parser.feed(page)

            while link_parser.links:
                raw_link = link_parser.links.pop()
                abs_link = url_util.join(response_url,
                                         raw_link.strip(),
                                         resolve_href=True)
                links.add(abs_link)

                # Skip stuff that looks like an archive
                if any(raw_link.endswith(s) for s in ALLOWED_ARCHIVE_TYPES):
                    continue

                # Skip already-visited links
                if abs_link in _visited:
                    continue

                # If we're not at max depth, follow links.
                if collect_nested:
                    subcalls.append((abs_link, ))
                    _visited.add(abs_link)

        except URLError as e:
            tty.debug(str(e))

            if hasattr(e, 'reason') and isinstance(e.reason, ssl.SSLError):
                tty.warn("Spack was unable to fetch url list due to a "
                         "certificate verification problem. You can try "
                         "running spack -k, which will not check SSL "
                         "certificates. Use this at your own risk.")

        except HTMLParseError as e:
            # This error indicates that Python's HTML parser sucks.
            msg = "Got an error parsing HTML."

            # Pre-2.7.3 Pythons in particular have rather prickly HTML parsing.
            if sys.version_info[:3] < (2, 7, 3):
                msg += " Use Python 2.7.3 or newer for better HTML parsing."

            tty.warn(msg, url, "HTMLParseError: " + str(e))

        except Exception as e:
            # Other types of errors are completely ignored,
            # except in debug mode
            tty.debug("Error in _spider: %s:%s" % (type(e), str(e)),
                      traceback.format_exc())

        finally:
            tty.debug("SPIDER: [url={0}]".format(url))

        return pages, links, subcalls

    if isinstance(root_urls, six.string_types):
        root_urls = [root_urls]

    # Clear the local cache of visited pages before starting the search
    _visited.clear()

    current_depth = 0
    pages, links, spider_args = {}, set(), []

    collect = current_depth < depth
    for root in root_urls:
        root = url_util.parse(root)
        spider_args.append((root, collect))

    tp = multiprocessing.pool.ThreadPool(processes=concurrency)
    try:
        while current_depth <= depth:
            tty.debug("SPIDER: [depth={0}, max_depth={1}, urls={2}]".format(
                current_depth, depth, len(spider_args)))
            results = tp.map(llnl.util.lang.star(_spider), spider_args)
            spider_args = []
            collect = current_depth < depth
            for sub_pages, sub_links, sub_spider_args in results:
                sub_spider_args = [x + (collect, ) for x in sub_spider_args]
                pages.update(sub_pages)
                links.update(sub_links)
                spider_args.extend(sub_spider_args)

            current_depth += 1
    finally:
        tp.terminate()
        tp.join()

    return pages, links
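
A hedged usage sketch for spider above; the root URL, depth, and concurrency are placeholders.

# Crawl a download index one level deep and list any tarball links found.
pages, links = spider('https://example.com/downloads/', depth=1,
                      concurrency=8)
for link in sorted(links):
    if link.endswith('.tar.gz'):
        print(link)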