Example #1
def test_archive_file_errors(tmpdir, mock_archive, _fetch_method):
    """Ensure FetchStrategy commands may only be used as intended"""
    testpath = str(tmpdir)
    with spack.config.override('config:url_fetch_method', _fetch_method):
        fetcher = fs.URLFetchStrategy(url=mock_archive.url)
        assert fetcher is not None
        with pytest.raises(fs.FailedDownloadError):
            with Stage(fetcher, path=testpath) as stage:
                assert stage is not None
                assert fetcher.archive_file is None
                with pytest.raises(fs.NoArchiveFileError):
                    fetcher.archive(testpath)
                with pytest.raises(fs.NoArchiveFileError):
                    fetcher.expand()
                with pytest.raises(fs.NoArchiveFileError):
                    fetcher.reset()
                stage.fetch()
                with pytest.raises(fs.NoDigestError):
                    fetcher.check()
                assert fetcher.archive_file is not None
                # This call triggers the FailedDownloadError expected by
                # the outermost pytest.raises context.
                fetcher._fetch_from_url('file:///does-not-exist')
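
The tests in this listing rely on conftest fixtures (`mock_archive`, `_fetch_method`) that are not shown here. A minimal sketch of what the `_fetch_method` fixture could look like; the parametrization over 'curl' and 'urllib' is an assumption for illustration, not taken from this listing:

import pytest

@pytest.fixture(params=['curl', 'urllib'])  # assumed parameter values
def _fetch_method(request):
    # Run each test once per configured fetch method.
    return request.param
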
Example #2
def test_missing_curl(tmpdir, monkeypatch):
    """Ensure a fetch involving missing curl package reports the error."""
    err_fmt = 'No such command {0}'

    def _which(*args, **kwargs):
        err_msg = err_fmt.format(args[0])
        raise spack.util.executable.CommandNotFoundError(err_msg)

    # Patch the 'which' symbol as imported into fetch_strategy: the module
    # bound the name when it was imported, so patching the defining module
    # (spack.util.executable) at this point would be too late to affect it.
    monkeypatch.setattr(fs, 'which', _which)

    testpath = str(tmpdir)
    url = 'http://github.com/spack/spack'
    with spack.config.override('config:url_fetch_method', 'curl'):
        fetcher = fs.URLFetchStrategy(url=url)
        assert fetcher is not None
        with pytest.raises(TypeError, match='object is not callable'):
            with Stage(fetcher, path=testpath) as stage:
                out = stage.fetch()
            assert err_fmt.format('curl') in out
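
The comment in this test points at a general property of Python imports: `from module import name` gives the importing module its own binding, so a monkeypatch has to target that binding rather than the defining module. A self-contained illustration of the same rule, using throwaway in-memory modules rather than Spack's:

import types

util = types.ModuleType('util')              # stands in for spack.util.executable
util.which = lambda name: '/usr/bin/' + name

consumer = types.ModuleType('consumer')      # stands in for fetch_strategy
consumer.which = util.which                  # effect of `from util import which`

def _which(name):
    raise RuntimeError('No such command {0}'.format(name))

# Replacing the defining module's symbol does not change the binding the
# consumer already holds...
util.which = _which
assert consumer.which('curl') == '/usr/bin/curl'

# ...so the patch must target the consumer's own binding, which is what
# monkeypatch.setattr(fs, 'which', _which) does in the test above.
consumer.which = _which
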
Example #3
def get_checksums_for_versions(url_dict, name, **kwargs):
    """Fetches and checksums archives from URLs.

    This function is called by both ``spack checksum`` and ``spack
    create``.  The ``first_stage_function`` argument allows the caller to
    inspect the first downloaded archive, e.g., to determine the build
    system.

    Args:
        url_dict (dict): A dictionary of the form: version -> URL
        name (str): The name of the package
        first_stage_function (typing.Callable): function that takes a Stage and a URL;
            this is run on the stage of the first URL downloaded
        keep_stage (bool): whether to keep staging area when command completes
        batch (bool): whether to ask user how many versions to fetch (false)
            or fetch all versions (true)
        latest (bool): whether to take the latest version (true) or all (false)
        fetch_options (dict): Options used for the fetcher (such as timeout
            or cookies)

    Returns:
        (str): A multi-line string containing versions and corresponding hashes

    """
    batch = kwargs.get('batch', False)
    fetch_options = kwargs.get('fetch_options', None)
    first_stage_function = kwargs.get('first_stage_function', None)
    keep_stage = kwargs.get('keep_stage', False)
    latest = kwargs.get('latest', False)

    sorted_versions = sorted(url_dict.keys(), reverse=True)
    if latest:
        sorted_versions = sorted_versions[:1]

    # Find length of longest string in the list for padding
    max_len = max(len(str(v)) for v in sorted_versions)
    num_ver = len(sorted_versions)

    tty.msg(
        'Found {0} version{1} of {2}:'.format(num_ver,
                                              '' if num_ver == 1 else 's',
                                              name), '',
        *llnl.util.lang.elide_list([
            '{0:{1}}  {2}'.format(str(v), max_len, url_dict[v])
            for v in sorted_versions
        ]))
    print()

    if batch or latest:
        archives_to_fetch = len(sorted_versions)
    else:
        archives_to_fetch = tty.get_number(
            "How many would you like to checksum?", default=1, abort='q')

    if not archives_to_fetch:
        tty.die("Aborted.")

    versions = sorted_versions[:archives_to_fetch]
    urls = [url_dict[v] for v in versions]

    tty.debug('Downloading...')
    version_hashes = []
    i = 0
    errors = []
    for url, version in zip(urls, versions):
        # Wheels should not be expanded during staging
        expand_arg = ''
        if url.endswith('.whl') or '.whl#' in url:
            expand_arg = ', expand=False'
        try:
            if fetch_options:
                url_or_fs = fs.URLFetchStrategy(url,
                                                fetch_options=fetch_options)
            else:
                url_or_fs = url
            with Stage(url_or_fs, keep=keep_stage) as stage:
                # Fetch the archive
                stage.fetch()
                if i == 0 and first_stage_function:
                    # Only run first_stage_function the first time,
                    # no need to run it every time
                    first_stage_function(stage, url)

                # Checksum the archive and add it to the list
                version_hashes.append(
                    (version,
                     spack.util.crypto.checksum(hashlib.sha256,
                                                stage.archive_file)))
                i += 1
        except fs.FailedDownloadError:
            errors.append('Failed to fetch {0}'.format(url))
        except Exception as e:
            tty.msg('Something failed on {0}, skipping.  ({1})'.format(url, e))

    for msg in errors:
        tty.debug(msg)

    if not version_hashes:
        tty.die("Could not fetch any versions for {0}".format(name))

    # Find length of longest string in the list for padding
    max_len = max(len(str(v)) for v, h in version_hashes)

    # Generate the version directives to put in a package.py
    version_lines = "\n".join([
        "    version('{0}', {1}sha256='{2}'{3})".format(
            v, ' ' * (max_len - len(str(v))), h, expand_arg)
        for v, h in version_hashes
    ])

    num_hash = len(version_hashes)
    tty.debug('Checksummed {0} version{1} of {2}:'.format(
        num_hash, '' if num_hash == 1 else 's', name))

    return version_lines
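
A hedged sketch of how this function might be called; the package name, versions, and URLs below are made up for illustration, and `Version` is assumed to come from spack.version:

from spack.version import Version

url_dict = {
    Version('1.1.0'): 'https://example.com/pkg-1.1.0.tar.gz',
    Version('1.0.0'): 'https://example.com/pkg-1.0.0.tar.gz',
}

# batch=True fetches every listed version without prompting; keep_stage=False
# discards the staging directories once the checksums have been computed.
version_lines = get_checksums_for_versions(
    url_dict, 'pkg', batch=True, keep_stage=False)
print(version_lines)
# Expected shape of the result (hashes elided):
#     version('1.1.0', sha256='...')
#     version('1.0.0', sha256='...')
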
Example #4
def test_urlfetchstrategy_sans_url(_fetch_method):
    """Ensure constructor with no URL fails."""
    with spack.config.override('config:url_fetch_method', _fetch_method):
        with pytest.raises(ValueError):
            with fs.URLFetchStrategy(None):
                pass
Example #5
    def fetch(self, mirror_only=False):
        """Downloads an archive or checks out code from a repository."""
        self.chdir()

        fetchers = []
        if not mirror_only:
            fetchers.append(self.default_fetcher)

        # TODO: move mirror logic out of here and clean it up!
        # TODO: Or @alalazo may have some ideas about how to use a
        # TODO: CompositeFetchStrategy here.
        self.skip_checksum_for_mirror = True
        if self.mirror_path:
            mirrors = spack.config.get_config('mirrors')

            # Join URLs of mirror roots with mirror paths. urljoin()
            # strips everything past the final '/' in the root, so add
            # a trailing '/' if it is not already present.
            mirror_roots = [root if root.endswith('/') else root + '/'
                            for root in mirrors.values()]
            urls = [urljoin(root, self.mirror_path) for root in mirror_roots]

            # If this archive is normally fetched from a tarball URL,
            # then use the same digest.  `spack mirror` ensures that
            # the checksum will be the same.
            digest = None
            expand = True
            extension = None
            if isinstance(self.default_fetcher, fs.URLFetchStrategy):
                digest = self.default_fetcher.digest
                expand = self.default_fetcher.expand_archive
                extension = self.default_fetcher.extension

            # Have to skip the checksum for things archived from
            # repositories.  How can this be made safer?
            self.skip_checksum_for_mirror = not bool(digest)

            # Add URL strategies for all the mirrors with the digest
            for url in urls:
                fetchers.insert(
                    0, fs.URLFetchStrategy(
                        url, digest, expand=expand, extension=extension))
            if self.default_fetcher.cachable:
                fetchers.insert(
                    0, spack.fetch_cache.fetcher(
                        self.mirror_path, digest, expand=expand,
                        extension=extension))

        def generate_fetchers():
            for fetcher in fetchers:
                yield fetcher
            # The search function may be expensive, so defer calling it
            # until now; if an earlier fetcher succeeded it never runs.
            if self.search_fn and not mirror_only:
                dynamic_fetchers = self.search_fn()
                for fetcher in dynamic_fetchers:
                    yield fetcher

        for fetcher in generate_fetchers():
            try:
                fetcher.set_stage(self)
                self.fetcher = fetcher
                self.fetcher.fetch()
                break
            except spack.fetch_strategy.NoCacheError:
                # Don't bother reporting when something is not cached.
                continue
            except spack.error.SpackError as e:
                tty.msg("Fetching from %s failed." % fetcher)
                tty.debug(e)
                continue
        else:
            err_msg = "All fetchers failed for %s" % self.name
            self.fetcher = self.default_fetcher
            raise fs.FetchError(err_msg, None)
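
The `generate_fetchers` helper relies on generator laziness: `self.search_fn()` only runs if every earlier fetcher was tried and failed, because breaking out of the consuming loop abandons the generator before it reaches that call. A standalone illustration of the same pattern, with made-up names:

def candidates():
    yield 'primary'
    # Only reached if the consumer asks for another value, i.e. the
    # primary candidate did not satisfy it.
    print('running expensive search...')
    yield 'fallback'

for candidate in candidates():
    if candidate == 'primary':
        break          # the expensive search above never executes

# The for/else in fetch() is the complementary piece: the else clause runs
# only when the loop finishes without a break, i.e. when no fetcher succeeded.
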
Example #6
def test_urlfetchstrategy_sans_url():
    """Ensure constructor with no URL fails."""
    with pytest.raises(ValueError):
        with fs.URLFetchStrategy(None):
            pass
Example #7
    def fetch(self, mirror_only=False):
        """Downloads an archive or checks out code from a repository."""
        self.chdir()

        fetchers = []
        if not mirror_only:
            fetchers.append(self.default_fetcher)

        # TODO: move mirror logic out of here and clean it up!
        # TODO: Or @alalazo may have some ideas about how to use a
        # TODO: CompositeFetchStrategy here.
        self.skip_checksum_for_mirror = True
        if self.mirror_path:
            mirrors = spack.config.get_config('mirrors')

            # Join URLs of mirror roots with mirror paths. urljoin()
            # strips everything past the final '/' in the root, so add
            # a trailing '/' if it is not already present.
            mirror_roots = [
                root if root.endswith('/') else root + '/'
                for root in mirrors.values()
            ]
            urls = [urljoin(root, self.mirror_path) for root in mirror_roots]

            # If this archive is normally fetched from a tarball URL,
            # then use the same digest.  `spack mirror` ensures that
            # the checksum will be the same.
            digest = None
            expand = True
            extension = None
            if isinstance(self.default_fetcher, fs.URLFetchStrategy):
                digest = self.default_fetcher.digest
                expand = self.default_fetcher.expand_archive
                extension = self.default_fetcher.extension

            # Have to skip the checksum for things archived from
            # repositories.  How can this be made safer?
            self.skip_checksum_for_mirror = not bool(digest)

            # Add URL strategies for all the mirrors with the digest
            for url in urls:
                fetchers.insert(
                    0,
                    fs.URLFetchStrategy(url,
                                        digest,
                                        expand=expand,
                                        extension=extension))
            fetchers.insert(
                0,
                spack.fetch_cache.fetcher(self.mirror_path,
                                          digest,
                                          expand=expand,
                                          extension=extension))

            # Look for the archive in list_url
            package_name = os.path.dirname(self.mirror_path)
            pkg = spack.repo.get(package_name)
            if pkg.list_url is not None and pkg.url is not None:
                try:
                    archive_version = spack.url.parse_version(
                        self.default_fetcher.url)
                    versions = pkg.fetch_remote_versions()
                    try:
                        url_from_list = versions[Version(archive_version)]
                        fetchers.append(
                            fs.URLFetchStrategy(url_from_list, digest))
                    except KeyError:
                        tty.msg("Can not find version %s in url_list" %
                                archive_version)
                except:
                    tty.msg("Could not determine url from list_url.")

        for fetcher in fetchers:
            try:
                fetcher.set_stage(self)
                self.fetcher = fetcher
                self.fetcher.fetch()
                break
            except spack.error.SpackError as e:
                tty.msg("Fetching from %s failed." % fetcher)
                tty.debug(e)
                continue
        else:
            err_msg = "All fetchers failed for %s" % self.name
            self.fetcher = self.default_fetcher
            raise fs.FetchError(err_msg, None)
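
The list_url block above parses a version out of the default fetcher's URL and looks that version up in the mapping returned by `fetch_remote_versions()`. A hedged sketch of just that lookup step, with a made-up URL and a hand-built mapping standing in for the remote scrape:

from spack.version import Version
import spack.url

# Parse the version from the URL the default fetcher would use.
archive_version = spack.url.parse_version(
    'https://example.com/downloads/pkg-1.4.2.tar.gz')

# fetch_remote_versions() returns a {Version: url} mapping; fake a tiny one
# so the lookup can be shown in isolation.
versions = {Version('1.4.2'): 'https://example.com/downloads/pkg-1.4.2.tar.gz'}

url_from_list = versions[Version(archive_version)]   # KeyError if not listed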