Example #1
    def fetch(self, mirror_only=False):
        """Downloads an archive or checks out code from a repository."""
        self.chdir()

        fetchers = []
        if not mirror_only:
            fetchers.append(self.default_fetcher)

        # TODO: move mirror logic out of here and clean it up!
        # TODO: Or @alalazo may have some ideas about how to use a
        # TODO: CompositeFetchStrategy here.
        self.skip_checksum_for_mirror = True
        if self.mirror_path:
            mirrors = spack.config.get_config('mirrors')

            # Join URLs of mirror roots with mirror paths. Because
            # urljoin() strips everything past the final '/' in the
            # root, we add a '/' if one is not present.
            mirror_roots = [
                root if root.endswith('/') else root + '/'
                for root in mirrors.values()
            ]
            urls = [urljoin(root, self.mirror_path) for root in mirror_roots]
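
            # Illustrative aside (not in the original code): without the
            # trailing slash, urljoin() replaces the last path component:
            #   urljoin('http://m.net/dir', 'x.tgz')  -> 'http://m.net/x.tgz'
            #   urljoin('http://m.net/dir/', 'x.tgz') -> 'http://m.net/dir/x.tgz'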

            # If this archive is normally fetched from a tarball URL,
            # then use the same digest.  `spack mirror` ensures that
            # the checksum will be the same.
            digest = None
            if isinstance(self.default_fetcher, fs.URLFetchStrategy):
                digest = self.default_fetcher.digest

            # Have to skip the checksum for things archived from
            # repositories.  How can this be made safer?
            self.skip_checksum_for_mirror = not bool(digest)

            # Add URL strategies for all the mirrors with the digest
            for url in urls:
                fetchers.insert(0, fs.URLFetchStrategy(url, digest))

        for fetcher in fetchers:
            try:
                fetcher.set_stage(self)
                self.fetcher = fetcher
                self.fetcher.fetch()
                break
            except spack.error.SpackError as e:
                tty.msg("Fetching from %s failed." % fetcher)
                tty.debug(e)
                continue
        else:
            err_msg = "All fetchers failed for %s" % self.name
            self.fetcher = self.default_fetcher
            raise fs.FetchError(err_msg, None)
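
A minimal usage sketch for the method above: in Spack, a Stage is typically used as a context manager, and fetch() walks the fetcher list built here. The URL below is hypothetical, and the constructor signature may differ between Spack versions.

    # Hedged sketch, assuming spack.stage.Stage's context-manager protocol.
    import spack.stage

    url = "https://example.com/pkg-1.0.tar.gz"  # hypothetical source URL
    with spack.stage.Stage(url) as stage:
        stage.fetch()   # default fetcher first, then any mirror fallbacks
        stage.check()   # verify the digest when one is known

If every fetcher raises, the for/else branch above restores default_fetcher and raises fs.FetchError.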
Example #2
    def fetch(self, mirror_only=False, err_msg=None):
        """Retrieves the code or archive

        Args:
            mirror_only (bool): only fetch from a mirror
            err_msg (str or None): the error message to display if all fetchers
                fail or ``None`` for the default fetch failure message
        """
        fetchers = []
        if not mirror_only:
            fetchers.append(self.default_fetcher)

        # TODO: move mirror logic out of here and clean it up!
        # TODO: Or @alalazo may have some ideas about how to use a
        # TODO: CompositeFetchStrategy here.
        self.skip_checksum_for_mirror = True
        if self.mirror_paths:
            # Join URLs of mirror roots with mirror paths. Because
            # urljoin() strips everything past the final '/' in the
            # root, we add a '/' if one is not present.
            mirror_urls = {}
            for mirror in spack.mirror.MirrorCollection().values():
                for rel_path in self.mirror_paths:
                    mirror_url = url_util.join(mirror.fetch_url, rel_path)
                    mirror_urls[mirror_url] = {}
                    if mirror.get_access_pair("fetch") or \
                       mirror.get_access_token("fetch") or \
                       mirror.get_profile("fetch"):
                        mirror_urls[mirror_url] = {
                            "access_token": mirror.get_access_token("fetch"),
                            "access_pair": mirror.get_access_pair("fetch"),
                            "access_profile": mirror.get_profile("fetch"),
                            "endpoint_url": mirror.get_endpoint_url("fetch")
                        }
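
            # (Aside, not in the original: mirror_urls maps each candidate
            # URL to a dict of connection credentials, empty when the
            # mirror needs none; it is passed to the fetch strategy as
            # the `connection` argument below.)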

            # If this archive is normally fetched from a tarball URL,
            # then use the same digest.  `spack mirror` ensures that
            # the checksum will be the same.
            digest = None
            expand = True
            extension = None
            if isinstance(self.default_fetcher, fs.URLFetchStrategy):
                digest = self.default_fetcher.digest
                expand = self.default_fetcher.expand_archive
                extension = self.default_fetcher.extension

            # Have to skip the checksum for things archived from
            # repositories.  How can this be made safer?
            self.skip_checksum_for_mirror = not bool(digest)

            # Add URL strategies for all the mirrors with the digest
            # Insert fetchers in the order that the URLs are provided.
            for url in reversed(list(mirror_urls.keys())):
                fetchers.insert(
                    0,
                    fs.from_url_scheme(url,
                                       digest,
                                       expand=expand,
                                       extension=extension,
                                       connection=mirror_urls[url]))
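            # (Aside, not in the original: iterating in reverse while
            # always inserting at index 0 preserves the given URL order
            # and still places every mirror ahead of the default fetcher
            # appended earlier.)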

            if self.default_fetcher.cachable:
                for rel_path in reversed(list(self.mirror_paths)):
                    cache_fetcher = spack.caches.fetch_cache.fetcher(
                        rel_path, digest, expand=expand, extension=extension)
                    fetchers.insert(0, cache_fetcher)

        def generate_fetchers():
            for fetcher in fetchers:
                yield fetcher
            # The search function may be expensive, so defer calling it
            # until now; if an earlier fetcher succeeded, the consumer
            # breaks out of its loop and this search is never run.
            if self.search_fn and not mirror_only:
                dynamic_fetchers = self.search_fn()
                for fetcher in dynamic_fetchers:
                    yield fetcher

        def print_errors(errors):
            for msg in errors:
                tty.debug(msg)

        errors = []
        for fetcher in generate_fetchers():
            try:
                fetcher.stage = self
                self.fetcher = fetcher
                self.fetcher.fetch()
                break
            except spack.fetch_strategy.NoCacheError:
                # Don't bother reporting when something is not cached.
                continue
            except spack.error.SpackError as e:
                errors.append('Fetching from {0} failed.'.format(fetcher))
                tty.debug(e)
                continue
        else:
            print_errors(errors)

            self.fetcher = self.default_fetcher
            default_msg = 'All fetchers failed for {0}'.format(self.name)
            raise fs.FetchError(err_msg or default_msg, None)

        print_errors(errors)
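
The loop above uses Python's for/else: the else block runs only when the loop completes without break, i.e. when no fetcher succeeded. A standalone sketch of the same fallback pattern, with illustrative names only:

    def first_success(callables):
        """Return the result of the first callable that does not raise."""
        for fn in callables:
            try:
                result = fn()
                break                # success: the else block is skipped
            except Exception:
                continue             # failure: try the next candidate
        else:
            # Reached only if no break happened, so every candidate failed.
            raise RuntimeError("every candidate failed")
        return result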
Example #3
    def fetch(self, mirror_only=False):
        """Downloads an archive or checks out code from a repository."""
        fetchers = []
        if not mirror_only:
            fetchers.append(self.default_fetcher)

        # TODO: move mirror logic out of here and clean it up!
        # TODO: Or @alalazo may have some ideas about how to use a
        # TODO: CompositeFetchStrategy here.
        self.skip_checksum_for_mirror = True
        if self.mirror_paths:
            # Join URLs of mirror roots with mirror paths. Because
            # urljoin() strips everything past the final '/' in the
            # root, we add a '/' if one is not present.
            urls = []
            for mirror in spack.mirror.MirrorCollection().values():
                for rel_path in self.mirror_paths:
                    urls.append(url_util.join(mirror.fetch_url, rel_path))

            # If this archive is normally fetched from a tarball URL,
            # then use the same digest.  `spack mirror` ensures that
            # the checksum will be the same.
            digest = None
            expand = True
            extension = None
            if isinstance(self.default_fetcher, fs.URLFetchStrategy):
                digest = self.default_fetcher.digest
                expand = self.default_fetcher.expand_archive
                extension = self.default_fetcher.extension

            # Have to skip the checksum for things archived from
            # repositories.  How can this be made safer?
            self.skip_checksum_for_mirror = not bool(digest)

            # Add URL strategies for all the mirrors with the digest
            for url in urls:
                fetchers.insert(
                    0,
                    fs.from_url_scheme(url,
                                       digest,
                                       expand=expand,
                                       extension=extension))

            if self.default_fetcher.cachable:
                for rel_path in reversed(list(self.mirror_paths)):
                    cache_fetcher = spack.caches.fetch_cache.fetcher(
                        rel_path, digest, expand=expand, extension=extension)
                    fetchers.insert(0, cache_fetcher)

        def generate_fetchers():
            for fetcher in fetchers:
                yield fetcher
            # The search function may be expensive, so defer calling it
            # until now; if an earlier fetcher succeeded, the consumer
            # breaks out of its loop and this search is never run.
            if self.search_fn and not mirror_only:
                dynamic_fetchers = self.search_fn()
                for fetcher in dynamic_fetchers:
                    yield fetcher

        for fetcher in generate_fetchers():
            try:
                fetcher.set_stage(self)
                self.fetcher = fetcher
                self.fetcher.fetch()
                break
            except spack.fetch_strategy.NoCacheError:
                # Don't bother reporting when something is not cached.
                continue
            except spack.error.SpackError as e:
                tty.msg("Fetching from %s failed." % fetcher)
                tty.debug(e)
                continue
        else:
            err_msg = "All fetchers failed for %s" % self.name
            self.fetcher = self.default_fetcher
            raise fs.FetchError(err_msg, None)
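
Because generate_fetchers() is a generator, the potentially expensive self.search_fn() runs only if the caller exhausts every eager fetcher first. A minimal sketch of that deferral, with hypothetical names:

    def candidates(eager, expensive_search):
        for item in eager:
            yield item               # cheap candidates come first
        # Evaluated only if the consumer exhausts the eager items, i.e.
        # only if it never broke out of its loop on an earlier success.
        for item in expensive_search():
            yield item

    # The consumer breaks on the first success, so the search never runs:
    for c in candidates([1, 2], lambda: [3, 4]):
        if c == 1:
            break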
Example #4
    def fetch(self, mirror_only=False):
        """Downloads an archive or checks out code from a repository."""
        self.chdir()

        fetchers = []
        if not mirror_only:
            fetchers.append(self.default_fetcher)

        # TODO: move mirror logic out of here and clean it up!
        # TODO: Or @alalazo may have some ideas about how to use a
        # TODO: CompositeFetchStrategy here.
        self.skip_checksum_for_mirror = True
        if self.mirror_path:
            mirrors = spack.config.get_config('mirrors')

            # Join URLs of mirror roots with mirror paths. Because
            # urljoin() strips everything past the final '/' in the
            # root, we add a '/' if one is not present.
            mirror_roots = [
                root if root.endswith('/') else root + '/'
                for root in mirrors.values()
            ]
            urls = [urljoin(root, self.mirror_path) for root in mirror_roots]

            # If this archive is normally fetched from a tarball URL,
            # then use the same digest.  `spack mirror` ensures that
            # the checksum will be the same.
            digest = None
            expand = True
            extension = None
            if isinstance(self.default_fetcher, fs.URLFetchStrategy):
                digest = self.default_fetcher.digest
                expand = self.default_fetcher.expand_archive
                extension = self.default_fetcher.extension

            # Have to skip the checksum for things archived from
            # repositories.  How can this be made safer?
            self.skip_checksum_for_mirror = not bool(digest)

            # Add URL strategies for all the mirrors with the digest
            for url in urls:
                fetchers.insert(
                    0,
                    fs.URLFetchStrategy(url,
                                        digest,
                                        expand=expand,
                                        extension=extension))
            fetchers.insert(
                0,
                spack.fetch_cache.fetcher(self.mirror_path,
                                          digest,
                                          expand=expand,
                                          extension=extension))

            # Look for the archive in list_url
            package_name = os.path.dirname(self.mirror_path)
            pkg = spack.repo.get(package_name)
            if pkg.list_url is not None and pkg.url is not None:
                try:
                    archive_version = spack.url.parse_version(
                        self.default_fetcher.url)
                    versions = pkg.fetch_remote_versions()
                    try:
                        url_from_list = versions[Version(archive_version)]
                        fetchers.append(
                            fs.URLFetchStrategy(url_from_list, digest))
                    except KeyError:
                        tty.msg("Can not find version %s in url_list" %
                                archive_version)
                except Exception:
                    tty.msg("Could not determine URL from list_url.")

        for fetcher in fetchers:
            try:
                fetcher.set_stage(self)
                self.fetcher = fetcher
                self.fetcher.fetch()
                break
            except spack.error.SpackError as e:
                tty.msg("Fetching from %s failed." % fetcher)
                tty.debug(e)
                continue
        else:
            err_msg = "All fetchers failed for %s" % self.name
            self.fetcher = self.default_fetcher
            raise fs.FetchError(err_msg, None)
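
The net effect of the insert(0, ...) calls in this variant is a priority list: the cache first, then the mirrors, then the default fetcher, with the list_url lookup appended as a last resort. A schematic sketch with illustrative strings in place of fetch strategies:

    fetchers = ["default"]                 # tried only if nothing earlier works
    for url in ["mirror-a", "mirror-b"]:
        fetchers.insert(0, url)            # each mirror jumps the queue
    fetchers.insert(0, "local-cache")      # the cache ends up first of all
    fetchers.append("list_url-lookup")     # appended: a true last resort
    print(fetchers)
    # ['local-cache', 'mirror-b', 'mirror-a', 'default', 'list_url-lookup']

Note that plain insert(0, ...) reverses the mirror order, which is exactly why Examples 2 and 3 iterate the URLs with reversed().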