Example 1
    def fetch_archive(self, archive_url, strip_level, dest):
        """Fetch the archive at `archive_url` and extract its contents into `dest`.

        :param archive_url: URL of the archive to fetch; its path suffix selects
          the unpacking strategy via `archiver_for_path`.
        :param strip_level: Number of leading directory components to strip from
          the extracted tree before moving its contents into `dest`. 0 means
          extract directly into `dest` with no stripping.
        :param dest: Directory the (possibly stripped) archive contents end up in.
        :raises FetchError: If no archiver is known for the url's path suffix.
        """
        try:
            archiver = archiver_for_path(archive_url)
        except ValueError:
            raise FetchError(
                f"Don't know how to unpack archive at url {archive_url}")

        with self._fetch(archive_url) as archive:
            if strip_level == 0:
                archiver.extract(archive, dest)
            else:
                # Extract into a scratch dir first, then move everything found
                # exactly `strip_level` directories deep over into `dest`.
                with temporary_dir() as scratch:
                    archiver.extract(archive, scratch)
                    for dirpath, dirnames, filenames in os.walk(scratch,
                                                                topdown=True):
                        if dirpath != scratch:
                            relpath = os.path.relpath(dirpath, scratch)
                            relpath_components = relpath.split(os.sep)
                            if len(relpath_components) == strip_level and (
                                    dirnames or filenames):
                                for path in dirnames + filenames:
                                    src = os.path.join(dirpath, path)
                                    dst = os.path.join(dest, path)
                                    shutil.move(src, dst)
                                del dirnames[:]  # Clearing dirnames prunes this subtree from the walk.
Example 2
 def fetch(self, dest, rev=None):
     """Clone the repo advertised by the import path's go-import meta tag.

     :param dest: Local directory to clone the remote repo into.
     :param rev: Optional revision to reset the clone to after cloning.
     :raises FetchError: If no go-import meta tag is found, or the advertised
       vcs is anything other than git.
     """
     repo_info = self._meta_tag_reader.get_imported_repo(self.import_path)
     if not repo_info:
         raise FetchError(
             'No <meta name="go-import"> tag found, so cannot fetch repo '
             'at {}'.format(self.import_path))
     if repo_info.vcs != 'git':
         # TODO: Support other vcs systems as needed.
         raise FetchError("Don't know how to fetch for vcs type {}.".format(
             repo_info.vcs))
     # TODO: Do this in a workunit (see https://github.com/pantsbuild/pants/issues/3502).
     logger.info('Cloning {} into {}'.format(repo_info.url, dest))
     cloned_repo = Git.clone(repo_info.url, dest)
     if rev:
         cloned_repo.set_state(rev)
Example 3
 def root(self):
     """Return the import prefix (repo root) declared by the go-import meta tag.

     :raises FetchError: If no go-import meta tag can be found for the
       import path.
     """
     repo = self._meta_tag_reader.get_imported_repo(self.import_path)
     if not repo:
         raise FetchError(
             f'No <meta name="go-import"> tag found at {self.import_path}')
     return repo.import_prefix
Example 4
 def _download(self, url):
   """Download `url` to a temporary file on disk and yield the file's path.

   Implemented as a generator (presumably consumed via a context-manager
   wrapper applied outside this view -- TODO confirm) so the temporary file
   is cleaned up once the consumer is done with the yielded path.

   :param url: The url to GET, streamed in chunks of the configured buffer size.
   :raises FetchError: If the response status is anything other than 200 OK.
   """
   # TODO(jsirois): Wrap with workunits, progress meters, checksums.
   logger.info('Downloading {}...'.format(url))
   with closing(self._session().get(url, stream=True)) as res:
     if res.status_code != requests.codes.ok:
       raise FetchError('Failed to download {} ({} error)'.format(url, res.status_code))
     with temporary_file() as archive_fp:
       # NB: Archives might be very large so we play it safe and buffer them to disk instead of
       # memory before unpacking.
       for chunk in res.iter_content(chunk_size=self.get_options().buffer_size):
         archive_fp.write(chunk)
       # Close the file (and response) before yielding so the consumer sees a
       # fully flushed file it can re-open by name.
       archive_fp.close()
       res.close()
       yield archive_fp.name
Example 5
 def get_fetcher(self, import_path):
   """Return a fetcher capable of fetching `import_path`.

   Prefers an ArchiveFetcher whose pattern matches the import path; falls back
   to a CloningFetcher unless remote repo cloning is disallowed by options.

   :raises FetchError: If no archive pattern matched and cloning is disallowed.
   """
   for pattern, raw_url_info in self._matchers:
     m = pattern.match(import_path)
     if m is None:
       continue
     # The url_formats are filled in in two stages. We match.expand them here,
     # and the ArchiveFetcher applies .format() later, when it knows the rev.
     url_info = ArchiveFetcher.UrlInfo(m.expand(raw_url_info.url_format),
                                       raw_url_info.default_rev,
                                       raw_url_info.strip_level)
     return ArchiveFetcher(import_path, m.group(0), url_info,
                           ArchiveRetriever.global_instance())
   if self.get_options().disallow_cloning_fetcher:
     raise FetchError('Cannot fetch {}. No archive match, and remote repo cloning '
                      'disallowed.'.format(import_path))
   return CloningFetcher(import_path, GoImportMetaTagReader.global_instance())