Example 1
    def _bootstrap_ivy(self, bootstrap_jar_path):
        if not os.path.exists(bootstrap_jar_path):
            with temporary_file() as bootstrap_jar:
                fetcher = Fetcher()
                checksummer = fetcher.ChecksumListener(digest=hashlib.sha1())
                try:
                    logger.info('\nDownloading {}'.format(
                        self._ivy_subsystem.get_options().bootstrap_jar_url))
                    # TODO: Capture the stdout of the fetcher, instead of letting it output
                    # to the console directly.
                    fetcher.download(
                        self._ivy_subsystem.get_options().bootstrap_jar_url,
                        listener=fetcher.ProgressListener().wrap(checksummer),
                        path_or_fd=bootstrap_jar,
                        timeout_secs=self._ivy_subsystem.get_options().bootstrap_fetch_timeout_secs)
                    logger.info('sha1: {}'.format(checksummer.checksum))
                    bootstrap_jar.close()
                    touch(bootstrap_jar_path)
                    shutil.move(bootstrap_jar.name, bootstrap_jar_path)
                except fetcher.Error as e:
                    raise self.Error(
                        'Problem fetching the ivy bootstrap jar! {}'.format(e))

        return Ivy(bootstrap_jar_path,
                   ivy_settings=self._ivy_subsystem.get_options().ivy_settings,
                   ivy_cache_dir=self._ivy_subsystem.get_options().cache_dir)
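
The download-and-verify pattern above generalizes beyond Ivy. Below is a minimal standalone sketch, assuming the Pants v1 Fetcher API used throughout these examples; the import paths and the fetch_and_checksum helper are illustrative, not part of Pants:

import hashlib
import shutil

from pants.net.http.fetcher import Fetcher          # assumed Pants v1 import path
from pants.util.contextutil import temporary_file   # assumed Pants v1 import path

def fetch_and_checksum(url, dest_path, timeout_secs=60):
    """Download url to dest_path and return the sha1 hex digest of the payload."""
    fetcher = Fetcher()
    checksummer = fetcher.ChecksumListener(digest=hashlib.sha1())
    with temporary_file() as tmp:
        # Chain the progress bar and the checksummer so both observe every chunk.
        fetcher.download(url,
                         listener=fetcher.ProgressListener().wrap(checksummer),
                         path_or_fd=tmp,
                         timeout_secs=timeout_secs)
        tmp.close()
        shutil.move(tmp.name, dest_path)  # same close-then-move dance as above
    return checksummer.checksum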
Example 2
    def _select_binary_stream(self, name, urls):
        """Download a file from a list of urls, yielding a stream after downloading the file.

    URLs are tried in order until they succeed.

    :raises: :class:`BinaryToolFetcher.BinaryNotFound` if requests to all the given urls fail.
    """
        downloaded_successfully = False
        accumulated_errors = []
        for url in OrderedSet(urls):  # De-dup URLs: we only want to try each URL once.
            logger.info(
                'Attempting to fetch {name} binary from: {url} ...'.format(
                    name=name, url=url))
            try:
                with temporary_file() as dest:
                    logger.debug(
                        "in BinaryToolFetcher: url={}, timeout_secs={}".format(
                            url, self._timeout_secs))
                    self._fetcher.download(url,
                                           listener=Fetcher.ProgressListener(),
                                           path_or_fd=dest,
                                           timeout_secs=self._timeout_secs)
                    logger.info('Fetched {name} binary from: {url} .'.format(
                        name=name, url=url))
                    downloaded_successfully = True
                    dest.seek(0)
                    yield dest
                    break
            except (IOError, Fetcher.Error, ValueError) as e:
                accumulated_errors.append(
                    'Failed to fetch binary from {url}: {error}'.format(
                        url=url, error=e))
        if not downloaded_successfully:
            raise self.BinaryNotFound(name, accumulated_errors)
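
Note that the method yields the open temporary file from inside the with block, so the stream is only valid while the generator is suspended; once the caller resumes it, the file is cleaned up. Here is a dependency-free sketch of the same try-each-url-and-yield pattern (all names are illustrative):

import tempfile

def stream_first_success(urls, fetch):
    """Try urls in order; yield an open stream for the first one fetch() handles."""
    errors = []
    for url in dict.fromkeys(urls):  # de-dup, preserving order (Python 3.7+)
        try:
            with tempfile.TemporaryFile() as dest:
                fetch(url, dest)      # caller-supplied download callable; may raise
                dest.seek(0)
                yield dest            # valid only until the generator is resumed
                return                # success: skip the remaining urls
        except IOError as e:
            errors.append('{}: {}'.format(url, e))
    raise IOError('all urls failed: {}'.format('; '.join(errors)))

A caller has to consume the yielded stream before advancing the generator, which is why the Pants method above breaks out of the loop only after the yield returns.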
Example 3
  def _select_binary_stream(self, name, binary_path, fetcher=None):
    """Select a binary matching the current os and architecture.

    :param string binary_path: The path to the binary to fetch.
    :param fetcher: Optional argument used only for testing, to 'pretend' to open urls.
    :returns: a 'stream' to download it from a support directory. The returned 'stream' is actually
      a lambda function which returns the files binary contents.
    :raises: :class:`pants.binary_util.BinaryUtil.BinaryNotFound` if no binary of the given version
      and name could be found for the current platform.
    """

    if not self._baseurls:
      raise self.NoBaseUrlsError(
          'No urls are defined for the --pants-support-baseurls option.')
    downloaded_successfully = False
    accumulated_errors = []
    for baseurl in OrderedSet(self._baseurls):  # Wrap in OrderedSet because duplicates are wasteful.
      url = posixpath.join(baseurl, binary_path)
      logger.info('Attempting to fetch {name} binary from: {url} ...'.format(name=name, url=url))
      try:
        with temporary_file() as dest:
          fetcher = fetcher or Fetcher()
          fetcher.download(url, listener=Fetcher.ProgressListener(), path_or_fd=dest)
          logger.info('Fetched {name} binary from: {url} .'.format(name=name, url=url))
          downloaded_successfully = True
          dest.seek(0)
          yield lambda: dest.read()
          break
      except (IOError, Fetcher.Error, ValueError) as e:
        accumulated_errors.append('Failed to fetch binary from {url}: {error}'
                                  .format(url=url, error=e))
    if not downloaded_successfully:
      raise self.BinaryNotFound(binary_path, accumulated_errors)
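
Unlike Example 2, this older variant yields a zero-argument lambda rather than the file object itself, so the consumer pulls the bytes by calling it while the generator is still suspended inside the with block. A hypothetical usage (the names and path are illustrative):

for read_contents in binary_util._select_binary_stream('protoc', 'bin/protoc/2.4.1/protoc'):
    binary_bytes = read_contents()  # must be invoked before the temp file is closed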
Example 4
  def bootstrap_coursier(self, workunit_factory):
    opts = self.get_options()
    bootstrap_url = opts.bootstrap_jar_url

    coursier_bootstrap_dir = os.path.join(opts.pants_bootstrapdir,
                                          'tools', 'jvm', 'coursier',
                                          opts.version)

    bootstrap_jar_path = os.path.join(coursier_bootstrap_dir, 'coursier.jar')

    if not os.path.exists(bootstrap_jar_path):
      with workunit_factory(name='bootstrap-coursier', labels=[WorkUnitLabel.TOOL]) as workunit:
        with safe_concurrent_creation(bootstrap_jar_path) as temp_path:
          fetcher = Fetcher(get_buildroot())
          checksummer = fetcher.ChecksumListener(digest=hashlib.sha1())
          try:
            logger.info('\nDownloading {}'.format(bootstrap_url))
            # TODO: Capture the stdout of the fetcher, instead of letting it output
            # to the console directly.
            fetcher.download(bootstrap_url,
                             listener=fetcher.ProgressListener().wrap(checksummer),
                             path_or_fd=temp_path,
                             timeout_secs=opts.bootstrap_fetch_timeout_secs)
            logger.info('sha1: {}'.format(checksummer.checksum))
          except fetcher.Error as e:
            workunit.set_outcome(WorkUnit.FAILURE)
            raise self.Error('Problem fetching the coursier bootstrap jar! {}'.format(e))
          else:
            workunit.set_outcome(WorkUnit.SUCCESS)

    return bootstrap_jar_path
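
Compared with Example 1, this version wraps the download in safe_concurrent_creation, which hands the block a temporary path and promotes it to bootstrap_jar_path only if the block succeeds, so concurrent bootstraps never observe a partially written jar. A rough, dependency-free sketch of that idiom (an illustration of the idea, not the actual Pants implementation):

import os
import uuid
from contextlib import contextmanager

@contextmanager
def concurrent_creation(final_path):
    """Yield a temp path; atomically rename it over final_path only on success."""
    tmp_path = '{}.tmp.{}'.format(final_path, uuid.uuid4().hex)
    try:
        yield tmp_path
        os.rename(tmp_path, final_path)  # atomic on POSIX within one filesystem
    finally:
        if os.path.exists(tmp_path):     # body raised before the rename happened
            os.remove(tmp_path)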
Example 5
    def test_progress_listener(self, timer):
        timer.side_effect = [0, 1.137]

        stream = BytesIO()
        progress_listener = Fetcher.ProgressListener(width=5,
                                                     chunk_size_bytes=1,
                                                     stream=stream)

        with self.expect_get('http://baz',
                             chunk_size_bytes=1,
                             timeout_secs=37,
                             chunks=[[1]] * 1024) as (chunks,
                                                      expected_listener_calls):

            self.fetcher.fetch('http://baz',
                               progress_listener.wrap(self.listener),
                               chunk_size_bytes=1,
                               timeout_secs=37)

        self.assert_listener_calls(expected_listener_calls, chunks)

        # We just test the last progress line which should indicate a 100% complete download.
        # We control progress bar width (5 dots), size (1KB) and total time downloading (fake 1.137s).
        self.assertEqual('100% ..... 1 KB 1.137s\n',
                         stream.getvalue().decode('utf-8').split('\r')[-1])
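
The test pins wall-clock time by mocking the timer: side_effect = [0, 1.137] makes the listener's first time sample return 0 and its second return 1.137, which is where the 1.137s in the expected progress line comes from. The same mock-clock technique in isolation (measure is a hypothetical function, not Pants code):

import time
from unittest import mock

def measure(work, timer=time.time):
    """Run work() and return its duration as seen through the injected timer."""
    start = timer()
    work()
    return timer() - start

fake_timer = mock.Mock(side_effect=[0, 1.137])  # 1st call -> 0, 2nd call -> 1.137
assert measure(lambda: None, timer=fake_timer) == 1.137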
Example 6
    def resolve_target(self, node_task, target, results_dir, node_paths):
        self._copy_sources(target, results_dir)

        with temporary_dir() as temp_dir:
            archive_file_name = urllib_parse.urlsplit(
                target.dependencies_archive_url).path.split('/')[-1]
            if not archive_file_name:
                raise TaskError(
                    'Could not determine archive file name for {target} from {url}'
                    .format(target=target.address.reference(),
                            url=target.dependencies_archive_url))

            download_path = os.path.join(temp_dir, archive_file_name)

            logger.info(
                'Downloading archive {archive_file_name} from '
                '{dependencies_archive_url} to {path}'.format(
                    archive_file_name=archive_file_name,
                    dependencies_archive_url=target.dependencies_archive_url,
                    path=download_path))

            try:
                Fetcher(get_buildroot()).download(
                    target.dependencies_archive_url,
                    listener=Fetcher.ProgressListener(),
                    path_or_fd=download_path,
                    timeout_secs=self.get_options().fetch_timeout_secs)
            except Fetcher.Error as error:
                raise TaskError(
                    'Failed to fetch preinstalled node_modules for {target} from {url}: {error}'
                    .format(target=target.address.reference(),
                            url=target.dependencies_archive_url,
                            error=error))

            logger.info(
                'Fetched archive {archive_file_name} from {dependencies_archive_url} to {path}'
                .format(
                    archive_file_name=archive_file_name,
                    dependencies_archive_url=target.dependencies_archive_url,
                    path=download_path))

            archiver_for_path(archive_file_name).extract(
                download_path, temp_dir)

            extracted_node_modules = os.path.join(temp_dir, 'node_modules')
            if not os.path.isdir(extracted_node_modules):
                raise TaskError(
                    'Did not find an extracted node_modules directory for {target} '
                    'inside {dependencies_archive_url}'.format(
                        target=target.address.reference(),
                        dependencies_archive_url=target.dependencies_archive_url))

            shutil.move(extracted_node_modules,
                        os.path.join(results_dir, 'node_modules'))

    def resolve_target(self,
                       node_task,
                       target,
                       results_dir,
                       node_paths,
                       resolve_locally=False,
                       **kwargs):
        if not resolve_locally:
            self._copy_sources(target, results_dir)

        with temporary_dir() as temp_dir:
            archive_file_name = urllib.parse.urlsplit(
                target.dependencies_archive_url).path.split("/")[-1]
            if not archive_file_name:
                raise TaskError(
                    "Could not determine archive file name for {target} from {url}"
                    .format(target=target.address.reference(),
                            url=target.dependencies_archive_url))

            download_path = os.path.join(temp_dir, archive_file_name)

            node_task.context.log.info(
                "Downloading archive {archive_file_name} from "
                "{dependencies_archive_url} to {path}".format(
                    archive_file_name=archive_file_name,
                    dependencies_archive_url=target.dependencies_archive_url,
                    path=download_path,
                ))

            try:
                Fetcher(get_buildroot()).download(
                    target.dependencies_archive_url,
                    listener=Fetcher.ProgressListener(),
                    path_or_fd=download_path,
                    timeout_secs=self.get_options().fetch_timeout_secs,
                )
            except Fetcher.Error as error:
                raise TaskError(
                    "Failed to fetch preinstalled node_modules for {target} from {url}: {error}"
                    .format(
                        target=target.address.reference(),
                        url=target.dependencies_archive_url,
                        error=error,
                    ))

            node_task.context.log.info(
                "Fetched archive {archive_file_name} from {dependencies_archive_url} to {path}"
                .format(
                    archive_file_name=archive_file_name,
                    dependencies_archive_url=target.dependencies_archive_url,
                    path=download_path,
                ))

            archiver_for_path(archive_file_name).extract(
                download_path, temp_dir)

            extracted_node_modules = os.path.join(temp_dir, "node_modules")
            if not os.path.isdir(extracted_node_modules):
                raise TaskError(
                    "Did not find an extracted node_modules directory for {target} "
                    "inside {dependencies_archive_url}".format(
                        target=target.address.reference(),
                        dependencies_archive_url=target.dependencies_archive_url,
                    ))

            # shutil.move doesn't handle directory collision nicely. This is mainly to address
            # installing within the source directory for local resolves.
            node_modules_path = os.path.join(results_dir, "node_modules")
            safe_rmtree(node_modules_path)
            shutil.move(extracted_node_modules, node_modules_path)
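
The safe_rmtree call matters because shutil.move does not replace an existing destination directory: it moves the source inside it. A small standalone sketch demonstrating the collision the comment refers to:

import os
import shutil
import tempfile

root = tempfile.mkdtemp()
src = os.path.join(root, 'extracted_node_modules')
dst = os.path.join(root, 'node_modules')
os.makedirs(src)
os.makedirs(dst)

# With dst already present, the move nests src under dst instead of replacing it.
shutil.move(src, dst)
assert os.path.isdir(os.path.join(dst, 'extracted_node_modules'))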