Code example #1
File: pants_daemon.py Project: thoward/pants
 def write(self, msg):
     msg = ensure_text(msg)
     for line in msg.rstrip().splitlines():
         # The log only accepts text, and will raise a decoding error when given bytes
         # for unicode text if the default encoding is ascii.
         line = ensure_text(line)
         self._logger.log(self._log_level, line.rstrip())
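Every snippet on this page uses `ensure_text` from pants.util.strutil. As a working mental model (a minimal sketch, not the project's exact implementation; the `encoding` parameter is an assumption here), it decodes bytes to str and passes text through unchanged:

def ensure_text(text_or_binary, encoding='utf-8'):
    # Sketch only: decode bytes, pass str through, reject anything else.
    if isinstance(text_or_binary, bytes):
        return text_or_binary.decode(encoding)
    if isinstance(text_or_binary, str):
        return text_or_binary
    raise TypeError('Argument is neither text nor binary: %r' % (text_or_binary,))

The test_strutil.py examples further down (b'\xe5\xbf\xab' decoding to '快', TypeError on 45) are consistent with this behavior.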
Code example #2
File: pants_daemon.py Project: jsirois/pants
 def write(self, msg):
   msg = ensure_text(msg)
   for line in msg.rstrip().splitlines():
     # The log only accepts text, and will raise a decoding error when given bytes
     # for unicode text if the default encoding is ascii.
     line = ensure_text(line)
     self._logger.log(self._log_level, line.rstrip())
Code example #3
 def create(self, basedir, outdir, name, prefix=None):
     basedir = ensure_text(basedir)
     tarpath = os.path.join(outdir,
                            '%s.%s' % (ensure_text(name), self.extension))
     with open_tar(tarpath, self.mode, dereference=True,
                   errorlevel=1) as tar:
         tar.add(basedir, arcname=prefix or '.')
     return tarpath
Code example #4
File: archive.py Project: Gointer/pants
 def create(self, basedir, outdir, name, prefix=None):
   """
   :API: public
   """
   basedir = ensure_text(basedir)
   tarpath = os.path.join(outdir, '{}.{}'.format(ensure_text(name), self.extension))
   with open_tar(tarpath, self.mode, dereference=True, errorlevel=1) as tar:
     tar.add(basedir, arcname=prefix or '.')
   return tarpath
Code example #5
File: archive.py Project: spring-team/pants
  def create(self, basedir, outdir, name, prefix=None, dereference=True):
    """
    :API: public
    """

    basedir = ensure_text(basedir)
    tarpath = os.path.join(outdir, '{}.{}'.format(ensure_text(name), self.extension))
    with open_tar(tarpath, self.mode, dereference=dereference, errorlevel=1) as tar:
      tar.add(basedir, arcname=prefix or '.')
    return tarpath
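A hypothetical call against the tar-based create methods above, assuming an archiver instance whose mode is 'w:gz' and whose extension is 'tar.gz':

tarpath = archiver.create('/tmp/project/dist', '/tmp/out', 'release')
# tarpath == '/tmp/out/release.tar.gz'; the archive holds the contents of
# /tmp/project/dist, rooted at '.' (or at the given prefix).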
Code example #6
File: archive.py Project: youprofit/pants
 def create(self, basedir, outdir, name, prefix=None):
   zippath = os.path.join(outdir, '{}.zip'.format(name))
   with open_zip(zippath, 'w', compression=ZIP_DEFLATED) as zip:
     for root, _, files in safe_walk(basedir):
       root = ensure_text(root)
       for file in files:
         file = ensure_text(file)
         full_path = os.path.join(root, file)
         relpath = os.path.relpath(full_path, basedir)
         if prefix:
           relpath = os.path.join(ensure_text(prefix), relpath)
         zip.write(full_path, relpath)
   return zippath
Code example #7
    def create(self, basedir, outdir, name, prefix=None, dereference=True):
        """
        :API: public
        """

        basedir = ensure_text(basedir)
        tarpath = os.path.join(
            outdir, "{}.{}".format(ensure_text(name), self.extension))
        with closing(
                TarFile.open(tarpath,
                             self.mode,
                             dereference=dereference,
                             errorlevel=1)) as tar:
            tar.add(basedir, arcname=prefix or ".")
        return tarpath
Code example #8
 def create(self, basedir, outdir, name, prefix=None):
   zippath = os.path.join(outdir, '{}.{}'.format(name, self.extension))
   with open_zip(zippath, 'w', compression=self.compression) as zip:
     # For symlinks, we want to archive the actual content of linked files but
     # under the relpath derived from symlink.
     for root, _, files in safe_walk(basedir, followlinks=True):
       root = ensure_text(root)
       for file in files:
         file = ensure_text(file)
         full_path = os.path.join(root, file)
         relpath = os.path.relpath(full_path, basedir)
         if prefix:
           relpath = os.path.join(ensure_text(prefix), relpath)
         zip.write(full_path, relpath)
   return zippath
Code example #9
File: plugin_resolver.py Project: mcguigan/pants
  def _resolve_plugin_locations(self):
    hasher = hashlib.sha1()

    # Assume we have platform-specific plugin requirements and pessimistically mix the ABI
    # identifier into the hash to ensure re-resolution of plugins for different interpreter ABIs.
    hasher.update(self._interpreter.identity.abi_tag.encode())  # EG: cp36m

    for req in sorted(self._plugin_requirements):
      hasher.update(req.encode())
    resolve_hash = hasher.hexdigest()
    resolved_plugins_list = os.path.join(self.plugin_cache_dir, f'plugins-{resolve_hash}.txt')

    if self._plugins_force_resolve:
      safe_delete(resolved_plugins_list)

    if not os.path.exists(resolved_plugins_list):
      tmp_plugins_list = f'{resolved_plugins_list}.{uuid.uuid4().hex}'
      with safe_open(tmp_plugins_list, 'w') as fp:
        for plugin in self._resolve_plugins():
          fp.write(ensure_text(plugin.location))
          fp.write('\n')
      os.rename(tmp_plugins_list, resolved_plugins_list)
    with open(resolved_plugins_list, 'r') as fp:
      for plugin_location in fp:
        yield plugin_location.strip()
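The write-to-a-temp-file-then-rename sequence above is what makes the plugin list appear atomically; a generic sketch of the same pattern:

import os
import uuid

def atomic_write_lines(path, lines):
    # Write to a uniquely named sibling, then rename into place; os.rename
    # is atomic on POSIX when both paths live on the same filesystem.
    tmp = '{}.{}'.format(path, uuid.uuid4().hex)
    with open(tmp, 'w') as fp:
        for line in lines:
            fp.write(line)
            fp.write('\n')
    os.rename(tmp, path)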
Code example #10
File: custom_types.py Project: adamchainz/pants
  def create(cls, value):
    """Interpret value as either a list or something to extend another list with.

    Note that we accept tuple literals, but the internal value is always a list.

    :param value: The value to convert.  Can be an instance of ListValueComponent, a list, a tuple,
                  a string representation (possibly prefixed by +) of a list or tuple, or any
                  allowed member_type.
    :rtype: `ListValueComponent`
    """
    if isinstance(value, six.string_types):
      value = ensure_text(value)
    if isinstance(value, cls):  # Ensure idempotency.
      action = value.action
      val = value.val
    elif isinstance(value, (list, tuple)):  # Ensure we can handle list-typed default values.
      action = cls.REPLACE
      val = value
    elif value.startswith('[') or value.startswith('('):
      action = cls.REPLACE
      val = _convert(value, (list, tuple))
    elif value.startswith('+[') or value.startswith('+('):
      action = cls.EXTEND
      val = _convert(value[1:], (list, tuple))
    elif isinstance(value, six.string_types):
      action = cls.EXTEND
      val = [value]
    else:
      action = cls.EXTEND
      val = _convert('[{}]'.format(value), list)
    return cls(action, list(val))
Code example #11
File: classpath_util.py Project: baroquebobcat/pants
  def classpath_entries_contents(cls, classpath_entries):
    """Provide a generator over the contents (classes/resources) of a classpath.

    Subdirectories are included and differentiated via a trailing forward slash (for symmetry
    across ZipFile.namelist and directory walks).

    :param classpath_entries: A sequence of classpath_entries. Non-jars/dirs are ignored.
    :returns: An iterator over all classpath contents, one directory, class or resource relative
              path per iteration step.
    :rtype: :class:`collections.Iterator` of string
    """
    for entry in classpath_entries:
      if cls.is_jar(entry):
        # Walk the jar namelist.
        with open_zip(entry, mode='r') as jar:
          for name in jar.namelist():
            yield ensure_text(name)
      elif os.path.isdir(entry):
        # Walk the directory, including subdirs.
        def rel_walk_name(abs_sub_dir, name):
          return fast_relpath(os.path.join(abs_sub_dir, name), entry)
        for abs_sub_dir, dirnames, filenames in safe_walk(entry):
          for name in dirnames:
            yield '{}/'.format(rel_walk_name(abs_sub_dir, name))
          for name in filenames:
            yield rel_walk_name(abs_sub_dir, name)
      else:
        # non-jar and non-directory classpath entries should be ignored
        pass
Code example #12
    def create(cls, value):
        """Interpret value as either a list or something to extend another list with.

    Note that we accept tuple literals, but the internal value is always a list.

    :param value: The value to convert.  Can be an instance of ListValueComponent, a list, a tuple,
                  a string representation (possibly prefixed by +) of a list or tuple, or any
                  allowed member_type.
    :rtype: `ListValueComponent`
    """
        if isinstance(value, six.string_types):
            value = ensure_text(value)
        if isinstance(value, cls):  # Ensure idempotency.
            action = value.action
            val = value.val
        elif isinstance(value, (list, tuple)):  # Ensure we can handle list-typed default values.
            action = cls.REPLACE
            val = value
        elif value.startswith('[') or value.startswith('('):
            action = cls.REPLACE
            val = _convert(value, (list, tuple))
        elif value.startswith('+[') or value.startswith('+('):
            action = cls.EXTEND
            val = _convert(value[1:], (list, tuple))
        elif isinstance(value, six.string_types):
            action = cls.EXTEND
            val = [value]
        else:
            action = cls.EXTEND
            val = _convert('[{}]'.format(value), list)
        return cls(action, list(val))
Code example #13
File: pants_daemon.py Project: jsirois/pants
  def _run_services(self, pants_services):
    """Service runner main loop."""
    if not pants_services.services:
      self._logger.critical('no services to run, bailing!')
      return

    service_thread_map = {service: self._make_thread(service)
                          for service in pants_services.services}

    # Start services.
    for service, service_thread in service_thread_map.items():
      self._logger.info('starting service {}'.format(service))
      try:
        service_thread.start()
      except (RuntimeError, FSEventService.ServiceError):
        self.shutdown(service_thread_map)
        raise PantsDaemon.StartupFailure('service {} failed to start, shutting down!'.format(service))

    # Once all services are started, write our pid.
    self.write_pid()
    self.write_metadata_by_name('pantsd', self.FINGERPRINT_KEY, ensure_text(self.options_fingerprint))

    # Monitor services.
    while not self.is_killed:
      for service, service_thread in service_thread_map.items():
        if not service_thread.is_alive():
          self.shutdown(service_thread_map)
          raise PantsDaemon.RuntimeFailure('service failure for {}, shutting down!'.format(service))
        else:
          # Avoid excessive CPU utilization.
          service_thread.join(self.JOIN_TIMEOUT_SECONDS)
Code example #14
File: custom_types.py Project: xeno-by/pants
    def create(cls, value):
        """Interpret value as either a dict or something to extend another dict with.

    :param value: The value to convert.  Can be an instance of DictValueComponent, a dict,
                  or a string representation (possibly prefixed by +) of a dict.
    :rtype: `DictValueComponent`
    """
        if isinstance(value, six.string_types):
            value = ensure_text(value)
        if isinstance(value, cls):  # Ensure idempotency.
            action = value.action
            val = value.val
        elif isinstance(value, dict):  # Ensure we can handle dict-typed default values.
            action = cls.REPLACE
            val = value
        elif value.startswith('{'):
            action = cls.REPLACE
            val = _convert(value, dict)
        elif value.startswith('+{'):
            action = cls.EXTEND
            val = _convert(value[1:], dict)
        else:
            raise ParseError('Invalid dict value: {}'.format(value))
        return cls(action, dict(val))
Code example #15
File: jar_publish.py Project: wiwa/pants
 def changelog(self, target, sha):
     # Filter synthetic files.
     files = [
         filename for filename in target.sources_relative_to_buildroot()
         if not filename.startswith(os.pardir)
     ]
     return ensure_text(self.scm.changelog(from_commit=sha, files=files))
Code example #16
    def classpath_entries_contents(cls, classpath_entries):
        """Provide a generator over the contents (classes/resources) of a classpath.

    Subdirectories are included and differentiated via a trailing forward slash (for symmetry
    across ZipFile.namelist and directory walks).

    :param classpath_entries: A sequence of classpath_entries. Non-jars/dirs are ignored.
    :returns: An iterator over all classpath contents, one directory, class or resource relative
              path per iteration step.
    :rtype: :class:`collections.Iterator` of string
    """
        for entry in classpath_entries:
            if cls.is_jar(entry):
                # Walk the jar namelist.
                with open_zip(entry, mode='r') as jar:
                    for name in jar.namelist():
                        yield ensure_text(name)
            elif os.path.isdir(entry):
                # Walk the directory, including subdirs.
                def rel_walk_name(abs_sub_dir, name):
                    return fast_relpath(os.path.join(abs_sub_dir, name), entry)

                for abs_sub_dir, dirnames, filenames in safe_walk(entry):
                    for name in dirnames:
                        yield f'{rel_walk_name(abs_sub_dir, name)}/'
                    for name in filenames:
                        yield rel_walk_name(abs_sub_dir, name)
            else:
                # non-jar and non-directory classpath entries should be ignored
                pass
Code example #17
    def filter_namespace_packages(self, root_target, inits):
        args = list(inits)
        with self.context.new_workunit(
                name='find-namespace-packages',
                cmd=' '.join(self.nsutil_pex.cmdline(args=args)),
                labels=[WorkUnitLabel.TOOL]) as workunit:

            process = self.nsutil_pex.run(args=args,
                                          stdout=subprocess.PIPE,
                                          stderr=subprocess.PIPE,
                                          blocking=False)

            stdout, stderr = process.communicate()

            # TODO(John Sirois): Find a way to tee a workunit output instead of buffering up all output
            # and then writing it out after the process has finished like we do here.
            def write(stream_name, data):
                stream = workunit.output(stream_name)
                stream.write(ensure_binary(data) if PY2 else ensure_text(data))
                stream.flush()

            write('stdout', stdout)
            write('stderr', stderr)

            exit_code = process.returncode
            if exit_code != 0:
                raise TaskError(
                    'Failure trying to detect namespace packages when constructing setup.py '
                    'project for {}:\n{}'.format(
                        root_target.address.reference(), stderr),
                    exit_code=exit_code,
                    failed_targets=[root_target])

            return ensure_text(stdout).splitlines()
Code example #18
File: setup_py.py Project: cosmicexplorer/pants
  def filter_namespace_packages(self, root_target, inits):
    args = list(inits)
    with self.context.new_workunit(name='find-namespace-packages',
                                   cmd=' '.join(self.nsutil_pex.cmdline(args=args)),
                                   labels=[WorkUnitLabel.TOOL]) as workunit:

      process = self.nsutil_pex.run(args=args,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    blocking=False)

      stdout, stderr = process.communicate()

      # TODO(John Sirois): Find a way to tee a workunit output instead of buffering up all output
      # and then writing it out after the process has finished like we do here.
      def write(stream_name, data):
        stream = workunit.output(stream_name)
        stream.write(ensure_binary(data))
        stream.flush()

      write('stdout', stdout)
      write('stderr', stderr)

      exit_code = process.returncode
      if exit_code != 0:
        raise TaskError('Failure trying to detect namespace packages when constructing setup.py '
                        'project for {}:\n{}'.format(root_target.address.reference(), stderr),
                        exit_code=exit_code,
                        failed_targets=[root_target])

      return ensure_text(stdout).splitlines()
Code example #19
 def filecontent_for(path):
     is_executable = os.stat(path).st_mode & stat.S_IXUSR == stat.S_IXUSR
     return FileContent(
         ensure_text(path),
         read_file(path, binary_mode=True),
         is_executable=is_executable,
     )
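The st_mode test above is a plain owner-execute-bit check; in isolation:

import os
import stat

mode = os.stat('/bin/ls').st_mode
is_executable = mode & stat.S_IXUSR == stat.S_IXUSR  # True when the owner may execute it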
Code example #20
  def test_all(self):
    testsuites = JUnitHtmlReport.create(self._JUNIT_XML_DIR)._parse_xml_files()
    self.assertEqual(7, len(testsuites))

    with temporary_dir() as output_dir:
      junit_html_report = JUnitHtmlReport.create(xml_dir=self._JUNIT_XML_DIR, open_report=True)
      with open(junit_html_report.report(output_dir)) as html_file:
        html_data = ensure_text(html_file.read())
        self.assertIn(u'</span>&nbsp;org.pantsbuild.PåssingTest', html_data)
        self.assertIn(u'</span>&nbsp;testTwö</td>', html_data)
        self.assertIn(u'at org.pantsbuild.PåssingTest.testTwö(ErrorTest.java:29)', html_data)
Code example #21
File: pants_daemon.py Project: matze999/pants
    def _initialize_pid(self):
        """Writes out our pid and metadata.

        Once written, does a one-time read of the pid to confirm that we haven't raced another
        process starting.
        """

        # Write the pidfile. The SchedulerService will monitor it after a grace period.
        pid = os.getpid()
        self.write_pid(pid=pid)
        self.write_metadata_by_name("pantsd", self.FINGERPRINT_KEY,
                                    ensure_text(self.options_fingerprint))
Code example #22
    def _read_object_from_repo(self, rev=None, relpath=None, sha=None):
        """Read an object from the git repo.
    This is implemented via a pipe to git cat-file --batch
    """
        if sha:
            spec = sha + b'\n'
        else:
            assert rev is not None
            assert relpath is not None
            rev = ensure_text(rev)
            relpath = ensure_text(relpath)
            relpath = self._fixup_dot_relative(relpath)
            spec = f'{rev}:{relpath}\n'.encode()

        self._maybe_start_cat_file_process()
        self._cat_file_process.stdin.write(spec)
        self._cat_file_process.stdin.flush()
        header = None
        while not header:
            header = self._cat_file_process.stdout.readline()
            if self._cat_file_process.poll() is not None:
                raise self.GitDiedException(
                    "Git cat-file died while trying to read '{}'.".format(
                        spec))

        header = header.rstrip()
        parts = header.rsplit(SPACE, 2)
        if len(parts) == 2:
            assert parts[1] == b'missing'
            raise self.MissingFileException(rev, relpath)

        _, object_type, object_len = parts

        # Read the object data
        blob = bytes(self._cat_file_process.stdout.read(int(object_len)))

        # Read the trailing newline
        assert self._cat_file_process.stdout.read(1) == b'\n'
        assert len(blob) == int(object_len)
        return object_type, blob
Code example #23
File: git.py Project: foursquare/pants
  def _read_object_from_repo(self, rev=None, relpath=None, sha=None):
    """Read an object from the git repo.
    This is implemented via a pipe to git cat-file --batch
    """
    if sha:
      spec = sha + b'\n'
    else:
      assert rev is not None
      assert relpath is not None
      rev = ensure_text(rev)
      relpath = ensure_text(relpath)
      relpath = self._fixup_dot_relative(relpath)
      spec = '{}:{}\n'.format(rev, relpath).encode('utf-8')

    self._maybe_start_cat_file_process()
    self._cat_file_process.stdin.write(spec)
    self._cat_file_process.stdin.flush()
    header = None
    while not header:
      header = self._cat_file_process.stdout.readline()
      if self._cat_file_process.poll() is not None:
        raise self.GitDiedException("Git cat-file died while trying to read '{}'.".format(spec))

    header = header.rstrip()
    parts = header.rsplit(SPACE, 2)
    if len(parts) == 2:
      assert parts[1] == b'missing'
      raise self.MissingFileException(rev, relpath)

    _, object_type, object_len = parts

    # Read the object data
    blob = bytes(self._cat_file_process.stdout.read(int(object_len)))

    # Read the trailing newline
    assert self._cat_file_process.stdout.read(1) == b'\n'
    assert len(blob) == int(object_len)
    return object_type, blob
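For context, both variants of _read_object_from_repo speak git's documented cat-file batch protocol: write an object spec plus newline to stdin, read back a header line of the form '<sha> <type> <size>' (or '<spec> missing'), then <size> bytes of content followed by a trailing newline. A stripped-down sketch (the process setup here is illustrative, not the wrapper's actual code):

import subprocess

proc = subprocess.Popen(['git', 'cat-file', '--batch'],
                        stdin=subprocess.PIPE, stdout=subprocess.PIPE)
proc.stdin.write(b'HEAD:README.md\n')
proc.stdin.flush()
header = proc.stdout.readline()            # e.g. b'<sha> blob 1234\n'
sha, object_type, size = header.rstrip().rsplit(b' ', 2)
blob = proc.stdout.read(int(size))         # the object's raw content
proc.stdout.read(1)                        # consume the trailing newline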
Code example #24
    def _write_repr(o, indent=False, level=0):
        pad = ' ' * 4 * level
        if indent:
            _write(pad)
        level += 1

        if isinstance(o, (bytes, str)):
            # The py2 repr of str (unicode) is `u'...'` and we don't want the `u` prefix; likewise,
            # the py3 repr of bytes is `b'...'` and we don't want the `b` prefix so we hand-roll a
            # repr here.
            if linesep in o:
                _write('"""{}"""'.format(
                    ensure_text(o.replace('"""', r'\"\"\"'))))
            else:
                _write("'{}'".format(ensure_text(o.replace("'", r"\'"))))
        elif isinstance(o, collections.Mapping):
            _write('{' + linesep)
            for k, v in o.items():
                _write_repr(k, indent=True, level=level)
                _write(': ')
                _write_repr(v, indent=False, level=level)
                _write(',' + linesep)
            _write(pad + '}')
        elif isinstance(o, collections.Iterable):
            if isinstance(o, collections.MutableSequence):
                open_collection, close_collection = '[]'
            elif isinstance(o, collections.Set):
                open_collection, close_collection = '{}'
            else:
                open_collection, close_collection = '()'

            _write(open_collection + linesep)
            for i in o:
                _write_repr(i, indent=True, level=level)
                _write(',' + linesep)
            _write(pad + close_collection)
        else:
            _write(repr(o))  # Numbers and bools.
Code example #25
File: pants_daemon.py Project: jsirois/pants
    def _initialize_metadata(self) -> None:
        """Writes out our pid and other metadata.

        Order matters a bit here, because technically all that is necessary to connect is the port,
        and Services are lazily initialized by the core when a connection is established. Our pid
        needs to be on disk before that happens.
        """

        # Write the pidfile. The SchedulerService will monitor it after a grace period.
        self.write_pid()
        self.write_process_name()
        self.write_fingerprint(ensure_text(self.options_fingerprint))
        self._logger.debug(f"pantsd running with PID: {self.pid}")
        self.write_socket(self._server.port())
Code example #26
File: dirutil.py Project: Gabriel439/pants
def safe_walk(path, **kwargs):
  """Just like os.walk, but ensures that the returned values are unicode objects.

    This isn't strictly safe, in that it is possible that some paths
    will not be decodeable, but that case is rare, and the only
    alternative is to somehow avoid all interaction between paths and
    unicode objects, which seems especially tough in the presence of
    unicode_literals. See e.g.
    https://mail.python.org/pipermail/python-dev/2008-December/083856.html

  """
  # If os.walk is given a text argument, it yields text values; if it
  # is given a binary argument, it yields binary values.
  return os.walk(ensure_text(path), **kwargs)
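A quick illustration of the contract (paths are hypothetical):

for root, dirs, files in safe_walk(b'/tmp/data'):
    # root, dirs and files are all text, even for a bytes argument, because
    # the path is decoded via ensure_text before os.walk ever sees it.
    pass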
Code example #27
def safe_walk(path, **kwargs):
    """Just like os.walk, but ensures that the returned values are unicode objects.

    This isn't strictly safe, in that it is possible that some paths
    will not be decodeable, but that case is rare, and the only
    alternative is to somehow avoid all interaction between paths and
    unicode objects, which seems especially tough in the presence of
    unicode_literals. See e.g.
    https://mail.python.org/pipermail/python-dev/2008-December/083856.html

  """
    # If os.walk is given a text argument, it yields text values; if it
    # is given a binary argument, it yields binary values.
    return os.walk(ensure_text(path), **kwargs)
Code example #28
File: setup_py.py Project: cosmicexplorer/pants
  def _write_repr(o, indent=False, level=0):
    pad = ' ' * 4 * level
    if indent:
      _write(pad)
    level += 1

    if isinstance(o, (bytes, str)):
      # The py2 repr of str (unicode) is `u'...'` and we don't want the `u` prefix; likewise,
      # the py3 repr of bytes is `b'...'` and we don't want the `b` prefix so we hand-roll a
      # repr here.
      if linesep in o:
        _write('"""{}"""'.format(ensure_text(o.replace('"""', r'\"\"\"'))))
      else:
        _write("'{}'".format(ensure_text(o.replace("'", r"\'"))))
    elif isinstance(o, Mapping):
      _write('{' + linesep)
      for k, v in o.items():
        _write_repr(k, indent=True, level=level)
        _write(': ')
        _write_repr(v, indent=False, level=level)
        _write(',' + linesep)
      _write(pad + '}')
    elif isinstance(o, Iterable):
      if isinstance(o, MutableSequence):
        open_collection, close_collection = '[]'
      elif isinstance(o, Set):
        open_collection, close_collection = '{}'
      else:
        open_collection, close_collection = '()'

      _write(open_collection + linesep)
      for i in o:
        _write_repr(i, indent=True, level=level)
        _write(',' + linesep)
      _write(pad + close_collection)
    else:
      _write(repr(o))  # Numbers and bools.
Code example #29
  def test_all(self):
    test_dir = os.path.join(
      self.real_build_root,
      'tests/python/pants_test/backend/jvm/tasks/reports/junit_html_report_resources')
    testsuites = JUnitHtmlReport().parse_xml_files(test_dir)
    self.assertEqual(7, len(testsuites))

    with temporary_dir() as output_dir:
      output_file = os.path.join(output_dir, 'junit-report.html')
      JUnitHtmlReport().report(test_dir, output_dir)
      self.assertTrue(os.path.exists(output_file))
      with open(output_file) as html_file:
        html_data = ensure_text(html_file.read())
        self.assertIn(u'</span>&nbsp;org.pantsbuild.PåssingTest', html_data)
        self.assertIn(u'</span>&nbsp;testTwö</td>', html_data)
        self.assertIn(u'at org.pantsbuild.PåssingTest.testTwö(ErrorTest.java:29)', html_data)
Code example #30
    def test_all(self):
        testsuites = JUnitHtmlReport.create(
            self._JUNIT_XML_DIR)._parse_xml_files()
        self.assertEqual(7, len(testsuites))

        with temporary_dir() as output_dir:
            junit_html_report = JUnitHtmlReport.create(
                xml_dir=self._JUNIT_XML_DIR, open_report=True)
            with open(junit_html_report.report(output_dir)) as html_file:
                html_data = ensure_text(html_file.read())
                self.assertIn(u'</span>&nbsp;org.pantsbuild.PåssingTest',
                              html_data)
                self.assertIn(u'</span>&nbsp;testTwö</td>', html_data)
                self.assertIn(
                    u'at org.pantsbuild.PåssingTest.testTwö(ErrorTest.java:29)',
                    html_data)
Code example #31
File: plugin_resolver.py Project: thoward/pants
    def _resolve_exact_plugin_locations(self):
        hasher = hashlib.sha1()
        for req in sorted(self._plugin_requirements):
            hasher.update(req.encode('utf-8'))
        resolve_hash = hasher.hexdigest()
        resolved_plugins_list = os.path.join(
            self.plugin_cache_dir, 'plugins-{}.txt'.format(resolve_hash))

        if not os.path.exists(resolved_plugins_list):
            tmp_plugins_list = resolved_plugins_list + '~'
            with safe_open(tmp_plugins_list, 'w') as fp:
                for plugin in self._resolve_plugins():
                    fp.write(ensure_text(plugin.location))
                    fp.write('\n')
            os.rename(tmp_plugins_list, resolved_plugins_list)
        with open(resolved_plugins_list, 'r') as fp:
            for plugin_location in fp:
                yield plugin_location.strip()
Code example #32
File: pants_daemon.py Project: MEDIARITHMICS/pants
    def _initialize_pid(self):
        """Writes out our pid and metadata, and begin watching it for validity.

        Once written and watched, does a one-time read of the pid to confirm that we haven't raced
        another process starting.

        All services must already have been initialized before this is called.
        """

        # Write the pidfile.
        pid = os.getpid()
        self.write_pid(pid=pid)
        self.write_metadata_by_name("pantsd", self.FINGERPRINT_KEY,
                                    ensure_text(self.options_fingerprint))
        scheduler_services = [
            s for s in self._services.services
            if isinstance(s, SchedulerService)
        ]
        for scheduler_service in scheduler_services:
            scheduler_service.begin_monitoring_memory_usage(pid)

        # If we can, add the pidfile to watching via the scheduler.
        pidfile_absolute = self._metadata_file_path("pantsd", "pid")
        if pidfile_absolute.startswith(self._build_root):
            for scheduler_service in scheduler_services:
                scheduler_service.add_invalidation_glob(
                    os.path.relpath(pidfile_absolute, self._build_root))
        else:
            logging.getLogger(__name__).warning(
                "Not watching pantsd pidfile because subprocessdir is outside of buildroot. Having "
                "subprocessdir be a child of buildroot (as it is by default) may help avoid stray "
                "pantsd processes.")

        # Finally, once watched, confirm that we didn't race another process.
        try:
            with open(pidfile_absolute, "r") as f:
                pid_from_file = f.read()
        except IOError:
            raise Exception(
                f"Could not read pants pidfile at {pidfile_absolute}.")
        if int(pid_from_file) != os.getpid():
            raise Exception(
                f"Another instance of pantsd is running at {pid_from_file}")
Code example #33
File: custom_types.py Project: xeno-by/pants
    def create(cls, value):
        """Interpret value as either a list or something to extend another list with.

    Note that we accept tuple literals, but the internal value is always a list.

    :param value: The value to convert.  Can be an instance of ListValueComponent, a list, a tuple,
                  a string representation of a list or tuple (possibly prefixed by + or -
                  indicating modification instead of replacement), or any allowed member_type.
                  May also be a comma-separated sequence of modifications.
    :rtype: `ListValueComponent`
    """
        if isinstance(value, six.string_types):
            value = ensure_text(value)
            comma_separated_exprs = cls._split_modifier_expr(value)
            if len(comma_separated_exprs) > 1:
                return cls.merge(
                    [cls.create(x) for x in comma_separated_exprs])

        action = cls.MODIFY
        appends = []
        filters = []
        if isinstance(value, cls):  # Ensure idempotency.
            action = value._action
            appends = value._appends
            filters = value._filters
        elif isinstance(value, (list, tuple)):  # Ensure we can handle list-typed default values.
            action = cls.REPLACE
            appends = value
        elif value.startswith('[') or value.startswith('('):
            action = cls.REPLACE
            appends = _convert(value, (list, tuple))
        elif value.startswith('+[') or value.startswith('+('):
            appends = _convert(value[1:], (list, tuple))
        elif value.startswith('-[') or value.startswith('-('):
            filters = _convert(value[1:], (list, tuple))
        elif isinstance(value, six.string_types):
            appends = [value]
        else:
            appends = _convert('[{}]'.format(value), list)
        return cls(action, list(appends), list(filters))
Code example #34
File: custom_types.py Project: foursquare/pants
  def create(cls, value):
    """Interpret value as either a list or something to extend another list with.

    Note that we accept tuple literals, but the internal value is always a list.

    :param value: The value to convert.  Can be an instance of ListValueComponent, a list, a tuple,
                  a string representation of a list or tuple (possibly prefixed by + or -
                  indicating modification instead of replacement), or any allowed member_type.
                  May also be a comma-separated sequence of modifications.
    :rtype: `ListValueComponent`
    """
    if isinstance(value, six.string_types):
      value = ensure_text(value)
      comma_separated_exprs = cls._split_modifier_expr(value)
      if len(comma_separated_exprs) > 1:
        return cls.merge([cls.create(x) for x in comma_separated_exprs])

    action = cls.MODIFY
    appends = []
    filters = []
    if isinstance(value, cls):  # Ensure idempotency.
      action = value._action
      appends = value._appends
      filters = value._filters
    elif isinstance(value, (list, tuple)):  # Ensure we can handle list-typed default values.
      action = cls.REPLACE
      appends = value
    elif value.startswith('[') or value.startswith('('):
      action = cls.REPLACE
      appends = _convert(value, (list, tuple))
    elif value.startswith('+[') or value.startswith('+('):
      appends = _convert(value[1:], (list, tuple))
    elif value.startswith('-[') or value.startswith('-('):
      filters = _convert(value[1:], (list, tuple))
    elif isinstance(value, six.string_types):
      appends = [value]
    else:
      appends = _convert('[{}]'.format(value), list)
    return cls(action, list(appends), list(filters))
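Hypothetical calls tracing the branches above:

ListValueComponent.create('[1, 2]')     # REPLACE; appends == [1, 2]
ListValueComponent.create('+[3]')       # MODIFY; appends == [3]
ListValueComponent.create('-[1]')       # MODIFY; filters == [1]
ListValueComponent.create('+[3],-[1]')  # split on the comma and merged into one component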
Code example #35
    def _write_repr(o, indent=False, level=0):
        pad = " " * 4 * level
        if indent:
            _write(pad)
        level += 1

        if isinstance(o, (bytes, str)):
            # The py2 repr of str (unicode) is `u'...'` and we don't want the `u` prefix; likewise,
            # the py3 repr of bytes is `b'...'` and we don't want the `b` prefix so we hand-roll a
            # repr here.
            o_txt = ensure_text(o)
            if linesep in o_txt:
                _write('"""{}"""'.format(o_txt.replace('"""', r"\"\"\"")))
            else:
                _write("'{}'".format(o_txt.replace("'", r"\'")))
        elif isinstance(o, abc.Mapping):
            _write("{" + linesep)
            for k, v in o.items():
                _write_repr(k, indent=True, level=level)
                _write(": ")
                _write_repr(v, indent=False, level=level)
                _write("," + linesep)
            _write(pad + "}")
        elif isinstance(o, abc.Iterable):
            if isinstance(o, abc.MutableSequence):
                open_collection, close_collection = "[]"
            elif isinstance(o, abc.Set):
                open_collection, close_collection = "{}"
            else:
                open_collection, close_collection = "()"

            _write(open_collection + linesep)
            for i in o:
                _write_repr(i, indent=True, level=level)
                _write("," + linesep)
            _write(pad + close_collection)
        else:
            _write(repr(o))  # Numbers and bools.
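To make the recursion concrete: assuming _write appends to an output buffer and linesep is '\n', a call such as _write_repr({'key': ['value']}) would emit

{
    'key': [
        'value',
    ],
}

with the trailing commas and four-space pads coming from the Mapping and MutableSequence branches.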
Code example #36
File: custom_types.py Project: foursquare/pants
  def create(cls, value):
    """Interpret value as either a dict or something to extend another dict with.

    :param value: The value to convert.  Can be an instance of DictValueComponent, a dict,
                  or a string representation (possibly prefixed by +) of a dict.
    :rtype: `DictValueComponent`
    """
    if isinstance(value, six.string_types):
      value = ensure_text(value)
    if isinstance(value, cls):  # Ensure idempotency.
      action = value.action
      val = value.val
    elif isinstance(value, dict):  # Ensure we can handle dict-typed default values.
      action = cls.REPLACE
      val = value
    elif value.startswith('{'):
      action = cls.REPLACE
      val = _convert(value, dict)
    elif value.startswith('+{'):
      action = cls.EXTEND
      val = _convert(value[1:], dict)
    else:
      raise ParseError('Invalid dict value: {}'.format(value))
    return cls(action, dict(val))
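Hypothetical calls mirroring the branches above:

DictValueComponent.create('{"a": 1}')   # action == REPLACE, val == {'a': 1}
DictValueComponent.create('+{"b": 2}')  # action == EXTEND, val == {'b': 2}
DictValueComponent.create('nonsense')   # raises ParseError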
Code example #37
    def _initialize_pid(self):
        """Writes out our pid and metadata.

        Once written, does a one-time read of the pid to confirm that we haven't raced another
        process starting.
        """

        # Write the pidfile.
        pid = os.getpid()
        self.write_pid(pid=pid)
        self.write_metadata_by_name("pantsd", self.FINGERPRINT_KEY,
                                    ensure_text(self.options_fingerprint))
        pidfile_absolute = self._metadata_file_path("pantsd", "pid")

        # Finally, once watched, confirm that we didn't race another process.
        try:
            with open(pidfile_absolute, "r") as f:
                pid_from_file = f.read()
        except IOError:
            raise Exception(
                f"Could not read pants pidfile at {pidfile_absolute}.")
        if int(pid_from_file) != os.getpid():
            raise Exception(
                f"Another instance of pantsd is running at {pid_from_file}")
Code example #38
File: test_strutil.py Project: cosmicexplorer/pants
 def test_ensure_text(self):
   bytes_val = bytes(bytearray([0xe5, 0xbf, 0xab]))
   self.assertEqual(u'快', ensure_text(bytes_val))
   with self.assertRaises(TypeError):
     ensure_text(45)
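The three bytes are simply the UTF-8 encoding of '快', which is what makes the assertion hold:

assert '快'.encode('utf-8') == bytes(bytearray([0xe5, 0xbf, 0xab]))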
Code example #39
File: jar_publish.py Project: caveness/pants
 def changelog(self, target, sha):
   # Filter synthetic files.
   files = filter(lambda filename: not filename.startswith(os.pardir), target.sources_relative_to_buildroot())
   return ensure_text(self.scm.changelog(from_commit=sha, files=files))
Code example #40
 def test_ensure_text(self):
     bytes_val = bytes(bytearray([0xE5, 0xBF, 0xAB]))
     self.assertEqual("快", ensure_text(bytes_val))
     with self.assertRaises(TypeError):
         ensure_text(45)
Code example #41
 def filecontent_for(path):
   return FileContent(ensure_text(path), read_file(path, binary_mode=True))
Code example #42
File: setup_py.py Project: cosmicexplorer/pants
 def _write(data):
   output.write(ensure_text(data))
Code example #43
 def visit_Str(self, node) -> None:
     val = ensure_text(node.s)
     self.maybe_add_inferred_import(val)
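visit_Str is the ast.NodeVisitor hook for string literals (pre-3.8 style; newer code uses visit_Constant), so the method above sees every string constant in the parsed source. A self-contained sketch of the same idea:

import ast

class StringLiteralCollector(ast.NodeVisitor):
    def __init__(self):
        self.literals = []

    def visit_Str(self, node):
        # Invoked once per string literal in the tree.
        self.literals.append(node.s)

collector = StringLiteralCollector()
collector.visit(ast.parse("imported = 'pants.util.strutil'"))
# collector.literals == ['pants.util.strutil']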
Code example #44
File: jar_publish.py Project: dominichamon/pants
 def changelog(self, target, sha):
   return ensure_text(self.scm.changelog(from_commit=sha,
                                         files=target.sources_relative_to_buildroot()))
Code example #45
 def test_ensure_text(self) -> None:
     bytes_val = bytes(bytearray([0xE5, 0xBF, 0xAB]))
     self.assertEqual("快", ensure_text(bytes_val))
     with self.assertRaises(TypeError):
         ensure_text(45)  # type: ignore[arg-type] # intended to fail type check
Code example #46
File: options_bootstrapper.py Project: gatesn/pants
 def filecontent_for(path: str) -> FileContent:
     return FileContent(
         ensure_text(path),
         read_file(path, binary_mode=True),
     )
Code example #47
File: parsers.py Project: tpasternak/pants
    def parse(self, filepath, filecontent):
        """Parse the given json encoded string into a list of top-level objects found.

    The parser accepts both blank lines and comment lines (those beginning with optional whitespace
    followed by the '#' character) as well as more than one top-level JSON object.

    The parse also supports a simple protocol for serialized types that have an `_asdict` method.
    This includes `namedtuple` subtypes as well as any custom class with an `_asdict` method defined;
    see :class:`pants.engine.serializable.Serializable`.
    """
        json = ensure_text(filecontent)

        decoder = self._decoder

        # Strip comment lines and blank lines, which we allow, but preserve enough information about the
        # stripping to constitute a reasonable error message that can be used to find the portion of the
        # JSON document containing the error.

        def non_comment_line(l):
            stripped = l.lstrip()
            return stripped if (stripped
                                and not stripped.startswith('#')) else None

        offset = 0
        objects = []
        while True:
            lines = json[offset:].splitlines()
            if not lines:
                break

            # Strip whitespace and comment lines preceding the next JSON object.
            while True:
                line = non_comment_line(lines[0])
                if not line:
                    comment_line = lines.pop(0)
                    offset += len(comment_line) + 1
                elif line.startswith('{') or line.startswith('['):
                    # Account for leading space in this line that starts off the JSON object.
                    offset += len(lines[0]) - len(line)
                    break
                else:
                    raise ParseError(f'Unexpected json line:\n{lines[0]}')

            lines = json[offset:].splitlines()
            if not lines:
                break

            # Prepare the JSON blob for parsing - strip blank and comment lines, recording enough
            # information to reconstitute original offsets after the parse.
            comment_lines = []
            non_comment_lines = []
            for line_number, line in enumerate(lines):
                if non_comment_line(line):
                    non_comment_lines.append(line)
                else:
                    comment_lines.append((line_number, line))

            data = '\n'.join(non_comment_lines)
            try:
                obj, idx = decoder.raw_decode(data)
                objects.append(obj)
                if idx >= len(data):
                    break
                offset += idx

                # Add back in any parsed blank or comment line offsets.
                parsed_line_count = len(data[:idx].splitlines())
                for line_number, line in comment_lines:
                    if line_number >= parsed_line_count:
                        break
                    offset += len(line) + 1
                    parsed_line_count += 1
            except ValueError as e:
                json_lines = data.splitlines()
                col_width = len(str(len(json_lines)))

                col_padding = ' ' * col_width

                def format_line(line):
                    return f'{col_padding}  {line}'

                header_lines = [
                    format_line(line) for line in json[:offset].splitlines()
                ]

                formatted_json_lines = [
                    ('{line_number:{col_width}}: {line}'.format(
                        col_width=col_width,
                        line_number=line_number,
                        line=line))
                    for line_number, line in enumerate(json_lines, start=1)
                ]

                for line_number, line in comment_lines:
                    formatted_json_lines.insert(line_number, format_line(line))

                raise ParseError(
                    '{error}\nIn document at {filepath}:\n{json_data}'.format(
                        error=e,
                        filepath=filepath,
                        json_data='\n'.join(header_lines +
                                            formatted_json_lines)))

        return objects
Code example #48
File: parsers.py Project: cosmicexplorer/pants
  def parse(self, filepath, filecontent):
    """Parse the given json encoded string into a list of top-level objects found.

    The parser accepts both blank lines and comment lines (those beginning with optional whitespace
    followed by the '#' character) as well as more than one top-level JSON object.

    The parse also supports a simple protocol for serialized types that have an `_asdict` method.
    This includes `namedtuple` subtypes as well as any custom class with an `_asdict` method defined;
    see :class:`pants.engine.serializable.Serializable`.
    """
    json = ensure_text(filecontent)

    decoder = self._decoder

    # Strip comment lines and blank lines, which we allow, but preserve enough information about the
    # stripping to constitute a reasonable error message that can be used to find the portion of the
    # JSON document containing the error.

    def non_comment_line(l):
      stripped = l.lstrip()
      return stripped if (stripped and not stripped.startswith('#')) else None

    offset = 0
    objects = []
    while True:
      lines = json[offset:].splitlines()
      if not lines:
        break

      # Strip whitespace and comment lines preceding the next JSON object.
      while True:
        line = non_comment_line(lines[0])
        if not line:
          comment_line = lines.pop(0)
          offset += len(comment_line) + 1
        elif line.startswith('{') or line.startswith('['):
          # Account for leading space in this line that starts off the JSON object.
          offset += len(lines[0]) - len(line)
          break
        else:
          raise ParseError('Unexpected json line:\n{}'.format(lines[0]))

      lines = json[offset:].splitlines()
      if not lines:
        break

      # Prepare the JSON blob for parsing - strip blank and comment lines, recording enough
      # information to reconstitute original offsets after the parse.
      comment_lines = []
      non_comment_lines = []
      for line_number, line in enumerate(lines):
        if non_comment_line(line):
          non_comment_lines.append(line)
        else:
          comment_lines.append((line_number, line))

      data = '\n'.join(non_comment_lines)
      try:
        obj, idx = decoder.raw_decode(data)
        objects.append(obj)
        if idx >= len(data):
          break
        offset += idx

        # Add back in any parsed blank or comment line offsets.
        parsed_line_count = len(data[:idx].splitlines())
        for line_number, line in comment_lines:
          if line_number >= parsed_line_count:
            break
          offset += len(line) + 1
          parsed_line_count += 1
      except ValueError as e:
        json_lines = data.splitlines()
        col_width = len(str(len(json_lines)))

        col_padding = ' ' * col_width

        def format_line(line):
          return '{col_padding}  {line}'.format(col_padding=col_padding, line=line)

        header_lines = [format_line(line) for line in json[:offset].splitlines()]

        formatted_json_lines = [('{line_number:{col_width}}: {line}'
                                .format(col_width=col_width, line_number=line_number, line=line))
                                for line_number, line in enumerate(json_lines, start=1)]

        for line_number, line in comment_lines:
          formatted_json_lines.insert(line_number, format_line(line))

        raise ParseError('{error}\nIn document at {filepath}:\n{json_data}'
                        .format(error=e,
                                filepath=filepath,
                                json_data='\n'.join(header_lines + formatted_json_lines)))

    return objects
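The incremental scan in both parser variants leans on json.JSONDecoder.raw_decode, which parses one value off the front of a string and reports how far it read:

import json

obj, idx = json.JSONDecoder().raw_decode('{"a": 1}  {"b": 2}')
# obj == {'a': 1}; idx == 8, the offset just past the first object.
# That offset is what lets the loop walk multiple top-level documents.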
Code example #49
File: zinc_compile.py Project: cosmicexplorer/pants
  def compile(self, ctx, args, dependency_classpath, upstream_analysis,
              settings, compiler_option_sets, zinc_file_manager,
              javac_plugin_map, scalac_plugin_map):
    absolute_classpath = (ctx.classes_dir.path,) + tuple(ce.path for ce in dependency_classpath)

    if self.get_options().capture_classpath:
      self._record_compile_classpath(absolute_classpath, ctx.target, ctx.classes_dir.path)

    self._verify_zinc_classpath(absolute_classpath, allow_dist=(self.execution_strategy != self.HERMETIC))
    # TODO: Investigate upstream_analysis for hermetic compiles
    self._verify_zinc_classpath(upstream_analysis.keys())

    def relative_to_exec_root(path):
      # TODO: Support workdirs not nested under buildroot by path-rewriting.
      return fast_relpath(path, get_buildroot())

    classes_dir = ctx.classes_dir.path
    analysis_cache = ctx.analysis_file

    analysis_cache = relative_to_exec_root(analysis_cache)
    classes_dir = relative_to_exec_root(classes_dir)
    # TODO: Have these produced correctly, rather than having to relativize them here
    relative_classpath = tuple(relative_to_exec_root(c) for c in absolute_classpath)

    # list of classpath entries
    scalac_classpath_entries = self.scalac_classpath_entries()
    scala_path = [classpath_entry.path for classpath_entry in scalac_classpath_entries]

    zinc_args = []
    zinc_args.extend([
      '-log-level', self.get_options().level,
      '-analysis-cache', analysis_cache,
      '-classpath', ':'.join(relative_classpath),
      '-d', classes_dir,
    ])
    if not self.get_options().colors:
      zinc_args.append('-no-color')

    compiler_bridge_classpath_entry = self._zinc.compile_compiler_bridge(self.context)
    zinc_args.extend(['-compiled-bridge-jar', relative_to_exec_root(compiler_bridge_classpath_entry.path)])
    zinc_args.extend(['-scala-path', ':'.join(scala_path)])

    zinc_args.extend(self._javac_plugin_args(javac_plugin_map))
    # Search for scalac plugins on the classpath.
    # Note that:
    # - We also search in the extra scalac plugin dependencies, if specified.
    # - In scala 2.11 and up, the plugin's classpath element can be a dir, but for 2.10 it must be
    #   a jar.  So in-repo plugins will only work with 2.10 if --use-classpath-jars is true.
    # - We exclude our own classes_dir/jar_file, because if we're a plugin ourselves, then our
    #   classes_dir doesn't have scalac-plugin.xml yet, and we don't want that fact to get
    #   memoized (which in practice will only happen if this plugin uses some other plugin, thus
    #   triggering the plugin search mechanism, which does the memoizing).
    scalac_plugin_search_classpath = (
      (set(absolute_classpath) | set(self.scalac_plugin_classpath_elements())) -
      {ctx.classes_dir.path, ctx.jar_file.path}
    )
    zinc_args.extend(self._scalac_plugin_args(scalac_plugin_map, scalac_plugin_search_classpath))
    if upstream_analysis:
      zinc_args.extend(['-analysis-map',
                        ','.join('{}:{}'.format(
                          relative_to_exec_root(k),
                          relative_to_exec_root(v)
                        ) for k, v in upstream_analysis.items())])

    zinc_args.extend(args)
    zinc_args.extend(self._get_zinc_arguments(settings))
    zinc_args.append('-transactional')

    compiler_option_sets_args = self.get_merged_args_for_compiler_option_sets(compiler_option_sets)
    zinc_args.extend(compiler_option_sets_args)

    if not self._clear_invalid_analysis:
      zinc_args.append('-no-clear-invalid-analysis')

    if not zinc_file_manager:
      zinc_args.append('-no-zinc-file-manager')

    jvm_options = []

    if self.javac_classpath():
      # Make the custom javac classpath the first thing on the bootclasspath, to ensure that
      # it's the one javax.tools.ToolProvider.getSystemJavaCompiler() loads.
      # It will probably be loaded even on the regular classpath: If not found on the bootclasspath,
      # getSystemJavaCompiler() constructs a classloader that loads from the JDK's tools.jar.
      # That classloader will first delegate to its parent classloader, which will search the
      # regular classpath.  However it's harder to guarantee that our javac will precede any others
      # on the classpath, so it's safer to prefix it to the bootclasspath.
      jvm_options.extend(['-Xbootclasspath/p:{}'.format(':'.join(self.javac_classpath()))])

    jvm_options.extend(self._jvm_options)

    zinc_args.extend(ctx.sources)

    self.log_zinc_file(ctx.analysis_file)
    with open(ctx.zinc_args_file, 'w') as fp:
      for arg in zinc_args:
        # NB: in Python 2, options are stored sometimes as bytes and sometimes as unicode in the OptionValueContainer.
        # This is due to how Python 2 natively stores attributes as a map of `str` (aka `bytes`) to their value. So,
        # the setattr() and getattr() functions sometimes use bytes.
        if PY2:
          arg = ensure_text(arg)
        fp.write(arg)
        fp.write('\n')

    return self.execution_strategy_enum.resolve_for_enum_variant({
      self.HERMETIC: lambda: self._compile_hermetic(
        jvm_options, ctx, classes_dir, zinc_args, compiler_bridge_classpath_entry,
        dependency_classpath, scalac_classpath_entries),
      self.SUBPROCESS: lambda: self._compile_nonhermetic(jvm_options, zinc_args),
      self.NAILGUN: lambda: self._compile_nonhermetic(jvm_options, zinc_args),
    })()