Example #1
  def test_sibling_references(self):
    with temporary_dir() as root_dir:
      buildfile = create_buildfile(root_dir, 'a', name='BUILD',
        content=dedent("""
          dependencies(name='util',
            dependencies=[
              jar(org='com.twitter', name='util', rev='0.0.1')
            ]
          )
        """).strip()
      )
      sibling = create_buildfile(root_dir, 'a', name='BUILD.sibling',
        content=dedent("""
          dependencies(name='util-ex',
            dependencies=[
              pants(':util'),
              jar(org='com.twitter', name='util-ex', rev='0.0.1')
            ]
          )
        """).strip()
      )
      ParseContext(buildfile).parse()

      utilex = Target.get(Address.parse(root_dir, 'a:util-ex', is_relative=False))
      utilex_deps = set(utilex.resolve())

      util = Target.get(Address.parse(root_dir, 'a:util', is_relative=False))
      util_deps = set(util.resolve())

      self.assertEquals(util_deps, util_deps.intersection(utilex_deps))
Example #2
 def parse_jarcoordinate(coordinate):
     components = coordinate.split('#', 1)
     if len(components) == 2:
         org, name = components
         return org, name
     else:
         try:
             # TODO(Eric Ayers) This code is suspect.  Target.get() is a very old method and almost certainly broken.
             # Refactor to use methods from BuildGraph or BuildFileAddressMapper
             address = Address.parse(get_buildroot(), coordinate)
             target = Target.get(address)
             if not target:
                 siblings = Target.get_all_addresses(address.build_file)
                 prompt = 'did you mean' if len(
                     siblings) == 1 else 'maybe you meant one of these'
                 raise TaskError('%s => %s?:\n    %s' %
                                 (address, prompt, '\n    '.join(
                                     str(a) for a in siblings)))
             if not target.is_exported:
                 raise TaskError('%s is not an exported target' %
                                 coordinate)
             return target.provides.org, target.provides.name
         except (BuildFile.BuildFileError,
                 BuildFileParser.BuildFileParserError,
                 AddressLookupError) as e:
             raise TaskError(
                 '{message}\n  Problem with BUILD file at {coordinate}'
                 .format(message=e, coordinate=coordinate))
Example #3
 def parse_jarcoordinate(coordinate):
     components = coordinate.split('#', 1)
     if len(components) == 2:
         org, name = components
         return org, name
     else:
         try:
             address = Address.parse(get_buildroot(), coordinate)
             try:
                 target = Target.get(address)
                 if not target:
                     siblings = Target.get_all_addresses(
                         address.buildfile)
                     prompt = 'did you mean' if len(
                         siblings
                     ) == 1 else 'maybe you meant one of these'
                     raise TaskError('%s => %s?:\n    %s' %
                                     (address, prompt, '\n    '.join(
                                         str(a) for a in siblings)))
                 if not target.is_exported:
                     raise TaskError('%s is not an exported target' %
                                     coordinate)
                 return target.provides.org, target.provides.name
             except (ImportError, SyntaxError, TypeError):
                 raise TaskError('Failed to parse %s' %
                                 address.buildfile.relpath)
         except IOError:
             raise TaskError('No BUILD file could be found at %s' %
                             coordinate)
Example #4
  def __init__(self, name, sources=None, exclusives=None):
    Target.__init__(self, name, exclusives=exclusives)

    self.add_labels('sources')
    self.target_base = SourceRoot.find(self)
    self._unresolved_sources = sources or []
    self._resolved_sources = None
Example #5
 def _find_targets(self):
   if len(self.context.target_roots) > 0:
     for target in self.context.target_roots:
       yield target
   else:
     for buildfile in BuildFile.scan_buildfiles(get_buildroot()):
       target_addresses = Target.get_all_addresses(buildfile)
       for target_address in target_addresses:
         yield Target.get(target_address)
Example #6
 def __init__(self, name, url_builder, exclusives=None):
   """
   :param string name: The name of this target, which combined with this
     build file defines the target :class:`pants.base.address.Address`.
   :param url_builder: Function that accepts a page target and an optional wiki config dict.
   :returns: A tuple of (alias, fully qualified url).
   """
   Target.__init__(self, name, exclusives=exclusives)
   self.url_builder = url_builder
Example #7
 def _walk(self, walked, work, predicate=None):
   Target._walk(self, walked, work, predicate)
   for dep in self.dependencies:
     if isinstance(dep, Target) and dep not in walked:
       walked.add(dep)
       if not predicate or predicate(dep):
         additional_targets = work(dep)
         dep._walk(walked, work, predicate)
         if additional_targets:
           for additional_target in additional_targets:
             additional_target._walk(walked, work, predicate)
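
A minimal self-contained sketch of the guarded walk above, assuming toy Node objects in place of Targets: the walked set keeps shared or cyclic dependencies from being visited twice, the predicate gates the work callback, and any extra nodes the callback returns are walked as well.

class Node(object):
  def __init__(self, name, deps=()):
    self.name = name
    self.dependencies = list(deps)

def walk(node, walked, work, predicate=None):
  for dep in node.dependencies:
    if dep not in walked:
      walked.add(dep)
      if not predicate or predicate(dep):
        extra = work(dep)
        walk(dep, walked, work, predicate)
        for extra_node in extra or ():
          walk(extra_node, walked, work, predicate)

seen = []
leaf = Node('leaf')
root = Node('root', [Node('mid', [leaf]), leaf])  # 'leaf' is a shared dep
walk(root, set(), lambda dep: seen.append(dep.name))
assert seen == ['mid', 'leaf']  # the shared dep is only visited once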
Example #8
 def __init__(self, name, username=None, password=None,
              exclusives=None):
   """
   :param string name: The name of these credentials.
   :param username: Either a constant username value or else a callable that can fetch one.
   :type username: string or callable
   :param password: Either a constant password value or else a callable that can fetch one.
   :type password: string or callable
   """
   Target.__init__(self, name, exclusives=exclusives)
   self._username = username if callable(username) else lambda: username
   self._password = password if callable(password) else lambda: password
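
The constant-or-callable normalization above is a small reusable trick: wrap plain values in a zero-argument lambda so callers can always invoke the accessor. A standalone sketch with illustrative names:

def lazily(value):
  return value if callable(value) else lambda: value

get_username = lazily('jane')              # constant value
get_password = lazily(lambda: 'hunter2')   # deferred fetch, e.g. from a keychain
assert get_username() == 'jane'
assert get_password() == 'hunter2'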
Example #9
 def _parse_addresses(self, spec):
   if spec.endswith('::'):
     dir = self._get_dir(spec[:-len('::')])
     for buildfile in BuildFile.scan_buildfiles(self._root_dir, os.path.join(self._root_dir, dir)):
       for address in Target.get_all_addresses(buildfile):
         yield address
   elif spec.endswith(':'):
     dir = self._get_dir(spec[:-len(':')])
     for address in Target.get_all_addresses(BuildFile(self._root_dir, dir)):
       yield address
   else:
     yield Address.parse(self._root_dir, spec)
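
A self-contained sketch of the spec-suffix convention parsed above: a trailing '::' means scan BUILD files recursively, a trailing ':' means all targets in one directory's BUILD file, and anything else names a single target. The tuples returned here are descriptive stand-ins, not Address objects.

def classify_spec(spec):
  if spec.endswith('::'):
    return ('recursive', spec[:-len('::')])
  elif spec.endswith(':'):
    return ('directory', spec[:-len(':')])
  else:
    return ('single', spec)

assert classify_spec('src/java::') == ('recursive', 'src/java')
assert classify_spec('src/java:') == ('directory', 'src/java')
assert classify_spec('src/java:lib') == ('single', 'src/java:lib')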
Example #10
 def __init__(self, requirement, name=None, repository=None, version_filter=None, use_2to3=False,
              compatibility=None, exclusives=None):
   # TODO(wickman) Allow PythonRequirements to be specified using pip-style vcs or url identifiers,
   # e.g. git+https or just http://...
   self._requirement = Requirement.parse(requirement)
   self._repository = repository
   self._name = name or self._requirement.project_name
   self._use_2to3 = use_2to3
   self._version_filter = version_filter or (lambda py, pl: True)
   # TODO(wickman) Unify this with PythonTarget .compatibility
   self.compatibility = compatibility or ['']
   Target.__init__(self, self._name, exclusives=exclusives)
Example #11
  def _owning_targets(self, path):
    for build_file in self._candidate_owners(path):
      is_build_file = (build_file.full_path == os.path.join(get_buildroot(), path))
      for address in Target.get_all_addresses(build_file):
        target = Target.get(address)

        # A synthesized target can never own permanent files on disk
        if target != target.derived_from:
          # TODO(John Sirois): tighten up the notion of targets written down in a BUILD by a user
          # vs. targets created by pants at runtime.
          continue

        if target and (is_build_file or ((target.has_sources() or target.has_resources)
                                         and self._owns(target, path))):
          yield target
Example #12
  def __init__(self, context):
    ConsoleTask.__init__(self, context)

    self._print_uptodate = context.options.check_deps_print_uptodate
    self.repos = context.config.getdict('jar-publish', 'repos')
    self._artifacts_to_targets = {}
    all_addresses = (address for buildfile in BuildFile.scan_buildfiles(get_buildroot())
                     for address in Target.get_all_addresses(buildfile))
    for address in all_addresses:
      target = Target.get(address)
      if target.is_exported:
        provided_jar, _, _ = target.get_artifact_info()
        artifact = (provided_jar.org, provided_jar.name)
        if artifact not in self._artifacts_to_targets:
          self._artifacts_to_targets[artifact] = target
Example #13
  def _maybe_emit_coverage_data(self, targets, chroot, pex, stdout, stderr):
    coverage = os.environ.get('PANTS_PY_COVERAGE')
    if coverage is None:
      yield []
      return

    def read_coverage_list(prefix):
      return coverage[len(prefix):].split(',')

    coverage_modules = None
    if coverage.startswith('modules:'):
      # NB: pytest-cov maps these modules to the `[run] source` config.  So for
      # `modules:pants.base,pants.util` the config emitted has:
      # [run]
      # source =
      #   pants.base
      #   pants.util
      #
      # Now even though these are not paths, coverage sees the dots and switches to a module
      # prefix-matching mode.  Unfortunately, neither wildcards nor top-level module prefixes
      # like `pants.` serve to engage this module prefix-matching as one might hope.  It
      # appears that `pants.` is treated as a path and `pants.*` is treated as a literal
      # module prefix name.
      coverage_modules = read_coverage_list('modules:')
    elif coverage.startswith('paths:'):
      coverage_modules = []
      for path in read_coverage_list('paths:'):
        if not os.path.exists(path) and not os.path.isabs(path):
          # Look for the source in the PEX chroot since it's not available from CWD.
          path = os.path.join(chroot, path)
        coverage_modules.append(path)

    with self._cov_setup(targets,
                         chroot,
                         coverage_modules=coverage_modules) as (args, coverage_rc):
      try:
        yield args
      finally:
        with environment_as(PEX_MODULE='coverage.cmdline:main'):
          # Normalize .coverage.raw paths using combine and `paths` config in the rc file.
          # This swaps the /tmp pex chroot source paths for the local original source paths
          # the pex was generated from and which the user understands.
          shutil.move('.coverage', '.coverage.raw')
          pex.run(args=['combine', '--rcfile', coverage_rc], stdout=stdout, stderr=stderr)

          pex.run(args=['report', '-i', '--rcfile', coverage_rc], stdout=stdout, stderr=stderr)

          # TODO(wickman): If coverage is enabled and we are not using fast mode, write an
          # intermediate .html that points to each of the coverage reports generated and
          # webbrowser.open to that page.
          # TODO(John Sirois): Possibly apply the same logic to the console report.  In fact,
          # consider combining coverage files from all runs in this Task's execute and then
          # producing just 1 console and 1 html report whether or not the tests are run in fast
          # mode.
          relpath = Target.maybe_readable_identify(targets)
          pants_distdir = Config.from_cache().getdefault('pants_distdir')
          target_dir = os.path.join(pants_distdir, 'coverage', relpath)
          safe_mkdir(target_dir)
          pex.run(args=['html', '-i', '--rcfile', coverage_rc, '-d', target_dir],
                  stdout=stdout, stderr=stderr)
Example #14
  def execute(self, **pex_run_kwargs):
    (accept_predicate, reject_predicate) = Target.lang_discriminator('python')
    targets = self.require_homogeneous_targets(accept_predicate, reject_predicate)
    if targets:
      # We can't throw if the target isn't a python target, because perhaps we were called on a
      # JVM target, in which case we have to no-op and let scala repl do its thing.
      # TODO(benjy): Some more elegant way to coordinate how tasks claim targets.
      interpreter = self.select_interpreter_for_targets(targets)

      extra_requirements = []
      if self.get_options().ipython:
        entry_point = self.get_options().ipython_entry_point
        for req in self.get_options().ipython_requirements:
          extra_requirements.append(PythonRequirement(req))
      else:
        entry_point = 'code:interact'

      pex_info = PexInfo.default()
      pex_info.entry_point = entry_point
      with self.temporary_chroot(interpreter=interpreter,
                                 pex_info=pex_info,
                                 targets=targets,
                                 platforms=None,
                                 extra_requirements=extra_requirements) as chroot:
        pex = chroot.pex()
        self.context.release_lock()
        with stty_utils.preserve_stty_settings():
          with self.context.new_workunit(name='run', labels=[WorkUnit.RUN]):
            po = pex.run(blocking=False, **pex_run_kwargs)
            try:
              return po.wait()
            except KeyboardInterrupt:
              pass
Example #15
 def resolve(self):
   # De-reference this pants pointer to an actual parsed target.
   resolved = Target.get(self.address)
   if not resolved:
     raise TargetDefinitionException(self, '%s%s' % (self._DEFINITION_ERROR_MSG, self.address))
   for dep in resolved.resolve():
     yield dep
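
The same dereference-and-flatten shape, reduced to a self-contained sketch: a pointer resolves through a registry to a concrete node and re-yields that node's own resolution, so chains of pointers flatten out. The registry and class names are toy stand-ins.

registry = {}

class Concrete(object):
  def __init__(self, name):
    self.name = name
  def resolve(self):
    yield self

class Pointer(object):
  def __init__(self, key):
    self.key = key
  def resolve(self):
    resolved = registry.get(self.key)
    if resolved is None:
      raise KeyError('undefined target: %s' % self.key)
    for dep in resolved.resolve():
      yield dep

registry['util'] = Concrete('util')
assert [t.name for t in Pointer('util').resolve()] == ['util']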
Example #16
    def execute(self):
        (accept_predicate,
         reject_predicate) = Target.lang_discriminator('java')
        targets = self.require_homogeneous_targets(accept_predicate,
                                                   reject_predicate)
        if targets:
            tools_classpath = self.tool_classpath('scala-repl')
            self.context.release_lock()
            with preserve_stty_settings():
                classpath = self.classpath(targets, cp=tools_classpath)

                # The scala repl requires -Dscala.usejavacp=true since Scala 2.8 when launching in the way
                # we do here (not passing -classpath as a program arg to scala.tools.nsc.MainGenericRunner).
                jvm_options = self.jvm_options
                if not any(
                        opt.startswith('-Dscala.usejavacp=')
                        for opt in jvm_options):
                    jvm_options.append('-Dscala.usejavacp=true')

                print('')  # Start REPL output on a new line.
                try:
                    # NOTE: We execute with no workunit, as capturing REPL output makes it very sluggish.
                    execute_java(classpath=classpath,
                                 main=self.get_options().main,
                                 jvm_options=jvm_options,
                                 args=self.args)
                except KeyboardInterrupt:
                    # TODO(John Sirois): Confirm with Steve Gury that finally does not work on mac and an
                    # explicit catch of KeyboardInterrupt is required.
                    pass
Example #17
 def test_validation(self):
     target = Target(name='mybird',
                     address=SyntheticAddress.parse('//:mybird'),
                     build_graph=self.build_graph)
     # jars attribute must contain only JarLibrary instances
     with self.assertRaises(TargetDefinitionException):
         JarLibrary(name="test", jars=[target])
Example #18
 def identify(self, targets):
     targets = list(targets)
     if len(targets) == 1 and hasattr(targets[0],
                                      'provides') and targets[0].provides:
         return targets[0].provides.org, targets[0].provides.name
     else:
         return 'internal', Target.maybe_readable_identify(targets)
Example #19
  def execute(self):
    (accept_predicate, reject_predicate) = Target.lang_discriminator('java')
    targets = self.require_homogeneous_targets(accept_predicate, reject_predicate)
    if targets:
      tools_classpath = self.tool_classpath('scala-repl')
      self.context.release_lock()
      with preserve_stty_settings():
        classpath = self.classpath(targets, cp=tools_classpath)

        # The scala repl requires -Dscala.usejavacp=true since Scala 2.8 when launching in the way
        # we do here (not passing -classpath as a program arg to scala.tools.nsc.MainGenericRunner).
        jvm_options = self.jvm_options
        if not any(opt.startswith('-Dscala.usejavacp=') for opt in jvm_options):
          jvm_options.append('-Dscala.usejavacp=true')

        print('')  # Start REPL output on a new line.
        try:
          # NOTE: We execute with no workunit, as capturing REPL output makes it very sluggish.
          DistributionLocator.cached().execute_java(classpath=classpath,
                                                    main=self.get_options().main,
                                                    jvm_options=jvm_options,
                                                    args=self.args)
        except KeyboardInterrupt:
          # TODO(John Sirois): Confirm with Steve Gury that finally does not work on mac and an
          # explicit catch of KeyboardInterrupt is required.
          pass
Example #20
  def execute(self, **pex_run_kwargs):
    (accept_predicate, reject_predicate) = Target.lang_discriminator('python')
    targets = self.require_homogeneous_targets(accept_predicate, reject_predicate)
    if targets:
      # We can't throw if the target isn't a python target, because perhaps we were called on a
      # JVM target, in which case we have to no-op and let scala repl do its thing.
      # TODO(benjy): Some more elegant way to coordinate how tasks claim targets.
      interpreter = self.select_interpreter_for_targets(targets)

      extra_requirements = []
      if self.get_options().ipython:
        entry_point = self.get_options().ipython_entry_point
        for req in self.get_options().ipython_requirements:
          extra_requirements.append(PythonRequirement(req))
      else:
        entry_point = 'code:interact'

      pex_info = PexInfo.default()
      pex_info.entry_point = entry_point
      with self.cached_chroot(interpreter=interpreter,
                              pex_info=pex_info,
                              targets=targets,
                              platforms=None,
                              extra_requirements=extra_requirements) as chroot:
        pex = chroot.pex()
        self.context.release_lock()
        with stty_utils.preserve_stty_settings():
          with self.context.new_workunit(name='run', labels=[WorkUnitLabel.RUN]):
            po = pex.run(blocking=False, **pex_run_kwargs)
            try:
              return po.wait()
            except KeyboardInterrupt:
              pass
Example #21
    def configure_project(self, targets, checkstyle_suppression_files,
                          debug_port):

        jvm_targets = Target.extract_jvm_targets(targets)
        if self.intransitive:
            jvm_targets = set(
                self.context.target_roots).intersection(jvm_targets)
        project = Project(self.project_name,
                          self.python, self.skip_java, self.skip_scala,
                          get_buildroot(), checkstyle_suppression_files,
                          debug_port, jvm_targets, not self.intransitive,
                          self.context.new_workunit)

        if self.python:
            python_source_paths = self.context.config.getlist(
                'ide', 'python_source_paths', default=[])
            python_test_paths = self.context.config.getlist(
                'ide', 'python_test_paths', default=[])
            python_lib_paths = self.context.config.getlist('ide',
                                                           'python_lib_paths',
                                                           default=[])
            project.configure_python(python_source_paths, python_test_paths,
                                     python_lib_paths)

        extra_source_paths = self.context.config.getlist(
            'ide', 'extra_jvm_source_paths', default=[])
        extra_test_paths = self.context.config.getlist('ide',
                                                       'extra_jvm_test_paths',
                                                       default=[])
        all_targets = project.configure_jvm(extra_source_paths,
                                            extra_test_paths)
        return all_targets, project
Example #22
File: paths.py Project: kn/pants
    def _find_path(cls, from_target, to_target, log):
        from_target, to_target = cls._coerce_to_targets(from_target, to_target)

        log.debug("Looking for path from %s to %s" % (from_target.address.reference(), to_target.address.reference()))

        queue = [([from_target], 0)]
        while True:
            if not queue:
                print("no path found from %s to %s!" % (from_target.address.reference(), to_target.address.reference()))
                break

            path, indent = queue.pop(0)
            next_target = path[-1]
            if next_target in cls.examined_targets:
                continue
            cls.examined_targets.add(next_target)

            log.debug("%sexamining %s" % ("  " * indent, next_target))

            if next_target == to_target:
                print("")
                for target in path:
                    print("%s" % target.address.reference())
                break

            if hasattr(next_target, "dependency_addresses"):
                for address in next_target.dependency_addresses:
                    dep = Target.get(address)
                    queue.append((path + [dep], indent + 1))
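
Stripped of Target plumbing, _find_path is a breadth-first search that carries the whole path in the queue; a toy version over a plain dict graph (the data is made up):

def find_path(graph, start, goal):
  queue = [[start]]
  examined = set()
  while queue:
    path = queue.pop(0)          # FIFO pop makes this breadth-first
    node = path[-1]
    if node in examined:
      continue
    examined.add(node)
    if node == goal:
      return path
    for dep in graph.get(node, ()):
      queue.append(path + [dep])
  return None

graph = {'a': ['b', 'c'], 'b': ['d'], 'c': ['d']}
assert find_path(graph, 'a', 'd') == ['a', 'b', 'd']  # shortest path found first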
Example #23
    def combine_cache_keys(cache_keys):
        """Returns a cache key for a list of target sets that already have cache keys.

    This operation is 'idempotent' in the sense that if cache_keys contains a single key
    then that key is returned.

    Note that this operation is commutative but not associative.  We use the term 'combine' rather
    than 'merge' or 'union' to remind the user of this. Associativity is not a necessary property,
    in practice.
    """
        if len(cache_keys) == 1:
            return cache_keys[0]
        else:
            combined_id = Target.maybe_readable_combine_ids(
                cache_key.id for cache_key in cache_keys)
            combined_hash = hash_all(
                sorted(cache_key.hash for cache_key in cache_keys))
            combined_payloads = sorted(
                list(
                    itertools.chain(
                        *[cache_key.payloads for cache_key in cache_keys])))
            summed_chunking_units = sum(
                [cache_key.num_chunking_units for cache_key in cache_keys])
            return CacheKey(combined_id, combined_hash, summed_chunking_units,
                            combined_payloads)
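
A self-contained toy model of the combine contract described in the docstring: a single key passes through untouched (the idempotent case), and sorting the input hashes before hashing makes the combined hash order-independent. This CacheKey and hash_all are simplified stand-ins for the Pants classes.

import hashlib
from collections import namedtuple

CacheKey = namedtuple('CacheKey', ['id', 'hash'])

def hash_all(strings):
  digest = hashlib.sha1()
  for s in strings:
    digest.update(s.encode('utf-8'))
  return digest.hexdigest()

def combine(cache_keys):
  if len(cache_keys) == 1:
    return cache_keys[0]  # idempotent: a lone key is returned as-is
  combined_id = '+'.join(key.id for key in cache_keys)
  combined_hash = hash_all(sorted(key.hash for key in cache_keys))
  return CacheKey(combined_id, combined_hash)

a, b = CacheKey('a', '1f'), CacheKey('b', '2e')
assert combine([a]) is a
assert combine([a, b]).hash == combine([b, a]).hash  # commutative on hashes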
Example #24
    def execute(self):
        (accept_predicate,
         reject_predicate) = Target.lang_discriminator('java')
        targets = self.require_homogeneous_targets(accept_predicate,
                                                   reject_predicate)
        if targets:
            tools_classpath = self.tool_classpath(self._bootstrap_key)
            self.context.release_lock()
            with preserve_stty_settings():
                exclusives_classpath = self.get_base_classpath_for_target(
                    targets[0])
                classpath = self.classpath(
                    tools_classpath,
                    confs=self.confs,
                    exclusives_classpath=exclusives_classpath)

                print('')  # Start REPL output on a new line.
                try:
                    # NOTE: We execute with no workunit, as capturing REPL output makes it very sluggish.
                    execute_java(classpath=classpath,
                                 main=self.main,
                                 jvm_options=self.jvm_options,
                                 args=self.args)
                except KeyboardInterrupt:
                    # TODO(John Sirois): Confirm with Steve Gury that finally does not work on mac and an
                    # explicit catch of KeyboardInterrupt is required.
                    pass
Example #25
 def identify(targets):
     targets = list(targets)
     if len(targets) == 1 and targets[0].is_jvm and getattr(
             targets[0], 'provides', None):
         return targets[0].provides.org, targets[0].provides.name
     else:
         return 'internal', Target.maybe_readable_identify(targets)
Example #26
  def test_register_bad_target_alias(self):
    with self.assertRaises(TypeError):
      self.build_configuration.register_target_alias('fred', object())

    target = Target('fred', SyntheticAddress.parse('a:b'), BuildGraph(address_mapper=None))
    with self.assertRaises(TypeError):
      self.build_configuration.register_target_alias('fred', target)
Example #28
  def configure_project(self, targets, checkstyle_suppression_files, debug_port):

    jvm_targets = Target.extract_jvm_targets(targets)
    if self.intransitive:
      jvm_targets = set(self.context.target_roots).intersection(jvm_targets)
    project = Project(self.project_name,
                      self.python,
                      self.skip_java,
                      self.skip_scala,
                      get_buildroot(),
                      checkstyle_suppression_files,
                      debug_port,
                      jvm_targets,
                      not self.intransitive,
                      self.context.new_workunit)

    if self.python:
      python_source_paths = self.context.config.getlist('ide', 'python_source_paths', default=[])
      python_test_paths = self.context.config.getlist('ide', 'python_test_paths', default=[])
      python_lib_paths = self.context.config.getlist('ide', 'python_lib_paths', default=[])
      project.configure_python(python_source_paths, python_test_paths, python_lib_paths)

    extra_source_paths = self.context.config.getlist('ide', 'extra_jvm_source_paths', default=[])
    extra_test_paths = self.context.config.getlist('ide', 'extra_jvm_test_paths', default=[])
    all_targets = project.configure_jvm(extra_source_paths, extra_test_paths)
    return all_targets, project
Example #29
        def configure_target(target):
            if target not in analyzed:
                analyzed.add(target)

                self.has_scala = not self.skip_scala and (self.has_scala
                                                          or is_scala(target))

                if target.has_resources:
                    resources_by_basedir = defaultdict(set)
                    for resources in target.resources:
                        resources_by_basedir[resources.target_base].update(
                            resources.sources)
                    for basedir, resources in resources_by_basedir.items():
                        self.resource_extensions.update(
                            Project.extract_resource_extensions(resources))
                        configure_source_sets(basedir,
                                              resources,
                                              is_test=False)

                if target.sources:
                    test = target.is_test
                    self.has_tests = self.has_tests or test
                    configure_source_sets(target.target_base,
                                          target.sources,
                                          is_test=test)

                # Other BUILD files may specify sources in the same directory as this target.  Those BUILD
                # files might be in parent directories (globs('a/b/*.java')) or even child directories if
                # this target globs children as well.  Gather all these candidate BUILD files to test for
                # sources they own that live in the directories this target's sources live in.
                target_dirset = find_source_basedirs(target)
                candidates = Target.get_all_addresses(target.address.buildfile)
                for ancestor in target.address.buildfile.ancestors():
                    candidates.update(Target.get_all_addresses(ancestor))
                for sibling in target.address.buildfile.siblings():
                    candidates.update(Target.get_all_addresses(sibling))
                for descendant in target.address.buildfile.descendants():
                    candidates.update(Target.get_all_addresses(descendant))

                def is_sibling(target):
                    return source_target(
                        target) and target_dirset.intersection(
                            find_source_basedirs(target))

                return filter(
                    is_sibling,
                    [Target.get(a) for a in candidates if a != target.address])
Example #30
 def extra_products(self, target):
   ret = []
   if isinstance(target, AnnotationProcessor) and target.processors:
     root = os.path.join(self._resources_dir, Target.maybe_readable_identify([target]))
     processor_info_file = os.path.join(root, JavaCompile._PROCESSOR_INFO_FILE)
     self._write_processor_info(processor_info_file, target.processors)
     ret.append((root, [processor_info_file]))
   return ret
Example #31
 def _addresses(self):
   if self.context.target_roots:
     for target in self.context.target_roots:
       yield target.address
   else:
     for buildfile in BuildFile.scan_buildfiles(self._root_dir):
       for address in Target.get_all_addresses(buildfile):
         yield address
Example #32
 def identify(targets):
     targets = list(targets)
     if len(targets) == 1 and targets[0].is_jvm and getattr(
             targets[0], 'provides', None):
         return targets[0].provides.org, targets[0].provides.name
     else:
         return IvyUtils.INTERNAL_ORG_NAME, Target.maybe_readable_identify(
             targets)
Example #33
    def test_create_bad_targets(self):
        with self.assertRaises(TypeError):
            BuildFileAliases(targets={'fred': object()})

        target = Target('fred', Address.parse('a:b'),
                        BuildGraph(address_mapper=None))
        with self.assertRaises(TypeError):
            BuildFileAliases(targets={'fred': target})
Example #34
  def console_output(self, _):
    buildfiles = OrderedSet()
    if self._dependees_type:
      base_paths = OrderedSet()
      for dependees_type in self._dependees_type:
        try:
          # Try to do a fully qualified import first for filtering on custom types.
          from_list, module, type_name = dependees_type.rsplit('.', 2)
          __import__('%s.%s' % (from_list, module), fromlist=[from_list])
        except (ImportError, ValueError):
          # Fall back on pants provided target types.
          if hasattr(pants.base.build_file_context, dependees_type):
            type_name = getattr(pants.base.build_file_context, dependees_type)
          else:
            raise TaskError('Invalid type name: %s' % dependees_type)
        # Find the SourceRoot for the given input type
        base_paths.update(SourceRoot.roots(type_name))
      if not base_paths:
        raise TaskError('No SourceRoot set for any target type in %s.' % self._dependees_type +
                        '\nPlease define a source root in BUILD file as:' +
                        '\n\tsource_root(\'<src-folder>\', %s)' % ', '.join(self._dependees_type))
      for base_path in base_paths:
        buildfiles.update(BuildFile.scan_buildfiles(get_buildroot(), base_path))
    else:
      buildfiles = BuildFile.scan_buildfiles(get_buildroot())

    dependees_by_target = defaultdict(set)
    for buildfile in buildfiles:
      for address in Target.get_all_addresses(buildfile):
        for target in Target.get(address).resolve():
          # TODO(John Sirois): tighten up the notion of targets written down in a BUILD by a
          # user vs. targets created by pants at runtime.
          target = self.get_concrete_target(target)
          if hasattr(target, 'dependencies'):
            for dependencies in target.dependencies:
              for dependency in dependencies.resolve():
                dependency = self.get_concrete_target(dependency)
                dependees_by_target[dependency].add(target)

    roots = set(self.context.target_roots)
    if self._closed:
      for root in roots:
        yield str(root.address)

    for dependant in self.get_dependants(dependees_by_target, roots):
      yield str(dependant.address)
Example #35
 def extra_products(self, target):
   ret = []
   if target.is_apt and target.processors:
     root = os.path.join(self._resources_dir, Target.maybe_readable_identify([target]))
     processor_info_file = os.path.join(root, JavaCompile._PROCESSOR_INFO_FILE)
     self._write_processor_info(processor_info_file, target.processors)
     ret.append((root, [processor_info_file]))
   return ret
Example #36
  def target(cls, address):
    """Resolves the given target address to a Target object.

    address: The BUILD target address to resolve.

    Returns the corresponding Target or else None if the address does not point to a defined Target.
    """
    return Target.get(Address.parse(cls.build_root, address, is_relative=False))
Example #37
 def test_contains_address(self):
   a = Address.parse('a')
   self.assertFalse(self.build_graph.contains_address(a))
   target = Target(name='a',
                   address=a,
                   build_graph=self.build_graph)
   self.build_graph.inject_target(target)
   self.assertTrue(self.build_graph.contains_address(a))
Example #38
 def __getattr__(self, name):
   try:
     return Target.__getattribute__(self, name)
   except AttributeError as e:
     try:
       return getattr(self.get(), name)
     except (AttributeError, LookupError):
       raise e
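
A simplified standalone analogue of the fallback lookup above: prefer attributes on the proxy itself, fall back to the wrapped object, and surface a clean AttributeError if both fail. Proxy and Wrapped are toy names, not Pants types.

class Wrapped(object):
  color = 'green'

class Proxy(object):
  def __init__(self, wrapped):
    self._wrapped = wrapped

  def __getattr__(self, name):
    # Only invoked after normal lookup on the proxy fails.
    try:
      return getattr(self._wrapped, name)
    except AttributeError:
      raise AttributeError('%s has no attribute %r' % (type(self).__name__, name))

p = Proxy(Wrapped())
assert p.color == 'green'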
Example #39
  def _alternate_target_roots(cls, options, address_mapper, build_graph):
    processed = set()
    for jvm_tool in JvmToolMixin.get_registered_tools():
      dep_spec = jvm_tool.dep_spec(options)
      dep_address = Address.parse(dep_spec)
      # Some JVM tools are requested multiple times; we only need to handle them once.
      if dep_address not in processed:
        processed.add(dep_address)
        try:
          if build_graph.contains_address(dep_address) or address_mapper.resolve(dep_address):
            # The user has defined a tool classpath override - we let that stand.
            continue
        except AddressLookupError as e:
          if jvm_tool.classpath is None:
            raise cls._tool_resolve_error(e, dep_spec, jvm_tool)
          else:
            if not jvm_tool.is_default(options):
              # The user specified a target spec for this jvm tool that doesn't actually exist.
              # We want to error out here instead of just silently using the default option while
              # appearing to respect their config.
              raise cls.ToolResolveError(dedent("""
                  Failed to resolve target for tool: {tool}. This target was obtained from
                  option {option} in scope {scope}.

                  Make sure you didn't make a typo in the tool's address. You specified that the
                  tool should use the target found at "{tool}".

                  This target has a default classpath configured, so you can simply remove:
                    [{scope}]
                    {option}: {tool}
                  from pants.ini (or any other config file) to use the default tool.

                  The default classpath is: {default_classpath}

                  Note that tool target addresses in pants.ini should be specified *without* quotes.
                """).strip().format(tool=dep_spec,
                                    option=jvm_tool.key,
                                    scope=jvm_tool.scope,
                                    default_classpath=':'.join(map(str, jvm_tool.classpath or ()))))
            if jvm_tool.classpath:
              tool_classpath_target = JarLibrary(name=dep_address.target_name,
                                                 address=dep_address,
                                                 build_graph=build_graph,
                                                 jars=jvm_tool.classpath)
            else:
              # The tool classpath is empty by default, so we just inject a dummy target that
              # ivy resolves as the empty list classpath.  JarLibrary won't do since it requires
              # one or more jars, so we just pick a target type ivy has no resolve work to do for.
              tool_classpath_target = Target(name=dep_address.target_name,
                                             address=dep_address,
                                             build_graph=build_graph)
            build_graph.inject_target(tool_classpath_target)

    # We use the trick of not returning alternate roots, but instead just filling the dep_spec
    # holes with a JarLibrary built from a tool's default classpath JarDependency list if there is
    # no over-riding targets present. This means we do modify the build_graph, but we at least do
    # it at a time in the engine lifecycle cut out for handling that.
    return None
Example #40
 def _maybe_emit_junit_xml(self, targets):
   args = []
   xml_base = os.getenv('JUNIT_XML_BASE')
   if xml_base and targets:
     xml_base = os.path.realpath(xml_base)
     xml_path = os.path.join(xml_base, Target.maybe_readable_identify(targets) + '.xml')
     safe_mkdir(os.path.dirname(xml_path))
     args.append('--junitxml=%s' % xml_path)
   yield args
Example #41
 def _maybe_emit_junit_xml(self, targets):
     args = []
     xml_base = self.get_options().junit_xml_dir
     if xml_base and targets:
         xml_base = os.path.realpath(xml_base)
         xml_path = os.path.join(xml_base, Target.maybe_readable_identify(targets) + ".xml")
         safe_mkdir(os.path.dirname(xml_path))
         args.append("--junitxml={}".format(xml_path))
     yield args
Example #42
  def parse(self, spec):
    """Parses the given target spec into one or more targets.

    Returns a generator of target, address pairs in which the target may be None if the address
    points to a non-existent target.
    """
    for address in self._parse_addresses(spec):
      target = Target.get(address)
      yield target, address
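
The docstring's may-be-None contract, reduced to a runnable sketch in which a plain dict stands in for the build graph:

known_targets = {'a:util': 'util-target'}

def parse(addresses):
  for address in addresses:
    yield known_targets.get(address), address  # target is None if undefined

pairs = list(parse(['a:util', 'a:missing']))
assert pairs == [('util-target', 'a:util'), (None, 'a:missing')]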
Example #43
def _get_target(address):
  try:
    address = Address.parse(get_buildroot(), address, is_relative=False)
  except IOError as e:
    raise TaskError('Failed to parse address: %s: %s' % (address, e))
  match = Target.get(address)
  if not match:
    raise TaskError('Invalid target address: %s' % address)
  return match
Example #44
  def scan_addresses(root_dir, base_path=None):
    """Parses all targets available in BUILD files under base_path and
    returns their addresses.  If no base_path is specified, root_dir is
    assumed to be the base_path."""

    addresses = OrderedSet()
    for buildfile in BuildFile.scan_buildfiles(root_dir, base_path):
      addresses.update(Target.get_all_addresses(buildfile))
    return addresses
Example #45
 def _maybe_emit_junit_xml(self, targets):
   args = []
   xml_base = os.getenv('JUNIT_XML_BASE')
   if xml_base and targets:
     xml_base = os.path.realpath(xml_base)
     xml_path = os.path.join(xml_base, Target.maybe_readable_identify(targets) + '.xml')
     safe_mkdir(os.path.dirname(xml_path))
     args.append('--junitxml={}'.format(xml_path))
   yield args
Example #46
    def scan_addresses(root_dir, base_path=None):
        """Parses all targets available in BUILD files under base_path and
    returns their addresses.  If no base_path is specified, root_dir is
    assumed to be the base_path."""

        addresses = OrderedSet()
        for buildfile in BuildFile.scan_buildfiles(root_dir, base_path):
            addresses.update(Target.get_all_addresses(buildfile))
        return addresses
Example #47
 def extra_products(self, target):
   """Override extra_products to produce an annotation processor information file."""
   ret = []
   if isinstance(target, AnnotationProcessor) and target.processors:
     root = os.path.join(self._processor_info_dir, Target.maybe_readable_identify([target]))
     processor_info_file = os.path.join(root, self._PROCESSOR_INFO_FILE)
     self._write_processor_info(processor_info_file, target.processors)
     ret.append((root, [processor_info_file]))
   return ret
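
A hedged sketch of the products shape assembled above: each entry pairs a base root directory with the files generated under it. The writer below is a stand-in for _write_processor_info, and the processor name is made up.

import os
import tempfile

def write_processor_info(path, processors):
  with open(path, 'w') as fp:
    for processor in processors:
      fp.write('%s\n' % processor)

root = tempfile.mkdtemp()
info_file = os.path.join(root, 'processor-info.txt')
write_processor_info(info_file, ['com.example.MyProcessor'])
extra_products = [(root, [info_file])]  # (base root, files under that root)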
Example #48
File: py.py Project: rgbenson/pants
  def execute(self):
    if self.old_options.pex and self.old_options.ipython:
      self.error('Cannot specify both --pex and --ipython!')

    if self.old_options.entry_point and self.old_options.ipython:
      self.error('Cannot specify both --entry_point and --ipython!')

    if self.old_options.verbose:
      print('Build operating on targets: %s' % ' '.join(str(target) for target in self.targets))

    builder = PEXBuilder(tempfile.mkdtemp(), interpreter=self.interpreter,
                         pex_info=self.binary.pexinfo if self.binary else None)

    if self.old_options.entry_point:
      builder.set_entry_point(self.old_options.entry_point)

    if self.old_options.ipython:
      if not self.config.has_section('python-ipython'):
        self.error('No python-ipython sections defined in your pants.ini!')

      builder.info.entry_point = self.config.get('python-ipython', 'entry_point')
      if builder.info.entry_point is None:
        self.error('Must specify entry_point for IPython in the python-ipython section '
                   'of your pants.ini!')

      requirements = self.config.getlist('python-ipython', 'requirements', default=[])

      for requirement in requirements:
        self.extra_requirements.append(PythonRequirement(requirement))

    executor = PythonChroot(
        targets=self.targets,
        extra_requirements=self.extra_requirements,
        builder=builder,
        platforms=self.binary.platforms if self.binary else None,
        interpreter=self.interpreter,
        conn_timeout=self.old_options.conn_timeout)

    executor.dump()

    if self.old_options.pex:
      pex_name = self.binary.name if self.binary else Target.maybe_readable_identify(self.targets)
      pex_path = os.path.join(self.root_dir, 'dist', '%s.pex' % pex_name)
      builder.build(pex_path)
      print('Wrote %s' % pex_path)
      return 0
    else:
      builder.freeze()
      pex = PEX(builder.path(), interpreter=self.interpreter)
      po = pex.run(args=list(self.args), blocking=False)
      try:
        return po.wait()
      except KeyboardInterrupt:
        po.send_signal(signal.SIGINT)
        raise
Example #49
 def extra_products(self, target):
     """Override extra_products to produce an annotation processor information file."""
     ret = []
     if isinstance(target, AnnotationProcessor) and target.processors:
         root = os.path.join(self._processor_info_dir,
                             Target.maybe_readable_identify([target]))
         processor_info_file = os.path.join(root, self._PROCESSOR_INFO_FILE)
         self._write_processor_info(processor_info_file, target.processors)
         ret.append((root, [processor_info_file]))
     return ret
Example #50
  def replace_targets(self, target_roots):
    """Replaces all targets in the context with the given roots and their transitive
    dependencies.
    """
    self._target_roots = list(target_roots)

    self._targets = OrderedSet()
    for target in self._target_roots:
      self.add_target(target)
    self.id = Target.identify(self._targets)
Example #51
 def _maybe_emit_junit_xml(self, targets):
     args = []
     xml_base = self.get_options().junit_xml_dir
     if xml_base and targets:
         xml_base = os.path.realpath(xml_base)
         xml_path = os.path.join(
             xml_base,
             Target.maybe_readable_identify(targets) + '.xml')
         safe_mkdir(os.path.dirname(xml_path))
         args.append('--junitxml={}'.format(xml_path))
     yield args
Example #52
 def add(self, targets, cache_key, valid, phase=None):
   if not phase:
     raise ValueError('Must specify a descriptive phase= value (e.g. "init", "pre-check", ...)')
   # Manufacture an id from a hash of the target ids
   targets_hash = Target.identify(targets)
   self._entries.append(self.TaskEntry(targets_hash=targets_hash,
                                       target_ids=[t.id for t in targets],
                                       cache_key_id=cache_key.id,
                                       cache_key_hash=cache_key.hash,
                                       valid=valid,
                                       phase=phase))
Example #53
 def parse_url(spec):
   match = MarkdownToHtml.PANTS_LINK.match(spec)
   if match:
     page = Target.get(Address.parse(get_buildroot(), match.group(1)))
     anchor = match.group(2) or ''
     if not page:
       raise TaskError('Invalid link %s' % match.group(1))
     alias, url = url_builder(page, config=get_config(page))
     return alias, url + anchor
   else:
     return spec, spec
Example #54
 def parse_url(spec):
     match = MarkdownToHtml.PANTS_LINK.match(spec)
     if match:
         page = Target.get(
             Address.parse(get_buildroot(), match.group(1)))
         anchor = match.group(2) or ''
         if not page:
             raise TaskError('Invalid link %s' % match.group(1))
         alias, url = url_builder(page, config=get_config(page))
         return alias, url + anchor
     else:
         return spec, spec
Example #55
def generate_coverage_config(targets):
  cp = configparser.ConfigParser()
  cp.readfp(Compatibility.StringIO(DEFAULT_COVERAGE_CONFIG))
  cp.add_section('html')
  if len(targets) == 1:
    target = targets[0]
    relpath = os.path.join(os.path.dirname(target.address.buildfile.relpath), target.name)
  else:
    relpath = Target.maybe_readable_identify(targets)
  target_dir = os.path.join(Config.load().getdefault('pants_distdir'), 'coverage', relpath)
  safe_mkdir(target_dir)
  cp.set('html', 'directory', target_dir)
  return cp
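
A Python 3 stdlib analogue of the rc-file generation above; the default config text and output directory are illustrative values, not the Pants defaults.

import configparser
import io

DEFAULT_COVERAGE_CONFIG = '[run]\nbranch = True\n'  # made-up default

cp = configparser.ConfigParser()
cp.read_file(io.StringIO(DEFAULT_COVERAGE_CONFIG))  # read_file replaces the deprecated readfp used above
cp.add_section('html')
cp.set('html', 'directory', '/tmp/dist/coverage/example')  # illustrative path
buf = io.StringIO()
cp.write(buf)
print(buf.getvalue())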
Example #56
 def generate_junit_args(targets):
   args = []
   xml_base = os.getenv('JUNIT_XML_BASE')
   if xml_base and targets:
     xml_base = os.path.abspath(os.path.normpath(xml_base))
     if len(targets) == 1:
       target = targets[0]
       relpath = os.path.join(os.path.dirname(target.address.buildfile.relpath),
                              target.name + '.xml')
     else:
       relpath = Target.maybe_readable_identify(targets) + '.xml'
     xml_path = os.path.join(xml_base, relpath)
     safe_mkdir(os.path.dirname(xml_path))
     args.append('--junitxml=%s' % xml_path)
   return args
Example #57
    def execute(self):
        (accept_predicate,
         reject_predicate) = Target.lang_discriminator('python')
        targets = self.require_homogeneous_targets(accept_predicate,
                                                   reject_predicate)
        if targets:
            # We can't throw if the target isn't a python target, because perhaps we were called on a
            # JVM target, in which case we have to no-op and let scala repl do its thing.
            # TODO(benjy): Some more elegant way to coordinate how tasks claim targets.
            interpreter = self.select_interpreter_for_targets(targets)

            extra_requirements = []
            if self.context.options.python_repl_ipython:
                entry_point = self.context.config.get(
                    'python-ipython',
                    'entry_point',
                    default='IPython:start_ipython')
                ipython_requirements = self.context.config.getlist(
                    'python-ipython',
                    'requirements',
                    default=['ipython==1.0.0'])
                for req in ipython_requirements:
                    extra_requirements.append(PythonRequirement(req))
            else:
                entry_point = 'code:interact'

            with self.temporary_pex_builder(
                    interpreter=interpreter) as builder:
                builder.set_entry_point(entry_point)
                chroot = PythonChroot(targets=targets,
                                      extra_requirements=extra_requirements,
                                      builder=builder,
                                      interpreter=interpreter,
                                      conn_timeout=self.conn_timeout)

                chroot.dump()
                builder.freeze()
                pex = PEX(builder.path(), interpreter=interpreter)
                self.context.lock.release()
                with stty_utils.preserve_stty_settings():
                    with self.context.new_workunit(name='run',
                                                   labels=[WorkUnit.RUN]):
                        po = pex.run(blocking=False)
                        try:
                            return po.wait()
                        except KeyboardInterrupt:
                            pass
Example #58
    def execute(self, targets):
        # TODO(benjy): Add a pre-execute phase for injecting deps into targets, so e.g.,
        # we can inject a dep on the scala runtime library and still have it ivy-resolve.

        # Create empty products now, in case we have no relevant targets and return early.
        self._create_empty_products()

        relevant_targets = [
            t for t in targets if t.has_sources(self._file_suffix)
        ]

        if not relevant_targets:
            return

        # Get the exclusives group for the targets to compile.
        # Group guarantees that there'll be a single exclusives key for them.
        egroups = self.context.products.get_data('exclusives_groups')
        group_id = egroups.get_group_key_for_target(relevant_targets[0])

        # Add resource dirs to the classpath for us and for downstream tasks.
        for conf in self._confs:
            egroups.update_compatible_classpaths(group_id,
                                                 [(conf, self._resources_dir)])

        # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
        classpath = egroups.get_classpath_for_group(group_id)

        # Add any extra compile-time classpath elements.
        for conf in self._confs:
            for jar in self.extra_compile_time_classpath_elements():
                classpath.insert(0, (conf, jar))

        # TODO(benjy): Should sources_by_target and locally_changed_targets be on all Tasks?

        # Target -> sources (relative to buildroot).
        sources_by_target = self._compute_current_sources_by_target(
            relevant_targets)

        # If needed, find targets that we've changed locally (as opposed to
        # changes synced in from the SCM).
        locally_changed_targets = None
        if self._locally_changed_targets_heuristic_limit:
            locally_changed_targets = self._find_locally_changed_targets(
                sources_by_target)
            if locally_changed_targets and \
                    len(locally_changed_targets) > self._locally_changed_targets_heuristic_limit:
                locally_changed_targets = None

        # Invalidation check. Everything inside the with block must succeed for the
        # invalid targets to become valid.
        with self.invalidated(relevant_targets,
                              invalidate_dependents=True,
                              partition_size_hint=self._partition_size_hint,
                              locally_changed_targets=locally_changed_targets
                              ) as invalidation_check:
            if invalidation_check.invalid_vts:
                # The analysis for invalid and deleted sources is no longer valid.
                invalid_targets = [
                    vt.target for vt in invalidation_check.invalid_vts
                ]
                invalid_sources_by_target = {}
                for tgt in invalid_targets:
                    invalid_sources_by_target[tgt] = sources_by_target[tgt]
                invalid_sources = list(
                    itertools.chain.from_iterable(
                        invalid_sources_by_target.values()))
                deleted_sources = self._deleted_sources()

                # Work in a tmpdir so we don't stomp the main analysis files on error.
                # The tmpdir is cleaned up in a shutdown hook, because background work
                # may need to access files we create here even after this method returns.
                self._ensure_analysis_tmpdir()
                tmpdir = os.path.join(self._analysis_tmpdir, str(uuid.uuid4()))
                os.mkdir(tmpdir)
                valid_analysis_tmp = os.path.join(tmpdir, 'valid_analysis')
                newly_invalid_analysis_tmp = os.path.join(
                    tmpdir, 'newly_invalid_analysis')
                invalid_analysis_tmp = os.path.join(tmpdir, 'invalid_analysis')
                if self._analysis_parser.is_nonempty_analysis(
                        self._analysis_file):
                    with self.context.new_workunit(name='prepare-analysis'):
                        self._analysis_tools.split_to_paths(
                            self._analysis_file,
                            [(invalid_sources + deleted_sources,
                              newly_invalid_analysis_tmp)], valid_analysis_tmp)
                        if self._analysis_parser.is_nonempty_analysis(
                                self._invalid_analysis_file):
                            self._analysis_tools.merge_from_paths([
                                self._invalid_analysis_file,
                                newly_invalid_analysis_tmp
                            ], invalid_analysis_tmp)
                        else:
                            invalid_analysis_tmp = newly_invalid_analysis_tmp

                        # Now it's OK to overwrite the main analysis files with the new state.
                        self.move(valid_analysis_tmp, self._analysis_file)
                        self.move(invalid_analysis_tmp,
                                  self._invalid_analysis_file)

                # Register products for all the valid targets.
                # We register as we go, so dependency checking code can use this data.
                valid_targets = list(
                    set(relevant_targets) - set(invalid_targets))
                self._register_products(valid_targets, sources_by_target,
                                        self._analysis_file)

                # Figure out the sources and analysis belonging to each partition.
                partitions = []  # Each element is a triple (vts, sources, analysis_file).
                for vts in invalidation_check.invalid_vts_partitioned:
                    partition_tmpdir = os.path.join(
                        tmpdir, Target.maybe_readable_identify(vts.targets))
                    os.mkdir(partition_tmpdir)
                    sources = list(
                        itertools.chain.from_iterable([
                            invalid_sources_by_target.get(t, [])
                            for t in vts.targets
                        ]))
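                    # A source may be claimed by multiple targets; de-dup and warn so the
                    # overlap is visible.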
                    de_duped_sources = list(OrderedSet(sources))
                    if len(sources) != len(de_duped_sources):
                        counts = [(src, len(list(srcs)))
                                  for src, srcs in groupby(sorted(sources))]
                        self.context.log.warn(
                            'De-duped the following sources:\n\t%s' %
                            '\n\t'.join(
                                sorted('%d %s' % (cnt, src)
                                       for src, cnt in counts if cnt > 1)))
                    analysis_file = os.path.join(partition_tmpdir, 'analysis')
                    partitions.append((vts, de_duped_sources, analysis_file))

                # Split per-partition files out of the global invalid analysis.
                if self._analysis_parser.is_nonempty_analysis(
                        self._invalid_analysis_file) and partitions:
                    with self.context.new_workunit(name='partition-analysis'):
                        splits = [(x[1], x[2]) for x in partitions]
                        # We have to pass the analysis for any deleted files through zinc, to give it
                        # a chance to delete the relevant class files.
                        if splits:
                            splits[0] = (splits[0][0] + deleted_sources,
                                         splits[0][1])
                        self._analysis_tools.split_to_paths(
                            self._invalid_analysis_file, splits)

                # Now compile partitions one by one.
                for partition in partitions:
                    (vts, sources, analysis_file) = partition
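                    # Restrict the classpath to entries in our configured confs.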
                    cp_entries = [
                        entry for conf, entry in classpath
                        if conf in self._confs
                    ]
                    self._process_target_partition(partition, cp_entries)
                    # No exception was thrown, therefore the compile succeeded and analysis_file is now valid.
                    if os.path.exists(analysis_file):  # The compilation created an analysis.
                        # Merge the newly-valid analysis with our global valid analysis.
                        new_valid_analysis = analysis_file + '.valid.new'
                        if self._analysis_parser.is_nonempty_analysis(
                                self._analysis_file):
                            with self.context.new_workunit(
                                    name='update-upstream-analysis'):
                                self._analysis_tools.merge_from_paths(
                                    [self._analysis_file, analysis_file],
                                    new_valid_analysis)
                        else:  # We need to keep analysis_file around. Background tasks may need it.
                            shutil.copy(analysis_file, new_valid_analysis)

                        # Move the merged valid analysis to its proper location.
                        # We do this before checking for missing dependencies, so that we can still
                        # enjoy an incremental compile after fixing missing deps.
                        self.move(new_valid_analysis, self._analysis_file)

                        # Update the products with the latest classes. Must happen before the
                        # missing dependencies check.
                        self._register_products(vts.targets, sources_by_target,
                                                analysis_file)
                        if self._dep_analyzer:
                            # Check for missing dependencies.
                            actual_deps = self._analysis_parser.parse_deps_from_path(
                                analysis_file,
                                lambda: self._compute_classpath_elements_by_class(cp_entries))
                            with self.context.new_workunit(
                                    name='find-missing-dependencies'):
                                self._dep_analyzer.check(sources, actual_deps)

                        # Kick off the background artifact cache write.
                        if self.artifact_cache_writes_enabled():
                            self._write_to_artifact_cache(
                                analysis_file, vts, invalid_sources_by_target)

                    if self._analysis_parser.is_nonempty_analysis(
                            self._invalid_analysis_file):
                        with self.context.new_workunit(
                                name='trim-downstream-analysis'):
                            # Trim out the newly-valid sources from our global invalid analysis.
                            new_invalid_analysis = analysis_file + '.invalid.new'
                            discarded_invalid_analysis = analysis_file + '.invalid.discard'
                            self._analysis_tools.split_to_paths(
                                self._invalid_analysis_file,
                                [(sources, discarded_invalid_analysis)],
                                new_invalid_analysis)
                            self.move(new_invalid_analysis,
                                      self._invalid_analysis_file)

                    # Record the built target -> sources mapping for future use.
                    for target in vts.targets:
                        self._record_sources_by_target(
                            target, sources_by_target.get(target, []))

                    # Now that all the analysis accounting is complete, and we have no missing deps,
                    # we can safely mark the targets as valid.
                    vts.update()
            else:
                # Nothing to build. Register products for all the targets in one go.
                self._register_products(relevant_targets, sources_by_target,
                                        self._analysis_file)

        # Update the classpath for downstream tasks.
        runtime_deps = (self.tool_classpath(self._runtime_deps_key)
                        if self._runtime_deps_key else [])
        for conf in self._confs:
            egroups.update_compatible_classpaths(group_id,
                                                 [(conf, self._classes_dir)])
            for dep in runtime_deps:
                # TODO(benjy): Make compile-time vs. runtime classpaths more explicit.
                egroups.update_compatible_classpaths(group_id, [(conf, dep)])

        self.post_process(relevant_targets)