Example #1
0
  def test_detect_cycle_direct(self):
    """A target depending directly on itself must raise CycleException."""
    target = self.make_target(':a')

    # Sanity check: the graph is acyclic before we inject the self-edge.
    sort_targets([target])

    self.build_graph.inject_dependency(target.address, target.address)
    with self.assertRaises(CycleException):
      sort_targets([target])
Example #2
0
    def test_detect_cycle_direct(self):
        """A self-dependency must be reported as a cycle."""
        target = self.make_target(':a')

        # The graph is acyclic until the self-edge is injected below.
        sort_targets([target])

        self.build_graph.inject_dependency(target.address, target.address)
        with self.assertRaises(CycleException):
            sort_targets([target])
Example #3
0
  def test_sort(self):
    """sort_targets returns most-dependent-first regardless of input order."""
    a = self.make_target(':a')
    b = self.make_target(':b', dependencies=[a])
    c = self.make_target(':c', dependencies=[b])
    d = self.make_target(':d', dependencies=[c, a])
    e = self.make_target(':e', dependencies=[d])

    # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
    self.assertEqual(sort_targets([a, b, c, d, e]), [e, d, c, b, a])
    self.assertEqual(sort_targets([b, d, a, e, c]), [e, d, c, b, a])
    self.assertEqual(sort_targets([e, d, c, b, a]), [e, d, c, b, a])
Example #4
0
    def test_sort(self):
        """Every input permutation must sort to most-dependent-first order."""
        a = self.make_target(":a")
        b = self.make_target(":b", dependencies=[a])
        c = self.make_target(":c", dependencies=[b])
        d = self.make_target(":d", dependencies=[c, a])
        e = self.make_target(":e", dependencies=[d])

        expected = [e, d, c, b, a]
        for permutation in ([a, b, c, d, e], [b, d, a, e, c], [e, d, c, b, a]):
            self.assertEqual(sort_targets(permutation), expected)
Example #5
0
    def test_sort(self):
        """The topological order is independent of the input ordering."""
        a = self.make_target(':a')
        b = self.make_target(':b', dependencies=[a])
        c = self.make_target(':c', dependencies=[b])
        d = self.make_target(':d', dependencies=[c, a])
        e = self.make_target(':e', dependencies=[d])

        expected = [e, d, c, b, a]
        self.assertEqual(sort_targets([a, b, c, d, e]), expected)
        self.assertEqual(sort_targets([b, d, a, e, c]), expected)
        self.assertEqual(sort_targets([e, d, c, b, a]), expected)
Example #6
0
  def test_detect_cycle_indirect(self):
    """A cycle closed through an intermediate dependency must be detected."""
    leaf = self.make_target(':c')
    middle = self.make_target(':b', dependencies=[leaf])
    root = self.make_target(':a', dependencies=[leaf, middle])

    # Still a DAG at this point.
    sort_targets([root])

    # Injecting c -> a closes the loop a -> c -> a.
    self.build_graph.inject_dependency(leaf.address, root.address)
    with self.assertRaises(CycleException):
      sort_targets([root])
Example #7
0
    def test_detect_cycle_indirect(self):
        """An indirect cycle (via a transitive edge) must raise CycleException."""
        bottom = self.make_target(':c')
        mid = self.make_target(':b', dependencies=[bottom])
        top = self.make_target(':a', dependencies=[bottom, mid])

        # no cycles yet
        sort_targets([top])

        # c -> a creates the cycle a -> c -> a.
        self.build_graph.inject_dependency(bottom.address, top.address)
        with self.assertRaises(CycleException):
            sort_targets([top])
Example #8
0
    def validate_platform_dependencies(self):
        """Check all jvm targets in the context, throwing an error or warning if there are bad
        targets.

        If there are errors, this method fails slow rather than fails fast -- that is, it continues
        checking the rest of the targets before spitting error messages. This is useful, because
        it's nice to have a comprehensive list of all errors rather than just the first one we
        happened to hit.

        Returns the error message string when self.check == "warn" and conflicts exist.
        """
        conflicts = []

        def is_conflicting(target, dependency):
            # A dependency compiled for a newer JVM than its dependent is invalid.
            return self.jvm_version(dependency) > self.jvm_version(target)

        try:
            sort_targets(self.jvm_targets)
        except CycleException:
            self.context.log.warn(
                "Cannot validate dependencies when cycles exist in the build graph."
            )
            return

        try:
            with self.invalidated(
                self.jvm_targets,
                fingerprint_strategy=self.PlatformFingerprintStrategy(),
                invalidate_dependents=True,
            ) as vts:
                dependency_map = self.jvm_dependency_map
                for vts_target in vts.invalid_vts:
                    for target in vts_target.targets:
                        if target in dependency_map:
                            deps = dependency_map[target]
                            invalid_dependencies = [
                                dep for dep in deps if is_conflicting(target, dep)
                            ]
                            if invalid_dependencies:
                                conflicts.append((target, invalid_dependencies))
                if conflicts:
                    # NB(gmalmquist): It's important to unconditionally raise an exception, then decide later
                    # whether to continue raising it or just print a warning, to make sure the targets aren't
                    # marked as valid if there are invalid platform dependencies.
                    raise self.IllegalJavaTargetLevelDependency(
                        self._create_full_error_message(conflicts)
                    )
        except self.IllegalJavaTargetLevelDependency as e:
            if self.check == "fatal":
                # Bare `raise` re-raises with the original traceback intact.
                raise
            else:
                assert self.check == "warn"
                # Recover the message from the exception instead of relying on an
                # `error_message` local leaking out of the `try` block above -- that
                # pattern would NameError if this exception type were ever raised
                # before the assignment.
                error_message = str(e)
                self.context.log.warn(error_message)
                return error_message
Example #9
0
  def exported_targets(self):
    """Return the exportable targets, ordered least-dependent first."""
    candidates = set(self.get_targets())
    if not self.act_transitively:
      def get_synthetic(lang, target):
        # Yield the synthetic targets generated from `target` for `lang`, if any.
        mappings = self.context.products.get(lang).get(target)
        if not mappings:
          return
        for _, generated in mappings.items():
          for synthetic in generated:
            yield synthetic

      # Handle the case where a code gen target is in the listed roots and thus the publishable
      # target is a synthetic twin generated by a code gen task upstream.
      # TODO(benjyw): Create a unified mechanism for acting on derived targets.
      # See https://github.com/pantsbuild/pants/issues/5356.
      for candidate in self.context.target_roots:
        candidates.update(get_synthetic('java', candidate))
        candidates.update(get_synthetic('scala', candidate))

    def exportable(tgt):
      return tgt in candidates and self._is_exported(tgt)

    to_sort = [candidate for candidate in candidates if exportable(candidate)]
    ordered = reversed(sort_targets(to_sort))
    return OrderedSet(target for target in ordered if exportable(target))
Example #10
0
  def exported_targets(self):
    """Return exported targets to act on, least-dependent first."""
    candidates = set()
    if self.transitive:
      candidates.update(self.context.targets())
    else:
      candidates.update(self.context.target_roots)

      def get_synthetic(lang, target):
        # Yield any synthetic targets code-generated from `target` for `lang`.
        mappings = self.context.products.get(lang).get(target)
        if mappings:
          for _, generated in mappings.items():
            for synthetic in generated:
              yield synthetic

      # Handle the case where a code gen target is in the listed roots and thus the publishable
      # target is a synthetic twin generated by a code gen task upstream.
      for candidate in self.context.target_roots:
        candidates.update(get_synthetic('java', candidate))
        candidates.update(get_synthetic('scala', candidate))

    def exportable(tgt):
      return tgt in candidates and tgt.is_exported

    sortable = [tgt for tgt in candidates if exportable(tgt)]
    return OrderedSet(tgt for tgt in reversed(sort_targets(sortable))
                      if exportable(tgt))
Example #11
0
  def exported_targets(self):
    """Return the exportable targets in reverse topological (dependency-first) order."""
    candidates = set(self.get_targets())
    if not self.act_transitively:
      def get_synthetic(lang, target):
        # Yield synthetic (generated) targets derived from `target` for `lang`.
        mappings = self.context.products.get(lang).get(target)
        if mappings:
          for _, generated in mappings.items():
            yield from generated

      # Handle the case where a code gen target is in the listed roots and thus the publishable
      # target is a synthetic twin generated by a code gen task upstream.
      # TODO(benjyw): Create a unified mechanism for acting on derived targets.
      # See https://github.com/pantsbuild/pants/issues/5356.
      for candidate in self.context.target_roots:
        candidates.update(get_synthetic('java', candidate))
        candidates.update(get_synthetic('scala', candidate))

    def exportable(tgt):
      return tgt in candidates and self._is_exported(tgt)

    exportable_candidates = [c for c in candidates if exportable(c)]
    return OrderedSet(t for t in reversed(sort_targets(exportable_candidates))
                      if exportable(t))
Example #12
0
    def exported_targets(self):
        """Return the exportable targets, least-dependent first."""
        candidates = set()
        if self.transitive:
            candidates.update(self.context.targets())
        else:
            candidates.update(self.context.target_roots)

            def get_synthetic(lang, target):
                # Yield synthetic targets code-generated from `target` for `lang`.
                mappings = self.context.products.get(lang).get(target)
                if not mappings:
                    return
                for _, generated in mappings.items():
                    for synthetic in generated:
                        yield synthetic

            # Handle the case where a code gen target is in the listed roots and thus the publishable
            # target is a synthetic twin generated by a code gen task upstream.
            for candidate in self.context.target_roots:
                candidates.update(get_synthetic('java', candidate))
                candidates.update(get_synthetic('scala', candidate))

        def exportable(tgt):
            return tgt in candidates and tgt.is_exported

        sortable = [tgt for tgt in candidates if exportable(tgt)]
        ordered = reversed(sort_targets(sortable))
        return OrderedSet(tgt for tgt in ordered if exportable(tgt))
Example #13
0
 def console_output(self, targets):
     """Yield the address of each root target, topologically ordered."""
     ordered = sort_targets(targets)
     # sort_targets yields most-dependent first; flip that unless --reverse was requested.
     if not self.get_options().reverse:
         ordered = reversed(ordered)
     for tgt in ordered:
         if tgt in self.context.target_roots:
             yield tgt.address.reference()
Example #14
0
 def execute_codegen(self, targets):
   """Run code generation for each requested target, dependencies first."""
   with self._task.context.new_workunit(name='execute', labels=[WorkUnitLabel.MULTITOOL]):
     # Reversed tsort puts dependencies before dependents; keep only the requested targets.
     for target in [t for t in reversed(sort_targets(targets)) if t in targets]:
       with self._task.context.new_workunit(name=target.address.spec):
         # TODO(gm): add a test-case to ensure this is correctly eliminating stale generated code.
         safe_rmtree(self._task.codegen_workdir(target))
         self._do_execute_codegen([target])
Example #15
0
 def console_output(self, targets):
     """Emit addresses of root targets in (optionally reversed) topological order."""
     # sort_targets already returns targets in reverse topologically sorted order.
     sorted_out = sort_targets(targets)
     if not self.get_options().reverse:
         sorted_out = reversed(sorted_out)
     roots = self.context.target_roots
     for target in sorted_out:
         if target in roots:
             yield target.address.reference()
Example #16
0
 def vt_iter():
   # Yield a VersionedTarget for every target that has a cache key, either in
   # dependency-first (topological) order or plain sorted order.
   if topological_order:
     ordered = [t for t in reversed(sort_targets(targets)) if t in targets]
   else:
     ordered = sorted(targets)
   for tgt in ordered:
     key = self._key_for(tgt)
     if key is not None:
       yield VersionedTarget(self, tgt, key)
  def validate_platform_dependencies(self):
    """Check all jvm targets in the context, throwing an error or warning if there are bad targets.

    If there are errors, this method fails slow rather than fails fast -- that is, it continues
    checking the rest of the targets before spitting error messages. This is useful, because it's
    nice to have a comprehensive list of all errors rather than just the first one we happened to
    hit.

    Returns the error message string when self.check == 'warn' and conflicts exist.
    """
    conflicts = []

    def is_conflicting(target, dependency):
      # A dependency requiring a newer JVM than its dependent is invalid.
      return self.jvm_version(dependency) > self.jvm_version(target)

    try:
      sort_targets(self.jvm_targets)
    except CycleException:
      self.context.log.warn('Cannot validate dependencies when cycles exist in the build graph.')
      return

    try:
      with self.invalidated(self.jvm_targets,
                            fingerprint_strategy=self.PlatformFingerprintStrategy(),
                            invalidate_dependents=True) as vts:
        dependency_map = self.jvm_dependency_map
        for vts_target in vts.invalid_vts:
          for target in vts_target.targets:
            if target in dependency_map:
              deps = dependency_map[target]
              invalid_dependencies = [dep for dep in deps if is_conflicting(target, dep)]
              if invalid_dependencies:
                conflicts.append((target, invalid_dependencies))
        if conflicts:
          # NB(gmalmquist): It's important to unconditionally raise an exception, then decide later
          # whether to continue raising it or just print a warning, to make sure the targets aren't
          # marked as valid if there are invalid platform dependencies.
          raise self.IllegalJavaTargetLevelDependency(self._create_full_error_message(conflicts))
    except self.IllegalJavaTargetLevelDependency as e:
      if self.check == 'fatal':
        # Bare `raise` preserves the original traceback.
        raise
      else:
        assert self.check == 'warn'
        # Take the message from the exception rather than relying on the `error_message`
        # local leaking out of the `try` block above, which would NameError if this
        # exception type were ever raised before the assignment.
        error_message = str(e)
        self.context.log.warn(error_message)
        return error_message
Example #18
0
  def execute(self):
    """Create setup.py projects for exported python targets, then package or run them.

    Builds one setup.py project per exported target root (recursing into exported
    dependencies when self._recursive), then processes the projects in dependency
    order: packaging an sdist per project by default, or running the configured
    --run command against each project instead.

    Raises:
      TaskError: if no target root provides an artifact, or a --run command fails.
    """
    # We drive creation of setup.py distributions from the original target graph, grabbing codegen'd
    # sources when needed. We ignore PythonDistribution targets.
    def is_exported_python_target(t):
      return t.is_original and self.has_provides(t) and not is_local_python_dist(t)

    exported_python_targets = OrderedSet(t for t in self.context.target_roots
                                         if is_exported_python_target(t))
    if not exported_python_targets:
      raise TaskError('setup-py target(s) must provide an artifact.')

    dist_dir = self.get_options().pants_distdir

    # NB: We have to create and then run in 2 steps so that we can discover all exported targets
    # in-play in the creation phase which then allows a tsort of these exported targets in the run
    # phase to ensure an exported target is, for example (--run="sdist upload"), uploaded before any
    # exported target that depends on it is uploaded.

    created = {}

    def create(exported_python_target):
      # Memoized via `created`: each target's project is built at most once.
      if exported_python_target not in created:
        self.context.log.info('Creating setup.py project for {}'.format(exported_python_target))
        # Use the derived (synthetic) twin of the target when one exists.
        subject = self.derived_by_original.get(exported_python_target, exported_python_target)
        setup_dir, dependencies = self.create_setup_py(subject, dist_dir)
        created[exported_python_target] = setup_dir
        if self._recursive:
          for dep in dependencies:
            if is_exported_python_target(dep):
              create(dep)

    for exported_python_target in exported_python_targets:
      create(exported_python_target)

    interpreter = self.context.products.get_data(PythonInterpreter)
    python_dists = self.context.products.register_data(self.PYTHON_DISTS_PRODUCT, {})
    # Reversed tsort processes dependencies before their dependents.
    for exported_python_target in reversed(sort_targets(list(created.keys()))):
      setup_dir = created.get(exported_python_target)
      if setup_dir:
        if not self._run:
          # Default mode: build an sdist and move it into dist_dir.
          self.context.log.info('Running packager against {}'.format(setup_dir))
          setup_runner = Packager(setup_dir, interpreter=interpreter)
          tgz_name = os.path.basename(setup_runner.sdist())
          sdist_path = os.path.join(dist_dir, tgz_name)
          self.context.log.info('Writing {}'.format(sdist_path))
          shutil.move(setup_runner.sdist(), sdist_path)
          safe_rmtree(setup_dir)
          python_dists[exported_python_target] = sdist_path
        else:
          # --run mode: execute the given setup.py command(s) in the project dir.
          self.context.log.info('Running {} against {}'.format(self._run, setup_dir))
          split_command = safe_shlex_split(self._run)
          setup_runner = SetupPyRunner(setup_dir, split_command, interpreter=interpreter)
          installed = setup_runner.run()
          if not installed:
            raise TaskError('Install failed.')
          python_dists[exported_python_target] = setup_dir
Example #19
0
  def execute(self):
    """Build setup.py projects for exported python targets and package or run each one.

    Phase 1 creates a setup.py project per exported root target (and, with
    self._recursive, per exported dependency). Phase 2 walks the created projects
    in dependency order and either packages an sdist into dist_dir (default) or
    executes the --run command in each project directory.

    Raises:
      TaskError: when no target root provides an artifact, or a --run command fails.
    """
    # We drive creation of setup.py distributions from the original target graph, grabbing codegen'd
    # sources when needed. We ignore PythonDistribution targets.
    def is_exported_python_target(t):
      return t.is_original and self.has_provides(t) and not is_local_python_dist(t)

    exported_python_targets = OrderedSet(t for t in self.context.target_roots
                                         if is_exported_python_target(t))
    if not exported_python_targets:
      raise TaskError('setup-py target(s) must provide an artifact.')

    dist_dir = self.get_options().pants_distdir

    # NB: We have to create and then run in 2 steps so that we can discover all exported targets
    # in-play in the creation phase which then allows a tsort of these exported targets in the run
    # phase to ensure an exported target is, for example (--run="sdist upload"), uploaded before any
    # exported target that depends on it is uploaded.

    created = {}

    def create(exported_python_target):
      # Builds each target's project at most once (memoized via `created`).
      if exported_python_target not in created:
        self.context.log.info('Creating setup.py project for {}'.format(exported_python_target))
        # Substitute the derived (codegen'd) twin for the original when present.
        subject = self.derived_by_original.get(exported_python_target, exported_python_target)
        setup_dir, dependencies = self.create_setup_py(subject, dist_dir)
        created[exported_python_target] = setup_dir
        if self._recursive:
          for dep in dependencies:
            if is_exported_python_target(dep):
              create(dep)

    for exported_python_target in exported_python_targets:
      create(exported_python_target)

    interpreter = self.context.products.get_data(PythonInterpreter)
    python_dists = self.context.products.register_data(self.PYTHON_DISTS_PRODUCT, {})
    # Dependencies first: reversed topological sort of all created targets.
    for exported_python_target in reversed(sort_targets(list(created.keys()))):
      setup_dir = created.get(exported_python_target)
      if setup_dir:
        if not self._run:
          # Package an sdist and move it into dist_dir.
          self.context.log.info('Running packager against {}'.format(setup_dir))
          setup_runner = Packager(setup_dir, interpreter=interpreter)
          tgz_name = os.path.basename(setup_runner.sdist())
          sdist_path = os.path.join(dist_dir, tgz_name)
          self.context.log.info('Writing {}'.format(sdist_path))
          shutil.move(setup_runner.sdist(), sdist_path)
          safe_rmtree(setup_dir)
          python_dists[exported_python_target] = sdist_path
        else:
          # Run the configured setup.py command(s) in the project directory.
          self.context.log.info('Running {} against {}'.format(self._run, setup_dir))
          split_command = safe_shlex_split(self._run)
          setup_runner = SetupPyRunner(setup_dir, split_command, interpreter=interpreter)
          installed = setup_runner.run()
          if not installed:
            raise TaskError('Install failed.')
          python_dists[exported_python_target] = setup_dir
Example #20
0
    def execute(self):
        """Create setup.py projects for providing target roots, then package or run them.

        Creates a setup.py project per target root that has a `provides` artifact
        (recursing into providing dependencies when self._recursive), then, in
        dependency order, either packages each project into an sdist or runs the
        configured --run command against it.

        Returns:
          dict mapping processed target -> sdist path or setup dir (collected for tests).

        Raises:
          TaskError: if no target root provides an artifact.
        """
        targets = [
            target for target in self.context.target_roots
            if self.has_provides(target)
        ]
        if not targets:
            raise TaskError('setup-py target(s) must provide an artifact.')

        dist_dir = self.get_options().pants_distdir

        # NB: We have to create and then run in 2 steps so that we can discover all exported targets
        # in-play in the creation phase which then allows a tsort of these exported targets in the run
        # phase to ensure an exported target is, for example (--run="sdist upload"), uploaded before any
        # exported target that depends on it is uploaded.

        created = {}

        def create(target):
            # Builds each target's project at most once, memoized via `created`.
            if target not in created:
                self.context.log.info(
                    'Creating setup.py project for {}'.format(target))
                setup_dir, dependencies = self.create_setup_py(
                    target, dist_dir)
                created[target] = setup_dir
                if self._recursive:
                    for dep in dependencies:
                        if self.has_provides(dep):
                            create(dep)

        for target in targets:
            create(target)

        executed = {
        }  # Collected and returned for tests, processed target -> sdist|setup_dir.
        # Reversed tsort processes dependencies before their dependents.
        for target in reversed(sort_targets(created.keys())):
            setup_dir = created.get(target)
            if setup_dir:
                if not self._run:
                    # Default mode: build an sdist and move it into dist_dir.
                    self.context.log.info(
                        'Running packager against {}'.format(setup_dir))
                    setup_runner = Packager(setup_dir)
                    tgz_name = os.path.basename(setup_runner.sdist())
                    sdist_path = os.path.join(dist_dir, tgz_name)
                    self.context.log.info('Writing {}'.format(sdist_path))
                    shutil.move(setup_runner.sdist(), sdist_path)
                    safe_rmtree(setup_dir)
                    executed[target] = sdist_path
                else:
                    # --run mode: execute the command in the project directory.
                    self.context.log.info('Running {} against {}'.format(
                        self._run, setup_dir))
                    setup_runner = SetupPyRunner(setup_dir, self._run)
                    setup_runner.run()
                    executed[target] = setup_dir
        return executed
Example #21
0
  def execute(self):
    """Create setup.py projects for providing target roots, then package or run them.

    Creates one setup.py project per target root with a `provides` artifact
    (recursing into providing dependencies when self._recursive), then processes
    the projects dependencies-first: packaging an sdist per project by default,
    or running the --run command against each project instead.

    Returns:
      dict of processed target -> sdist path or setup dir (collected for tests).

    Raises:
      TaskError: if no target root provides an artifact.
    """
    targets = [target for target in self.context.target_roots if self.has_provides(target)]
    if not targets:
      raise TaskError('setup-py target(s) must provide an artifact.')

    dist_dir = self.get_options().pants_distdir

    # NB: We have to create and then run in 2 steps so that we can discover all exported targets
    # in-play in the creation phase which then allows a tsort of these exported targets in the run
    # phase to ensure an exported target is, for example (--run="sdist upload"), uploaded before any
    # exported target that depends on it is uploaded.

    created = {}

    def create(target):
      # Each target's project is created at most once (memoized via `created`).
      if target not in created:
        self.context.log.info('Creating setup.py project for {}'.format(target))
        setup_dir, dependencies = self.create_setup_py(target, dist_dir)
        created[target] = setup_dir
        if self._recursive:
          for dep in dependencies:
            if self.has_provides(dep):
              create(dep)

    for target in targets:
      create(target)

    executed = {}  # Collected and returned for tests, processed target -> sdist|setup_dir.
    # Reversed tsort => dependencies are processed before their dependents.
    for target in reversed(sort_targets(created.keys())):
      setup_dir = created.get(target)
      if setup_dir:
        if not self._run:
          # Default mode: build an sdist and move it into dist_dir.
          self.context.log.info('Running packager against {}'.format(setup_dir))
          setup_runner = Packager(setup_dir)
          tgz_name = os.path.basename(setup_runner.sdist())
          sdist_path = os.path.join(dist_dir, tgz_name)
          self.context.log.info('Writing {}'.format(sdist_path))
          shutil.move(setup_runner.sdist(), sdist_path)
          safe_rmtree(setup_dir)
          executed[target] = sdist_path
        else:
          # --run mode: execute the command in the project directory.
          self.context.log.info('Running {} against {}'.format(self._run, setup_dir))
          setup_runner = SetupPyRunner(setup_dir, self._run)
          setup_runner.run()
          executed[target] = setup_dir
    return executed
Example #22
0
  def _compute_transitive_deps_by_target(self):
    """Map from target to all the targets it depends on, transitively."""
    # Sort from least to most dependent.
    sorted_targets = reversed(sort_targets(self.context.targets()))
    transitive_deps_by_target = defaultdict(set)
    # Iterate in dep order, to accumulate the transitive deps for each target.
    for target in sorted_targets:
      transitive_deps = set()
      for dep in target.dependencies:
        # A dep's own closure was already computed on an earlier iteration.
        transitive_deps.update(transitive_deps_by_target.get(dep, []))
        transitive_deps.add(dep)

      # Need to handle the case where a java_sources target has dependencies.
      # In particular if it depends back on the original target.
      if hasattr(target, 'java_sources'):
        for java_source_target in target.java_sources:
          # NOTE(review): only the twin's *direct* deps are recorded here, not their
          # transitive closure -- presumably intentional; confirm before changing.
          for transitive_dep in java_source_target.dependencies:
            transitive_deps_by_target[java_source_target].add(transitive_dep)

      transitive_deps_by_target[target] = transitive_deps
    return transitive_deps_by_target
  def _compute_transitive_deps_by_target(self):
    """Map from target to all the targets it depends on, transitively."""
    result = defaultdict(set)
    # Reversed tsort yields dependencies before dependents, so each dep's closure
    # is complete by the time its dependents are visited.
    for target in reversed(sort_targets(self.context.targets())):
      closure = set()
      for dep in target.dependencies:
        closure.update(result.get(dep, []))
        closure.add(dep)

      # Need to handle the case where a java_sources target has dependencies.
      # In particular if it depends back on the original target.
      if hasattr(target, 'java_sources'):
        for java_source_target in target.java_sources:
          for transitive_dep in java_source_target.dependencies:
            result[java_source_target].add(transitive_dep)

      result[target] = closure
    return result
Example #24
0
  def _topological_sort(self, targets):
    """Topologically order a list of targets"""
    requested = set(targets)
    ordered = reversed(sort_targets(targets))
    return [tgt for tgt in ordered if tgt in requested]
Example #25
0
  def execute(self):
    """Create setup.py projects for providing roots (or their synthetic twins), then package or run them.

    Substitutes codegen'd synthetic targets for their originals where applicable,
    creates a setup.py project per selected target (recursing into providing
    dependencies when self._recursive), then processes the projects in dependency
    order: packaging an sdist per project by default, or running the --run
    command against each project instead.

    Raises:
      TaskError: if no target provides an artifact.
    """
    # We operate on the target roots, except that we replace codegen targets with their
    # corresponding synthetic targets, since those have the generated sources that actually
    # get published. Note that the "provides" attributed is copied from the original target
    # to the synthetic target,  so that the latter can be used as a direct stand-in for the
    # former here.
    preliminary_targets = set(t for t in self.context.target_roots if self.has_provides(t))
    targets = set(preliminary_targets)
    for t in self.context.targets():
      # A non-codegen target has derived_from equal to itself, so we check is_original
      # to ensure that the synthetic targets take precedence.
      # We check that the synthetic target has the same "provides" as the original, because
      # there are other synthetic targets in play (e.g., resources targets) to which this
      # substitution logic must not apply.
      if (t.derived_from in preliminary_targets and not t.is_original and
          self.has_provides(t) and t.provides == t.derived_from.provides):
        targets.discard(t.derived_from)
        targets.add(t)
    if not targets:
      raise TaskError('setup-py target(s) must provide an artifact.')

    dist_dir = self.get_options().pants_distdir

    # NB: We have to create and then run in 2 steps so that we can discover all exported targets
    # in-play in the creation phase which then allows a tsort of these exported targets in the run
    # phase to ensure an exported target is, for example (--run="sdist upload"), uploaded before any
    # exported target that depends on it is uploaded.

    created = {}

    def create(target):
      # Builds each target's project at most once (memoized via `created`).
      if target not in created:
        self.context.log.info('Creating setup.py project for {}'.format(target))
        setup_dir, dependencies = self.create_setup_py(target, dist_dir)
        created[target] = setup_dir
        if self._recursive:
          for dep in dependencies:
            if self.has_provides(dep):
              create(dep)

    for target in targets:
      create(target)

    interpreter = self.context.products.get_data(PythonInterpreter)
    python_dists = self.context.products.register_data(self.PYTHON_DISTS_PRODUCT, {})
    # Reversed tsort => dependencies are processed before their dependents.
    for target in reversed(sort_targets(created.keys())):
      setup_dir = created.get(target)
      if setup_dir:
        if not self._run:
          # Default mode: build an sdist and move it into dist_dir.
          self.context.log.info('Running packager against {}'.format(setup_dir))
          setup_runner = Packager(setup_dir, interpreter=interpreter)
          tgz_name = os.path.basename(setup_runner.sdist())
          sdist_path = os.path.join(dist_dir, tgz_name)
          self.context.log.info('Writing {}'.format(sdist_path))
          shutil.move(setup_runner.sdist(), sdist_path)
          safe_rmtree(setup_dir)
          python_dists[target] = sdist_path
        else:
          # --run mode: execute the command in the project directory.
          self.context.log.info('Running {} against {}'.format(self._run, setup_dir))
          setup_runner = SetupPyRunner(setup_dir, self._run, interpreter=interpreter)
          setup_runner.run()
          python_dists[target] = setup_dir
Example #26
0
    def execute(self):
        """Create setup.py projects for exported python targets, then package or run them.

        Builds one setup.py project per exported root target (recursing into
        exported dependencies when self._recursive), then processes the projects
        in dependency order with a shared SetupPyRunner: building an sdist per
        project by default, or running the --run command against each project.

        Raises:
          TaskError: if a --run setup command fails.
        """
        # We drive creation of setup.py distributions from the original target graph, grabbing codegen'd
        # sources when needed. We ignore PythonDistribution targets.
        def is_exported_python_target(t):
            return t.is_original and self.has_provides(
                t) and not is_local_python_dist(t)

        # NOTE(review): unlike earlier variants, no TaskError is raised when this
        # set is empty -- presumably intentional; confirm against callers.
        exported_python_targets = OrderedSet(t
                                             for t in self.context.target_roots
                                             if is_exported_python_target(t))

        dist_dir = self.get_options().pants_distdir

        # NB: We have to create and then run in 2 steps so that we can discover all exported targets
        # in-play in the creation phase which then allows a tsort of these exported targets in the run
        # phase to ensure an exported target is, for example (--run="sdist upload"), uploaded before any
        # exported target that depends on it is uploaded.

        created: Dict[PythonTarget, Path] = {}

        def create(exported_python_target):
            # Builds each target's project at most once (memoized via `created`).
            if exported_python_target not in created:
                self.context.log.info(
                    "Creating setup.py project for {}".format(
                        exported_python_target))
                # Prefer the derived (codegen'd) twin of the target when one exists.
                subject = self.derived_by_original.get(exported_python_target,
                                                       exported_python_target)
                setup_dir, dependencies = self.create_setup_py(
                    subject, dist_dir)
                created[exported_python_target] = Path(setup_dir)
                if self._recursive:
                    for dep in dependencies:
                        if is_exported_python_target(dep):
                            create(dep)

        for exported_python_target in exported_python_targets:
            create(exported_python_target)

        interpreter = self.context.products.get_data(PythonInterpreter)
        python_dists = self.context.products.register_data(
            self.PYTHON_DISTS_PRODUCT, {})

        # One runner pex is shared across all projects processed below.
        setup_runner = SetupPyRunner.Factory.create(
            scope=self,
            interpreter=interpreter,
            pex_file_path=os.path.join(self.workdir, self.fingerprint,
                                       "setup-py-runner.pex"),
        )
        # Reversed tsort processes dependencies before their dependents.
        for exported_python_target in reversed(
                sort_targets(list(created.keys()))):
            setup_dir = created.get(exported_python_target)
            if setup_dir:
                if not self._run:
                    # Default mode: build an sdist and move it into dist_dir.
                    self.context.log.info(
                        "Running sdist against {}".format(setup_dir))
                    sdist = setup_runner.sdist(setup_dir)
                    tgz_name = sdist.name
                    sdist_path = os.path.join(dist_dir, tgz_name)
                    self.context.log.info("Writing {}".format(sdist_path))
                    shutil.move(sdist, sdist_path)
                    safe_rmtree(str(setup_dir))
                    python_dists[exported_python_target] = sdist_path
                else:
                    # --run mode: execute the command in the project directory.
                    self.context.log.info("Running {} against {}".format(
                        self._run, setup_dir))
                    split_command = safe_shlex_split(self._run)
                    try:
                        setup_runner.run_setup_command(
                            source_dir=setup_dir, setup_command=split_command)
                    except SetupPyRunner.CommandFailure as e:
                        raise TaskError(f"Install failed: {e}")
                    python_dists[exported_python_target] = setup_dir
Example #27
0
    def _topological_sort(self, targets):
        """Topologically order a list of targets.

        Targets are returned dependencies-first: a target appears before any
        input target that depends on it. Results are restricted to the input
        targets themselves — anything extra surfaced by ``sort_targets``
        (e.g. transitive dependencies) is dropped.
        """
        # Materialize once: the original iterated `targets` twice (set() and
        # sort_targets()), which silently yields an empty result when callers
        # pass a generator/iterator.
        target_list = list(targets)
        target_set = set(target_list)
        # sort_targets orders dependees first; reverse for dependencies-first.
        return [t for t in reversed(sort_targets(target_list))
                if t in target_set]
Example #28
0
    def execute(self):
        """Create setup.py projects for all provided target roots and either
        package them (sdist) or run the configured --run command against each.

        Returns a dict of processed target -> sdist path (default mode) or
        setup dir (--run mode); collected chiefly for tests.
        """
        # We operate on the target roots, except that we replace codegen targets with their
        # corresponding synthetic targets, since those have the generated sources that actually
        # get published. Note that the "provides" attribute is copied from the original target
        # to the synthetic target, so that the latter can be used as a direct stand-in for the
        # former here.
        preliminary_targets = set(t for t in self.context.target_roots
                                  if self.has_provides(t))
        targets = set(preliminary_targets)
        for t in self.context.targets():
            # A non-codegen target has derived_from equal to itself, so we check is_original
            # to ensure that the synthetic targets take precedence.
            # We check that the synthetic target has the same "provides" as the original, because
            # there are other synthetic targets in play (e.g., resources targets) to which this
            # substitution logic must not apply.
            if (t.derived_from in preliminary_targets and not t.is_original
                    and self.has_provides(t)
                    and t.provides == t.derived_from.provides):
                targets.discard(t.derived_from)
                targets.add(t)
        if not targets:
            raise TaskError('setup-py target(s) must provide an artifact.')

        dist_dir = self.get_options().pants_distdir

        # NB: We have to create and then run in 2 steps so that we can discover all exported targets
        # in-play in the creation phase which then allows a tsort of these exported targets in the run
        # phase to ensure an exported target is, for example (--run="sdist upload"), uploaded before any
        # exported target that depends on it is uploaded.

        created = {}  # Exported target -> setup dir created for it.

        def create(target):
            # Memoized (via `created`) creation of a setup.py project for one
            # target; with --recursive it also visits exported dependencies.
            if target not in created:
                self.context.log.info(
                    'Creating setup.py project for {}'.format(target))
                setup_dir, dependencies = self.create_setup_py(
                    target, dist_dir)
                created[target] = setup_dir
                if self._recursive:
                    for dep in dependencies:
                        if self.has_provides(dep):
                            create(dep)

        for target in targets:
            create(target)

        executed = {
        }  # Collected and returned for tests, processed target -> sdist|setup_dir.
        # Process dependencies before their dependents (see NB above) by
        # reversing the dependees-first order sort_targets produces.
        for target in reversed(sort_targets(created.keys())):
            setup_dir = created.get(target)
            if setup_dir:
                if not self._run:
                    self.context.log.info(
                        'Running packager against {}'.format(setup_dir))
                    setup_runner = Packager(setup_dir)
                    # Build the sdist exactly once. The original called
                    # setup_runner.sdist() twice (once for the name, once for
                    # the move), redundantly re-invoking the packaging step.
                    sdist = setup_runner.sdist()
                    tgz_name = os.path.basename(sdist)
                    sdist_path = os.path.join(dist_dir, tgz_name)
                    self.context.log.info('Writing {}'.format(sdist_path))
                    shutil.move(sdist, sdist_path)
                    safe_rmtree(setup_dir)
                    executed[target] = sdist_path
                else:
                    self.context.log.info('Running {} against {}'.format(
                        self._run, setup_dir))
                    setup_runner = SetupPyRunner(setup_dir, self._run)
                    setup_runner.run()
                    executed[target] = setup_dir
        return executed