Example #1
 def __init__(self, jars):
     self.jars = sorted(jars)
     self.excludes = OrderedSet()
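
These snippets are collected from the Pants build tool, and nearly all of them lean on the same `OrderedSet` contract: set semantics with stable insertion order. A minimal sketch of that contract, assuming the `ordered-set` PyPI package (the Pants-internal class behaves the same for the operations used on this page):

    from ordered_set import OrderedSet

    s = OrderedSet(['b', 'a', 'b', 'c'])  # duplicates collapse to the first occurrence
    s.add('a')                            # already present: no-op, order unchanged
    s.update(['d', 'b'])                  # only 'd' is new
    assert list(s) == ['b', 'a', 'c', 'd']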
Example #2
    def setup_parser(self, parser, args):
        self.config = Config.load()

        Goal.add_global_options(parser)

        # We support attempting zero or more goals.  Multiple goals must be delimited from further
        # options and non goal args with a '--'.  The key permutations we need to support:
        # ./pants goal => goals
        # ./pants goal goals => goals
        # ./pants goal compile src/java/... => compile
        # ./pants goal compile -x src/java/... => compile
        # ./pants goal compile src/java/... -x => compile
        # ./pants goal compile run -- src/java/... => compile, run
        # ./pants goal compile run -- src/java/... -x => compile, run
        # ./pants goal compile run -- -x src/java/... => compile, run

        if not args:
            args.append('goals')

        if len(args) == 1 and args[0] in set(['-h', '--help', 'help']):

            def format_usage(usages):
                left_colwidth = 0
                for left, right in usages:
                    left_colwidth = max(left_colwidth, len(left))
                lines = []
                for left, right in usages:
                    lines.append('  %s%s%s' % (
                        left, ' ' * (left_colwidth - len(left) + 1), right))
                return '\n'.join(lines)

            usages = [
                ("%prog goal goals ([spec]...)", Phase('goals').description),
                ("%prog goal help [goal] ([spec]...)",
                 Phase('help').description),
                ("%prog goal [goal] [spec]...",
                 "Attempt goal against one or more targets."),
                ("%prog goal [goal] ([goal]...) -- [spec]...",
                 "Attempts all the specified goals."),
            ]
            parser.set_usage("\n%s" % format_usage(usages))
            parser.epilog = (
                "Either lists all installed goals, provides extra help for a goal or else "
                "attempts to achieve the specified goal for the listed targets."
                """
                       Note that target specs accept two special forms:
                         [dir]:  to include all targets in the specified directory
                         [dir]:: to include all targets found in all BUILD files recursively under
                                 the directory""")

            parser.print_help()
            sys.exit(0)
        else:
            goals, specs = Goal.parse_args(args)

            self.requested_goals = goals

            # TODO(John Sirois): kill PANTS_NEW and its usages when pants.new is rolled out
            ParseContext.enable_pantsnew()

            # Bootstrap goals by loading any configured bootstrap BUILD files
            with self.check_errors(
                    'The following bootstrap_buildfiles cannot be loaded:'
            ) as error:
                with self.timer.timing('parse:bootstrap'):
                    for path in self.config.getlist('goals',
                                                    'bootstrap_buildfiles',
                                                    default=[]):
                        try:
                            buildfile = BuildFile(
                                get_buildroot(),
                                os.path.relpath(path, get_buildroot()))
                            ParseContext(buildfile).parse()
                        except (TypeError, ImportError, TaskError, GoalError):
                            error(path, include_traceback=True)
                        except (IOError, SyntaxError):
                            error(path)

            # Bootstrap user goals by loading any BUILD files implied by targets
            with self.check_errors(
                    'The following targets could not be loaded:') as error:
                with self.timer.timing('parse:BUILD'):
                    for spec in specs:
                        self.parse_spec(error, spec)

            self.phases = [Phase(goal) for goal in goals]

            rcfiles = self.config.getdefault('rcfiles', type=list, default=[])
            if rcfiles:
                rcfile = RcFile(rcfiles,
                                default_prepend=False,
                                process_default=True)

                # Break down the goals specified on the command line to the full set that will be run so we
                # can apply default flags to inner goal nodes.  Also break down goals by Task subclass and
                # register the task class hierarchy fully qualified names so we can apply defaults to
                # baseclasses.

                all_goals = Phase.execution_order(
                    Phase(goal) for goal in goals)
                sections = OrderedSet()
                for goal in all_goals:
                    sections.add(goal.name)
                    for clazz in goal.task_type.mro():
                        if clazz == Task:
                            break
                        sections.add('%s.%s' %
                                     (clazz.__module__, clazz.__name__))

                augmented_args = rcfile.apply_defaults(sections, args)
                if augmented_args != args:
                    del args[:]
                    args.extend(augmented_args)
                    print("(using pantsrc expansion: pants goal %s)" %
                          ' '.join(augmented_args))

            Phase.setup_parser(parser, args, self.phases)
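
`Goal.parse_args` is not shown here; a hedged sketch of the goal/spec split that the permutation comments above describe (simplified, e.g. it leaves flags like `-x` mixed in with the specs):

    def parse_args(args):
        if '--' in args:
            # Everything before an explicit '--' delimiter is a goal.
            i = args.index('--')
            return args[:i], args[i + 1:]
        # Without '--', only the first arg can name a goal; the rest are
        # specs and flags ('./pants goal compile src/java/... -x' => compile).
        return args[:1], args[1:]

    assert parse_args(['compile', 'run', '--', '-x', 'src/java/...']) == (
        ['compile', 'run'], ['-x', 'src/java/...'])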
Example #3
    def _compute_missing_deps(self, src_tgt, actual_deps):
        """Computes deps that are used by the compiler but not specified in a BUILD file.

    These deps are bugs waiting to happen: the code may happen to compile because the dep was
    brought in some other way (e.g., by some other root target), but that is obviously fragile.

    Note that in practice we're OK with reliance on indirect deps that are only brought in
    transitively. E.g., in Scala type inference can bring in such a dep subtly. Fortunately these
    cases aren't as fragile as a completely missing dependency. It's still a good idea to have
    explicit direct deps where relevant, so we optionally warn about indirect deps, to make them
    easy to find and reason about.

    - actual_deps: a collection of actual deps (source, class or jar file) as noted by the
      compiler.

    Returns a tuple (missing_file_deps, missing_direct_tgt_deps) where:

    - missing_file_deps: a list of dep_files where src_tgt requires dep_file, and we're unable
      to map to a target (because its target isn't in the total set of targets in play,
      and we don't want to parse every BUILD file in the workspace just to find it).

    - missing_direct_tgt_deps: a list of dep_tgts where src_tgt is missing a direct dependency
                               on dep_tgt but has a transitive dep on it.

    All paths in the input and output are absolute.
    """
        analyzer = self._analyzer

        def must_be_explicit_dep(dep):
            # We don't require explicit deps on the java runtime, so we shouldn't consider that
            # a missing dep.
            return (dep not in analyzer.bootstrap_jar_classfiles
                    and not dep.startswith(self._distribution.real_home))

        def target_or_java_dep_in_targets(target, targets):
            # We want to check if the target is in the targets collection
            #
            # However, for the special case of scala_library that has a java_sources
            # reference we're ok if that exists in targets even if the scala_library does not.

            if target in targets:
                return True
            elif isinstance(target, ScalaLibrary):
                return any(t in targets for t in target.java_sources)
            else:
                return False

        # Find deps that are actual but not specified.
        missing_file_deps = OrderedSet()  # (src, src).
        missing_direct_tgt_deps_map = defaultdict(list)  # (tgt, tgt) -> a list of (src, src) as evidence.

        targets_by_file = analyzer.targets_by_file(self.context.targets())
        for actual_dep in filter(must_be_explicit_dep, actual_deps):
            actual_dep_tgts = targets_by_file.get(actual_dep)
            # actual_dep_tgts is usually a singleton. If it's not, we only need one of these
            # to be in our declared deps to be OK.
            if actual_dep_tgts is None:
                missing_file_deps.add((src_tgt, actual_dep))
            elif not target_or_java_dep_in_targets(src_tgt, actual_dep_tgts):
                # Obviously intra-target deps are fine.
                canonical_actual_dep_tgt = next(iter(actual_dep_tgts))
                if canonical_actual_dep_tgt not in src_tgt.dependencies:
                    # The canonical dep is the only one a direct dependency makes sense on.
                    # TODO: Get rid of the src usage here. We don't have a way to map class
                    # files back to source files when using jdeps. We can probably get away with
                    # not listing the src file directly and just listing the target that has the
                    # transitive dep.
                    missing_direct_tgt_deps_map[(src_tgt, canonical_actual_dep_tgt)].append(
                        (src_tgt, actual_dep))

        return (list(missing_file_deps),
                list(missing_direct_tgt_deps_map.items()))
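
A hedged sketch of a caller consuming the returned pair; the warning text here is illustrative, not Pants' actual reporting:

    missing_file_deps, missing_direct_tgt_deps = self._compute_missing_deps(src_tgt, actual_deps)

    for tgt, dep_file in missing_file_deps:
        # No target in play owns dep_file, so the file path is all we can report.
        self.context.log.warn('%s uses %s, which no known target owns'
                              % (tgt.address.spec, dep_file))

    for (tgt, dep_tgt), evidence in missing_direct_tgt_deps:
        # dep_tgt is only reachable transitively; evidence holds the (tgt, file)
        # pairs that triggered this. Suggest promoting dep_tgt to a direct dep.
        self.context.log.warn('%s should declare a direct dependency on %s'
                              % (tgt.address.spec, dep_tgt.address.spec))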
Example #4
  def reduced_dependencies(self, exported_target):
    """Calculates the reduced transitive dependencies for an exported target.

    The reduced set of dependencies will be just those transitive dependencies "owned" by
    the `exported_target`.

    A target is considered "owned" if:
    1. It's "3rdparty" and "directly reachable" from `exported_target` by at least 1 path.
    2. It's not "3rdparty" and not "directly reachable" by any of `exported_target`'s "3rdparty"
       dependencies.

    Here "3rdparty" refers to targets identified as either `is_third_party` or `is_exported`.

    And in this context "directly reachable" means the target can be reached by following a series
    of dependency links from the `exported_target`, never crossing another exported target and
    staying within the `exported_target` address space.  It's the latter restriction that allows for
    unambiguous ownership of exportable targets and mirrors the BUILD file convention of targets
    only being able to own sources in their filesystem subtree.  The single ambiguous case that can
    arise is when there is more than one exported target in the same BUILD file family that can
    "directly reach" a target in its address space.

    :raises: `UnExportedError` if the given `exported_target` is not, in-fact, exported.
    :raises: `NoOwnerError` if a transitive dependency is found with no proper owning exported
             target.
    :raises: `AmbiguousOwnerError` if there is more than one viable exported owner target for a
             given transitive dependency.
    """
    # The strategy adopted requires 3 passes:
    # 1.) Walk the exported target to collect provisional owned exportable targets, but _not_
    #     3rdparty since these may be introduced by exported subgraphs we discover in later steps!
    # 2.) Determine the owner of each target collected in 1 by walking the ancestor chain to find
    #     the closest exported target.  The ancestor chain is just all targets whose spec path is
    #     a prefix of the descendant.  In other words, all targets in descendant's BUILD file family
    #     (its siblings), all targets in its parent directory BUILD file family, and so on.
    # 3.) Finally walk the exported target once more, replacing each visited dependency with its
    #     owner.

    if not self.is_exported(exported_target):
      raise self.UnExportedError('Cannot calculate reduced dependencies for a non-exported '
                                 'target, given: {}'.format(exported_target))

    owner_by_owned_python_target = OrderedDict()

    # Only check ownership on the original target graph.
    original_exported_target = exported_target.derived_from

    def collect_potentially_owned_python_targets(current):
      if current.is_original:
        owner_by_owned_python_target[current] = None  # We can't know the owner in the 1st pass.
      return (current == exported_target) or not self.is_exported(current)

    self._walk(original_exported_target, collect_potentially_owned_python_targets)

    for owned in owner_by_owned_python_target:
      if self.requires_export(owned) and not self.is_exported(owned):
        potential_owners = set()
        for potential_owner in self._ancestor_iterator.iter_target_siblings_and_ancestors(owned):
          if self.is_exported(potential_owner) and owned in self._closure(potential_owner):
            potential_owners.add(potential_owner)
        if not potential_owners:
          raise self.NoOwnerError('No exported target owner found for {}'.format(owned))
        owner = potential_owners.pop()
        if potential_owners:
          ambiguous_owners = [o for o in potential_owners
                              if o.address.spec_path == owner.address.spec_path]
          if ambiguous_owners:
            raise self.AmbiguousOwnerError('Owners for {} are ambiguous.  Found {} and '
                                           '{} others: {}'.format(owned,
                                                                  owner,
                                                                  len(ambiguous_owners),
                                                                  ambiguous_owners))
        owner_by_owned_python_target[owned] = owner

    reduced_dependencies = OrderedSet()

    def collect_reduced_dependencies(current):
      if current == exported_target:
        return True
      else:
        # The provider will be one of:
        # 1. `None`, ie: a 3rdparty requirement we should collect.
        # 2. `exported_target`, ie: a local exportable target owned by `exported_target` that we
        #    should collect
        # 3. Or else a local exportable target owned by some other exported target in which case
        #    we should collect the exported owner.
        owner = owner_by_owned_python_target.get(current)
        if owner is None or owner == exported_target:
          reduced_dependencies.add(current)
        else:
          reduced_dependencies.add(owner)
        return owner == exported_target or not self.requires_export(current)

    self._walk(exported_target, collect_reduced_dependencies)
    return OrderedSet(d for d in reduced_dependencies if d.is_original)
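
The ancestor-chain rule in pass 2 reduces to "the closest exported target whose spec path is a prefix of the owned target's spec path". A self-contained toy version of that resolution over plain spec-path strings (names illustrative; ties at the same path are the ambiguous-owner case the docstring mentions):

    def closest_exported_owner(owned_path, exported_paths):
        # Keep only exported targets that are ancestors (path-prefix) of owned_path.
        ancestors = [p for p in exported_paths
                     if owned_path == p or owned_path.startswith(p + '/')]
        # The longest prefix is the closest ancestor; None is the NoOwnerError case.
        return max(ancestors, key=len) if ancestors else None

    assert closest_exported_owner('src/python/foo/bar',
                                  ['src/python', 'src/python/foo']) == 'src/python/foo'
    assert closest_exported_owner('src/java/baz', ['src/python']) is None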
Example #5
  def _compute_missing_deps(self, src_tgt, actual_deps):
    """Computes deps that are used by the compiler but not specified in a BUILD file.

    These deps are bugs waiting to happen: the code may happen to compile because the dep was
    brought in some other way (e.g., by some other root target), but that is obviously fragile.

    Note that in practice we're OK with reliance on indirect deps that are only brought in
    transitively. E.g., in Scala type inference can bring in such a dep subtly. Fortunately these
    cases aren't as fragile as a completely missing dependency. It's still a good idea to have
    explicit direct deps where relevant, so we optionally warn about indirect deps, to make them
    easy to find and reason about.

    - actual_deps: a map src -> list of actual deps (source, class or jar file) as noted by the
      compiler.

    Returns a triple (missing_file_deps, missing_tgt_deps, missing_direct_tgt_deps) where:

    - missing_file_deps: a list of dep_files where src_tgt requires dep_file, and we're unable
      to map to a target (because its target isn't in the total set of targets in play,
      and we don't want to parse every BUILD file in the workspace just to find it).

    - missing_tgt_deps: a list of dep_tgt where src_tgt is missing a necessary transitive
                        dependency on dep_tgt.

    - missing_direct_tgt_deps: a list of dep_tgts where src_tgt is missing a direct dependency
                               on dep_tgt but has a transitive dep on it.

    All paths in the input and output are absolute.
    """
    analyzer = JvmDependencyAnalyzer(get_buildroot(),
                                     self.context.products.get_data('runtime_classpath'),
                                     self.context.products.get_data('product_deps_by_src'))
    def must_be_explicit_dep(dep):
      # We don't require explicit deps on the java runtime, so we shouldn't consider that
      # a missing dep.
      return (dep not in analyzer.bootstrap_jar_classfiles
              and not dep.startswith(DistributionLocator.cached().real_home))

    def target_or_java_dep_in_targets(target, targets):
      # We want to check if the target is in the targets collection
      #
      # However, for the special case of scala_library that has a java_sources
      # reference we're ok if that exists in targets even if the scala_library does not.

      if target in targets:
        return True
      elif target.is_scala:
        return any(t in targets for t in target.java_sources)
      else:
        return False

    # TODO: If recomputing these every time becomes a performance issue, memoize for
    # already-seen targets and incrementally compute for new targets not seen in a previous
    # partition, in this or a previous chunk.
    transitive_deps_by_target = analyzer.compute_transitive_deps_by_target(self.context.targets())

    # Find deps that are actual but not specified.
    missing_file_deps = OrderedSet()  # (src, src).
    missing_tgt_deps_map = defaultdict(list)  # (tgt, tgt) -> a list of (src, src) as evidence.
    missing_direct_tgt_deps_map = defaultdict(list)  # The same, but for direct deps.

    targets_by_file = analyzer.targets_by_file(self.context.targets())
    buildroot = get_buildroot()
    abs_srcs = [os.path.join(buildroot, src) for src in src_tgt.sources_relative_to_buildroot()]
    for src in abs_srcs:
      for actual_dep in filter(must_be_explicit_dep, actual_deps.get(src, [])):
        actual_dep_tgts = targets_by_file.get(actual_dep)
        # actual_dep_tgts is usually a singleton. If it's not, we only need one of these
        # to be in our declared deps to be OK.
        if actual_dep_tgts is None:
          missing_file_deps.add((src_tgt, actual_dep))
        elif not target_or_java_dep_in_targets(src_tgt, actual_dep_tgts):
          # Obviously intra-target deps are fine.
          canonical_actual_dep_tgt = next(iter(actual_dep_tgts))
          if actual_dep_tgts.isdisjoint(transitive_deps_by_target.get(src_tgt, [])):
            missing_tgt_deps_map[(src_tgt, canonical_actual_dep_tgt)].append((src, actual_dep))
          elif canonical_actual_dep_tgt not in src_tgt.dependencies:
            # The canonical dep is the only one a direct dependency makes sense on.
            missing_direct_tgt_deps_map[(src_tgt, canonical_actual_dep_tgt)].append(
                (src, actual_dep))

    return (list(missing_file_deps),
            missing_tgt_deps_map.items(),
            missing_direct_tgt_deps_map.items())
Example #6
    def attempt(context, phases, timer=None):
        """
      Attempts to reach the goals for the supplied phases, optionally recording phase timings and
      then logging them when all specified phases have completed.
    """

        start = timer.now() if timer else None
        executed = OrderedDict()

        # I'd rather do this in a finally block below, but some goals os.fork, and each fork causes
        # the finally block to run, printing goal timings multiple times instead of once at the end.
        def print_timings():
            if timer:
                timer.log('Timing report')
                timer.log('=============')
                for phase, timings in executed.items():
                    phase_time = None
                    for goal, times in timings.items():
                        if len(times) > 1:
                            timer.log(
                                '[%(phase)s:%(goal)s(%(numsteps)d)] %(timings)s -> %(total).3fs' % {
                                    'phase': phase,
                                    'goal': goal,
                                    'numsteps': len(times),
                                    'timings': ','.join('%.3fs' % time for time in times),
                                    'total': sum(times)
                                })
                        else:
                            timer.log('[%(phase)s:%(goal)s] %(total).3fs' % {
                                'phase': phase,
                                'goal': goal,
                                'total': sum(times)
                            })
                        if not phase_time:
                            phase_time = 0
                        phase_time += sum(times)
                    if len(timings) > 1:
                        timer.log('[%(phase)s] total: %(total).3fs' % {
                            'phase': phase,
                            'total': phase_time
                        })
                elapsed = timer.now() - start
                timer.log('total: %.3fs' % elapsed)

        try:
            # Prepare tasks roots to leaves and allow for goals introducing new goals in existing phases.
            tasks_by_goal = {}
            expanded = OrderedSet()
            prepared = set()
            round = 0
            while True:
                goals = list(Phase.execution_order(phases))
                if set(goals) == prepared:
                    break
                else:
                    round += 1
                    context.log.debug('Preparing goals in round %d' % round)
                    for goal in reversed(goals):
                        if goal not in prepared:
                            phase = Phase.of(goal)
                            expanded.add(phase)
                            context.log.debug('preparing: %s:%s' %
                                              (phase, goal.name))
                            prepared.add(goal)
                            task = goal.prepare(context)
                            tasks_by_goal[goal] = task

            # Execute phases leaves to roots
            context.log.debug('Executing goals in phases %s' %
                              ' -> '.join(map(str, reversed(expanded))))
            for phase in phases:
                Group.execute(phase,
                              tasks_by_goal,
                              context,
                              executed,
                              timer=timer)

            print_timings()
            return 0
        except (TaskError, GoalError) as e:
            message = '%s' % e
            if message:
                print('\nFAILURE: %s\n' % e)
            else:
                print('\nFAILURE\n')
            print_timings()
            return 1
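
For reference, the format strings in print_timings() yield a report shaped like this (phase names, goal names, and numbers are illustrative):

    Timing report
    =============
    [resolve:ivy] 2.341s
    [compile:scalac(3)] 1.023s,0.412s,0.377s -> 1.812s
    [compile:javac] 0.655s
    [compile] total: 2.467s
    total: 5.103s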
Example #7
 def create_collection(current_ref):
     module = ivy_info.modules_by_ref[current_ref]
     return OrderedSet([a.path for a in module.artifacts])
Example #8
 def calculate_tests(self, targets):
   tests = OrderedSet()
   for target in targets:
     if is_scala(target) and is_test(target):
       tests.update(os.path.join(target.target_base, test) for test in target.sources)
   return tests
Example #9
 def add_task(product_type, rule):
     if product_type not in serializable_rules:
         serializable_rules[product_type] = OrderedSet()
     serializable_rules[product_type].add(rule)
Example #10
    def setup_parser(self, parser, args):
        self.config = Config.load()
        Goal.add_global_options(parser)

        # We support attempting zero or more goals.  Multiple goals must be delimited from further
        # options and non goal args with a '--'.  The key permutations we need to support:
        # ./pants goal => goals
        # ./pants goal goals => goals
        # ./pants goal compile src/java/... => compile
        # ./pants goal compile -x src/java/... => compile
        # ./pants goal compile src/java/... -x => compile
        # ./pants goal compile run -- src/java/... => compile, run
        # ./pants goal compile run -- src/java/... -x => compile, run
        # ./pants goal compile run -- -x src/java/... => compile, run

        if not args:
            args.append('goals')

        if len(args) == 1 and args[0] in set(['-h', '--help', 'help']):

            def format_usage(usages):
                left_colwidth = 0
                for left, right in usages:
                    left_colwidth = max(left_colwidth, len(left))
                lines = []
                for left, right in usages:
                    lines.append('  %s%s%s' % (
                        left, ' ' * (left_colwidth - len(left) + 1), right))
                return '\n'.join(lines)

            usages = [
                ("%prog goal goals ([spec]...)", Phase('goals').description),
                ("%prog goal help [goal] ([spec]...)",
                 Phase('help').description),
                ("%prog goal [goal] [spec]...",
                 "Attempt goal against one or more targets."),
                ("%prog goal [goal] ([goal]...) -- [spec]...",
                 "Attempts all the specified goals."),
            ]
            parser.set_usage("\n%s" % format_usage(usages))
            parser.epilog = (
                "Either lists all installed goals, provides extra help for a goal or else "
                "attempts to achieve the specified goal for the listed targets."
                """
                       Note that target specs accept two special forms:
                         [dir]:  to include all targets in the specified directory
                         [dir]:: to include all targets found in all BUILD files recursively under
                                 the directory""")

            parser.print_help()
            sys.exit(0)
        else:
            goals, specs = Goal.parse_args(args)
            self.requested_goals = goals

            with self.run_tracker.new_workunit(name='setup',
                                               labels=[WorkUnit.SETUP]):
                # Bootstrap goals by loading any configured bootstrap BUILD files
                with self.check_errors(
                        'The following bootstrap_buildfiles cannot be loaded:'
                ) as error:
                    with self.run_tracker.new_workunit(name='bootstrap',
                                                       labels=[WorkUnit.SETUP]):
                        for path in self.config.getlist('goals',
                                                        'bootstrap_buildfiles',
                                                        default=[]):
                            try:
                                buildfile = BuildFile(
                                    get_buildroot(),
                                    os.path.relpath(path, get_buildroot()))
                                ParseContext(buildfile).parse()
                            except (TypeError, ImportError, TaskError,
                                    GoalError):
                                error(path, include_traceback=True)
                            except (IOError, SyntaxError):
                                error(path)
                # Now that we've parsed the bootstrap BUILD files, we know about the SCM system.
                self.run_tracker.run_info.add_scm_info()

                # Bootstrap user goals by loading any BUILD files implied by targets.
                spec_parser = SpecParser(self.root_dir)
                with self.check_errors(
                        'The following targets could not be loaded:') as error:
                    with self.run_tracker.new_workunit(name='parse',
                                                       labels=[WorkUnit.SETUP]):
                        for spec in specs:
                            try:
                                for target, address in spec_parser.parse(spec):
                                    if target:
                                        self.targets.append(target)
                                        # Force early BUILD file loading if this target is an alias that expands
                                        # to others.
                                        unused = list(target.resolve())
                                    else:
                                        siblings = Target.get_all_addresses(address.buildfile)
                                        prompt = ('did you mean' if len(siblings) == 1
                                                  else 'maybe you meant one of these')
                                        error('%s => %s?:\n    %s' %
                                              (address, prompt,
                                               '\n    '.join(str(a) for a in siblings)))
                            except (TypeError, ImportError, TaskError,
                                    GoalError):
                                error(spec, include_traceback=True)
                            except (IOError, SyntaxError,
                                    TargetDefinitionException):
                                error(spec)

            self.phases = [Phase(goal) for goal in goals]

            rcfiles = self.config.getdefault(
                'rcfiles', type=list, default=['/etc/pantsrc', '~/.pants.rc'])
            if rcfiles:
                rcfile = RcFile(rcfiles,
                                default_prepend=False,
                                process_default=True)

                # Break down the goals specified on the command line to the full set that will be run so we
                # can apply default flags to inner goal nodes.  Also break down goals by Task subclass and
                # register the task class hierarchy fully qualified names so we can apply defaults to
                # baseclasses.

                sections = OrderedSet()
                for phase in Engine.execution_order(self.phases):
                    for goal in phase.goals():
                        sections.add(goal.name)
                        for clazz in goal.task_type.mro():
                            if clazz == Task:
                                break
                            sections.add('%s.%s' %
                                         (clazz.__module__, clazz.__name__))

                augmented_args = rcfile.apply_defaults(sections, args)
                if augmented_args != args:
                    del args[:]
                    args.extend(augmented_args)
                    sys.stderr.write(
                        "(using pantsrc expansion: pants goal %s)\n" %
                        ' '.join(augmented_args))

            Phase.setup_parser(parser, args, self.phases)
Example #11
 def jar_dependencies(self):
     return OrderedSet(self.get_jar_dependencies())
Example #12
    def _index(self, roots):
        """Index from the given roots into the storage provided by the base class.

    This is an additive operation: any existing connections involving these nodes are preserved.
    """
        all_addresses = set()
        new_targets = list()

        # Index the ProductGraph.
        for product in roots:
            # We have a successful HydratedTargets value (for a particular input Spec).
            for hydrated_target in product.dependencies:
                target_adaptor = hydrated_target.adaptor
                address = target_adaptor.address
                all_addresses.add(address)
                if address not in self._target_by_address:
                    new_targets.append(self._index_target(target_adaptor))

        # Once the declared dependencies of all targets are indexed, inject their
        # additional "traversable_(dependency_)?specs".
        deps_to_inject = OrderedSet()
        addresses_to_inject = set()

        def inject(target, dep_spec, is_dependency):
            address = Address.parse(dep_spec,
                                    relative_to=target.address.spec_path)
            if not any(address == t.address for t in target.dependencies):
                addresses_to_inject.add(address)
                if is_dependency:
                    deps_to_inject.add((target.address, address))

        self.apply_injectables(new_targets)

        for target in new_targets:
            traversables = [
                target.compute_dependency_specs(payload=target.payload)
            ]
            # Only poke `traversable_dependency_specs` if a concrete implementation is defined
            # in order to avoid spurious deprecation warnings.
            if (type(target).traversable_dependency_specs
                    is not Target.traversable_dependency_specs):
                traversables.append(target.traversable_dependency_specs)
            for spec in itertools.chain(*traversables):
                inject(target, spec, is_dependency=True)

            traversables = [
                target.compute_injectable_specs(payload=target.payload)
            ]
            if type(target).traversable_specs is not Target.traversable_specs:
                traversables.append(target.traversable_specs)
            for spec in itertools.chain(*traversables):
                inject(target, spec, is_dependency=False)

        # Inject all addresses, then declare injected dependencies.
        self.inject_addresses_closure(addresses_to_inject)
        for target_address, dep_address in deps_to_inject:
            self.inject_dependency(dependent=target_address,
                                   dependency=dep_address)

        return all_addresses
Example #13
  def console_output(self, _):
    buildfiles = OrderedSet()
    address_mapper = self.context.address_mapper
    if self._dependees_types:
      base_paths = OrderedSet()
      for dependees_type in self._dependees_types:
        target_types = self.target_types_for_alias(dependees_type)
        # Try to find the SourceRoots for the given input type alias
        for target_type in target_types:
          try:
            roots = SourceRoot.roots(target_type)
            base_paths.update(roots)
          except KeyError:
            pass

      # TODO(John Sirois): BUG: This should not cause a failure, it should just force a slower full
      # scan.
      # TODO(John Sirois): BUG: The --type argument only limits the scan bases; it does not limit the
      # types of targets found under those bases, ie: we may have just limited our scan to roots
      # containing java_library, but those same roots likely also contain jvm_binary targets that
      # we do not wish to have in the results.  So the --type filtering needs to apply to the final
      # dependees_by_target map as well below.
      if not base_paths:
        raise TaskError(dedent("""\
                        No SourceRoot set for any of these target types: {}.
                        Please define a source root in BUILD file as:
                          source_root('<src-folder>', {})
                        """.format(' '.join(self._dependees_types),
                                   ', '.join(self._dependees_types))).strip())
      for base_path in base_paths:
        scanned = address_mapper.scan_buildfiles(get_buildroot(),
                                                 os.path.join(get_buildroot(), base_path),
                                                 spec_excludes=self._spec_excludes)
        buildfiles.update(scanned)
    else:
      buildfiles = address_mapper.scan_buildfiles(get_buildroot(),
                                                  spec_excludes=self._spec_excludes)

    build_graph = self.context.build_graph
    build_file_parser = self.context.build_file_parser

    dependees_by_target = defaultdict(set)
    for build_file in buildfiles:
      address_map = build_file_parser.parse_build_file(build_file)
      for address in address_map.keys():
        build_graph.inject_address_closure(address)
      for address in address_map.keys():
        target = build_graph.get_target(address)
        # TODO(John Sirois): tighten up the notion of targets written down in a BUILD by a
        # user vs. targets created by pants at runtime.
        target = self.get_concrete_target(target)
        for dependency in target.dependencies:
          dependency = self.get_concrete_target(dependency)
          dependees_by_target[dependency].add(target)

    roots = set(self.context.target_roots)
    if self._closed:
      for root in roots:
        yield root.address.spec

    for dependant in self.get_dependants(dependees_by_target, roots):
      yield dependant.address.spec
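
`get_dependants` is not shown here; a hedged reconstruction of the transitive walk it presumably performs over the `dependees_by_target` map built above (the real method may differ in detail):

    def get_dependants(dependees_by_target, roots):
        # Breadth-first walk from the roots along reverse-dependency edges,
        # yielding every target that transitively depends on a root.
        known = set(roots)
        frontier = set(roots)
        while frontier:
            frontier = {dependee
                        for tgt in frontier
                        for dependee in dependees_by_target.get(tgt, ())} - known
            known |= frontier
            yield from frontier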
Example #14
 def __init__(self, sources_rel_path, sources):
     self.sources_rel_path = sources_rel_path
     self.sources = assert_list(sources)
     self.excludes = OrderedSet()
Example #15
    def run_tests(self, fail_fast, test_targets, output_dir, coverage):
        test_registry = self._collect_test_targets(test_targets)
        if test_registry.empty:
            return TestResult.successful

        coverage.instrument(output_dir)

        def parse_error_handler(parse_error):
            # Just log and move on since the result is only used to characterize failures, and raising
            # an error here would just distract from the underlying test failures.
            self.context.log.error(
                'Error parsing test result file {path}: {cause}'.format(
                    path=parse_error.xml_path, cause=parse_error.cause))

        # The 'instrument_classpath' product below will be `None` if not set, and we'll default
        # back to the runtime_classpath.
        classpath_product = self.context.products.get_data('instrument_classpath')

        result = 0
        for batch_id, (properties,
                       batch) in enumerate(self._iter_batches(test_registry)):
            (workdir, platform, target_jvm_options, target_env_vars,
             concurrency, threads) = properties

            batch_output_dir = output_dir
            if self._batched:
                batch_output_dir = os.path.join(batch_output_dir,
                                                'batch-{}'.format(batch_id))

            run_modifications = coverage.run_modifications(batch_output_dir)

            extra_jvm_options = run_modifications.extra_jvm_options

            # Batches of test classes will likely exist within the same targets: dedupe them.
            relevant_targets = {
                test_registry.get_owning_target(t)
                for t in batch
            }

            complete_classpath = OrderedSet()
            complete_classpath.update(run_modifications.classpath_prepend)
            complete_classpath.update(JUnit.global_instance().runner_classpath(
                self.context))
            complete_classpath.update(
                self.classpath(relevant_targets,
                               classpath_product=classpath_product))

            distribution = JvmPlatform.preferred_jvm_distribution(
                [platform], self._strict_jvm_version)

            # Override cmdline args with values from junit_test() target that specify concurrency:
            args = self._args(fail_fast, batch_output_dir) + [u'-xmlreport']

            if concurrency is not None:
                args = remove_arg(args, '-default-parallel')
                if concurrency == JUnitTests.CONCURRENCY_SERIAL:
                    args = ensure_arg(args,
                                      '-default-concurrency',
                                      param='SERIAL')
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES:
                    args = ensure_arg(args,
                                      '-default-concurrency',
                                      param='PARALLEL_CLASSES')
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_METHODS:
                    args = ensure_arg(args,
                                      '-default-concurrency',
                                      param='PARALLEL_METHODS')
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES_AND_METHODS:
                    args = ensure_arg(args,
                                      '-default-concurrency',
                                      param='PARALLEL_CLASSES_AND_METHODS')

            if threads is not None:
                args = remove_arg(args, '-parallel-threads', has_param=True)
                args += ['-parallel-threads', str(threads)]

            batch_test_specs = [test.render_test_spec() for test in batch]
            with argfile.safe_args(batch_test_specs,
                                   self.get_options()) as batch_tests:
                with self.chroot(relevant_targets, workdir) as chroot:
                    self.context.log.debug('CWD = {}'.format(chroot))
                    self.context.log.debug('platform = {}'.format(platform))
                    with environment_as(**dict(target_env_vars)):
                        subprocess_result = self.spawn_and_wait(
                            relevant_targets,
                            executor=SubprocessExecutor(distribution),
                            distribution=distribution,
                            classpath=complete_classpath,
                            main=JUnit.RUNNER_MAIN,
                            jvm_options=self.jvm_options +
                            list(platform.args) + extra_jvm_options +
                            list(target_jvm_options),
                            args=args + batch_tests,
                            workunit_factory=self.context.new_workunit,
                            workunit_name='run',
                            workunit_labels=[WorkUnitLabel.TEST],
                            cwd=chroot,
                            synthetic_jar_dir=batch_output_dir,
                            create_synthetic_jar=self.synthetic_classpath,
                        )
                        self.context.log.debug(
                            'JUnit subprocess exited with result ({})'.format(
                                subprocess_result))
                        result += abs(subprocess_result)

                tests_info = self.parse_test_info(batch_output_dir,
                                                  parse_error_handler,
                                                  ['classname'])
                for test_name, test_info in tests_info.items():
                    test_item = Test(test_info['classname'], test_name)
                    test_target = test_registry.get_owning_target(test_item)
                    self.report_all_info_for_single_test(
                        self.options_scope, test_target, test_name, test_info)

                if result != 0 and fail_fast:
                    break

        if result == 0:
            return TestResult.successful

        # NB: If the TestRegistry fails to find the owning target of a failed test, the target key in
        # this dictionary will be None: helper methods in this block account for that.
        target_to_failed_test = parse_failed_targets(test_registry, output_dir,
                                                     parse_error_handler)

        def sort_owning_target(t):
            return t.address.spec if t else ''

        failed_targets = sorted(target_to_failed_test, key=sort_owning_target)
        error_message_lines = []
        if self._failure_summary:

            def render_owning_target(t):
                return t.address.reference() if t else '<Unknown Target>'

            for target in failed_targets:
                error_message_lines.append('\n{indent}{owner}'.format(
                    indent=' ' * 4, owner=render_owning_target(target)))
                for test in sorted(target_to_failed_test[target]):
                    error_message_lines.append(
                        '{indent}{classname}#{methodname}'.format(
                            indent=' ' * 8,
                            classname=test.classname,
                            methodname=test.methodname))
        error_message_lines.append(
            '\njava {main} ... exited non-zero ({code}); {failed} failed {targets}.'
            .format(main=JUnit.RUNNER_MAIN,
                    code=result,
                    failed=len(failed_targets),
                    targets=pluralize(len(failed_targets), 'target')))
        return TestResult(msg='\n'.join(error_message_lines),
                          rc=result,
                          failed_targets=failed_targets)
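
`remove_arg` and `ensure_arg` are Pants argument helpers not shown here; judging only from the call sites above, their behavior amounts to something like this sketch (signatures and semantics inferred, so treat it as an assumption):

    def remove_arg(args, flag, has_param=False):
        # Drop every occurrence of `flag` (and its value when has_param=True).
        out, skip = [], False
        for arg in args:
            if skip:
                skip = False
            elif arg == flag:
                skip = has_param
            else:
                out.append(arg)
        return out

    def ensure_arg(args, flag, param=None):
        # Guarantee `flag [param]` appears exactly once, with the given value.
        args = remove_arg(args, flag, has_param=param is not None)
        return args + [flag] + ([param] if param is not None else [])

    assert ensure_arg(['-xmlreport', '-default-concurrency', 'SERIAL'],
                      '-default-concurrency', param='PARALLEL_CLASSES') == [
        '-xmlreport', '-default-concurrency', 'PARALLEL_CLASSES']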
Example #16
 def add_task(product_type, rule):
     # TODO(#7311): make a defaultdict-like wrapper for OrderedDict if more widely used.
     if product_type not in serializable_rules:
         serializable_rules[product_type] = OrderedSet()
     serializable_rules[product_type].add(rule)
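
The TODO above asks for a defaultdict-like wrapper around OrderedDict so the get-or-create dance disappears; a minimal sketch of one (the class name is made up here, and OrderedSet is assumed in scope as in the example):

    from collections import OrderedDict

    class OrderedDefaultDict(OrderedDict):
        """Like collections.defaultdict, but with deterministic key order."""

        def __init__(self, default_factory, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.default_factory = default_factory

        def __missing__(self, key):
            # dict.__getitem__ calls this on a miss; materialize the default.
            value = self[key] = self.default_factory()
            return value

    serializable_rules = OrderedDefaultDict(OrderedSet)

    def add_task(product_type, rule):
        serializable_rules[product_type].add(rule)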
Example #17
 def get_all_deps():
   all_deps = OrderedSet()
   all_deps.update(Pants('3rdparty/python:antlr-%s' % antlr_version).resolve())
   if dependencies:
     all_deps.update(dependencies)
   return all_deps
Example #18
 def normalized_rules(self):
     rules = OrderedSet(rule for ruleset in self.rules.values()
                        for rule in ruleset)
     rules.update(self.roots)
     return self.NormalizedRules(rules, self.union_rules)
Example #19
 def calculate_genfiles(self, path, source):
     protobuf_parse = ProtobufParse(path, source)
     protobuf_parse.parse()
     return OrderedSet(self.calculate_java_genfiles(protobuf_parse))
Example #20
    def wrapper(func):
        if not inspect.isfunction(func):
            raise ValueError(
                'The @rule decorator must be applied innermost of all decorators.'
            )

        owning_module = sys.modules[func.__module__]
        source = inspect.getsource(func)
        beginning_indent = _get_starting_indent(source)
        if beginning_indent:
            source = "\n".join(line[beginning_indent:]
                               for line in source.split("\n"))
        module_ast = ast.parse(source)

        def resolve_type(name):
            resolved = (getattr(owning_module, name, None)
                        or owning_module.__builtins__.get(name, None))
            if resolved is None:
                raise ValueError(
                    f'Could not resolve type `{name}` in top level of module {owning_module.__name__}'
                )
            elif not isinstance(resolved, type):
                raise ValueError(
                    f'Expected a `type` constructor for `{name}`, but got: {resolved} (type '
                    f'`{type(resolved).__name__}`)')
            return resolved

        gets = OrderedSet()
        rule_func_node = assert_single_element(
            node for node in ast.iter_child_nodes(module_ast)
            if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef))
            and node.name == func.__name__)

        parents_table = {}
        for parent in ast.walk(rule_func_node):
            for child in ast.iter_child_nodes(parent):
                parents_table[child] = parent

        rule_visitor = _RuleVisitor()
        rule_visitor.visit(rule_func_node)
        gets.update(
            Get.create_statically_for_rule_graph(resolve_type(p),
                                                 resolve_type(s))
            for p, s in rule_visitor.gets)

        # Register dependencies for @console_rule/Goal.
        dependency_rules = ((optionable_rule(return_type.subsystem_cls),)
                            if is_goal_cls else None)

        # Set a default name for Goal classes if one is not explicitly provided
        if is_goal_cls and name is None:
            effective_name = return_type.name
        else:
            effective_name = name

        func.rule = TaskRule(
            return_type,
            tuple(parameter_types),
            func,
            input_gets=tuple(gets),
            dependency_rules=dependency_rules,
            cacheable=cacheable,
            name=effective_name,
        )

        return func
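
`_RuleVisitor` is not shown here, but the AST machinery above boils down to walking the rule function's body for `Get(Product, Subject(...))`-style calls. A stripped-down, self-contained illustration of that technique (not Pants' actual visitor):

    import ast
    import textwrap

    source = '''
    async def my_rule(n: int):
        x = await Get(A, B(n))
        y = await Get(C, D(n + 1))
        return x, y
    '''

    class GetVisitor(ast.NodeVisitor):
        def __init__(self):
            self.gets = []

        def visit_Call(self, node):
            # Match calls whose callee is the bare name `Get`, and record the
            # product/subject type names from its two positional args.
            if isinstance(node.func, ast.Name) and node.func.id == 'Get':
                self.gets.append((node.args[0].id, node.args[1].func.id))
            self.generic_visit(node)

    visitor = GetVisitor()
    visitor.visit(ast.parse(textwrap.dedent(source)))
    assert visitor.gets == [('A', 'B'), ('C', 'D')]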
Example #21
def enum(all_values):
    """A datatype which can take on a finite set of values. This method is experimental and unstable.

  Any enum subclass can be constructed with its create() classmethod. This method will use the first
  element of `all_values` as the default value, but enum classes can override this behavior by
  setting `default_value` in the class body.

  If `all_values` contains only strings, then each variant is made into an attribute on the
  generated enum class object. This allows code such as the following:

  class MyResult(enum(['success', 'not-success'])):
    pass

  MyResult.success # The same as: MyResult('success')
  MyResult.not_success # The same as: MyResult('not-success')

  Note that like with option names, hyphenated ('-') enum values are converted into attribute names
  with underscores ('_').

  :param Iterable all_values: A nonempty iterable of objects representing all possible values for
                              the enum.  This argument must be a finite, non-empty iterable with
                              unique values.
  :raises: :class:`ValueError`
  """
    # namedtuple() raises a ValueError if you try to use a field with a leading underscore.
    field_name = 'value'

    # This call to list() will eagerly evaluate any `all_values` which would otherwise be lazy, such
    # as a generator.
    all_values_realized = list(all_values)

    unique_values = OrderedSet(all_values_realized)
    if len(unique_values) == 0:
        raise ValueError("all_values must be a non-empty iterable!")
    elif len(unique_values) < len(all_values_realized):
        raise ValueError(
            "When converting all_values ({}) to a set, at least one duplicate "
            "was detected. The unique elements of all_values were: {}.".format(
                all_values_realized, list(unique_values)))

    class ChoiceDatatype(datatype([field_name]), ChoicesMixin):
        # Overridden from datatype() so providing an invalid variant is catchable as a TypeCheckError,
        # but more specific.
        type_check_error_type = EnumVariantSelectionError

        @memoized_classproperty
        def _singletons(cls):
            """Generate memoized instances of this enum wrapping each of this enum's allowed values.

      NB: The implementation of enum() should use this property as the source of truth for allowed
      values and enum instances from those values.
      """
            return OrderedDict((value, cls._make_singleton(value))
                               for value in all_values_realized)

        @classmethod
        def _make_singleton(cls, value):
            """
      We convert uses of the constructor to call create(), so we then need to go around __new__ to
      bootstrap singleton creation from datatype()'s __new__.
      """
            return super(ChoiceDatatype, cls).__new__(cls, value)

        @classproperty
        def _allowed_values(cls):
            """The values provided to the enum() type constructor, for use in error messages."""
            return list(cls._singletons.keys())

        def __new__(cls, value):
            """Create an instance of this enum.

      :param value: Use this as the enum value. If `value` is an instance of this class, return it,
                    otherwise it is checked against the enum's allowed values.
      """
            if isinstance(value, cls):
                return value

            if value not in cls._singletons:
                raise cls.make_type_error(
                    "Value {!r} must be one of: {!r}.".format(
                        value, cls._allowed_values))

            return cls._singletons[value]

        # TODO: figure out if this will always trigger on primitives like strings, and what situations
        # won't call this __eq__ (and therefore won't raise like we want). Also look into whether there
        # is a way to return something more conventional like `NotImplemented` here that maintains the
        # extra caution we're looking for.
        def __eq__(self, other):
            """Redefine equality to avoid accidentally comparing against a non-enum."""
            if other is None:
                return False
            if type(self) != type(other):
                raise self.make_type_error(
                    "when comparing {!r} against {!r} with type '{}': "
                    "enum equality is only defined for instances of the same enum class!"
                    .format(self, other,
                            type(other).__name__))
            return super(ChoiceDatatype, self).__eq__(other)

        # Redefine the canary so datatype __new__ doesn't raise.
        __eq__._eq_override_canary = None

        # NB: as noted in datatype(), __hash__ must be explicitly implemented whenever __eq__ is
        # overridden. See https://docs.python.org/3/reference/datamodel.html#object.__hash__.
        def __hash__(self):
            return super(ChoiceDatatype, self).__hash__()

        def resolve_for_enum_variant(self, mapping):
            """Return the object in `mapping` with the key corresponding to the enum value.

      `mapping` is a dict mapping enum variant value -> arbitrary object. All variant values must be
      provided.

      NB: The objects in `mapping` should be made into lambdas if lazy execution is desired, as this
      will "evaluate" all of the values in `mapping`.
      """
            keys = frozenset(mapping.keys())
            if keys != frozenset(self._allowed_values):
                raise self.make_type_error(
                    "pattern matching must have exactly the keys {} (was: {})".
                    format(self._allowed_values, list(keys)))
            match_for_variant = mapping[self.value]
            return match_for_variant

        @classproperty
        def all_variants(cls):
            """Iterate over all instances of this enum, in the declared order.

      NB: resolve_for_enum_variant() should be used instead of this method for performing
      conditional logic based on an enum instance's value.
      """
            return cls._singletons.values()

    # Python requires creating an explicit closure to save the value on each loop iteration.
    accessor_generator = lambda case: lambda cls: cls(case)
    for case in all_values_realized:
        if isinstance(case, six.string_types):
            accessor = classproperty(accessor_generator(case))
            attr_name = re.sub(r'-', '_', case)
            setattr(ChoiceDatatype, attr_name, accessor)

    # The generated class is the enum type itself (see the docstring's subclassing example).
    return ChoiceDatatype
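
Usage, following the docstring's own example (assumes the surrounding datatype()/classproperty machinery is importable; resolve_for_enum_variant requires a key for every variant):

    class MyResult(enum(['success', 'not-success'])):
        pass

    assert MyResult.success is MyResult('success')          # memoized singleton
    assert MyResult.not_success == MyResult('not-success')  # '-' became '_'

    message = MyResult('success').resolve_for_enum_variant({
        'success': 'it worked',
        'not-success': 'it failed',
    })
    assert message == 'it worked'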
Example #22
 def sections(self) -> List[str]:
   ret = OrderedSet()
   for cfg in self._configs:
     ret.update(cfg.sections())
   return list(ret)
Example #23
    def stage_artifacts(tgt, jar, version, tag, changelog):
      publications = OrderedSet()

      # TODO Remove this once we fix https://github.com/pantsbuild/pants/issues/1229
      if (not self.context.products.get('jars').has(tgt) and
          not self.get_options().individual_plugins):
        raise TaskError('Expected to find a primary artifact for {} but there was no jar for it.'
                        .format(tgt.address.reference()))

      # TODO Remove this guard once we fix https://github.com/pantsbuild/pants/issues/1229, there
      # should always be a primary artifact.
      if self.context.products.get('jars').has(tgt):
        self._copy_artifact(tgt, jar, version, typename='jars')
        publications.add(self.Publication(name=jar.name, classifier=None, ext='jar'))

        self.create_source_jar(tgt, jar, version)
        publications.add(self.Publication(name=jar.name, classifier='sources', ext='jar'))

        # don't request docs unless they are available for all transitive targets
        # TODO: doc products should be checked by an independent jar'ing task, and
        # conditionally enabled; see https://github.com/pantsbuild/pants/issues/568
        doc_jar = self.create_doc_jar(tgt, jar, version)
        if doc_jar:
          publications.add(self.Publication(name=jar.name, classifier='javadoc', ext='jar'))

        if self.publish_changelog:
          changelog_path = self.artifact_path(jar, version, suffix='-CHANGELOG', extension='txt')
          with safe_open(changelog_path, 'wb') as changelog_file:
            changelog_file.write(changelog.encode('utf-8'))
          publications.add(self.Publication(name=jar.name, classifier='CHANGELOG', ext='txt'))

      # Process any extra jars that might have been previously generated for this target, or a
      # target that it was derived from.
      for extra_product, extra_config in (self.get_options().publish_extras or {}).items():
        override_name = jar.name
        if 'override_name' in extra_config:
          # If the supplied string has a '{target_provides_name}' in it, replace it with the
          # current jar name. If not, the string will be taken verbatim.
          override_name = extra_config['override_name'].format(target_provides_name=jar.name)

        classifier = None
        suffix = ''
        if 'classifier' in extra_config:
          classifier = extra_config['classifier']
          suffix = "-{0}".format(classifier)

        extension = extra_config.get('extension', 'jar')

        extra_pub = self.Publication(name=override_name, classifier=classifier, ext=extension)

        # A lot of flexibility is allowed in parameterizing the extra artifact, ensure those
        # parameters lead to a unique publication.
        # TODO(John Sirois): Check this much earlier.
        if extra_pub in publications:
          raise TaskError("publish_extra for '{0}' must override one of name, classifier or "
                          "extension with a non-default value.".format(extra_product))

        # Build a list of targets to check. This list will consist of the current target, plus the
        # entire derived_from chain.
        target_list = [tgt]
        target = tgt
        while target.derived_from != target:
          target_list.append(target.derived_from)
          target = target.derived_from
        for cur_tgt in target_list:
          if self.context.products.get(extra_product).has(cur_tgt):
            self._copy_artifact(cur_tgt, jar, version, typename=extra_product, suffix=suffix,
                                extension=extension, override_name=override_name)
            publications.add(extra_pub)

      pom_path = self.artifact_path(jar, version, extension='pom')
      PomWriter(get_pushdb, tag).write(tgt, path=pom_path)
      return publications
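
# A hypothetical publish_extras value illustrating the keys the loop above
# reads ('override_name', 'classifier', 'extension'); the product name and
# values are invented for illustration.
publish_extras = {
    'extra_test_jar_example': {
        # '{target_provides_name}' is replaced with the primary jar's name;
        # any other string is taken verbatim.
        'override_name': '{target_provides_name}-extra_example',
        'classifier': 'classy',  # the artifact filename gains a '-classy' suffix
        'extension': 'jar',      # 'jar' is also the default when omitted
    },
}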
Example No. 24
 def create_source_template(base_id, includes=None, excludes=None):
     return TemplateData(
         base=base_id,
         includes='|'.join(OrderedSet(includes)) if includes else None,
         excludes='|'.join(OrderedSet(excludes)) if excludes else None,
     )
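
# A hedged usage sketch (names invented): includes/excludes are de-duplicated
# by OrderedSet and joined into '|'-separated pattern strings, while empty
# inputs stay None. TemplateData is assumed to allow attribute-style access,
# as in pants.base.generator.
template = create_source_template(
    'src.java',
    includes=['**/*.java', '**/*.scala', '**/*.java'],  # duplicate dropped
)
assert template.includes == '**/*.java|**/*.scala'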
Example No. 25
    def test_pants_contrib_case(self):
        def create_requirement_lib(name):
            return self.create_python_requirement_library(
                relpath=name,
                name=name,
                requirements=['{}==1.1.1'.format(name)])

        req1 = create_requirement_lib('req1')
        create_requirement_lib('req2')
        req3 = create_requirement_lib('req3')

        self.create_python_library(relpath='src/python/pants/base',
                                   name='base',
                                   dependencies=[
                                       'req1',
                                       'req2',
                                   ])
        self.create_python_binary(
            relpath='src/python/pants/bin',
            name='bin',
            entry_point='pants.bin.pants_loader:main',
            dependencies=[
                # Should be stripped in reduced_dependencies since pants_packaged provides these sources.
                'src/python/pants/base',
            ])
        pants_packaged = self.create_python_library(relpath='src/python/pants',
                                                    name='pants_packaged',
                                                    provides=dedent("""
      setup_py(
        name='pants_packaged',
        version='0.0.0'
      ).with_binaries(
        # Should be stripped in reduced_dependencies since pants_packaged provides this.
        pants_bin='src/python/pants/bin'
      )
      """))
        contrib_lib = self.create_python_library(
            relpath='contrib/lib/src/python/pants/contrib/lib',
            name='lib',
            dependencies=[
                'req3',
                # Should be stripped in reduced_dependencies since pants_packaged provides these sources.
                'src/python/pants/base',
            ])
        contrib_plugin = self.create_python_library(
            relpath='contrib/lib/src/python/pants/contrib',
            name='plugin',
            provides=dedent("""
      setup_py(
        name='contrib',
        version='0.0.0'
      )
      """),
            dependencies=[
                'contrib/lib/src/python/pants/contrib/lib',
                'src/python/pants:pants_packaged', 'req1'
            ])
        reduced_dependencies = self.dependency_calculator.reduced_dependencies(
            contrib_plugin)
        self.assertEqual(reduced_dependencies,
                         OrderedSet([contrib_lib, req3, pants_packaged, req1]))
Example No. 26
    def test_resolve_conflicted(self):
        # Create jar_libraries with different versions of the same dep: this will cause
        # a pre-ivy "eviction" in IvyUtils.generate_ivy, but the same case can be triggered
        # due to an ivy eviction where the declared version loses to a transitive version.
        losing_dep = JarDependency('com.google.guava', 'guava', '16.0')
        winning_dep = JarDependency('com.google.guava', 'guava', '16.0.1')
        losing_lib = self.make_target('//:a', JarLibrary, jars=[losing_dep])
        winning_lib = self.make_target('//:b', JarLibrary, jars=[winning_dep])
        # Confirm that the same artifact was added to each target.
        context = self.context(target_roots=[losing_lib, winning_lib])

        def artifact_path(name):
            return os.path.join(self.pants_workdir, 'ivy_artifact', name)

        def mock_ivy_info_for(conf):
            ivy_info = IvyInfo(conf)

            # Guava 16.0 would be evicted by Guava 16.0.1.  But in a real
            # resolve, it's possible that before it was evicted, it would
            # generate some resolution data.

            artifact_1 = artifact_path('bogus0')
            unused_artifact = artifact_path('unused')

            # Because guava 16.0 was evicted, it has no artifacts.
            guava_0 = IvyModule(
                IvyModuleRef('com.google.guava', 'guava', '16.0'), None, [])
            guava_1 = IvyModule(
                IvyModuleRef('com.google.guava', 'guava', '16.0.1'),
                artifact_1, [])
            ivy_info.add_module(guava_0)
            ivy_info.add_module(guava_1)

            artifact_dep_1 = artifact_path('bogus1')

            # fake#dep 16.0.0 was evicted in favor of 16.0.1, so like guava
            # 16.0 it resolved with no artifact.
            guava_dep_0 = IvyModule(
                IvyModuleRef('com.google.fake', 'dep', '16.0.0'), None,
                [guava_0.ref])
            guava_dep_1 = IvyModule(
                IvyModuleRef('com.google.fake', 'dep', '16.0.1'),
                artifact_dep_1, [guava_1.ref])

            ivy_info.add_module(guava_dep_0)
            ivy_info.add_module(guava_dep_1)

            # Add an unrelated module to ensure that it's not returned.
            unrelated_parent = IvyModuleRef('com.google.other', 'parent',
                                            '1.0')
            unrelated = IvyModule(
                IvyModuleRef('com.google.unrelated', 'unrelated', '1.0'),
                unused_artifact, [unrelated_parent])
            ivy_info.add_module(unrelated)

            return ivy_info

        symlink_map = {
            artifact_path('bogus0'): artifact_path('bogus0'),
            artifact_path('bogus1'): artifact_path('bogus1'),
            artifact_path('unused'): artifact_path('unused')
        }
        result = IvyResolveResult([], symlink_map, 'some-key-for-a-and-b', {})
        result._ivy_info_for = mock_ivy_info_for

        def mock_ivy_resolve(*args, **kwargs):
            return result

        task = self.create_task(context, workdir='unused')
        task._ivy_resolve = mock_ivy_resolve

        task.execute()
        compile_classpath = context.products.get_data('compile_classpath',
                                                      None)
        losing_cp = compile_classpath.get_for_target(losing_lib)
        winning_cp = compile_classpath.get_for_target(winning_lib)
        self.assertEqual(losing_cp, winning_cp)
        self.assertEqual(
            OrderedSet([(u'default', artifact_path(u'bogus0')),
                        (u'default', artifact_path(u'bogus1'))]), winning_cp)
Example No. 27
    def process_target(current_target):
      """
      :type current_target: pants.build_graph.target.Target
      """
      def get_target_type(tgt):
        def is_test(t):
          return isinstance(t, JUnitTests) or isinstance(t, PythonTests)
        if is_test(tgt):
          return ExportTask.SourceRootTypes.TEST
        else:
          if (isinstance(tgt, Resources) and
              tgt in resource_target_map and
                is_test(resource_target_map[tgt])):
            return ExportTask.SourceRootTypes.TEST_RESOURCE
          elif isinstance(tgt, Resources):
            return ExportTask.SourceRootTypes.RESOURCE
          else:
            return ExportTask.SourceRootTypes.SOURCE

      info = {
        'targets': [],
        'libraries': [],
        'roots': [],
        'id': current_target.id,
        'target_type': get_target_type(current_target),
        # NB: is_code_gen should be removed when export format advances to 1.1.0 or higher
        'is_code_gen': current_target.is_synthetic,
        'is_synthetic': current_target.is_synthetic,
        'pants_target_type': self._get_pants_target_alias(type(current_target)),
      }

      if not current_target.is_synthetic:
        info['globs'] = current_target.globs_relative_to_buildroot()
        if self.get_options().sources:
          info['sources'] = list(current_target.sources_relative_to_buildroot())

      info['transitive'] = current_target.transitive
      info['scope'] = str(current_target.scope)
      info['is_target_root'] = current_target in target_roots_set

      if isinstance(current_target, PythonRequirementLibrary):
        reqs = current_target.payload.get_field_value('requirements', set())
        """:type : set[pants.backend.python.python_requirement.PythonRequirement]"""
        info['requirements'] = [req.key for req in reqs]

      if isinstance(current_target, PythonTarget):
        interpreter_for_target = self._interpreter_cache.select_interpreter_for_targets(
          [current_target])
        if interpreter_for_target is None:
          raise TaskError('Unable to find suitable interpreter for {}'
                          .format(current_target.address))
        python_interpreter_targets_mapping[interpreter_for_target].append(current_target)
        info['python_interpreter'] = str(interpreter_for_target.identity)

      def iter_transitive_jars(jar_lib):
        """
        :type jar_lib: :class:`pants.backend.jvm.targets.jar_library.JarLibrary`
        :rtype: :class:`collections.Iterator` of
                :class:`pants.java.jar.M2Coordinate`
        """
        if classpath_products:
          jar_products = classpath_products.get_artifact_classpath_entries_for_targets((jar_lib,))
          for _, jar_entry in jar_products:
            coordinate = jar_entry.coordinate
            # We drop classifier and type_ since those fields are represented in the global
            # libraries dict and here we just want the key into that dict (see `_jar_id`).
            yield M2Coordinate(org=coordinate.org, name=coordinate.name, rev=coordinate.rev)

      target_libraries = OrderedSet()
      if isinstance(current_target, JarLibrary):
        target_libraries = OrderedSet(iter_transitive_jars(current_target))
      for dep in current_target.dependencies:
        info['targets'].append(dep.address.spec)
        if isinstance(dep, JarLibrary):
          for jar in dep.jar_dependencies:
            target_libraries.add(M2Coordinate(jar.org, jar.name, jar.rev))
          # Add all the jars pulled in by this jar_library
          target_libraries.update(iter_transitive_jars(dep))
        if isinstance(dep, Resources):
          resource_target_map[dep] = current_target

      if isinstance(current_target, ScalaLibrary):
        for dep in current_target.java_sources:
          info['targets'].append(dep.address.spec)
          process_target(dep)

      if isinstance(current_target, JvmTarget):
        info['excludes'] = [self._exclude_id(exclude) for exclude in current_target.excludes]
        info['platform'] = current_target.platform.name
        if hasattr(current_target, 'test_platform'):
          info['test_platform'] = current_target.test_platform.name

      info['roots'] = [{
        'source_root': source_root,
        'package_prefix': package_prefix,
      } for source_root, package_prefix in self._source_roots_for_target(current_target)]

      if classpath_products:
        info['libraries'] = [self._jar_id(lib) for lib in target_libraries]
      targets_map[current_target.address.spec] = info
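
# A hypothetical targets_map entry sketching the shape assembled above; the
# spec, ids, and values are invented for illustration and real output varies
# by target type and enabled options.
#
# targets_map['src/java/example:example'] == {
#   'targets': ['3rdparty/jvm:guava'],
#   'libraries': ['com.google.guava:guava:16.0.1'],
#   'roots': [{'source_root': 'src/java', 'package_prefix': 'org.example'}],
#   'id': 'src.java.example.example',
#   'target_type': 'SOURCE',
#   'is_code_gen': False,
#   'is_synthetic': False,
#   'pants_target_type': 'java_library',
#   'transitive': True,
#   'scope': 'default',
#   'is_target_root': True,
#   'excludes': [],
#   'platform': 'java8',
# }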
Example No. 28
 def set_base_classpath_for_group(self, group_key, classpath):
     # set the initial classpath of the elements of group_key to classpath.
     self._group_classpaths[group_key] = OrderedSet(reversed(classpath))
Example No. 29
 def javadeps(self):
     return OrderedSet()
Example No. 30
 def __init__(self, sources_rel_path=None, sources=None):
     super(ResourcesPayload, self).__init__(sources_rel_path,
                                            OrderedSet(sources))