Example #1
    def __init__(self, name, sources=None, exclusives=None):
        Target.__init__(self, name, exclusives=exclusives)

        self.add_labels("sources")
        self.target_base = SourceRoot.find(self)
        self.sources = None
        self._sources = sources or []
Example #2
 def __init__(self, requirement, dynamic=False, repository=None, name=None, version_filter=None):
   self._requirement = Requirement.parse(requirement)
   self._name = name or self._requirement.project_name
   self._dynamic = dynamic
   self._repository = repository
   self._version_filter = version_filter or (lambda: True)
   Target.__init__(self, self._name, False)
Example #3
 def execute(self):
   for buildfile in BuildFile.scan_buildfiles(self.root_dir):
     for address in Target.get_all_addresses(buildfile):
       target = Target.get(address)
       if hasattr(target, 'sources') and target.sources is not None:
         for sourcefile in target.sources:
           print sourcefile, address
Example #4
File: ide.py Project: DikangGu/commons
    def configure_target(target):
      if target not in analyzed:
        analyzed.add(target)

        self.has_scala = self.has_scala or is_scala(target)

        if isinstance(target, JavaLibrary) or isinstance(target, ScalaLibrary):
          # TODO(John Sirois): this does not handle test resources, make test resources 1st class
          # in ant build and punch this through to pants model
          resources = set()
          if target.resources:
            resources.update(target.resources)
          if target.binary_resources:
            resources.update(target.binary_resources)
          if resources:
            self.resource_extensions.update(Project.extract_resource_extensions(resources))
            configure_source_sets(ExportableJvmLibrary.RESOURCES_BASE_DIR, resources, is_test = False)

        if target.sources:
          test = is_test(target)
          self.has_tests = self.has_tests or test
          configure_source_sets(target.target_base, target.sources, is_test = test)

        siblings = Target.get_all_addresses(target.address.buildfile)
        return filter(accept_target, [ Target.get(a) for a in siblings if a != target.address ])
Example #5
  def test_sibling_references(self):
    with temporary_dir() as root_dir:
      buildfile = create_buildfile(root_dir, 'a', name='BUILD',
        content=dedent("""
          dependencies(name='util',
            dependencies=[
              jar(org='com.twitter', name='util', rev='0.0.1')
            ]
          )
        """).strip()
      )
      sibling = create_buildfile(root_dir, 'a', name='BUILD.sibling',
        content=dedent("""
          dependencies(name='util-ex',
            dependencies=[
              pants(':util'),
              jar(org='com.twitter', name='util-ex', rev='0.0.1')
            ]
          )
        """).strip()
      )
      ParseContext(buildfile).parse()

      utilex = Target.get(Address.parse(root_dir, 'a:util-ex', is_relative=False))
      utilex_deps = set(utilex.resolve())

      util = Target.get(Address.parse(root_dir, 'a:util', is_relative=False))
      util_deps = set(util.resolve())

      self.assertEquals(util_deps, util_deps.intersection(utilex_deps))
Example #6
  def __init__(self, name, sources=None):
    Target.__init__(self, name)

    self.add_labels('sources')
    self.target_base = SourceRoot.find(self)
    self.sources = None
    self._sources = sources or []
Example #7
  def __init__(self, name, dependencies, is_meta):
    Target.__init__(self, name, is_meta)

    self._injected_deps = []
    self.processed_dependencies = resolve(dependencies)

    self.add_label('internal')
    self.dependency_addresses = OrderedSet()
    self.dependencies = OrderedSet()
    self.internal_dependencies = OrderedSet()
    self.jar_dependencies = OrderedSet()

    # TODO(John Sirois): if meta targets were truly built outside parse contexts - we could instead
    # just use the more general check: if parsing: delay(doit) else: doit()
    # Fix how target _ids are built / addresses to not require a BUILD file - ie: support anonymous,
    # non-addressable targets - which is what meta-targets really are once created.
    if is_meta:
      # Meta targets are built outside any parse context - so update dependencies immediately
      self.update_dependencies(self.processed_dependencies)
    else:
      # Defer dependency resolution after parsing the current BUILD file to allow for forward
      # references
      self._post_construct(self.update_dependencies, self.processed_dependencies)

    self._post_construct(self.inject_dependencies)
Example #8
File: goal.py Project: soheilhy/commons
 def add_targets(self, error, dir, buildfile):
   try:
     self.targets.extend(Target.get(addr) for addr in Target.get_all_addresses(buildfile))
   except (TypeError, ImportError):
     error(dir, include_traceback=True)
   except (IOError, SyntaxError):
     error(dir)
Example #9
    def __init__(self, name, dependencies, is_meta):
        Target.__init__(self, name, is_meta)

        self.resolved_dependencies = OrderedSet()
        self.internal_dependencies = OrderedSet()
        self.jar_dependencies = OrderedSet()

        self.update_dependencies(dependencies)
Example #10
File: doc.py Project: CodeWarltz/commons
 def __init__(self, name, url_builder, exclusives=None):
   """
   :param string name: The name of this target, which combined with this
     build file defines the target :class:`twitter.pants.base.address.Address`.
   :param url_builder: Function that accepts a page target and an optional wiki config dict.
   :returns: A tuple of (alias, fully qualified url).
   """
   Target.__init__(self, name, exclusives=exclusives)
   self.url_builder = url_builder
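
The docstring above only states url_builder's contract; a minimal sketch of such a function follows. The wiki host and the 'space' config key are assumptions for illustration, not taken from the source.

def confluence_url_builder(page, config=None):
  # Hypothetical url_builder: accepts a page target and an optional wiki config
  # dict and returns an (alias, fully qualified url) tuple, as documented above.
  config = config or {}
  space = config.get('space', 'DOCS')   # assumed config key, not from the source
  alias = page.name                     # page targets carry a name like any Target
  return alias, 'https://wiki.example.com/display/%s/%s' % (space, alias)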
Example #11
 def __init__(self, name, username=None, password=None):
   """
     :name The name of these credentials
     :username Either a constant username value or else a callable that can fetch one
     :password Either a constant password value or else a callable that can fetch one
   """
   Target.__init__(self, name, False)
   self._username = username if callable(username) else lambda: username
   self._password = password if callable(password) else lambda: password
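
Because username and password accept either constants or callables, secret lookup can be deferred until the value is actually needed. A usage sketch, assuming this constructor belongs to a Credentials-style target (class name and environment variable are illustrative):

import os

creds = Credentials(
  name='nexus',
  username='deploy-bot',                           # constant; wrapped in a lambda internally
  password=lambda: os.environ['NEXUS_PASSWORD'],   # callable; evaluated only when needed
)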
Example #12
 def _find_targets(self):
   if len(self.context.target_roots) > 0:
     for target in self.context.target_roots:
       yield target
   else:
     for buildfile in BuildFile.scan_buildfiles(get_buildroot()):
       target_addresses = Target.get_all_addresses(buildfile)
       for target_address in target_addresses:
         yield Target.get(target_address)
Example #13
  def __init__(self, name, url, push_db, exclusives=None):
    """name: an identifier for the repo
    url: the url used to access the repo and retrieve artifacts or artifact metadata
    push_db: the data file associated with this repo that records artifact push history"""

    Target.__init__(self, name, exclusives=exclusives)

    self.name = name
    self.url = url
    self.push_db = push_db
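
A construction sketch for such a repo target, matching the docstring above; the Repository class name, URL, and push_db path are illustrative assumptions only:

public_repo = Repository(
  name='public',                                               # identifier for the repo
  url='https://artifactory.example.com/libs-releases-local',   # where artifacts and metadata live
  push_db='build-support/ivy/pushdb/publish.properties',       # records artifact push history
)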
Example #14
  def __init__(self, name, dependencies):
    """name: The name of this module target, addressable via pants via the portion of the spec
        following the colon
    dependencies: one or more JarDependencies this JarLibrary bundles or Pants pointing to other
        JarLibraries or JavaTargets"""

    assert len(dependencies) > 0, "At least one dependency must be specified"
    Target.__init__(self, name, False)

    self.dependencies = dependencies
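
The dependency forms this docstring mentions appear verbatim in Example #5; a minimal BUILD-file sketch reusing them (assuming the dependencies(...) alias shown there constructs this same kind of library; target names are made up):

dependencies(name='util-plus',
  dependencies=[
    jar(org='com.twitter', name='util', rev='0.0.1'),  # a JarDependency to bundle
    pants(':util-ex'),                                 # a Pants pointer to another target
  ]
)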
Example #15
File: filemap.py Project: alfss/commons
 def execute(self, expanded_target_addresses):
   buildroot = get_buildroot()
   if len(self.context.target_roots) > 0:
     for target in self.context.target_roots:
       self._execute_target(target, buildroot)
   else:
     for buildfile in BuildFile.scan_buildfiles(buildroot):
       target_addresses = Target.get_all_addresses(buildfile)
       for target_address in target_addresses:
         target = Target.get(target_address)
         self._execute_target(target, buildroot)
Example #16
 def _walk(self, walked, work, predicate = None):
   Target._walk(self, walked, work, predicate)
   for dep in self.dependencies:
     if isinstance(dep, Target) and not dep in walked:
       walked.add(dep)
       if not predicate or predicate(dep):
         additional_targets = work(dep)
         dep._walk(walked, work, predicate)
         if additional_targets:
           for additional_target in additional_targets:
             additional_target._walk(walked, work, predicate)
Example #17
 def __init__(self, requirement, name=None, repository=None, version_filter=None, use_2to3=False,
              compatibility=None, exclusives=None):
   # TODO(wickman) Allow PythonRequirements to be specified using pip-style vcs or url identifiers,
   # e.g. git+https or just http://...
   self._requirement = Requirement.parse(requirement)
   self._repository = repository
   self._name = name or self._requirement.project_name
   self._use_2to3 = use_2to3
   self._version_filter = version_filter or (lambda py, pl: True)
   # TODO(wickman) Unify this with PythonTarget .compatibility
   self.compatibility = compatibility or ['']
   Target.__init__(self, self._name, exclusives=exclusives)
Example #18
 def _parse_addresses(self, spec):
   if spec.endswith('::'):
     dir = self._get_dir(spec[:-len('::')])
     for buildfile in BuildFile.scan_buildfiles(self._root_dir, os.path.join(self._root_dir, dir)):
       for address in Target.get_all_addresses(buildfile):
         yield address
   elif spec.endswith(':'):
     dir = self._get_dir(spec[:-len(':')])
     for address in Target.get_all_addresses(BuildFile(self._root_dir, dir)):
       yield address
   else:
     yield Address.parse(self._root_dir, spec)
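
The three spec shapes the method distinguishes can be exercised as in the sketch below; the paths and the `command` instance are illustrative only:

specs = [
  'src/java/foo::',    # every address in every BUILD file under src/java/foo, recursively
  'src/java/foo:',     # every address defined in src/java/foo/BUILD
  'src/java/foo:bar',  # exactly the single address src/java/foo:bar
]
for spec in specs:
  for address in command._parse_addresses(spec):  # `command` is a hypothetical instance
    print(address)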
Example #19
  def __init__(self, name, dependencies):
    """name: The name of this module target, addressable via pants via the portion of the spec
        following the colon
    dependencies: one or more JarDependencies this JarLibrary bundles or Pants pointing to other
        JarLibraries or JavaTargets"""

    assert len(dependencies) > 0, "At least one dependency must be specified"
    Target.__init__(self, name, False)
    self.add_label('jars')
    self.dependencies = resolve(dependencies)
    self.dependency_addresses = set()
    for dependency in self.dependencies:
      if hasattr(dependency, 'address'):
        self.dependency_addresses.add(dependency.address)
Example #20
  def __init__(self, spec):
    # it's critical the spec is parsed 1st, the results are needed elsewhere in constructor flow
    parse_context = ParseContext.locate()

    def parse_address():
      if spec.startswith(':'):
        # the :[target] could be in a sibling BUILD - so parse using the canonical address
        pathish = "%s:%s" % (parse_context.buildfile.canonical_relpath, spec[1:])
        return Address.parse(parse_context.buildfile.root_dir, pathish, False)
      else:
        return Address.parse(parse_context.buildfile.root_dir, spec, False)

    self.address = parse_address()

    Target.__init__(self, self.address.target_name, False)
Example #21
  def __init__(self, context):
    ConsoleTask.__init__(self, context)

    self._print_uptodate = context.options.check_deps_print_uptodate
    self.repos = context.config.getdict('jar-publish', 'repos')
    self._artifacts_to_targets = {}
    all_addresses = (address for buildfile in BuildFile.scan_buildfiles(get_buildroot())
                     for address in Target.get_all_addresses(buildfile))
    for address in all_addresses:
      target = Target.get(address)
      if target.is_exported:
        provided_jar, _, _ = target.get_artifact_info()
        artifact = (provided_jar.org, provided_jar.name)
        if not artifact in self._artifacts_to_targets:
          self._artifacts_to_targets[artifact] = target
Example #22
File: paths.py Project: alfss/commons
  def _find_path(cls, from_target, to_target, log):
    from_target, to_target = cls._coerce_to_targets(from_target, to_target)

    log.debug('Looking for path from %s to %s' % (from_target.address.reference(), to_target.address.reference()))

    queue = [([from_target], 0)]
    while True:
      if not queue:
        print('no path found from %s to %s!' % (from_target.address.reference(), to_target.address.reference()))
        break

      path, indent = queue.pop(0)
      next_target = path[-1]
      if next_target in cls.examined_targets:
        continue
      cls.examined_targets.add(next_target)

      log.debug('%sexamining %s' % ('  ' * indent, next_target))

      if next_target == to_target:
        print('')
        for target in path:
          print('%s' % target.address.reference())
        break

      if hasattr(next_target, 'dependency_addresses'):
        for address in next_target.dependency_addresses:
          dep = Target.get(address)
          queue.append((path + [dep], indent + 1))
Example #23
File: list.py Project: adamsxu/commons
  def execute(self):
    if self.options.only_provides:
      def extract_artifact_id(target):
        provided_jar = target._as_jar_dependency()
        return "%s%s%s" % (provided_jar.org, self.options.separator, provided_jar.name)

      extractors = dict(
        address = lambda target: str(target.address),
        artifact_id = extract_artifact_id,
        repo_name = lambda target: target.provides.repo.name,
        repo_url = lambda target: target.provides.repo.url,
        repo_db = lambda target: target.provides.repo.push_db,
      )

      column_extractors = [ extractors[col] for col in (self.options.provides_columns.split(',')) ]
      print_fn = lambda address: self._print_provides(column_extractors, address)
    elif self.options.documented:
      def print_documented(address):
        target = Target.get(address)
        if target.description:
          return '%s\n  %s' % (address, '\n  '.join(target.description.strip().split('\n')))
      print_fn = print_documented
    else:
      print_fn = lambda address: str(address)

    for buildfile in self.buildfiles:
      for address in Target.get_all_addresses(buildfile):
        line = print_fn(address)
        if line:
          print(line)
Example #24
  def _find_paths(cls, from_target, to_target, log, find_all):
    from_target, to_target = cls._coerce_to_targets(from_target, to_target)

    log.debug('Looking for path from %s to %s' % (from_target.address.reference(), to_target.address.reference()))

    paths_found = False

    queue = [([from_target], 0)]
    while True:
      if not queue:
        if not paths_found:
          print 'no path found from %s to %s!' % (from_target.address.reference(), to_target.address.reference())
        break

      path, indent = queue.pop(0)
      next_target = path[-1]
      log.debug('%sexamining %s' % ('  ' * indent, next_target))

      if next_target == to_target:
        if paths_found:
          print ''
        else:
          paths_found = True
        for target in path:
          print '%s' % target.address.reference()
        if find_all:
          continue
        else:
          break

      if hasattr(next_target, 'dependency_addresses'):
        for address in next_target.dependency_addresses:
          dep = Target.get(address)
          queue.append((path + [dep], indent + 1))
Example #25
File: py.py Project: adamsxu/commons
  def __init__(self, root_dir, parser, argv):
    Command.__init__(self, root_dir, parser, argv)

    if not self.args:
      self.error("A spec argument is required")

    targets = []

    for k in range(len(self.args)):
      arg = self.args[0]
      if arg == '--':
        self.args.pop(0)
        break

      try:
        address = Address.parse(root_dir, arg)
        target = Target.get(address)
      except Exception as e:
        break
      if not target:
        break

      targets.append(target)
      self.args.pop(0)

      # stop at PythonBinary target
      if isinstance(target, PythonBinary):
        break

    self.target = targets.pop(0) if targets else None
    self.extra_targets = targets

    if self.target is None:
      self.error('No valid target specified!')
Example #26
 def resolve(self):
   # De-reference this pants pointer to an actual parsed target.
   resolved = Target.get(self.address)
   if not resolved:
     raise KeyError("Failed to find target for: %s" % self.address)
   for dep in resolved.resolve():
     yield dep
Example #27
  def __init__(self, spec):
    # it's critical the spec is parsed 1st, the results are needed elsewhere in constructor flow
    parse_context = ParseContext.locate()

    def parse_address():
      if spec.startswith(':'):
        # the :[target] could be in a sibling BUILD - so parse using the canonical address
        pathish = "%s:%s" % (parse_context.buildfile.canonical_relpath, spec[1:])
        return Address.parse(parse_context.buildfile.root_dir, pathish, False)
      else:
        return Address.parse(parse_context.buildfile.root_dir, spec, False)

    self.address = parse_address()
    # We must disable the re-init check, because our funky __getattr__ breaks it.
    # We're not involved in any multiple inheritance, so it's OK to disable it here.
    Target.__init__(self, self.address.target_name, False, reinit_check=False)
Example #28
  def __init__(self, root_dir, parser, argv):
    Command.__init__(self, root_dir, parser, argv)

    if not self.args:
      self.error("A spec argument is required")

    try:
      specs_end = self.args.index('--')
      if len(self.args) > specs_end:
        self.build_args = self.args[specs_end+1:len(self.args)+1]
      else:
        self.build_args = []
    except ValueError:
      specs_end = 1
      self.build_args = self.args[1:] if len(self.args) > 1 else []

    self.targets = OrderedSet()
    for spec in self.args[0:specs_end]:
      try:
        address = Address.parse(root_dir, spec)
      except:
        self.error("Problem parsing spec %s: %s" % (spec, traceback.format_exc()))

      try:
        target = Target.get(address)
      except:
        self.error("Problem parsing BUILD target %s: %s" % (address, traceback.format_exc()))

      if not target:
        self.error("Target %s does not exist" % address)
      self.targets.update(tgt for tgt in target.resolve() if is_concrete(tgt))
Example #29
  def _parse_targets(self, targets, root_dir):
    for spec in self.args:
      try:
        address = Address.parse(root_dir, spec)
      except:
        self.error("Problem parsing spec %s: %s" % (spec, traceback.format_exc()))

      try:
        target = Target.get(address)
      except:
        self.error("Problem parsing target %s: %s" % (address, traceback.format_exc()))

      if address.is_meta:
        print("target is meta")
        target = target.do_in_context(lambda: bang.extract_target([target], None))
      if not IvyResolve._is_resolvable(target):
        self.error("Target: %s is not resolvable" % address)

      targets.add(target)

    if not self.intransitive:
      def add_targets(ttarget):
        if hasattr(ttarget, 'internal_dependencies'):
          for dep in ttarget.internal_dependencies:
            if IvyResolve._is_resolvable(dep):
              targets.add(dep)
            else:
              print("skipping %s as it's not ivy resolvable" % dep.name)
      target.walk(add_targets)

    return targets
Example #30
 def __getattr__(self, name):
   try:
     return Target.__getattribute__(self, name)
   except AttributeError as e:
     try:
       return getattr(self.get(), name)
     except (AttributeError, LookupError):
       raise e
Example #31
    def java_sources(self):
        if self._raw_java_sources is not None:
            self._java_sources = list(
                Target.resolve_all(maybe_list(self._raw_java_sources, Target),
                                   JavaLibrary))

            self._raw_java_sources = None

            # TODO(John Sirois): reconsider doing this auto-linking.
            # We have circular java/scala dep, add an inbound dependency edge from java to scala in this
            # case to force scala compilation to precede java - since scalac supports generating java
            # stubs for these cycles and javac does not, this is both necessary and always correct.
            for java_target in self._java_sources:
                java_target.update_dependencies([self])
        return self._java_sources
Example #32
    def __init__(self, spec, exclusives=None):
        # it's critical the spec is parsed 1st, the results are needed elsewhere in constructor flow
        parse_context = ParseContext.locate()

        def parse_address():
            if spec.startswith(':'):
                # the :[target] could be in a sibling BUILD - so parse using the canonical address
                pathish = "%s:%s" % (parse_context.buildfile.canonical_relpath,
                                     spec[1:])
                return Address.parse(parse_context.buildfile.root_dir, pathish,
                                     False)
            else:
                return Address.parse(parse_context.buildfile.root_dir, spec,
                                     False)

        self.address = parse_address()

        # We must disable the re-init check, because our funky __getattr__ breaks it.
        # We're not involved in any multiple inheritance, so it's OK to disable it here.

        Target.__init__(self,
                        self.address.target_name,
                        reinit_check=False,
                        exclusives=exclusives)
Example #33
File: setup_py.py Project: xianxu/pants
    def __init__(self, run_tracker, root_dir, parser, argv):
        Command.__init__(self, run_tracker, root_dir, parser, argv)

        if not self.args:
            self.error("A spec argument is required")

        address = Address.parse(root_dir, self.args[0])
        self.target = Target.get(address)

        if self.target is None:
            self.error('%s is not a valid target!' % self.args[0])

        if not self.target.provides:
            self.error('Target must provide an artifact.')

        self.dependencies = self.minified_dependencies(self.target)
Example #34
File: py.py Project: cscotta/commons
    def __init__(self, root_dir, parser, argv):
        Command.__init__(self, root_dir, parser, argv)

        if not self.args:
            self.error("A spec argument is required")

        try:
            address = Address.parse(root_dir, self.args[0])
            target = Target.get(address)
        except Exception as e:
            self.error("Invalid target in %s (%s)" % (self.args[0], str(e)))

        if not target:
            self.error("Target %s does not exist" % address)
        self.target = target
        self.args.pop(0)
Example #35
File: build.py Project: cscotta/commons
    def __init__(self, root_dir, parser, argv):
        Command.__init__(self, root_dir, parser, argv)

        if not self.args:
            self.error("A spec argument is required")

        try:
            specs_end = self.args.index('--')
            if len(self.args) > specs_end:
                self.build_args = self.args.__getslice__(
                    specs_end + 1,
                    len(self.args) + 1)
            else:
                self.build_args = []
        except ValueError:
            specs_end = 1
            self.build_args = self.args[1:] if len(self.args) > 1 else []

        self.targets = OrderedSet()
        for spec in self.args.__getslice__(0, specs_end):
            try:
                address = Address.parse(root_dir, spec)
            except:
                self.error("Problem parsing spec %s: %s" %
                           (spec, traceback.format_exc()))

            try:
                target = Target.get(address)
            except:
                self.error("Problem parsing BUILD target %s: %s" %
                           (address, traceback.format_exc()))

            try:
                InternalTarget.check_cycles(target)
            except CycleException as e:
                self.error("Target contains an internal dependency cycle: %s" %
                           e)

            if not target:
                self.error("Target %s does not exist" % address)
            if not target.address.is_meta:
                target.address.is_meta = self.options.is_meta or address.is_meta
            self.targets.add(target)

        self.is_ide = self.options.is_ide
        self.ide_transitivity = self.options.ide_transitivity
Example #36
  def _find_paths_rec(cls, from_target, to_target):
    if from_target == to_target:
      return [[from_target]]

    if from_target not in cls.all_paths or to_target not in cls.all_paths[from_target]:
      paths = []
      if hasattr(from_target, 'dependency_addresses'):
        for address in from_target.dependency_addresses:
          dep = Target.get(address)
          for path in cls._find_paths_rec(dep, to_target):
            new_path = copy.copy(path)
            new_path.insert(0, from_target)
            paths.append(new_path)

      cls.all_paths[from_target][to_target] = paths

    return cls.all_paths[from_target][to_target]
Example #37
    def get_pytest_eggs(root):
        specs = ["3rdparty/python:pytest"]
        eggs = []
        for spec in specs:
            address = Address.parse(root, spec)
            target = Target.get(address)

            def add_eggs(target):
                deps = []
                for dep in target.dependencies:
                    if isinstance(dep, PythonEgg):
                        for egg in dep.eggs:
                            eggs.append(egg)
                    else:
                        deps.append(dep)
                return deps

            target.walk(lambda t: add_eggs(t))
        return eggs
Example #38
  def _parse_targets(self, root_dir):
    targets = OrderedSet()
    for spec in self.args:
      try:
        address = Address.parse(root_dir, spec)
      except:
        self.error("Problem parsing spec %s: %s" % (spec, traceback.format_exc()))

      try:
        target = Target.get(address)
      except:
        self.error("Problem parsing target %s: %s" % (address, traceback.format_exc()))

      if not Doc._is_documentable(target):
        self.error("Target: %s is not documentable" % address)

      targets.add(target)

    return targets
Example #39
    def __init__(self, root_dir, parser, argv):
        Command.__init__(self, root_dir, parser, argv)

        if len(self.args) is not 1:
            self.error("Exactly one BUILD address is required.")

        spec = self.args[0]
        try:
            address = Address.parse(root_dir, spec)
        except IOError:
            self.error("Problem parsing spec %s: %s" %
                       (spec, traceback.format_exc()))

        try:
            self.target = Target.get(address)
        except (ImportError, SyntaxError, TypeError):
            self.error("Problem parsing BUILD target %s: %s" %
                       (address, traceback.format_exc()))

        if not self.target:
            self.error("Target %s does not exist" % address)
Example #40
    def _run_lint(self, target, args):
        chroot = PythonChroot(target,
                              self.root_dir,
                              extra_targets=[
                                  Target.get(
                                      Address.parse(self.root_dir,
                                                    '3rdparty/python:pylint'))
                              ])
        builder = chroot.dump()
        builder.info().entry_point = 'pylint.lint'
        builder.freeze()

        interpreter_args = [
            '--rcfile=%s' %
            os.path.join(self.root_dir, 'build-support', 'pylint', 'pylint.rc')
        ]
        interpreter_args.extend(args or [])
        sources = OrderedSet([])
        target.walk(lambda trg: sources.update(trg.sources if hasattr(
            trg, 'sources') and trg.sources is not None else []))
        pex = PEX(builder.path())
        pex.run(args=interpreter_args + list(sources), with_chroot=True)
Example #41
  def execute(self):
    if self.options.only_provides:
      def extract_artifact_id(target):
        provided_jar = target._as_jar_dependency()
        return "%s%s%s" % (provided_jar.org, self.options.separator, provided_jar.name)

      extractors = dict(
        address = lambda target: str(target.address),
        artifact_id = extract_artifact_id,
        repo_name = lambda target: target.provides.repo.name,
        repo_url = lambda target: target.provides.repo.url,
        repo_db = lambda target: target.provides.repo.push_db,
      )

      column_extractors = [ extractors[col] for col in (self.options.provides_columns.split(',')) ]
      print_fn = lambda address: self._print_provides(column_extractors, address)
    else:
      print_fn = lambda address: str(address)

    for buildfile in self.buildfiles:
      for address in Target.get_all_addresses(buildfile):
        line = print_fn(address)
        if line:
          print line
Example #42
    def setup_parser(self, parser, args):
        self.config = Config.load()
        Goal.add_global_options(parser)

        # We support attempting zero or more goals.  Multiple goals must be delimited from further
        # options and non goal args with a '--'.  The key permutations we need to support:
        # ./pants goal => goals
        # ./pants goal goals => goals
        # ./pants goal compile src/java/... => compile
        # ./pants goal compile -x src/java/... => compile
        # ./pants goal compile src/java/... -x => compile
        # ./pants goal compile run -- src/java/... => compile, run
        # ./pants goal compile run -- src/java/... -x => compile, run
        # ./pants goal compile run -- -x src/java/... => compile, run

        if not args:
            args.append('goals')

        if len(args) == 1 and args[0] in set(['-h', '--help', 'help']):

            def format_usage(usages):
                left_colwidth = 0
                for left, right in usages:
                    left_colwidth = max(left_colwidth, len(left))
                lines = []
                for left, right in usages:
                    lines.append('  %s%s%s' %
                                 (left, ' ' *
                                  (left_colwidth - len(left) + 1), right))
                return '\n'.join(lines)

            usages = [
                ("%prog goal goals ([spec]...)", Phase('goals').description),
                ("%prog goal help [goal] ([spec]...)",
                 Phase('help').description),
                ("%prog goal [goal] [spec]...",
                 "Attempt goal against one or more targets."),
                ("%prog goal [goal] ([goal]...) -- [spec]...",
                 "Attempts all the specified goals."),
            ]
            parser.set_usage("\n%s" % format_usage(usages))
            parser.epilog = (
                "Either lists all installed goals, provides extra help for a goal or else "
                "attempts to achieve the specified goal for the listed targets."
                """
                       Note that target specs accept two special forms:
                         [dir]:  to include all targets in the specified directory
                         [dir]:: to include all targets found in all BUILD files recursively under
                                 the directory""")

            parser.print_help()
            sys.exit(0)
        else:
            goals, specs = Goal.parse_args(args)
            self.requested_goals = goals

            with self.run_tracker.new_workunit(name='setup',
                                               labels=[WorkUnit.SETUP]):
                # Bootstrap goals by loading any configured bootstrap BUILD files
                with self.check_errors(
                        'The following bootstrap_buildfiles cannot be loaded:'
                ) as error:
                    with self.run_tracker.new_workunit(name='bootstrap',
                                                       labels=[WorkUnit.SETUP
                                                               ]):
                        for path in self.config.getlist('goals',
                                                        'bootstrap_buildfiles',
                                                        default=[]):
                            try:
                                buildfile = BuildFile(
                                    get_buildroot(),
                                    os.path.relpath(path, get_buildroot()))
                                ParseContext(buildfile).parse()
                            except (TypeError, ImportError, TaskError,
                                    GoalError):
                                error(path, include_traceback=True)
                            except (IOError, SyntaxError):
                                error(path)
                # Now that we've parsed the bootstrap BUILD files, and know about the SCM system.
                self.run_tracker.run_info.add_scm_info()

                # Bootstrap user goals by loading any BUILD files implied by targets.
                spec_parser = SpecParser(self.root_dir)
                with self.check_errors(
                        'The following targets could not be loaded:') as error:
                    with self.run_tracker.new_workunit(name='parse',
                                                       labels=[WorkUnit.SETUP
                                                               ]):
                        for spec in specs:
                            try:
                                for target, address in spec_parser.parse(spec):
                                    if target:
                                        self.targets.append(target)
                                        # Force early BUILD file loading if this target is an alias that expands
                                        # to others.
                                        unused = list(target.resolve())
                                    else:
                                        siblings = Target.get_all_addresses(
                                            address.buildfile)
                                        prompt = 'did you mean' if len(
                                            siblings
                                        ) == 1 else 'maybe you meant one of these'
                                        error('%s => %s?:\n    %s' %
                                              (address, prompt, '\n    '.join(
                                                  str(a) for a in siblings)))
                            except (TypeError, ImportError, TaskError,
                                    GoalError):
                                error(spec, include_traceback=True)
                            except (IOError, SyntaxError,
                                    TargetDefinitionException):
                                error(spec)

            self.phases = [Phase(goal) for goal in goals]

            rcfiles = self.config.getdefault('rcfiles', type=list, default=[])
            if rcfiles:
                rcfile = RcFile(rcfiles,
                                default_prepend=False,
                                process_default=True)

                # Break down the goals specified on the command line to the full set that will be run so we
                # can apply default flags to inner goal nodes.  Also break down goals by Task subclass and
                # register the task class hierarchy fully qualified names so we can apply defaults to
                # baseclasses.

                sections = OrderedSet()
                for phase in Engine.execution_order(self.phases):
                    for goal in phase.goals():
                        sections.add(goal.name)
                        for clazz in goal.task_type.mro():
                            if clazz == Task:
                                break
                            sections.add('%s.%s' %
                                         (clazz.__module__, clazz.__name__))

                augmented_args = rcfile.apply_defaults(sections, args)
                if augmented_args != args:
                    del args[:]
                    args.extend(augmented_args)
                    sys.stderr.write(
                        "(using pantsrc expansion: pants goal %s)\n" %
                        ' '.join(augmented_args))

            Phase.setup_parser(parser, args, self.phases)
Example #43
  def __init__(self, target_base, name, is_meta = False):
    Target.__init__(self, name, is_meta)

    # TODO(John Sirois): rationalize constructor parameter and find_target_base
    self.target_base = target_base if target_base else self.find_target_base()
Example #44
 def get_targets():
   for address in Command.scan_addresses(root_dir):
     target = Target.get(address)
     if IvyResolve._is_resolvable(target):
       yield target
Example #45
    def execute(self, targets):
        scala_targets = filter(lambda t: has_sources(t, '.scala'), targets)
        if not scala_targets:
            return

        # Get the exclusives group for the targets to compile.
        # Group guarantees that they'll be a single exclusives key for them.
        egroups = self.context.products.get_data('exclusives_groups')
        group_id = egroups.get_group_key_for_target(scala_targets[0])

        # Add resource dirs to the classpath for us and for downstream tasks.
        for conf in self._confs:
            egroups.update_compatible_classpaths(group_id,
                                                 [(conf, self._resources_dir)])

        # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
        cp = egroups.get_classpath_for_group(group_id)

        # Add (only to the local copy) classpath entries necessary for our compiler plugins.
        for conf in self._confs:
            for jar in self._zinc_utils.plugin_jars():
                cp.insert(0, (conf, jar))

        # Invalidation check. Everything inside the with block must succeed for the
        # invalid targets to become valid.
        with self.invalidated(scala_targets,
                              invalidate_dependents=True,
                              partition_size_hint=self._partition_size_hint
                              ) as invalidation_check:
            if invalidation_check.invalid_vts and not self.dry_run:
                invalid_targets = [
                    vt.target for vt in invalidation_check.invalid_vts
                ]
                # The analysis for invalid and deleted sources is no longer valid.
                invalid_sources_by_target = self._compute_sources_by_target(
                    invalid_targets)
                invalid_sources = list(
                    itertools.chain.from_iterable(
                        invalid_sources_by_target.values()))
                deleted_sources = self._get_deleted_sources()

                # Work in a tmpdir so we don't stomp the main analysis files on error.
                # The tmpdir is cleaned up in a shutdown hook, because background work
                # may need to access files we create here even after this method returns.
                self._ensure_analysis_tmpdir()
                tmpdir = os.path.join(self._analysis_tmpdir, str(uuid.uuid4()))
                os.mkdir(tmpdir)
                valid_analysis_tmp = os.path.join(tmpdir, 'valid_analysis')
                newly_invalid_analysis_tmp = os.path.join(
                    tmpdir, 'newly_invalid_analysis')
                invalid_analysis_tmp = os.path.join(tmpdir, 'invalid_analysis')
                if ZincUtils.is_nonempty_analysis(self._analysis_file):
                    with self.context.new_workunit(name='prepare-analysis'):
                        if self._zinc_utils.run_zinc_split(
                                self._analysis_file,
                            ((invalid_sources + deleted_sources,
                              newly_invalid_analysis_tmp),
                             ([], valid_analysis_tmp))):
                            raise TaskError(
                                'Failed to split off invalid analysis.')
                        if ZincUtils.is_nonempty_analysis(
                                self._invalid_analysis_file):
                            if self._zinc_utils.run_zinc_merge([
                                    self._invalid_analysis_file,
                                    newly_invalid_analysis_tmp
                            ], invalid_analysis_tmp):
                                raise TaskError(
                                    'Failed to merge prior and current invalid analysis.'
                                )
                        else:
                            invalid_analysis_tmp = newly_invalid_analysis_tmp

                        # Now it's OK to overwrite the main analysis files with the new state.
                        ZincUtils._move_analysis(valid_analysis_tmp,
                                                 self._analysis_file)
                        ZincUtils._move_analysis(invalid_analysis_tmp,
                                                 self._invalid_analysis_file)

                # Figure out the sources and analysis belonging to each partition.
                partitions = [
                ]  # Each element is a triple (vts, sources_by_target, analysis).
                for vts in invalidation_check.invalid_vts_partitioned:
                    partition_tmpdir = os.path.join(
                        tmpdir, Target.maybe_readable_identify(vts.targets))
                    os.mkdir(partition_tmpdir)
                    sources = list(
                        itertools.chain.from_iterable([
                            invalid_sources_by_target.get(t, [])
                            for t in vts.targets
                        ]))
                    analysis_file = os.path.join(partition_tmpdir, 'analysis')
                    partitions.append((vts, sources, analysis_file))

                # Split per-partition files out of the global invalid analysis.
                if ZincUtils.is_nonempty_analysis(
                        self._invalid_analysis_file) and partitions:
                    with self.context.new_workunit(name='partition-analysis'):
                        splits = [(x[1], x[2]) for x in partitions]
                        if self._zinc_utils.run_zinc_split(
                                self._invalid_analysis_file, splits):
                            raise TaskError(
                                'Failed to split invalid analysis into per-partition files.'
                            )

                # Now compile partitions one by one.
                for partition in partitions:
                    (vts, sources, analysis_file) = partition
                    self._process_target_partition(partition, cp)
                    # No exception was thrown, therefore the compile succeeded and analysis_file is now valid.

                    if os.path.exists(
                            analysis_file
                    ):  # The compilation created an analysis.
                        # Kick off the background artifact cache write.
                        if self.get_artifact_cache(
                        ) and self.context.options.write_to_artifact_cache:
                            self._write_to_artifact_cache(
                                analysis_file, vts, invalid_sources_by_target)

                        # Merge the newly-valid analysis into our global valid analysis.
                        if ZincUtils.is_nonempty_analysis(self._analysis_file):
                            with self.context.new_workunit(
                                    name='update-upstream-analysis'):
                                new_valid_analysis = analysis_file + '.valid.new'
                                if self._zinc_utils.run_zinc_merge(
                                    [self._analysis_file, analysis_file],
                                        new_valid_analysis):
                                    raise TaskError(
                                        'Failed to merge new analysis back into valid analysis file.'
                                    )
                            ZincUtils._move_analysis(new_valid_analysis,
                                                     self._analysis_file)
                        else:  # We need to keep analysis_file around. Background tasks may need it.
                            ZincUtils._copy_analysis(analysis_file,
                                                     self._analysis_file)

                    if ZincUtils.is_nonempty_analysis(
                            self._invalid_analysis_file):
                        with self.context.new_workunit(
                                name='trim-downstream-analysis'):
                            # Trim out the newly-valid sources from our global invalid analysis.
                            new_invalid_analysis = analysis_file + '.invalid.new'
                            discarded_invalid_analysis = analysis_file + '.invalid.discard'
                            if self._zinc_utils.run_zinc_split(
                                    self._invalid_analysis_file,
                                [(sources, discarded_invalid_analysis),
                                 ([], new_invalid_analysis)]):
                                raise TaskError(
                                    'Failed to trim invalid analysis file.')
                            ZincUtils._move_analysis(
                                new_invalid_analysis,
                                self._invalid_analysis_file)

                    # Now that all the analysis accounting is complete, we can safely mark the
                    # targets as valid.
                    vts.update()

                # Check for missing dependencies, if needed.
                if invalidation_check.invalid_vts and os.path.exists(
                        self._analysis_file):
                    deps_cache = JvmDependencyCache(self.context,
                                                    scala_targets,
                                                    self._analysis_file,
                                                    self._classes_dir)
                    deps_cache.check_undeclared_dependencies()

        # Provide the target->class and source->class mappings to downstream tasks if needed.
        if self.context.products.isrequired('classes'):
            sources_by_target = self._compute_sources_by_target(scala_targets)
            classes_by_source = self._compute_classes_by_source()
            self._add_all_products_to_genmap(sources_by_target,
                                             classes_by_source)

        # Update the classpath for downstream tasks.
        for conf in self._confs:
            egroups.update_compatible_classpaths(group_id,
                                                 [(conf, self._classes_dir)])
Example #46
 def _walk(self, walked, work, predicate=None):
   Target._walk(self, walked, work, predicate)
   self.binary._walk(walked, work, predicate)
Example #47
 def __init__(self, name, url_builder):
     """:url_builder a function that accepts a page target and an optional wiki :config dict and
 returns a tuple of (alias, fully qualified url)."""
     Target.__init__(self, name)
     self.url_builder = url_builder
Example #48
 def setUpClass(cls):
     cls.build_root = mkdtemp(suffix='_BUILD_ROOT')
     set_buildroot(cls.build_root)
     cls._cwd = os.getcwd()
     os.chdir(cls.build_root)
     Target._clear_all_addresses()
Example #49
 def print_provides(column_extractors, address):
     target = Target.get(address)
     if target.is_exported:
         return " ".join(
             extractor(target) for extractor in column_extractors)
Example #50
File: doc.py Project: wfarner/commons
 def __init__(self, name, url_builder, exclusives=None):
   """:url_builder a function that accepts a page target and an optional wiki :config dict and
   returns a tuple of (alias, fully qualified url)."""
   Target.__init__(self, name, is_meta=False, exclusives=exclusives)
   self.url_builder = url_builder
Example #51
 def _print_provides(self, column_extractors, address):
   target = Target.get(address)
   if is_exported(target):
     return " ".join(extractor(target) for extractor in column_extractors)
Example #52
 def get_targets():
   for address in Command.scan_addresses(root_dir):
     target = Target.get(address)
     if Doc._is_documentable(target):
       yield target
Example #53
 def print_documented(address):
     target = Target.get(address)
     if target.description:
         return '%s\n  %s' % (address, '\n  '.join(
             target.description.strip().split('\n')))
示例#54
0
  def __init__(self, name, is_meta=False):
    Target.__init__(self, name, is_meta)

    self.add_label('sources')
    self.target_base = SourceRoot.find(self)
Example #55
 def tearDown(self):
   Target._clear_all_addresses()
Example #56
    def __init__(
            self,
            name,
            source=None,
            dependencies=None,
            entry_point=None,
            inherit_path=False,  # pex option
            zip_safe=True,  # pex option
            always_write_cache=False,  # pex option
            repositories=None,  # pex option
            indices=None,  # pex option
            ignore_errors=False,  # pex option
            allow_pypi=False,  # pex option
            platforms=(),
            compatibility=None,
            exclusives=None):
        """
    :param name: target name
    :param source: the python source file that becomes this binary's __main__.
      If None specified, drops into an interpreter by default.
    :param dependencies: List of :class:`twitter.pants.base.target.Target` instances
      this target depends on.
    :type dependencies: list of targets
    :param entry_point: the default entry point for this binary.  if None, drops into the entry
      point that is defined by source
    :param inherit_path: inherit the sys.path of the environment that this binary runs in
    :param zip_safe: whether or not this binary is safe to run in compacted (zip-file) form
    :param always_write_cache: whether or not the .deps cache of this PEX file should always
      be written to disk.
    :param repositories: a list of repositories to query for dependencies.
    :param indices: a list of indices to use for packages.
    :param platforms: extra platforms to target when building this binary.
    :param compatibility: either a string or list of strings that represents
      interpreter compatibility for this target, using the Requirement-style format,
      e.g. ``'CPython>=3', or just ['>=2.7','<3']`` for requirements agnostic to interpreter class.
    :param dict exclusives: An optional dict of exclusives tags. See CheckExclusives for details.
    """

        # TODO(John Sirois): Fixup TargetDefinitionException - it has awkward Target base-class
        # initialization requirements right now requiring this Target.__init__.
        Target.__init__(self, name, exclusives=exclusives)

        if source is None and entry_point is None:
            raise TargetDefinitionException(
                self,
                'A python binary target must specify either source or entry_point.'
            )

        PythonTarget.__init__(
            self,
            name,
            [] if source is None else [source],
            compatibility=compatibility,
            dependencies=dependencies,
            exclusives=exclusives,
        )

        if not isinstance(platforms, (list, tuple)) and not isinstance(
                platforms, Compatibility.string):
            raise TargetDefinitionException(
                self, 'platforms must be a list, tuple or string.')

        self._entry_point = entry_point
        self._inherit_path = bool(inherit_path)
        self._zip_safe = bool(zip_safe)
        self._always_write_cache = bool(always_write_cache)
        self._repositories = maybe_list(repositories or [])
        self._indices = maybe_list(indices or [])
        self._ignore_errors = bool(ignore_errors)
        self._platforms = tuple(maybe_list(platforms or []))

        if source and entry_point:
            entry_point_module = entry_point.split(':', 1)[0]
            source_entry_point = self._translate_to_entry_point(
                self.sources[0])
            if entry_point_module != source_entry_point:
                raise TargetDefinitionException(
                    self,
                    'Specified both source and entry_point but they do not agree: %s vs %s'
                    % (source_entry_point, entry_point_module))
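
Given the parameters documented above, a BUILD-file usage might look like the following sketch; the python_binary alias and all paths are assumptions, not taken from the source:

python_binary(name='hello',
  source='hello.py',          # becomes the PEX's __main__
  zip_safe=True,              # pex option, per the docstring
  dependencies=[
    pants('src/python/common:lib'),   # illustrative dependency spec
  ],
)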