Example #1
  def _rearrange_output_for_package(self, target_workdir, java_package):
    """Rearrange the output files to match a standard Java structure.

    Antlr emits a directory structure based on the relative path provided
    for the grammar file. If the source root of the file is different from
    the Pants build root, then the Java files end up with undesired parent
    directories.
    """
    package_dir_rel = java_package.replace('.', os.path.sep)
    package_dir = os.path.join(target_workdir, package_dir_rel)
    safe_mkdir(package_dir)
    for root, dirs, files in safe_walk(target_workdir):
      if root == package_dir:
        # This path is already in the correct location
        continue
      for f in files:
        os.rename(
          os.path.join(root, f),
          os.path.join(package_dir, f)
        )

    # Remove any empty directories that were left behind
    for root, dirs, files in safe_walk(target_workdir, topdown=False):
      for d in dirs:
        full_dir = os.path.join(root, d)
        if not os.listdir(full_dir):
          os.rmdir(full_dir)
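
The pattern above is easy to exercise in isolation. A minimal sketch of the same flattening step, using os.walk as a stand-in for safe_walk (Pants' unicode-safe wrapper with the same yield shape):

import os

def flatten_into_package(workdir, java_package):
  # 'com.example.parser' -> '<workdir>/com/example/parser'
  package_dir = os.path.join(workdir, java_package.replace('.', os.path.sep))
  if not os.path.isdir(package_dir):
    os.makedirs(package_dir)
  for root, _, files in os.walk(workdir):
    if root == package_dir:
      continue  # Already in the right place.
    for f in files:
      os.rename(os.path.join(root, f), os.path.join(package_dir, f))

Like the original, this assumes generated file names are unique across the tree; a collision would silently overwrite.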
Example #2
  def _compute_classpath_elements_by_class(self, classpath):
    """Computes a mapping of a .class file to its corresponding element on the given classpath."""
    # Don't consider loose classes dirs in our classes dir. Those will be considered
    # separately, by looking at products.
    def non_product(path):
      return path != self._classes_dir
    classpath_entries = filter(non_product, classpath)

    if self._upstream_class_to_path is None:
      self._upstream_class_to_path = {}
      for cp_entry in self._find_all_bootstrap_jars() + classpath_entries:
        # Per the classloading spec, a 'jar' in this context can also be a .zip file.
        if os.path.isfile(cp_entry) and (cp_entry.endswith('.jar') or cp_entry.endswith('.zip')):
          with open_zip(cp_entry, 'r') as jar:
            for cls in jar.namelist():
              # First jar with a given class wins, just like when classloading.
              if cls.endswith('.class') and cls not in self._upstream_class_to_path:
                self._upstream_class_to_path[cls] = cp_entry
        elif os.path.isdir(cp_entry):
          for dirpath, _, filenames in safe_walk(cp_entry, followlinks=True):
            for f in filter(lambda x: x.endswith('.class'), filenames):
              cls = os.path.relpath(os.path.join(dirpath, f), cp_entry)
              if cls not in self._upstream_class_to_path:
                self._upstream_class_to_path[cls] = os.path.join(dirpath, f)
    return self._upstream_class_to_path
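
The first-wins rule above deliberately mirrors JVM classloading order. A standalone sketch of the core mapping step, assuming the standard zipfile module in place of Pants' open_zip helper:

import os
import zipfile

def map_classes_to_entries(classpath_entries):
  class_to_entry = {}
  for entry in classpath_entries:
    # Per the classloading spec, a 'jar' can also be a .zip file.
    if os.path.isfile(entry) and entry.endswith(('.jar', '.zip')):
      with zipfile.ZipFile(entry) as jar:
        for name in jar.namelist():
          if name.endswith('.class') and name not in class_to_entry:
            class_to_entry[name] = entry  # First entry with a given class wins.
  return class_to_entry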
Example #3
  def _link_current_reports(self, report_dir, link_dir, preserve):
    # Kill everything not preserved.
    for name in os.listdir(link_dir):
      path = os.path.join(link_dir, name)
      if name not in preserve:
        if os.path.isdir(path):
          safe_rmtree(path)
        else:
          os.unlink(path)

    # Link ~all the isolated run/ dir contents back up to the stable workdir
    # NB: When batching is enabled, files can be emitted under different subdirs. If those files
    # have the same basename, the last one walked wins. This may
    # result in a loss of information from the ignored files. We're OK with this because:
    # a) We're planning on deprecating this loss of information.
    # b) It is the same behavior as existed before batching was added.
    for root, dirs, files in safe_walk(report_dir, topdown=True):
      dirs.sort()  # Ensure a consistent walk order for sanity sake.
      for f in itertools.chain(fnmatch.filter(files, '*.err.txt'),
                               fnmatch.filter(files, '*.out.txt'),
                               fnmatch.filter(files, 'TEST-*.xml')):
        src = os.path.join(root, f)
        dst = os.path.join(link_dir, f)
        safe_delete(dst)
        os.symlink(src, dst)

    for path in os.listdir(report_dir):
      if path in ('coverage', 'reports'):
        src = os.path.join(report_dir, path)
        dst = os.path.join(link_dir, path)
        os.symlink(src, dst)
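
The safe_delete-then-symlink pair is the usual idempotent link-refresh pattern. A sketch with only the standard library, assuming safe_delete simply ignores a missing path:

import errno
import os

def relink(src, dst):
  try:
    os.unlink(dst)  # Drop any stale link or file first; os.symlink fails on existing paths.
  except OSError as e:
    if e.errno != errno.ENOENT:
      raise
  os.symlink(src, dst)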
Example #4
    def classpath_entries_contents(cls, classpath_entries):
        """Provide a generator over the contents (classes/resources) of a classpath.

    Subdirectories are included and differentiated via a trailing forward slash (for symmetry
    across ZipFile.namelist and directory walks).

    :param classpath_entries: A sequence of classpath_entries. Non-jars/dirs are ignored.
    :returns: An iterator over all classpath contents, one directory, class or resource relative
              path per iteration step.
    :rtype: :class:`collections.Iterator` of string
    """
        for entry in classpath_entries:
            if cls.is_jar(entry):
                # Walk the jar namelist.
                with open_zip(entry, mode="r") as jar:
                    for name in jar.namelist():
                        yield name
            elif os.path.isdir(entry):
                # Walk the directory, including subdirs.
                def rel_walk_name(abs_sub_dir, name):
                    return fast_relpath(os.path.join(abs_sub_dir, name), entry)

                for abs_sub_dir, dirnames, filenames in safe_walk(entry):
                    for name in dirnames:
                        yield "{}/".format(rel_walk_name(abs_sub_dir, name))
                    for name in filenames:
                        yield rel_walk_name(abs_sub_dir, name)
            else:
                # non-jar and non-directory classpath entries should be ignored
                pass
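
A sketch of just the directory branch, substituting os.path.relpath for Pants' fast_relpath; the trailing '/' on directories keeps the output shaped like ZipFile.namelist():

import os

def dir_contents(entry):
  for abs_sub_dir, dirnames, filenames in os.walk(entry):
    for name in dirnames:
      yield os.path.relpath(os.path.join(abs_sub_dir, name), entry) + '/'
    for name in filenames:
      yield os.path.relpath(os.path.join(abs_sub_dir, name), entry)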
Example #5
  def test_publish_local(self):
    for with_alias in [True, False]:
      targets = self._prepare_for_publishing(with_alias=with_alias)

      with temporary_dir() as publish_dir:
        task = self.prepare_task(args=['--test-local=%s' % publish_dir,
                                       '--no-test-dryrun'],
                                 build_graph=self.build_graph,
                                 build_file_parser=self.build_file_parser,
                                 targets=targets)
        self._prepare_mocks(task)
        task.execute()

        # Nothing is written to the pushdb during a local publish
        # (maybe some directories are created, but git will ignore them)
        files = []
        for _, _, filenames in safe_walk(self.push_db_basedir):
          files.extend(filenames)
        self.assertEquals(0, len(files),
                          "Nothing should be written to the pushdb during a local publish")

        publishable_count = len(targets) - (1 if with_alias else 0)
        self.assertEquals(publishable_count, task.confirm_push.call_count,
                          "Expected one call to confirm_push per artifact")
        self.assertEquals(publishable_count, task.publish.call_count,
                          "Expected one call to publish per artifact")
Example #6
  def generate(self):
    # auto-generate the python files that we bundle up
    self.run_thrifts()

    # Thrift generates code with all parent namespaces with empty __init__.py's. Generally
    # speaking we want to drop anything w/o an __init__.py, and for anything with an __init__.py,
    # we want to explicitly make it a namespace package, hence the hoops here.
    for root, _, files in safe_walk(os.path.normpath(self.package_root)):
      reldir = os.path.relpath(root, self.package_root)
      if reldir == '.':  # skip root
        continue
      if '__init__.py' not in files:  # skip non-packages
        continue
      init_py_abspath = os.path.join(root, '__init__.py')
      module_path = self.path_to_module(reldir)
      self.created_packages.add(module_path)
      if os.path.getsize(init_py_abspath) == 0:  # empty __init__, translate to namespace package
        with open(init_py_abspath, 'wb') as f:
          f.write(b"__import__('pkg_resources').declare_namespace(__name__)")
        self.created_namespace_packages.add(module_path)
      else:
        # non-empty __init__, this is a leaf package, usually with ttypes and constants, leave as-is
        pass

    if not self.created_packages:
      raise self.CodeGenerationException('No Thrift structures declared in {}!'.format(self.target))
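
path_to_module is not shown above; a plausible sketch (an assumption, not the actual Pants helper) just turns the relative directory into a dotted module path:

import os

def path_to_module(reldir):
  # 'gen/twitter/tweetypie' -> 'gen.twitter.tweetypie'
  return reldir.replace(os.path.sep, '.')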
Example #7
 def test_safe_walk(self):
   with temporary_dir() as tmpdir:
     safe_mkdir(os.path.join(tmpdir, '中文'))
     if isinstance(tmpdir, unicode):
       tmpdir = tmpdir.encode('utf-8')
     for _, dirs, _ in dirutil.safe_walk(tmpdir):
       self.assertTrue(all(isinstance(dirname, unicode) for dirname in dirs))
Example #8
  def _write_to_artifact_cache(self, vts, compile_context, get_update_artifact_cache_work):
    assert len(vts.targets) == 1
    assert vts.targets[0] == compile_context.target

    # Noop if the target is uncacheable.
    if compile_context.target.has_label('no_cache'):
      return
    vt = vts.versioned_targets[0]

    # Set up args to relativize analysis in the background.
    portable_analysis_file = JvmCompileStrategy._portable_analysis_for_target(
        self._analysis_dir, compile_context.target)
    relativize_args_tuple = (compile_context.analysis_file, portable_analysis_file)

    # Compute the classes and resources for this target.
    artifacts = []
    resources_by_target = self.context.products.get_data('resources_by_target')
    if resources_by_target is not None:
      for _, paths in resources_by_target[compile_context.target].abs_paths():
        artifacts.extend(paths)
    for dirpath, _, filenames in safe_walk(compile_context.classes_dir):
      artifacts.extend([os.path.join(dirpath, f) for f in filenames])

    # Get the 'work' that will publish these artifacts to the cache.
    # NB: the portable analysis_file won't exist until we finish.
    vts_artifactfiles_pair = (vt, artifacts + [portable_analysis_file])
    update_artifact_cache_work = get_update_artifact_cache_work([vts_artifactfiles_pair])

    # And execute it.
    if update_artifact_cache_work:
      work_chain = [
          Work(self._analysis_tools.relativize, [relativize_args_tuple], 'relativize'),
          update_artifact_cache_work
      ]
      self.context.submit_background_work_chain(work_chain, parent_workunit_name='cache')
Example #9
 def walk(self, relpath, topdown=True):
   def onerror(error):
     raise OSError(getattr(error, 'errno', None), 'Failed to walk below {}'.format(relpath), error)
   for root, dirs, files in safe_walk(os.path.join(self.build_root, relpath),
                                      topdown=topdown,
                                      onerror=onerror):
     yield fast_relpath(root, self.build_root), dirs, files
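
The onerror hook matters because os.walk (and therefore safe_walk) swallows listing errors by default, silently yielding nothing for unreadable or missing paths. A minimal demonstration:

import os

def strict_walk(path):
  def onerror(error):  # Receives the OSError raised while listing a directory.
    raise error
  for root, dirs, files in os.walk(path, onerror=onerror):
    yield root, dirs, files

# list(strict_walk('/no/such/dir')) now raises OSError instead of returning [].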
Example #10
  def test_java_compile_reads_resource_mapping(self):
    # Ensure that if an annotation processor produces a resource-mapping,
    # the artifact contains that resource mapping.

    with temporary_dir() as cache_dir:
      artifact_dir = os.path.join(cache_dir, 'JavaCompile',
                                  'testprojects.src.java.com.pants.testproject.annotation.main.main')
      config = {'compile.java': {'write_artifact_caches': [cache_dir]}}

      pants_run = self.run_pants(['compile',
                                  'testprojects/src/java/com/pants/testproject/annotation/main'],
                                 config)
      self.assert_success(pants_run)

      self.assertTrue(os.path.exists(artifact_dir))
      artifacts = os.listdir(artifact_dir)
      self.assertEqual(len(artifacts), 1)

      with temporary_dir() as extract_dir:
        TarArchiver.extract(os.path.join(artifact_dir, artifacts[0]), extract_dir)
        all_files = set()
        for dirpath, dirs, files in safe_walk(extract_dir):
          for name in files:
            path = os.path.join(dirpath, name)
            all_files.add(path)

        report_file_name = os.path.join(extract_dir, 'compile/jvm/java/classes/deprecation_report.txt')
        self.assertIn(report_file_name, all_files)

        with open(report_file_name) as fp:
          annotated_classes = [line.rstrip() for line in fp.read().splitlines()]
        self.assertEquals(
          {'com.pants.testproject.annotation.main.Main', 'com.pants.testproject.annotation.main.Main$TestInnerClass'},
          set(annotated_classes))
Example #11
    def test_publish_remote(self):
        targets = self._prepare_for_publishing()
        self.set_options(dryrun=False, repos=self._get_repos(), push_postscript="\nPS")
        task = self.create_task(self.context(target_roots=targets))
        self._prepare_mocks(task)
        task.execute()

        # One pushdb file is written per target during a remote publish.
        files = []
        for _, _, filenames in safe_walk(self.push_db_basedir):
            files.extend(filenames)

        self.assertEquals(len(targets), len(files), "During a remote publish, one pushdb should be written per target")
        self.assertEquals(len(targets), task.confirm_push.call_count, "Expected one call to confirm_push per artifact")
        self.assertEquals(len(targets), task.publish.call_count, "Expected one call to publish per artifact")

        self.assertEquals(len(targets), task.scm.commit.call_count, "Expected one call to scm.commit per artifact")
        args, kwargs = task.scm.commit.call_args
        message = args[0]
        message_lines = message.splitlines()
        self.assertTrue(
            len(message_lines) > 1, "Expected at least one commit message line in addition to the post script."
        )
        self.assertEquals("PS", message_lines[-1])

        self.assertEquals(len(targets), task.scm.add.call_count, "Expected one call to scm.add per artifact")

        self.assertEquals(len(targets), task.scm.tag.call_count, "Expected one call to scm.tag per artifact")
        args, kwargs = task.scm.tag.call_args
        tag_name, tag_message = args
        tag_message_splitlines = tag_message.splitlines()
        self.assertTrue(
            len(tag_message_splitlines) > 1, "Expected at least one tag message line in addition to the post script."
        )
        self.assertEquals("PS", tag_message_splitlines[-1])
Example #12
  def generate_doc(self, language_predicate, create_jvmdoc_command):
    """
    Generate an execute method given a language predicate and a command to create documentation.

    language_predicate: a function that accepts a target and returns True if the target is of that
                        language
    create_jvmdoc_command: (classpath, directory, *targets) -> command (string) that will generate
                           documentation for targets
    """
    catalog = self.context.products.isrequired(self.jvmdoc().product_type)
    if catalog and self.combined:
      raise TaskError(
          'Cannot provide {} target mappings for combined output'.format(self.jvmdoc().product_type))

    def docable(target):
      if not language_predicate(target):
        self.context.log.debug('Skipping [{}] because it does not pass the language predicate'.format(target.address.spec))
        return False
      if not self._include_codegen and target.is_synthetic:
        self.context.log.debug('Skipping [{}] because it is a synthetic target'.format(target.address.spec))
        return False
      for pattern in self._exclude_patterns:
        if pattern.search(target.address.spec):
          self.context.log.debug(
            "Skipping [{}] because it matches exclude pattern '{}'".format(target.address.spec, pattern.pattern))
          return False
      return True

    targets = self.get_targets(predicate=docable)
    if not targets:
      return

    with self.invalidated(targets, invalidate_dependents=self.combined) as invalidation_check:
      def find_invalid_targets():
        invalid_targets = set()
        for vt in invalidation_check.invalid_vts:
          invalid_targets.update(vt.targets)
        return invalid_targets

      invalid_targets = list(find_invalid_targets())
      if invalid_targets:
        if self.combined:
          self._generate_combined(targets, create_jvmdoc_command)
        else:
          self._generate_individual(invalid_targets, create_jvmdoc_command)

    if self.open and self.combined:
      try:
        desktop.ui_open(os.path.join(self.workdir, 'combined', 'index.html'))
      except desktop.OpenError as e:
        raise TaskError(e)

    if catalog:
      for target in targets:
        gendir = self._gendir(target)
        jvmdocs = []
        for root, dirs, files in safe_walk(gendir):
          jvmdocs.extend(os.path.relpath(os.path.join(root, f), gendir) for f in files)
        self.context.products.get(self.jvmdoc().product_type).add(target, gendir, jvmdocs)
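
A hypothetical create_jvmdoc_command satisfying the contract in the docstring; the javadoc flags and the sources_relative_to_buildroot accessor are assumptions, and it returns an argv list where your runner may instead expect a single shell string:

def create_javadoc_command(classpath, gendir, *targets):
  sources = [src for target in targets for src in target.sources_relative_to_buildroot()]
  if not sources:
    return None
  return ['javadoc', '-quiet', '-d', gendir, '-classpath', ':'.join(classpath)] + sources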
Example #13
 def _listtree(self, root, empty_dirs):
   listing = set()
   for path, dirs, files in safe_walk(root):
     relpath = os.path.normpath(os.path.relpath(path, root))
     if empty_dirs:
       listing.update(os.path.normpath(os.path.join(relpath, d)) for d in dirs)
     listing.update(os.path.normpath(os.path.join(relpath, f)) for f in files)
   return listing
Example #14
File: artifact.py Project: amedina/pants
 def extract(self):
   for dir_name, _, filenames in safe_walk(self._directory):
     for filename in filenames:
       filename = os.path.join(dir_name, filename)
       relpath = os.path.relpath(filename, self._directory)
       dst = os.path.join(self._artifact_root, relpath)
       safe_mkdir_for(dst)
       shutil.copy(filename, dst)
       self._relpaths.add(relpath)
Example #15
 def _parse_xml_files(self):
   testsuites = []
   for root, dirs, files in safe_walk(self._xml_dir, topdown=True):
     dirs.sort()  # Ensures a consistent gathering order.
     for xml_file in sorted(fnmatch.filter(files, 'TEST-*.xml')):
       testsuites += self._parse_xml_file(os.path.join(root, xml_file))
   merged_suites = ReportTestSuite.merged(testsuites,
                                          logger=self._logger,
                                          error_on_conflict=self._error_on_conflict)
   return sorted(merged_suites)
Example #16
  def _get_resource_extensions(self, project):
    resource_extensions = set()
    resource_extensions.update(project.resource_extensions)

    # TODO(John Sirois): make test resources 1st class in ant build and punch this through to pants
    # model
    for _, _, files in safe_walk(os.path.join(get_buildroot(), 'tests', 'resources')):
      resource_extensions.update(Project.extract_resource_extensions(files))

    return resource_extensions
Example #17
 def test_safe_walk(self):
   """Test that directory names are correctly represented as unicode strings"""
   # This test is unnecessary in Python 3, where all strings are unicode and there is
   # no unicode constructor.
   with temporary_dir() as tmpdir:
     safe_mkdir(os.path.join(tmpdir, '中文'))
     if isinstance(tmpdir, six.text_type):
       tmpdir = tmpdir.encode('utf-8')
     for _, dirs, _ in dirutil.safe_walk(tmpdir):
       self.assertTrue(all(isinstance(dirname, six.text_type) for dirname in dirs))
Example #18
  def execute(self):
    # Gather all targets that are both capable of importing jars and actually
    # declare some imports.
    targets = self.context.targets(lambda t: isinstance(t, ImportJarsMixin)
                                             and t.imported_jar_libraries)
    if not targets:
      return None
    imports_map = self.context.products.get('ivy_imports')
    executor = self.create_java_executor()

    # Create a list of all of these targets plus the list of JarDependencies
    # they depend on.
    all_targets = set(targets)
    for target in targets:
      all_targets.update(target.imported_jar_libraries)

    imported_targets = []

    with self.invalidated(all_targets, invalidate_dependents=True) as invalidation_check:
      invalid_targets = []
      if invalidation_check.invalid_vts:
        invalid_targets += [vt.target for vt in invalidation_check.invalid_vts]
      for target in targets:
        if self._is_invalid(invalid_targets, target):
          jars = target.imported_jars
          self.context.log.info('Mapping import jars for {target}: \n  {jars}'.format(
            target=target.address.spec,
            jars='\n  '.join(self._str_jar(s) for s in jars)))
          self.mapjars(imports_map, target, executor, jars=jars)
          imported_targets.append(target)

    # Reconstruct the ivy_imports target -> mapdir mapping for still-valid targets
    # by walking the build cache.
    cached_targets = set(targets) - set(invalid_targets)
    for import_jars_target in cached_targets:
      mapdir = self.mapjar_workdir(import_jars_target)
      for root, _, files in safe_walk(mapdir):
        jarfiles = []
        for f in files:
          # We only expect ivy to touch this directory, so it should be just a directory with the
          # ivy output.  However, Ivy will stick an 'ivy.xml' file here which we don't want to map.
          if f != 'ivy.xml':
            full_filename = os.path.join(root, f)
            if os.path.islink(full_filename):
              jarfiles.append(f)
            else:
              raise TaskError('ivy-imports found unexpected file in ivy output directory: {}'
                              .format(full_filename))
        if jarfiles:
          imports_map.add(import_jars_target, root, jarfiles)

    # Return the list of imported targets for testing purposes.
    return imported_targets
Example #19
 def _find_sources_generated_by_target(self, target):
   if target.id in self._generated_sources_cache:
     for source in self._generated_sources_cache[target.id]:
       yield source
     return
   target_workdir = self._task.codegen_workdir(target)
   if not os.path.exists(target_workdir):
     return
   for root, dirs, files in safe_walk(target_workdir):
     for name in files:
       yield os.path.join(root, name)
Example #20
 def create(self, basedir, outdir, name, prefix=None):
   zippath = os.path.join(outdir, '{}.zip'.format(name))
   with open_zip(zippath, 'w', compression=ZIP_DEFLATED) as zip:
     for root, _, files in safe_walk(basedir):
       root = ensure_text(root)
       for file in files:
         file = ensure_text(file)
         full_path = os.path.join(root, file)
         relpath = os.path.relpath(full_path, basedir)
         if prefix:
           relpath = os.path.join(ensure_text(prefix), relpath)
         zip.write(full_path, relpath)
   return zippath
Example #21
  def generate_doc(self, language_predicate, create_jvmdoc_command):
    """
    Generate an execute method given a language predicate and a command to create documentation.

    language_predicate: a function that accepts a target and returns True if the target is of that
                        language
    create_jvmdoc_command: (classpath, directory, *targets) -> command (string) that will generate
                           documentation for targets
    """
    if self.skip:
      return

    catalog = self.context.products.isrequired(self.jvmdoc().product_type)
    if catalog and self.combined:
      raise TaskError(
          'Cannot provide {} target mappings for combined output'.format(self.jvmdoc().product_type))

    def docable(tgt):
      return language_predicate(tgt) and (self._include_codegen or not tgt.is_codegen)

    targets = self.context.targets(predicate=docable)
    if not targets:
      return

    with self.invalidated(targets) as invalidation_check:
      safe_mkdir(self.workdir)
      classpath = self.classpath(targets)

      def find_jvmdoc_targets():
        invalid_targets = set()
        for vt in invalidation_check.invalid_vts:
          invalid_targets.update(vt.targets)

        if self.transitive:
          return invalid_targets
        else:
          return set(invalid_targets).intersection(set(self.context.target_roots))

      jvmdoc_targets = list(find_jvmdoc_targets())
      if self.combined:
        self._generate_combined(classpath, jvmdoc_targets, create_jvmdoc_command)
      else:
        self._generate_individual(classpath, jvmdoc_targets, create_jvmdoc_command)

    if catalog:
      for target in targets:
        gendir = self._gendir(target)
        jvmdocs = []
        for root, dirs, files in safe_walk(gendir):
          jvmdocs.extend(os.path.relpath(os.path.join(root, f), gendir) for f in files)
        self.context.products.get(self.jvmdoc().product_type).add(target, gendir, jvmdocs)
Example #22
    def test_java_compile_reads_resource_mapping(self, strategy):
        # Ensure that if an annotation processor produces a resource-mapping,
        # the artifact contains that resource mapping.

        with temporary_dir() as cache_dir:
            artifact_dir = os.path.join(
                cache_dir, "JavaCompile", "testprojects.src.java.org.pantsbuild.testproject.annotation.main.main"
            )
            config = {"compile.java": {"write_artifact_caches": [cache_dir]}}

            pants_run = self.run_pants(
                [
                    "compile.java",
                    "--strategy={}".format(strategy),
                    "compile.apt",
                    "--strategy={}".format(strategy),
                    "testprojects/src/java/org/pantsbuild/testproject/annotation/main",
                ],
                config,
            )
            self.assert_success(pants_run)

            self.assertTrue(os.path.exists(artifact_dir))
            artifacts = os.listdir(artifact_dir)
            self.assertEqual(len(artifacts), 1)

            with temporary_dir() as extract_dir:
                TarArchiver.extract(os.path.join(artifact_dir, artifacts[0]), extract_dir)
                all_files = set()
                for dirpath, dirs, files in safe_walk(extract_dir):
                    for name in files:
                        path = os.path.join(dirpath, name)
                        all_files.add(path)

                # Locate the report file on the classpath.
                report_file_name = "deprecation_report.txt"
                reports = [f for f in all_files if f.endswith(report_file_name)]
                self.assertEquals(
                    1, len(reports), "Expected exactly one {} file; got: {}".format(report_file_name, all_files)
                )

                with open(reports[0]) as fp:
                    annotated_classes = [line.rstrip() for line in fp.read().splitlines()]
                    self.assertEquals(
                        {
                            "org.pantsbuild.testproject.annotation.main.Main",
                            "org.pantsbuild.testproject.annotation.main.Main$TestInnerClass",
                        },
                        set(annotated_classes),
                    )
Example #23
  def test_incremental(self):
    make_unpacked_jar = functools.partial(self._make_unpacked_jar,
                                          include_patterns=['a/b/c/*.proto'])

    with self.sample_jarfile() as jar_filename:
      rev1 = self._make_coord(rev='0.0.1')
      foo_target = make_unpacked_jar(rev1)

      # The first time through, the target should be unpacked.
      unpack_task = self.create_task(self.context(target_roots=[foo_target]))
      self._add_dummy_product(unpack_task, foo_target, jar_filename, rev1)
      unpacked_targets = unpack_task.execute()

      self.assertEquals([foo_target], unpacked_targets)
      unpack_dir = unpack_task._unpack_dir(foo_target)
      files = []
      for _, dirname, filenames in safe_walk(unpack_dir):
        files += filenames
      self.assertEquals(['foo.proto'], files)

      # Calling the task a second time should not need to unpack any targets
      unpack_task = self.create_task(self.context(target_roots=[foo_target]))
      self._add_dummy_product(unpack_task, foo_target, jar_filename, rev1)
      unpacked_targets = unpack_task.execute()

      self.assertEquals([], unpacked_targets)

      # Change the library version and the target should be unpacked again.
      self.reset_build_graph()  # Forget about the old definition of the unpack/jars:foo-jar target
      rev2 = self._make_coord(rev='0.0.2')
      foo_target = make_unpacked_jar(rev2)

      unpack_task = self.create_task(self.context(target_roots=[foo_target]))
      self._add_dummy_product(unpack_task, foo_target, jar_filename, rev2)
      unpacked_targets = unpack_task.execute()

      self.assertEquals([foo_target], unpacked_targets)

      # Change the include pattern and the target should be unpacked again
      self.reset_build_graph()  # Forget about the old definition of the unpack/jars:foo-jar target

      make_unpacked_jar = functools.partial(self._make_unpacked_jar,
                                            include_patterns=['a/b/c/foo.proto'])
      foo_target = make_unpacked_jar(rev2)
      unpack_task = self.create_task(self.context(target_roots=[foo_target]))
      self._add_dummy_product(unpack_task, foo_target, jar_filename, rev2)
      unpacked_targets = unpack_task.execute()

      self.assertEquals([foo_target], unpacked_targets)
Example #24
  def scan_buildfiles(root_dir, base_path=None, spec_excludes=None):
    """Looks for all BUILD files
    :param root_dir: the root of the repo containing sources
    :param base_path: directory under root_dir to scan
    :param spec_excludes: list of paths to exclude from the scan.  These can be absolute paths
      or paths that are relative to the root_dir.
    """

    def calc_exclude_roots(root_dir, excludes):
      """Return a map of root directories to subdirectory names suitable for a quick evaluation
      inside safe_walk()
      """
      result = defaultdict(set)
      for exclude in excludes:
        if exclude:
          if not os.path.isabs(exclude):
            exclude = os.path.join(root_dir, exclude)
          if exclude.startswith(root_dir):
            result[os.path.dirname(exclude)].add(os.path.basename(exclude))

      return result

    def find_excluded(root, dirs, exclude_roots):
      """Removes any of the directories specified in exclude_roots from dirs.
      """
      to_remove = []
      for exclude_root in exclude_roots:
        # root ends with a /, trim it off
        if root.rstrip('/') == exclude_root:
          for subdir in exclude_roots[exclude_root]:
            if subdir in dirs:
              to_remove.append(subdir)
      return to_remove

    buildfiles = []
    if not spec_excludes:
      exclude_roots = {}
    else:
      exclude_roots = calc_exclude_roots(root_dir, spec_excludes)

    for root, dirs, files in safe_walk(os.path.join(root_dir, base_path or ''), topdown=True):
      to_remove = find_excluded(root, dirs, exclude_roots)
      for subdir in to_remove:
        dirs.remove(subdir)
      for filename in files:
        if BuildFile._is_buildfile_name(filename):
          buildfile_relpath = os.path.relpath(os.path.join(root, filename), root_dir)
          buildfiles.append(BuildFile.from_cache(root_dir, buildfile_relpath))
    return OrderedSet(sorted(buildfiles, key=lambda buildfile: buildfile.full_path))
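
The dirs.remove(subdir) calls above only prune the traversal because topdown=True: os.walk consults the (possibly mutated) dirs list before descending. The same idiom in isolation:

import os

def walk_skipping(root_dir, skip_names):
  for root, dirs, files in os.walk(root_dir, topdown=True):
    # Mutate dirs in place; os.walk will not descend into removed entries.
    dirs[:] = [d for d in dirs if d not in skip_names]
    yield root, dirs, files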
Example #25
    def _rearrange_output_for_package(self, target_workdir, java_package):
        """Rearrange the output files to match a standard Java structure.

    Antlr emits a directory structure based on the relative path provided
    for the grammar file. If the source root of the file is different from
    the Pants build root, then the Java files end up with undesired parent
    directories.
    """
        package_dir_rel = java_package.replace('.', os.path.sep)
        package_dir = os.path.join(target_workdir, package_dir_rel)
        safe_mkdir(package_dir)
        for root, dirs, files in safe_walk(target_workdir):
            if root == package_dir:
                # This path is already in the correct location
                continue
            for f in files:
                os.rename(os.path.join(root, f), os.path.join(package_dir, f))

        # Remove any empty directories that were left behind
        for root, dirs, files in safe_walk(target_workdir, topdown=False):
            for d in dirs:
                full_dir = os.path.join(root, d)
                if not os.listdir(full_dir):
                    os.rmdir(full_dir)
Example #26
 def create(self, basedir, outdir, name, prefix=None):
     zippath = os.path.join(outdir, '{}.zip'.format(name))
     with open_zip(zippath, 'w', compression=ZIP_DEFLATED) as zip:
         # For symlinks, we want to archive the actual content of linked files but
         # under the relpath derived from symlink.
         for root, _, files in safe_walk(basedir, followlinks=True):
             root = ensure_text(root)
             for file in files:
                 file = ensure_text(file)
                 full_path = os.path.join(root, file)
                 relpath = os.path.relpath(full_path, basedir)
                 if prefix:
                     relpath = os.path.join(ensure_text(prefix), relpath)
                 zip.write(full_path, relpath)
     return zippath
Example #27
    def _create_context_jar(self, compile_context):
        """Jar up the compile_context to its output jar location.

    TODO(stuhood): In the medium term, we hope to add compiler support for this step, which would
    allow the jars to be used as compile _inputs_ as well. Currently using jar'd compile outputs as
    compile inputs would make the compiler's analysis useless.
      see https://github.com/twitter-forks/sbt/tree/stuhood/output-jars
    """
        root = compile_context.classes_dir
        with compile_context.open_jar(mode='w') as jar:
            for abs_sub_dir, dirnames, filenames in safe_walk(root):
                for name in dirnames + filenames:
                    abs_filename = os.path.join(abs_sub_dir, name)
                    arcname = fast_relpath(abs_filename, root)
                    jar.write(abs_filename, arcname)
Example #28
    def _scrub_generated_timestamps(self, target_workdir):
        """Remove the first line of comment from each file if it contains a timestamp."""
        for root, _, filenames in safe_walk(target_workdir):
            for filename in filenames:
                source = os.path.join(root, filename)

                with open(source) as f:
                    lines = f.readlines()
                if not lines:
                    continue  # Skip empty files rather than aborting the walk.
                with open(source, 'w') as f:
                    if not self._COMMENT_WITH_TIMESTAMP_RE.match(lines[0]):
                        f.write(lines[0])
                    for line in lines[1:]:
                        f.write(line)
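
_COMMENT_WITH_TIMESTAMP_RE is defined elsewhere in the class; a plausible sketch (an assumption, not the actual pattern) that matches a leading generated-code comment carrying a date:

import re

# Matches e.g. '// Generated by ... on 2015-04-01' or '# Autogenerated 2015/04/01'.
_COMMENT_WITH_TIMESTAMP_RE = re.compile(r'^\s*(//|#|/\*).*\d{4}[-/]\d{2}[-/]\d{2}')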
Example #29
  def unpack_libraries(self, target, aar_file, coordinate):
    context = self.context(target_roots=[target])
    task = self.create_task(context)

    jar_import_products = context.products.get_data(JarImportProducts, init_func=JarImportProducts)
    jar_import_products.imported(target, coordinate, aar_file)

    task.execute()

    # Gather classes found when unpacking the aar_file.
    files = []
    jar_location = task.unpacked_jar_location(coordinate)
    for _, _, filenames in safe_walk(jar_location):
      files.extend(filenames)
    return files
Example #30
  def _scrub_generated_timestamps(self, target_workdir):
    """Remove the first line of comment from each file if it contains a timestamp."""
    for root, _, filenames in safe_walk(target_workdir):
      for filename in filenames:
        source = os.path.join(root, filename)

        with open(source) as f:
          lines = f.readlines()
        if not lines:
          continue  # Skip empty files rather than aborting the walk.
        with open(source, 'w') as f:
          if not self._COMMENT_WITH_TIMESTAMP_RE.match(lines[0]):
            f.write(lines[0])
          for line in lines[1:]:
            f.write(line)
Example #32
  def _create_context_jar(self, compile_context):
    """Jar up the compile_context to its output jar location.

    TODO(stuhood): In the medium term, we hope to add compiler support for this step, which would
    allow the jars to be used as compile _inputs_ as well. Currently using jar'd compile outputs as
    compile inputs would make the compiler's analysis useless.
      see https://github.com/twitter-forks/sbt/tree/stuhood/output-jars
    """
    root = compile_context.classes_dir
    with compile_context.open_jar(mode='w') as jar:
      for abs_sub_dir, dirnames, filenames in safe_walk(root):
        for name in dirnames + filenames:
          abs_filename = os.path.join(abs_sub_dir, name)
          arcname = fast_relpath(abs_filename, root)
          jar.write(abs_filename, arcname)
Example #33
 def create(self, basedir, outdir, name, prefix=None):
   zippath = os.path.join(outdir, '{}.{}'.format(name, self.extension))
   with open_zip(zippath, 'w', compression=self.compression) as zip:
     # For symlinks, we want to archive the actual content of linked files but
     # under the relpath derived from symlink.
     for root, _, files in safe_walk(basedir, followlinks=True):
       root = ensure_text(root)
       for file in files:
         file = ensure_text(file)
         full_path = os.path.join(root, file)
         relpath = os.path.relpath(full_path, basedir)
         if prefix:
           relpath = os.path.join(ensure_text(prefix), relpath)
         zip.write(full_path, relpath)
   return zippath
Example #34
    def scan_buildfiles(root_dir, base_path=None, spec_excludes=None):
        """Looks for all BUILD files
    :param root_dir: the root of the repo containing sources
    :param base_path: directory under root_dir to scan
    :param spec_excludes: list of absolute paths to exclude from the scan"""
        def calc_exclude_roots(root_dir, excludes):
            """Return a map of root directories to subdirectory names suitable for a quick evaluation
      inside safe_walk()
      """
            result = defaultdict(set)
            for exclude in excludes:
                if exclude and exclude.startswith(root_dir):
                    result[os.path.dirname(exclude)].add(
                        os.path.basename(exclude))
            return result

        def find_excluded(root, dirs, exclude_roots):
            """Removes any of the directories specified in exclude_roots from dirs.
      """
            to_remove = []
            for exclude_root in exclude_roots:
                # root ends with a /, trim it off
                if root.rstrip('/') == exclude_root:
                    for subdir in exclude_roots[exclude_root]:
                        if subdir in dirs:
                            to_remove.append(subdir)
            return to_remove

        buildfiles = []
        if not spec_excludes:
            exclude_roots = {}
        else:
            exclude_roots = calc_exclude_roots(root_dir, spec_excludes)

        for root, dirs, files in safe_walk(os.path.join(
                root_dir, base_path or ''),
                                           topdown=True):
            to_remove = find_excluded(root, dirs, exclude_roots)
            for subdir in to_remove:
                dirs.remove(subdir)
            for filename in files:
                if BuildFile._is_buildfile_name(filename):
                    buildfile_relpath = os.path.relpath(
                        os.path.join(root, filename), root_dir)
                    buildfiles.append(
                        BuildFile.from_cache(root_dir, buildfile_relpath))
        return OrderedSet(
            sorted(buildfiles, key=lambda buildfile: buildfile.full_path))
Example #35
  def generate_doc(self, language_predicate, create_jvmdoc_command):
    """
    Generate an execute method given a language predicate and a command to create documentation.

    language_predicate: a function that accepts a target and returns True if the target is of that
                        language
    create_jvmdoc_command: (classpath, directory, *targets) -> command (string) that will generate
                           documentation for targets
    """
    if self.skip:
      return

    catalog = self.context.products.isrequired(self.jvmdoc().product_type)
    if catalog and self.combined:
      raise TaskError(
          'Cannot provide %s target mappings for combined output' % self.jvmdoc().product_type)

    def docable(tgt):
      return language_predicate(tgt) and (self._include_codegen or not tgt.is_codegen)

    targets = self.context.targets()
    with self.invalidated(filter(docable, targets)) as invalidation_check:
      safe_mkdir(self.workdir)
      classpath = self.classpath(targets, confs=self.confs)

      def find_jvmdoc_targets():
        invalid_targets = set()
        for vt in invalidation_check.invalid_vts:
          invalid_targets.update(vt.targets)

        if self.transitive:
          return invalid_targets
        else:
          return set(invalid_targets).intersection(set(self.context.target_roots))

      jvmdoc_targets = list(filter(docable, find_jvmdoc_targets()))
      if self.combined:
        self._generate_combined(classpath, jvmdoc_targets, create_jvmdoc_command)
      else:
        self._generate_individual(classpath, jvmdoc_targets, create_jvmdoc_command)

    if catalog:
      for target in targets:
        gendir = self._gendir(target)
        jvmdocs = []
        for root, dirs, files in safe_walk(gendir):
          jvmdocs.extend(os.path.relpath(os.path.join(root, f), gendir) for f in files)
        self.context.products.get(self.jvmdoc().product_type).add(target, gendir, jvmdocs)
Example #36
  def test_incremental(self):
    with self.sample_jarfile() as jar_filename:
      self.add_to_build_file('unpack', dedent('''
        unpacked_jars(name='foo',
          libraries=['unpack/jars:foo-jars'],
          include_patterns=[
            'a/b/c/*.proto',
          ],
         )
        '''))
      self._make_jar_library('0.0.1')
      foo_target = self.target('unpack:foo')

      # The first time through, the target should be unpacked.
      unpack_task = self.prepare_task(targets=[foo_target],
                                      build_graph=self.build_graph,
                                      build_file_parser=self.build_file_parser)

      # Dummy up ivy_imports product:
      ivy_imports_product = unpack_task.context.products.get('ivy_imports')
      ivy_imports_product.add(foo_target, os.path.dirname(jar_filename),
                              [os.path.basename(jar_filename)])

      unpacked_targets = unpack_task.execute()
      self.assertEquals([foo_target], unpacked_targets)
      unpack_dir = unpack_task._unpack_dir(foo_target)
      files = []
      for _, dirname, filenames in safe_walk(unpack_dir):
        files += filenames
      self.assertEquals(['foo.proto'], files)

      # Calling the task a second time should not need to unpack any targets
      unpack_task = self.prepare_task(targets=[foo_target],
                                      build_graph=self.build_graph,
                                      build_file_parser=self.build_file_parser)
      unpacked_targets = unpack_task.execute()
      self.assertEquals([], unpacked_targets)

      # Change the library version and the target should be unpacked again.
      self._make_jar_library('0.0.2')
      self.reset_build_graph()  # Forget about the old definition of the unpack/jars:foo-jar target
      foo_target = self.target('unpack:foo') # Re-inject the target
      unpack_task = self.prepare_task(targets=[foo_target],
                                      build_graph=self.build_graph,
                                      build_file_parser=self.build_file_parser)
      unpacked_targets = unpack_task.execute()
      self.assertEquals([foo_target], unpacked_targets)
Example #37
  def unpack_libraries(self, target_name, aar_file):
    test_target = self.target(target_name)
    task = self.create_task(self.context(target_roots=[test_target]))

    for android_archive in test_target.imported_jars:
      target_jar = self._approximate_ivy_mapjar_name(aar_file, android_archive)
      self._add_ivy_imports_product(test_target, target_jar, task)
    task.execute()

    # Gather classes found when unpacking the aar_file.
    aar_name = os.path.basename(target_jar)
    files = []
    jar_location = task.unpacked_jar_location(aar_name)
    for _, _, filename in safe_walk(jar_location):
      files.extend(filename)
    return files
Example #38
  def iter_generated_sources(self, target):
    # This is sort of facepalmy -- python.new will make this much better.
    for target_type, target_builder in self.generated_targets.items():
      if isinstance(target, target_type):
        builder_cls = target_builder
        break
    else:
      raise TypeError(
        'iter_generated_sources could not find suitable code generator for {}'.format(type(target)))

    builder = builder_cls(target=target, root_dir=self._root)
    builder.generate()
    for root, _, files in safe_walk(builder.package_root):
      for fn in files:
        target_file = os.path.join(root, fn)
        yield os.path.relpath(target_file, builder.package_root), target_file
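
The break/else pairing above is Python's for/else: the else clause runs only if the loop completes without hitting break, which makes it a natural no-generator-matched branch:

for candidate_type in (int, float):
  if issubclass(bool, candidate_type):
    break  # Found a match; skip the else clause.
else:
  raise TypeError('no suitable type found')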
Example #40
    def test_incremental(self):
        with self.sample_jarfile() as jar_filename:
            self.add_to_build_file(
                'unpack',
                dedent('''
        unpacked_jars(name='foo',
          libraries=['unpack/jars:foo-jars'],
          include_patterns=[
            'a/b/c/*.proto',
          ],
         )
        '''))
            self._make_jar_library('0.0.1')
            foo_target = self.target('unpack:foo')

            # The first time through, the target should be unpacked.
            unpack_task = self.create_task(
                self.context(target_roots=[foo_target]))
            self._add_dummy_product(foo_target, jar_filename, unpack_task)
            unpacked_targets = unpack_task.execute()

            self.assertEquals([foo_target], unpacked_targets)
            unpack_dir = unpack_task._unpack_dir(foo_target)
            files = []
            for _, dirname, filenames in safe_walk(unpack_dir):
                files += filenames
            self.assertEquals(['foo.proto'], files)

            # Calling the task a second time should not need to unpack any targets
            unpack_task = self.create_task(
                self.context(target_roots=[foo_target]))
            self._add_dummy_product(foo_target, jar_filename, unpack_task)
            unpacked_targets = unpack_task.execute()

            self.assertEquals([], unpacked_targets)

            # Change the library version and the target should be unpacked again.
            self._make_jar_library('0.0.2')
            # Forget about the old definition of the unpack/jars:foo-jar target.
            self.reset_build_graph()
            foo_target = self.target('unpack:foo')  # Re-inject the target.
            unpack_task = self.create_task(
                self.context(target_roots=[foo_target]))
            self._add_dummy_product(foo_target, jar_filename, unpack_task)
            unpacked_targets = unpack_task.execute()

            self.assertEquals([foo_target], unpacked_targets)
Example #41
  def iter_generated_sources(cls, target, root, config=None):
    config = config or Config.from_cache()
    # This is sort of facepalmy -- python.new will make this much better.
    for target_type, target_builder in cls.GENERATED_TARGETS.items():
      if isinstance(target, target_type):
        builder_cls = target_builder
        break
    else:
      raise TypeError(
          'iter_generated_sources could not find suitable code generator for %s' % type(target))

    builder = builder_cls(target, root, config)
    builder.generate()
    for root, _, files in safe_walk(builder.package_root):
      for fn in files:
        target_file = os.path.join(root, fn)
        yield os.path.relpath(target_file, builder.package_root), target_file
Example #42
    def test_publish_remote(self):
        targets = self._prepare_for_publishing()
        self.set_options(dryrun=False,
                         repos=self._get_repos(),
                         push_postscript='\nPS')
        task = self.create_task(self.context(target_roots=targets))
        self._prepare_mocks(task)
        task.execute()

        # One pushdb file is written per target during a remote publish.
        files = []
        for _, _, filenames in safe_walk(self.push_db_basedir):
            files.extend(filenames)

        self.assertEquals(
            len(targets), len(files),
            'During a remote publish, one pushdb should be written per target')
        self.assertEquals(len(targets), task.confirm_push.call_count,
                          'Expected one call to confirm_push per artifact')
        self.assertEquals(len(targets), task.publish.call_count,
                          'Expected one call to publish per artifact')

        self.assertEquals(len(targets), task.scm.commit.call_count,
                          'Expected one call to scm.commit per artifact')
        args, kwargs = task.scm.commit.call_args
        message = args[0]
        message_lines = message.splitlines()
        self.assertTrue(
            len(message_lines) > 1,
            'Expected at least one commit message line in addition to the post script.'
        )
        self.assertEquals('PS', message_lines[-1])

        self.assertEquals(len(targets), task.scm.add.call_count,
                          'Expected one call to scm.add per artifact')

        self.assertEquals(len(targets), task.scm.tag.call_count,
                          'Expected one call to scm.tag per artifact')
        args, kwargs = task.scm.tag.call_args
        tag_name, tag_message = args
        tag_message_splitlines = tag_message.splitlines()
        self.assertTrue(
            len(tag_message_splitlines) > 1,
            'Expected at least one tag message line in addition to the post script.'
        )
        self.assertEquals('PS', tag_message_splitlines[-1])
Example #43
  def test_java_compile_reads_resource_mapping(self):
    # Ensure that if an annotation processor produces a resource-mapping,
    # the artifact contains that resource mapping.

    with temporary_dir() as cache_dir:
      config = {'cache.compile.rsc': {'write_to': [cache_dir]}}

      self.assert_success(self.run_pants([
        'compile',
        'testprojects/src/java/org/pantsbuild/testproject/annotation/main',
      ], config=config))

      base_artifact_dir = self.get_cache_subdir(cache_dir)
      artifact_dir = os.path.join(
        base_artifact_dir,
        'testprojects.src.java.org.pantsbuild.testproject.annotation.main.main',
      )

      self.assertTrue(os.path.exists(artifact_dir))
      artifacts = os.listdir(artifact_dir)
      self.assertEqual(len(artifacts), 1)
      single_artifact = artifacts[0]

      with temporary_dir() as extract_dir:
        artifact_path = os.path.join(artifact_dir, single_artifact)
        archiver_for_path(artifact_path).extract(artifact_path, extract_dir)
        all_files = set()
        for dirpath, dirs, files in safe_walk(extract_dir):
          for name in files:
            path = os.path.join(dirpath, name)
            all_files.add(path)

        # Locate the report file on the classpath.
        report_file_name = 'deprecation_report.txt'
        reports = [f for f in all_files if f.endswith(report_file_name)]
        self.assertEqual(1, len(reports),
                         'Expected exactly one {} file; got: {}'.format(report_file_name,
                                                                        all_files))

        with open(reports[0], 'r') as fp:
          annotated_classes = [line.rstrip() for line in fp.read().splitlines()]
          self.assertEqual(
            {'org.pantsbuild.testproject.annotation.main.Main',
             'org.pantsbuild.testproject.annotation.main.Main$TestInnerClass'},
            set(annotated_classes))
Example #44
def parse_failed_targets(test_registry, junit_xml_path, error_handler):
    """Parses junit xml reports and maps targets to the set of individual tests that failed.

    Targets with no failed tests are omitted from the returned mapping and failed tests with no
    identifiable owning target are keyed under `None`.

    :param test_registry: A registry of tests that were run.
    :type test_registry: :class:`RegistryOfTests`
    :param string junit_xml_path: A path to a file or directory containing test junit xml reports
                                  to analyze.
    :param error_handler: An error handler that will be called with any junit xml parsing errors.
    :type error_handler: callable that accepts a single :class:`ParseError` argument.
    :returns: A mapping from targets to the set of individual tests that failed. Any failed tests
              that belong to no identifiable target will be mapped to `None`.
    :rtype: dict from :class:`pants.build_graph.target.Target` to a set of :class:`Test`
    """
    failed_targets = defaultdict(set)

    def parse_junit_xml_file(path):
        try:
            xml = XmlParser.from_file(path)
            failures = int(xml.get_attribute("testsuite", "failures"))
            errors = int(xml.get_attribute("testsuite", "errors"))
            if failures or errors:
                for testcase in xml.parsed.getElementsByTagName("testcase"):
                    test_failed = testcase.getElementsByTagName("failure")
                    test_errored = testcase.getElementsByTagName("error")
                    if test_failed or test_errored:
                        test = Test(
                            classname=testcase.getAttribute("classname"),
                            methodname=testcase.getAttribute("name"),
                        )
                        target = test_registry.get_owning_target(test)
                        failed_targets[target].add(test)
        except (XmlParser.XmlError, ValueError) as e:
            error_handler(ParseError(path, e))

    if os.path.isdir(junit_xml_path):
        for root, _, files in safe_walk(junit_xml_path):
            for junit_xml_file in fnmatch.filter(files, "TEST-*.xml"):
                parse_junit_xml_file(os.path.join(root, junit_xml_file))
    else:
        parse_junit_xml_file(junit_xml_path)

    return dict(failed_targets)
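
A usage sketch, with a hypothetical error handler; the registry and directory names here are placeholders, and ParseError is assumed to render usefully via str():

def log_and_continue(parse_error):
    print('Skipping malformed junit xml: {}'.format(parse_error))

failed = parse_failed_targets(test_registry, junit_xml_dir, log_and_continue)
for target, tests in failed.items():
    print(target, sorted(tests))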
Example #45
    def test_java_compile_reads_resource_mapping(self):
        # Ensure that if an annotation processor produces a resource-mapping,
        # the artifact contains that resource mapping.

        with temporary_dir() as cache_dir:
            artifact_dir = os.path.join(
                cache_dir, 'JavaCompile',
                'testprojects.src.java.com.pants.testproject.annotation.main.main'
            )
            config = {'java-compile': {'write_artifact_caches': [cache_dir]}}

            pants_run = self.run_pants([
                'compile',
                'testprojects/src/java/com/pants/testproject/annotation/main'
            ], config)
            self.assert_success(pants_run)

            self.assertTrue(os.path.exists(artifact_dir))
            artifacts = os.listdir(artifact_dir)
            self.assertEqual(len(artifacts), 1)

            with temporary_dir() as extract_dir:
                TarArchiver.extract(os.path.join(artifact_dir, artifacts[0]),
                                    extract_dir)
                all_files = set()
                for dirpath, dirs, files in safe_walk(extract_dir):
                    for name in files:
                        path = os.path.join(dirpath, name)
                        all_files.add(path)

                report_file_name = os.path.join(
                    extract_dir,
                    'compile/jvm/java/classes/deprecation_report.txt')
                self.assertIn(report_file_name, all_files)

                with open(report_file_name) as report:
                    annotated_classes = [
                        line.rstrip() for line in report.read().splitlines()
                    ]
                self.assertEqual(
                    {
                        'com.pants.testproject.annotation.main.Main',
                        'com.pants.testproject.annotation.main.Main$TestInnerClass'
                    }, set(annotated_classes))
Example #46
  def test_publish_local_dryrun(self):
    targets = self._prepare_for_publishing()

    with temporary_dir() as publish_dir:
      self.set_options(local=publish_dir)
      task = self.create_task(self.context(target_roots=targets))
      self._prepare_mocks(task)
      task.execute()

      # Nothing is written to the pushdb during a dryrun publish
      # (maybe some directories are created, but git will ignore them)
      files = []
      for _, _, filenames in safe_walk(self.push_db_basedir):
        files.extend(filenames)
      self.assertEqual(0, len(files),
                        'Nothing should be written to the pushdb during a dryrun publish')

      self.assertEqual(0, task.confirm_push.call_count,
                        'Expected confirm_push not to be called')
      self.assertEqual(0, task.publish.call_count,
                        'Expected publish not to be called')
Example #47
def _copytree(from_base, to_base):
    def abort(error):
        raise TaskError('Failed to copy from %s to %s: %s' %
                        (from_base, to_base, error))

    # TODO(John Sirois): Consider adding a unit test and lifting this to common/dirutils or similar
    def safe_link(src, dst):
        try:
            os.link(src, dst)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise e

    for dirpath, dirnames, filenames in safe_walk(from_base,
                                                  topdown=True,
                                                  onerror=abort):
        to_path = os.path.join(to_base, os.path.relpath(dirpath, from_base))
        for dirname in dirnames:
            safe_mkdir(os.path.join(to_path, dirname))
        for filename in filenames:
            safe_link(os.path.join(dirpath, filename),
                      os.path.join(to_path, filename))
Example #48
  def test_publish_local_dryrun(self):
    targets = self._prepare_for_publishing()

    with temporary_dir() as publish_dir:
      task = self.prepare_task(args=['--test-local=%s' % publish_dir],
                               build_graph=self.build_graph,
                               build_file_parser=self.build_file_parser,
                               targets=targets)
      self._prepare_mocks(task)
      task.execute()

      # Nothing is written to the pushdb during a dryrun publish
      # (maybe some directories are created, but git will ignore them)
      files = []
      for _, _, filenames in safe_walk(self.push_db_basedir):
        files.extend(filenames)
      self.assertEqual(0, len(files),
                       "Nothing should be written to the pushdb during a dryrun publish")

      self.assertEqual(0, task.confirm_push.call_count,
                       "Expected confirm_push not to be called")
      self.assertEqual(0, task.publish.call_count,
                       "Expected publish not to be called")
Example #49
  def test_publish_local(self):
    for with_alias in [True, False]:
      targets = self._prepare_for_publishing(with_alias=with_alias)

      with temporary_dir() as publish_dir:
        self.set_options(dryrun=False, local=publish_dir)
        task = self.create_task(self.context(target_roots=targets))
        self._prepare_mocks(task)
        task.execute()

        # Nothing is written to the pushdb during a local publish
        # (maybe some directories are created, but git will ignore them)
        files = []
        for _, _, filenames in safe_walk(self.push_db_basedir):
          files.extend(filenames)
        self.assertEqual(0, len(files),
                          'Nothing should be written to the pushdb during a local publish')

        publishable_count = len(targets) - (1 if with_alias else 0)
        self.assertEqual(publishable_count, task.confirm_push.call_count,
                          'Expected one call to confirm_push per artifact')
        self.assertEqual(publishable_count, task.publish.call_count,
                          'Expected one call to publish per artifact')
Example #50
  def _compute_classpath_elements_by_class(self, classpath):
    # Don't consider loose classes dirs in our classes dir. Those will be considered
    # separately, by looking at products.
    def non_product(path):
      return path != self._classes_dir

    if self._upstream_class_to_path is None:
      self._upstream_class_to_path = {}
      classpath_entries = list(filter(non_product, classpath))
      for cp_entry in self._find_all_bootstrap_jars() + classpath_entries:
        # Per the classloading spec, a 'jar' in this context can also be a .zip file.
        if os.path.isfile(cp_entry) and (cp_entry.endswith('.jar') or cp_entry.endswith('.zip')):
          with open_zip(cp_entry, 'r') as jar:
            for cls in jar.namelist():
              # First jar with a given class wins, just like when classloading.
              if cls.endswith('.class') and cls not in self._upstream_class_to_path:
                self._upstream_class_to_path[cls] = cp_entry
        elif os.path.isdir(cp_entry):
          for dirpath, _, filenames in safe_walk(cp_entry, followlinks=True):
            for f in filter(lambda x: x.endswith('.class'), filenames):
              cls = os.path.relpath(os.path.join(dirpath, f), cp_entry)
              if cls not in self._upstream_class_to_path:
                self._upstream_class_to_path[cls] = os.path.join(dirpath, f)
    return self._upstream_class_to_path
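As a rough standalone sketch of the same first-wins mapping, assuming only the standard library (zipfile in place of pants' open_zip; all names illustrative):

import os
import zipfile

def map_classes_to_classpath(classpath_entries):
    # The first entry providing a given .class wins, mirroring classloading.
    class_to_path = {}
    for cp_entry in classpath_entries:
        if os.path.isfile(cp_entry) and cp_entry.endswith(('.jar', '.zip')):
            with zipfile.ZipFile(cp_entry) as jar:
                for cls in jar.namelist():
                    if cls.endswith('.class'):
                        class_to_path.setdefault(cls, cp_entry)
        elif os.path.isdir(cp_entry):
            for dirpath, _, filenames in os.walk(cp_entry, followlinks=True):
                for f in filenames:
                    if f.endswith('.class'):
                        cls = os.path.relpath(os.path.join(dirpath, f), cp_entry)
                        class_to_path.setdefault(cls, os.path.join(dirpath, f))
    return class_to_path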
Example #51
  def execute_codegen(self, target, target_workdir):
    super().execute_codegen(target, target_workdir)

    # Thrift generates code for every parent namespace, each with an empty __init__.py. Since
    # pants allows splitting a thrift namespace hierarchy across multiple packages, we explicitly
    # insert namespace packages so that two or more of these packages can be consumed on the same
    # PYTHONPATH.
    for root, _, files in safe_walk(target_workdir):
      if '__init__.py' not in files:  # skip non-packages
        continue

      init_py_abspath = os.path.join(root, '__init__.py')

      # Thrift puts an __init__.py file at the root, and we don't want one there (it's not needed,
      # and it confuses some import mechanisms).
      if root == target_workdir:
        safe_delete(init_py_abspath)
      elif os.path.getsize(init_py_abspath) == 0:  # empty __init__, translate to namespace package
        with open(init_py_abspath, 'wb') as f:
          f.write(b"__import__('pkg_resources').declare_namespace(__name__)")
      else:
        # A non-empty __init__, this is a leaf package, usually with ttypes and constants; so we
        # leave as-is.
        pass
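To see why the declare_namespace rewrite matters, here is a hedged, self-contained demonstration (it assumes setuptools provides pkg_resources, which may emit a DeprecationWarning on modern Pythons; all paths are temporary and illustrative). Two independent trees both own the gen package, and only the namespace declaration lets both leaves import from one sys.path:

import os
import sys
import tempfile

NS = "__import__('pkg_resources').declare_namespace(__name__)"

def make_tree(base, leaf):
    # Build <base>/gen/<leaf>/ttypes.py, declaring 'gen' a namespace package.
    pkg = os.path.join(base, 'gen', leaf)
    os.makedirs(pkg)
    with open(os.path.join(base, 'gen', '__init__.py'), 'w') as f:
        f.write(NS)
    open(os.path.join(pkg, '__init__.py'), 'w').close()
    with open(os.path.join(pkg, 'ttypes.py'), 'w') as f:
        f.write('VALUE = {!r}\n'.format(leaf))

for leaf in ('foo', 'bar'):
    base = tempfile.mkdtemp()
    make_tree(base, leaf)
    sys.path.append(base)

import gen.foo.ttypes
import gen.bar.ttypes
assert (gen.foo.ttypes.VALUE, gen.bar.ttypes.VALUE) == ('foo', 'bar')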
Example #52
  def test_publish_remote(self):
    targets = self._prepare_for_publishing()

    task = self.prepare_task(config=self._get_config(),
                             args=['--no-test-dryrun'],
                             build_graph=self.build_graph,
                             build_file_parser=self.build_file_parser,
                             targets=targets)
    self._prepare_mocks(task)
    task.execute()

    # One pushdb file is written per target during a remote publish
    files = []
    for _, _, filenames in safe_walk(self.push_db_basedir):
      files.extend(filenames)
    self.assertEqual(len(targets), len(files),
                     "During a remote publish, one pushdb should be written per target")

    self.assertEqual(len(targets), task.confirm_push.call_count,
                     "Expected one call to confirm_push per artifact")
    self.assertEqual(len(targets), task.publish.call_count,
                     "Expected one call to publish per artifact")
    self.assertEqual(len(targets), task.scm.tag.call_count,
                     "Expected one call to scm.tag per artifact")
Example #53
 def _iter_wheels(self, path):
     for root, _, files in safe_walk(path):
         for f in files:
             if f.endswith('.whl'):
                 yield os.path.join(root, f)
Example #54
 def iter_files():
     for root, _, files in safe_walk(base):
         module = os.path.relpath(root, base).replace(os.path.sep, '.')
         for filename in files:
             yield module, filename, os.path.join(root, filename)
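Note that for the top-level directory os.path.relpath(root, base) is '.', so the dotted module name comes out as '.'; a hedged standalone variant (names illustrative) that maps the top level to the empty module name instead:

import os

def iter_module_files(base):
    # Like iter_files above, but yields '' rather than '.' for files that
    # live directly under base.
    for root, _, files in os.walk(base):
        rel = os.path.relpath(root, base)
        module = '' if rel == '.' else rel.replace(os.path.sep, '.')
        for filename in files:
            yield module, filename, os.path.join(root, filename)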
Example #55
 def walk(self, relpath, topdown=True):
   def onerror(error):
     raise OSError('Failed to walk below {}: {}'.format(relpath, error))
   for root, dirs, files in safe_walk(os.path.join(self.build_root, relpath), topdown=topdown, onerror=onerror):
     yield fast_relpath(root, self.build_root), dirs, files
Example #56
 def _maven_targets_excludes(repo_root):
   excludes = []
   for (dirpath, dirnames, filenames) in safe_walk(repo_root):
     if "pom.xml" in filenames:
       excludes.append(os.path.join(os.path.relpath(dirpath, start=repo_root), "target"))
   return excludes
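One wrinkle in the snippet above: when pom.xml sits at repo_root itself, os.path.relpath yields '.', producing './target'. A hedged standalone variant (stdlib os.walk in place of safe_walk; names illustrative) that normalizes that case:

import os

def maven_targets_excludes(repo_root):
    # Collect <module>/target directories for every Maven module under repo_root.
    excludes = []
    for dirpath, _, filenames in os.walk(repo_root):
        if 'pom.xml' in filenames:
            rel = os.path.relpath(dirpath, start=repo_root)
            # At the repo root relpath is '.', so normalize './target' to 'target'.
            excludes.append(os.path.normpath(os.path.join(rel, 'target')))
    return excludes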
Example #57
    def configure_jvm(self, extra_source_paths, extra_test_paths):
        """
      Configures this project's source sets returning the full set of targets the project is
      comprised of.  The full set can be larger than the initial set of targets when any of the
      initial targets only has partial ownership of its source set's directories.
    """

        # TODO(John Sirois): much waste lies here, revisit structuring for more readable and efficient
        # construction of source sets and excludes ... and add a test!

        analyzed_targets = OrderedSet()
        targeted = set()

        def relative_sources(target):
            sources = target.payload.sources.relative_to_buildroot()
            return [
                os.path.relpath(source, target.target_base)
                for source in sources
            ]

        def source_target(target):
            result = ((self.transitive or target in self.targets)
                      and target.has_sources()
                      and (not (self.skip_java and is_java(target))
                           and not (self.skip_scala and is_scala(target))))
            return result

        def configure_source_sets(relative_base,
                                  sources,
                                  is_test=False,
                                  resources_only=False):
            absolute_base = os.path.join(self.root_dir, relative_base)
            paths = set([os.path.dirname(source) for source in sources])
            for path in paths:
                absolute_path = os.path.join(absolute_base, path)
                # Note: this can add duplicate source paths to self.sources. We'll de-dup them
                # later, because we want to prefer test paths.
                targeted.add(absolute_path)
                source_set = SourceSet(self.root_dir,
                                       relative_base,
                                       path,
                                       is_test=is_test,
                                       resources_only=resources_only)
                self.sources.append(source_set)

        def find_source_basedirs(target):
            dirs = set()
            if source_target(target):
                absolute_base = os.path.join(self.root_dir, target.target_base)
                dirs.update([
                    os.path.join(absolute_base, os.path.dirname(source))
                    for source in relative_sources(target)
                ])
            return dirs

        def configure_target(target):
            if target not in analyzed_targets:
                analyzed_targets.add(target)
                self.has_scala = not self.skip_scala and (self.has_scala
                                                          or is_scala(target))

                # Hack for java_sources and Eclipse/IntelliJ: add java_sources to project
                if isinstance(target, ScalaLibrary):
                    for java_source in target.java_sources:
                        configure_target(java_source)

                # Resources are already in the target set
                if target.has_resources:
                    resources_by_basedir = defaultdict(set)
                    for resources in target.resources:
                        analyzed_targets.add(resources)
                        resources_by_basedir[resources.target_base].update(
                            relative_sources(resources))
                    for basedir, resources in resources_by_basedir.items():
                        self.resource_extensions.update(
                            Project.extract_resource_extensions(resources))
                        configure_source_sets(basedir,
                                              resources,
                                              is_test=target.is_test,
                                              resources_only=True)
                if target.has_sources():
                    test = target.is_test
                    self.has_tests = self.has_tests or test
                    base = target.target_base
                    configure_source_sets(base,
                                          relative_sources(target),
                                          is_test=test,
                                          resources_only=isinstance(
                                              target, Resources))

                # TODO(Garrett Malmquist): This is dead code, and should be redone/reintegrated.
                # Other BUILD files may specify sources in the same directory as this target. Those BUILD
                # files might be in parent directories (globs('a/b/*.java')) or even children directories if
                # this target globs children as well.  Gather all these candidate BUILD files to test for
                # sources they own that live in the directories this targets sources live in.
                target_dirset = find_source_basedirs(target)
                if not isinstance(target.address, BuildFileAddress):
                    return []  # Siblings only make sense for BUILD files.
                candidates = self.target_util.get_all_addresses(
                    target.address.build_file)
                for ancestor in target.address.build_file.ancestors():
                    candidates.update(
                        self.target_util.get_all_addresses(ancestor))
                for sibling in target.address.build_file.siblings():
                    candidates.update(
                        self.target_util.get_all_addresses(sibling))
                for descendant in target.address.build_file.descendants(
                        spec_excludes=self.spec_excludes):
                    candidates.update(
                        self.target_util.get_all_addresses(descendant))

                def is_sibling(target):
                    return source_target(
                        target) and target_dirset.intersection(
                            find_source_basedirs(target))

                return list(filter(is_sibling, [
                    self.target_util.get(a)
                    for a in candidates if a != target.address
                ]))

        resource_targets = []
        for target in self.targets:
            if isinstance(target, Resources):
                # Wait to process these until all resources that are reachable from other targets
                # have been processed. That way we'll only add a new SourceSet if this target has
                # never been seen before, which allows test resource SourceSets to properly keep
                # the is_test property.
                resource_targets.append(target)
            else:
                target.walk(configure_target, predicate=source_target)

        for target in resource_targets:
            target.walk(configure_target)

        def full_path(source_set):
            return os.path.join(source_set.root_dir, source_set.source_base,
                                source_set.path)

        # Check if there are any overlapping source_sets, and output an error message if so.
        # Overlapping source_sets cause serious problems with package name inference.
        overlap_error = (
            'SourceSets {current} and {previous} evaluate to the same full path.'
            ' This can be caused by multiple BUILD targets claiming the same source,'
            ' e.g., if a BUILD target in a parent directory contains an rglobs() while'
            ' a BUILD target in a subdirectory of that uses a globs() which claims the'
            ' same sources. This may cause package names to be inferred incorrectly (e.g.,'
            ' you might see src.com.foo.bar.Main instead of com.foo.bar.Main).'
        )
        source_full_paths = {}
        for source_set in sorted(self.sources, key=full_path):
            full = full_path(source_set)
            if full in source_full_paths:
                previous_set = source_full_paths[full]
                logger.debug(
                    overlap_error.format(current=source_set,
                                         previous=previous_set))
            source_full_paths[full] = source_set

        # We need to figure out excludes. In doing so there are two cases to consider:
        # 1.) targets that depend only on A should lead to an exclude of B:
        # A/BUILD
        # A/B/BUILD
        #
        # 2.) targets that depend on A and C should not lead to an exclude of B (that would wipe
        # out C):
        # A/BUILD
        # A/B
        # A/B/C/BUILD
        #
        # One approach: build the set of all paths and parent paths containing BUILD files our
        # targets depend on - these are unexcludable.

        unexcludable_paths = set()
        for source_set in self.sources:
            parent = os.path.join(self.root_dir, source_set.source_base,
                                  source_set.path)
            while True:
                unexcludable_paths.add(parent)
                parent, _ = os.path.split(parent)
                # no need to add the repo root or above, all source paths and extra paths are children
                if parent == self.root_dir:
                    break

        for source_set in self.sources:
            paths = set()
            source_base = os.path.join(self.root_dir, source_set.source_base)
            for root, dirs, _ in safe_walk(
                    os.path.join(source_base, source_set.path)):
                if dirs:
                    paths.update(
                        [os.path.join(root, directory) for directory in dirs])
            unused_children = paths - targeted
            if unused_children:
                for child in unused_children:
                    if child not in unexcludable_paths:
                        source_set.excludes.append(
                            os.path.relpath(child, source_base))

        targets = OrderedSet()
        for target in self.targets:
            target.walk(lambda target: targets.add(target), source_target)
        targets.update(analyzed_targets - targets)
        self.sources.extend(
            SourceSet(get_buildroot(), p, None, is_test=False)
            for p in extra_source_paths)
        self.sources.extend(
            SourceSet(get_buildroot(), p, None, is_test=True)
            for p in extra_test_paths)
        if self.use_source_root:
            self.sources = Project._collapse_by_source_root(
                self.context.source_roots, self.sources)
        self.sources = self.dedup_sources(self.sources)

        return targets
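The exclude computation above hinges on the upward walk that marks every ancestor of a source path as unexcludable. Isolated as a hedged sketch (the function name is illustrative, and it assumes every source dir actually lives under root_dir):

import os

def unexcludable_ancestors(root_dir, source_dirs):
    # Collect every ancestor of each source dir, up to but excluding root_dir;
    # excluding any of these paths would hide sources the project depends on.
    keep = set()
    for path in source_dirs:
        parent = path
        while parent != root_dir:
            keep.add(parent)
            parent = os.path.dirname(parent)
    return keep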
Example #58
File: jacoco.py Project: ryokugyu/pants
 def _iter_datafiles(self, output_dir):
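     # Scan output_dir recursively; the break below skips the remaining files
     # in a directory once its datafile is found, though the walk still
     # descends into subdirectories.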
     for root, _, files in safe_walk(output_dir):
         for f in files:
             if f == self._DATAFILE_NAME:
                 yield os.path.join(root, f)
                 break
Example #59
    def generate_doc(self, language_predicate, create_jvmdoc_command):
        """
    Generate an execute method given a language predicate and command to create documentation

    language_predicate: a function that accepts a target and returns True if the target is of that
                        language
    create_jvmdoc_command: (classpath, directory, *targets) -> command (string) that will generate
                           documentation documentation for targets
    """
        catalog = self.context.products.isrequired(self.jvmdoc().product_type)
        if catalog and self.combined:
            raise TaskError(
                'Cannot provide {} target mappings for combined output'.format(
                    self.jvmdoc().product_type))

        def docable(target):
            if not language_predicate(target):
                self.context.log.debug(
                    'Skipping [{}] because it does not pass the language predicate'
                    .format(target.address.spec))
                return False
            if not self._include_codegen and target.is_synthetic:
                self.context.log.debug(
                    'Skipping [{}] because it is a synthetic target'.format(
                        target.address.spec))
                return False
            for pattern in self._exclude_patterns:
                if pattern.search(target.address.spec):
                    self.context.log.debug(
                        "Skipping [{}] because it matches exclude pattern '{}'"
                        .format(target.address.spec, pattern.pattern))
                    return False
            return True

        targets = self.get_targets(predicate=docable)
        if not targets:
            return

        with self.invalidated(
                targets,
                invalidate_dependents=self.combined) as invalidation_check:

            def find_invalid_targets():
                invalid_targets = set()
                for vt in invalidation_check.invalid_vts:
                    invalid_targets.update(vt.targets)
                return invalid_targets

            invalid_targets = list(find_invalid_targets())
            if invalid_targets:
                if self.combined:
                    self._generate_combined(targets, create_jvmdoc_command)
                else:
                    self._generate_individual(invalid_targets,
                                              create_jvmdoc_command)

        if self.open and self.combined:
            try:
                desktop.ui_open(
                    os.path.join(self.workdir, 'combined', 'index.html'))
            except desktop.OpenError as e:
                raise TaskError(e)

        if catalog:
            for target in targets:
                gendir = self._gendir(target)
                jvmdocs = []
                for root, dirs, files in safe_walk(gendir):
                    jvmdocs.extend(
                        os.path.relpath(os.path.join(root, f), gendir)
                        for f in files)
                self.context.products.get(self.jvmdoc().product_type).add(
                    target, gendir, jvmdocs)
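The exclude-pattern filtering inside docable can be isolated into a small predicate factory. A hedged sketch (make_docable_predicate and the sample patterns are illustrative, not part of the task's API):

import re

def make_docable_predicate(exclude_patterns):
    # Compile the exclude regexes once, then reject any target whose address
    # spec matches one of them.
    compiled = [re.compile(p) for p in exclude_patterns]
    def docable(spec):
        return not any(p.search(spec) for p in compiled)
    return docable

# e.g. docable = make_docable_predicate([r'^examples/', r':tests?$'])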
Example #60
 def walk(self, relpath, topdown=True):
     for root, dirs, files in safe_walk(os.path.join(self.build_root, relpath),
                                        topdown=topdown):
         yield fast_relpath(root, self.build_root), dirs, files
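Both walk helpers above lean on pants' fast_relpath rather than os.path.relpath. A rough hedged stand-in that captures its contract (the path must actually live under the root, and equality yields the empty string):

import os

def fast_relpath_sketch(path, start):
    # Simplified stand-in: a strict prefix strip instead of os.path.relpath's
    # general (and slower) '..'-capable computation.
    if path == start:
        return ''
    prefix = start.rstrip(os.path.sep) + os.path.sep
    if not path.startswith(prefix):
        raise ValueError('{} is not a parent of {}'.format(start, path))
    return path[len(prefix):]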