Code Example #1
File: protobuf_gen.py Project: igmor/pants
  def genlang(self, lang, targets):
    bases, sources = self._calculate_sources(targets)
    bases = bases.union(self._proto_path_imports(targets))

    if lang == 'java':
      output_dir = self.java_out
      gen_flag = '--java_out'
    elif lang == 'python':
      output_dir = self.py_out
      gen_flag = '--python_out'
    else:
      raise TaskError('Unrecognized protobuf gen lang: %s' % lang)

    safe_mkdir(output_dir)
    gen = '%s=%s' % (gen_flag, output_dir)

    args = [self.protobuf_binary, gen]

    if self.plugins:
      for plugin in self.plugins:
        # TODO(Eric Ayers) Is it a good assumption that the generated source output dir is
        # acceptable for all plugins?
        args.append("--%s_protobuf_out=%s" % (plugin, output_dir))

    for base in bases:
      args.append('--proto_path=%s' % base)

    args.extend(sources)
    log.debug('Executing: %s' % ' '.join(args))
    process = subprocess.Popen(args)
    result = process.wait()
    if result != 0:
      raise TaskError('%s ... exited non-zero (%i)' % (self.protobuf_binary, result))
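
Every example on this page funnels directory creation through `safe_mkdir` from `pants.util.dirutil`. As a reference point, here is a minimal sketch of the behavior these call sites rely on, assuming the `safe_mkdir(directory, clean=False)` signature seen throughout; the real helper may differ in details.

import errno
import os
import shutil


def safe_mkdir(directory, clean=False):
  """Ensure `directory` exists; with `clean=True`, ensure it is also empty (sketch)."""
  if clean:
    # Drop any previous contents so the caller starts from an empty directory.
    shutil.rmtree(directory, ignore_errors=True)
  try:
    os.makedirs(directory)
  except OSError as e:
    # An already-existing directory is fine; any other failure is real.
    if e.errno != errno.EEXIST:
      raise
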
Code Example #2
File: go_fetch.py Project: peiyuwang/pants
  def _fetch_pkg(self, gopath, pkg, rev):
    """Fetch the package and setup symlinks."""
    fetcher = self._get_fetcher(pkg)
    root = fetcher.root()
    root_dir = os.path.join(self.workdir, 'fetches', root, rev)

    # Only fetch each remote root once.
    if not os.path.exists(root_dir):
      with temporary_dir() as tmp_fetch_root:
        fetcher.fetch(dest=tmp_fetch_root, rev=rev)
        safe_mkdir(root_dir)
        for path in os.listdir(tmp_fetch_root):
          shutil.move(os.path.join(tmp_fetch_root, path), os.path.join(root_dir, path))

    # TODO(John Sirois): Circle back and get rid of this symlink tree.
    # GoWorkspaceTask will further symlink a single package from the tree below into a
    # target's workspace when it could just be linking from the fetch_dir.  The only thing
    # standing in the way is a determination of what we want to artifact cache.  If we don't
    # want to cache fetched zips, linking straight from the fetch_dir works simply.  Otherwise
    # thought needs to be applied to using the artifact cache directly or synthesizing a
    # canonical owner target for the fetched files that 'child' targets (subpackages) can
    # depend on and share the fetch from.
    dest_dir = os.path.join(gopath, 'src', root)
    # We may have been `invalidate`d and not `clean-all`ed so we need a new empty symlink
    # chroot to avoid collision; thus `clean=True`.
    safe_mkdir(dest_dir, clean=True)
    for path in os.listdir(root_dir):
      os.symlink(os.path.join(root_dir, path), os.path.join(dest_dir, path))
Code Example #3
File: aapt_builder.py Project: Yasumoto/pants
  def execute(self):
    safe_mkdir(self.workdir)
    # TODO(mateor) map stderr and stdout to workunit streams (see CR 859)
    with self.context.new_workunit(name='apk-bundle', labels=[WorkUnit.MULTITOOL]):
      targets = self.context.targets(self.is_app)
      with self.invalidated(targets) as invalidation_check:
        invalid_targets = []
        for vt in invalidation_check.invalid_vts:
          invalid_targets.extend(vt.targets)
        for target in invalid_targets:
          # 'input_dirs' is the folder containing the Android dex file
          input_dirs = []
          # 'gen_out' holds resource folders (e.g. 'res')
          gen_out = []
          mapping = self.context.products.get('dex')
          for basedir in mapping.get(target):
            input_dirs.append(basedir)

          def gather_resources(target):
            """Gather the 'resource_dir' of the target"""
            if isinstance(target, AndroidResources):
              gen_out.append(os.path.join(get_buildroot(), target.resource_dir))

          target.walk(gather_resources)

          process = subprocess.Popen(self.render_args(target, gen_out, input_dirs))
          result = process.wait()
          if result != 0:
            raise TaskError('Android aapt tool exited non-zero ({code})'.format(code=result))
    for target in targets:
      self.context.products.get('apk').add(target, self.workdir).append(target.app_name + "-unsigned.apk")
Code Example #4
File: provides.py Project: digideskio/pants
  def execute(self):
    safe_mkdir(self.workdir)
    targets = self.context.targets()
    for conf in self.confs:
      outpath = os.path.join(self.workdir,
                             '{0}.{1}.provides'.format(IvyUtils.identify(targets)[1], conf))
      if self.transitive:
        outpath += '.transitive'
      ivyinfo = IvyUtils.parse_xml_report(self.context.target_roots, conf)
      jar_paths = OrderedSet()
      for root in self.target_roots:
        jar_paths.update(self.get_jar_paths(ivyinfo, root, conf))

      with open(outpath, 'w') as outfile:
        def do_write(s):
          outfile.write(s)
          if self.also_write_to_stdout:
            sys.stdout.write(s)
        for jar in jar_paths:
          do_write('# from jar %s\n' % jar)
          for line in self.list_jar(jar):
            if line.endswith('.class'):
              class_name = line[:-6].replace('/', '.')
              do_write(class_name)
              do_write('\n')
      self.context.log.info('Wrote provides information to %s' % outpath)
Code Example #5
File: base_test.py Project: jduan/pants
  def setUp(self):
    super(BaseTest, self).setUp()
    Goal.clear()
    Subsystem.reset()

    self.real_build_root = BuildRoot().path

    self.build_root = os.path.realpath(mkdtemp(suffix='_BUILD_ROOT'))
    self.addCleanup(safe_rmtree, self.build_root)

    self.pants_workdir = os.path.join(self.build_root, '.pants.d')
    safe_mkdir(self.pants_workdir)

    self.options = defaultdict(dict)  # scope -> key-value mapping.
    self.options[''] = {
      'pants_workdir': self.pants_workdir,
      'pants_supportdir': os.path.join(self.build_root, 'build-support'),
      'pants_distdir': os.path.join(self.build_root, 'dist'),
      'pants_configdir': os.path.join(self.build_root, 'config'),
      'cache_key_gen_version': '0-test',
    }

    BuildRoot().path = self.build_root
    self.addCleanup(BuildRoot().reset)

    # We need a pants.ini, even if empty. get_buildroot() uses its presence.
    self.create_file('pants.ini')
    self._build_configuration = BuildConfiguration()
    self._build_configuration.register_aliases(self.alias_groups)
    self.build_file_parser = BuildFileParser(self._build_configuration, self.build_root)
    self.address_mapper = BuildFileAddressMapper(self.build_file_parser, FilesystemBuildFile)
    self.build_graph = BuildGraph(address_mapper=self.address_mapper)
Code Example #6
File: jvm_compile.py Project: priyakoth/pants
  def prepare_execute(self, chunks):
    relevant_targets = list(itertools.chain(*chunks))

    # Target -> sources (relative to buildroot).
    # TODO(benjy): Should sources_by_target be available in all Tasks?
    self._sources_by_target = self._compute_sources_by_target(relevant_targets)

    # Update the classpath by adding relevant target's classes directories to its classpath.
    compile_classpath = self.context.products.get_data('compile_classpath')
    runtime_classpath = self.context.products.get_data('runtime_classpath', compile_classpath.copy)

    with self.context.new_workunit('validate-{}-analysis'.format(self._name)):
      for target in relevant_targets:
        cc = self.compile_context(target)
        safe_mkdir(cc.classes_dir)
        runtime_classpath.add_for_target(target, [(conf, cc.classes_dir) for conf in self._confs])
        self.validate_analysis(cc.analysis_file)

    # This ensures the workunit for the worker pool is set
    with self.context.new_workunit('isolation-{}-pool-bootstrap'.format(self._name)) \
            as workunit:
      # This uses workunit.parent as the WorkerPool's parent so that child workunits
      # of different pools will show up in order in the html output. This way the current running
      # workunit is on the bottom of the page rather than possibly in the middle.
      self._worker_pool = WorkerPool(workunit.parent,
                                     self.context.run_tracker,
                                     self._worker_count)
Code Example #7
File: jvm_compile.py Project: priyakoth/pants
    def work_for_vts(vts, compile_context, target_closure):
      progress_message = compile_context.target.address.spec
      cp_entries = self._compute_classpath_entries(classpath_products,
                                                   target_closure,
                                                   compile_context,
                                                   extra_compile_time_classpath)

      upstream_analysis = dict(self._upstream_analysis(compile_contexts, cp_entries))

      # Capture a compilation log if requested.
      log_file = self._capture_log_file(compile_context.target)

      # Double check the cache before beginning compilation
      hit_cache = check_cache(vts)
      incremental = False

      if not hit_cache:
        # Mutate analysis within a temporary directory, and move it to the final location
        # on success.
        tmpdir = os.path.join(self.analysis_tmpdir, compile_context.target.id)
        safe_mkdir(tmpdir)
        tmp_analysis_file = self._analysis_for_target(
            tmpdir, compile_context.target)
        # If the analysis exists for this context, it is an incremental compile.
        if os.path.exists(compile_context.analysis_file):
          incremental = True
          shutil.copy(compile_context.analysis_file, tmp_analysis_file)
        target, = vts.targets
        compile_vts(vts,
                    compile_context.sources,
                    tmp_analysis_file,
                    upstream_analysis,
                    cp_entries,
                    compile_context.classes_dir,
                    log_file,
                    progress_message,
                    target.platform)
        atomic_copy(tmp_analysis_file, compile_context.analysis_file)

        # Jar the compiled output.
        self._create_context_jar(compile_context)

      # Update the products with the latest classes.
      register_vts([compile_context])

      # We write to the cache only if we didn't hit during the double check, and optionally
      # only for clean builds.
      is_cacheable = not hit_cache and (self.get_options().incremental_caching or not incremental)
      self.context.log.debug(
          'Completed compile for {}. '
          'Hit cache: {}, was incremental: {}, is cacheable: {}, cache writes enabled: {}.'.format(
            compile_context.target.address.spec,
            hit_cache,
            incremental,
            is_cacheable,
            update_artifact_cache_vts_work is not None
            ))
      if is_cacheable and update_artifact_cache_vts_work:
        # Kick off the background artifact cache write.
        self._write_to_artifact_cache(vts, compile_context, update_artifact_cache_vts_work)
Code Example #8
File: cache_manager.py Project: lahosken/pants
  def __init__(self,
               results_dir_root,
               cache_key_generator,
               build_invalidator_dir,
               invalidate_dependents,
               fingerprint_strategy=None,
               invalidation_report=None,
               task_name=None,
               task_version=None,
               artifact_write_callback=lambda _: None):
    """
    :API: public
    """
    self._cache_key_generator = cache_key_generator
    self._task_name = task_name or 'UNKNOWN'
    self._task_version = task_version or 'Unknown_0'
    self._invalidate_dependents = invalidate_dependents
    self._invalidator = BuildInvalidator(build_invalidator_dir)
    self._fingerprint_strategy = fingerprint_strategy
    self._artifact_write_callback = artifact_write_callback
    self.invalidation_report = invalidation_report

    # Create the task-versioned prefix of the results dir, and a stable symlink to it (useful when debugging).
    self._results_dir_prefix = os.path.join(results_dir_root, sha1(self._task_version).hexdigest()[:12])
    safe_mkdir(self._results_dir_prefix)
    stable_prefix = os.path.join(results_dir_root, self._STABLE_DIR_NAME)
    safe_delete(stable_prefix)
    relative_symlink(self._results_dir_prefix, stable_prefix)
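
The `relative_symlink` helper above is not shown on this page. A plausible sketch, assuming it replaces any existing link and targets the source via a relative path (so the link survives a relocated workdir); this is an assumption, not the verbatim pants implementation:

import os


def relative_symlink(source_path, link_path):
  """Create or replace link_path as a relative symlink to source_path (sketch)."""
  if os.path.lexists(link_path):
    os.unlink(link_path)
  rel_source = os.path.relpath(source_path, os.path.dirname(link_path))
  os.symlink(rel_source, link_path)
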
Code Example #9
File: ide_gen.py Project: jcoveney/pants
  def map_internal_jars(self, targets):
    internal_jar_dir = os.path.join(self.gen_project_workdir, 'internal-libs')
    safe_mkdir(internal_jar_dir, clean=True)

    internal_source_jar_dir = os.path.join(self.gen_project_workdir, 'internal-libsources')
    safe_mkdir(internal_source_jar_dir, clean=True)

    internal_jars = self.context.products.get('jars')
    internal_source_jars = self.context.products.get('source_jars')
    for target in targets:
      mappings = internal_jars.get(target)
      if mappings:
        for base, jars in mappings.items():
          if len(jars) != 1:
            raise IdeGen.Error('Unexpected mapping, multiple jars for %s: %s' % (target, jars))

          jar = jars[0]
          cp_jar = os.path.join(internal_jar_dir, jar)
          shutil.copy(os.path.join(base, jar), cp_jar)

          cp_source_jar = None
          mappings = internal_source_jars.get(target)
          if mappings:
            for base, jars in mappings.items():
              if len(jars) != 1:
                raise IdeGen.Error(
                  'Unexpected mapping, multiple source jars for %s: %s' % (target, jars)
                )
              jar = jars[0]
              cp_source_jar = os.path.join(internal_source_jar_dir, jar)
              shutil.copy(os.path.join(base, jar), cp_source_jar)

          self._project.internal_jars.add(ClasspathEntry(cp_jar, source_jar=cp_source_jar))
Code Example #10
File: ivy_utils.py Project: ankurgarg1986/pants
  def symlink_cachepath(ivy_home, inpath, symlink_dir, outpath):
    """Symlinks all paths listed in inpath that are under ivy_home into symlink_dir.

    Preserves all other paths. Writes the resulting paths to outpath.
    Returns a map of path -> symlink to that path.
    """
    safe_mkdir(symlink_dir)
    with safe_open(inpath, 'r') as infile:
      paths = filter(None, infile.read().strip().split(os.pathsep))
    new_paths = []
    for path in paths:
      if not path.startswith(ivy_home):
        new_paths.append(path)
        continue
      symlink = os.path.join(symlink_dir, os.path.relpath(path, ivy_home))
      try:
        os.makedirs(os.path.dirname(symlink))
      except OSError as e:
        if e.errno != errno.EEXIST:
          raise
      # Note: The try blocks cannot be combined. It may be that the dir exists but the link doesn't.
      try:
        os.symlink(path, symlink)
      except OSError as e:
        # We don't delete and recreate the symlink, as this may break concurrently executing code.
        if e.errno != errno.EEXIST:
          raise
      new_paths.append(symlink)
    with safe_open(outpath, 'w') as outfile:
      outfile.write(':'.join(new_paths))
    symlink_map = dict(zip(paths, new_paths))
    return symlink_map
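
A hypothetical invocation, with made-up paths: given a file of `os.pathsep`-separated classpath entries, every entry under the Ivy cache is replaced with a stable symlink and the rewritten classpath is written back out.

symlink_map = symlink_cachepath(
    ivy_home='/home/user/.ivy2/cache',    # hypothetical Ivy cache location
    inpath='/tmp/raw_classpath.txt',      # os.pathsep-separated entries
    symlink_dir='/tmp/ivy_symlinks',
    outpath='/tmp/stable_classpath.txt')
# symlink_map maps each original cache path to the symlink now standing in for it.
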
Code Example #11
File: ivy_utils.py Project: ankurgarg1986/pants
  def _generate_ivy(self, targets, jars, excludes, ivyxml, confs):
    org, name = self.identify(targets)

    # As it turns out force is not transitive - it only works for dependencies pants knows about
    # directly (declared in BUILD files - present in generated ivy.xml). The user-level ivy docs
    # don't make this clear [1], but the source code docs do (see isForce docs) [2]. I was able to
    # edit the generated ivy.xml and use the override feature [3] though and that does work
    # transitively as you'd hope.
    #
    # [1] http://ant.apache.org/ivy/history/2.3.0/settings/conflict-managers.html
    # [2] https://svn.apache.org/repos/asf/ant/ivy/core/branches/2.3.0/
    #     src/java/org/apache/ivy/core/module/descriptor/DependencyDescriptor.java
    # [3] http://ant.apache.org/ivy/history/2.3.0/ivyfile/override.html
    dependencies = [self._generate_jar_template(jar, confs) for jar in jars]
    overrides = [self._generate_override_template(dep) for dep in dependencies if dep.force]

    excludes = [self._generate_exclude_template(exclude) for exclude in excludes]

    template_data = TemplateData(
        org=org,
        module=name,
        version='latest.integration',
        publications=None,
        configurations=confs,
        dependencies=dependencies,
        excludes=excludes,
        overrides=overrides)

    safe_mkdir(os.path.dirname(ivyxml))
    with open(ivyxml, 'w') as output:
      generator = Generator(pkgutil.get_data(__name__, self._template_path),
                            root_dir=get_buildroot(),
                            lib=template_data)
      generator.write(output)
Code Example #12
  def test_pantsd_invalidation_stale_sources(self):
    test_path = 'tests/python/pants_test/daemon_correctness_test_0001'
    test_build_file = os.path.join(test_path, 'BUILD')
    test_src_file = os.path.join(test_path, 'some_file.py')
    has_source_root_regex = r'"source_root": ".*/{}"'.format(test_path)
    export_cmd = ['export', test_path]

    try:
      with self.pantsd_successful_run_context() as (pantsd_run, checker, workdir, _):
        safe_mkdir(test_path, clean=True)

        pantsd_run(['help'])
        checker.assert_started()

        safe_file_dump(test_build_file, "python_library(sources=globs('some_non_existent_file.py'))")
        result = pantsd_run(export_cmd)
        checker.assert_running()
        self.assertNotRegexpMatches(result.stdout_data, has_source_root_regex)

        safe_file_dump(test_build_file, "python_library(sources=globs('*.py'))")
        result = pantsd_run(export_cmd)
        checker.assert_running()
        self.assertNotRegexpMatches(result.stdout_data, has_source_root_regex)

        safe_file_dump(test_src_file, 'import this\n')
        result = pantsd_run(export_cmd)
        checker.assert_running()
        self.assertRegexpMatches(result.stdout_data, has_source_root_regex)
    finally:
      rm_rf(test_path)
Code Example #13
File: test_protobuf_gen.py Project: Gabriel439/pants
  def test_protos_extracted_under_build_root(self):
    """This testcase shows that you can put sources for protos outside the directory where the
    BUILD file is defined. This will be the case for .proto files that have been extracted
    under .pants.d.
    """
    # Place a .proto file outside the directory where the BUILD file is defined.
    extracted_source_path = os.path.join(self.build_root, 'extracted-source')
    SourceRoot.register(extracted_source_path, JavaProtobufLibrary)
    safe_mkdir(os.path.join(extracted_source_path, 'sample-package'))
    sample_proto_path = os.path.join(extracted_source_path, 'sample-package', 'sample.proto')
    with open(sample_proto_path, 'w') as sample_proto:
      sample_proto.write(dedent('''
            package com.example;
            message sample {}
          '''))
    self.add_to_build_file('sample', dedent('''
        java_protobuf_library(name='sample',
          sources=['{sample_proto_path}'],
        )''').format(sample_proto_path=sample_proto_path))
    target = self.target("sample:sample")
    context = self.context(target_roots=[target])
    task = self.create_task(context=context)
    sources_by_base = task._calculate_sources([target])
    self.assertEquals(['extracted-source'], sources_by_base.keys())
    self.assertEquals(OrderedSet([sample_proto_path]), sources_by_base['extracted-source'])
Code Example #14
File: go_compile.py Project: cosmicexplorer/pants
  def _sync_binary_dep_links(self, target, gopath, lib_binary_map):
    """Syncs symlinks under gopath to the library binaries of target's transitive dependencies.

    :param Target target: Target whose transitive dependencies must be linked.
    :param str gopath: $GOPATH of target whose "pkg/" directory must be populated with links
                       to library binaries.
    :param dict<Target, str> lib_binary_map: Dictionary mapping a remote/local Go library to the
                                             path of the compiled binary (the ".a" file) of the
                                             library.

    Required links to binary dependencies under gopath's "pkg/" dir are either created if
    non-existent, or refreshed if the link is older than the underlying binary. Any pre-existing
    links within gopath's "pkg/" dir that do not correspond to a transitive dependency of target
    are deleted.
    """
    required_links = set()
    for dep in target.closure():
      if dep == target:
        continue
      if not isinstance(dep, GoTarget):
        continue
      lib_binary = lib_binary_map[dep]
      lib_binary_link = os.path.join(gopath, os.path.relpath(lib_binary, self.get_gopath(dep)))
      safe_mkdir(os.path.dirname(lib_binary_link))
      if os.path.islink(lib_binary_link):
        if os.stat(lib_binary).st_mtime > os.lstat(lib_binary_link).st_mtime:
          # The binary under the link was updated after the link was created. Refresh
          # the link so the mtime (modification time) of the link is greater than the
          # mtime of the binary. This stops Go from needlessly re-compiling the library.
          os.unlink(lib_binary_link)
          os.symlink(lib_binary, lib_binary_link)
      else:
        os.symlink(lib_binary, lib_binary_link)
      required_links.add(lib_binary_link)
    self.remove_unused_links(os.path.join(gopath, 'pkg'), required_links)
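
`remove_unused_links` is not shown here; a minimal sketch of the cleanup it implies, deleting any symlink under the "pkg/" tree that is not in the required set (an assumption based on the call above, not the verbatim implementation):

import os


def remove_unused_links(link_dir, required_links):
  """Delete symlinks under link_dir that are not in required_links (sketch)."""
  for root, _, files in os.walk(link_dir):
    for name in files:
      path = os.path.join(root, name)
      if os.path.islink(path) and path not in required_links:
        os.unlink(path)
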
Code Example #15
def main():
  """Anonymize a set of analysis files using the same replacements in all of them.

  This maintains enough consistency to make splitting/merging tests realistic.

  To run:

  ./pants goal run src/python/pants/backend/jvm/tasks/jvm_compile:anonymize_zinc_analysis -- \
    <wordfile> <classes dir in analysis files> <analysis file glob 1> <analysis file glob 2> ...
  """
  word_file = sys.argv[1]
  classes_dir = sys.argv[2]
  analysis_files = list(itertools.chain.from_iterable([glob.glob(p) for p in sys.argv[3:]]))

  with open(word_file, 'r') as infile:
    word_list = infile.read().split()
  anonymizer = Anonymizer(word_list)
  for analysis_file in analysis_files:
    analysis = ZincAnalysisParser(classes_dir).parse_from_path(analysis_file)
    analysis.anonymize(anonymizer)
    output_dir = os.path.join(os.path.dirname(analysis_file), 'anon')
    safe_mkdir(output_dir)
    anonymized_filename = anonymizer.convert(os.path.basename(analysis_file))
    analysis.write_to_path(os.path.join(output_dir, anonymized_filename))
  anonymizer.check_for_comprehensiveness()
Code Example #16
  def test_addresses_in_spec_path_no_build_files(self):
    self.create_build_files()
    safe_mkdir(os.path.join(self.build_root, 'foo'))
    mapper = self.address_mapper
    with self.assertRaises(AddressMapper.BuildFileScanError) as cm:
      mapper.addresses_in_spec_path('foo')
    self.assertIn('does not match any targets.', str(cm.exception))
Code Example #17
  def _do_test_caching(self, *compiles):
    """Tests that the given compiles within the same workspace produce the given artifact counts."""
    with temporary_dir() as cache_dir, \
        self.temporary_workdir() as workdir, \
        temporary_dir(root_dir=get_buildroot()) as src_dir:

      def complete_config(config):
        # Clone the input config and add cache settings.
        cache_settings = {'write_to': [cache_dir], 'read_from': [cache_dir]}
        return dict(config.items() + [('cache.compile.zinc', cache_settings)])

      buildfile = os.path.join(src_dir, 'BUILD')
      spec = os.path.join(src_dir, ':cachetest')
      artifact_dir = os.path.join(cache_dir,
                                  ZincCompile.stable_name(),
                                  '{}.cachetest'.format(os.path.basename(src_dir)))

      for c in compiles:
        # Clear the src directory and recreate the files.
        safe_mkdir(src_dir, clean=True)
        self.create_file(buildfile,
                         """java_library(name='cachetest', sources=rglobs('*.java', '*.scala'))""")
        for name, content in c.srcfiles.items():
          self.create_file(os.path.join(src_dir, name), content)

        # Compile, and confirm that we have the right count of artifacts.
        self.run_compile(spec, complete_config(c.config), workdir)
        self.assertEquals(c.artifact_count, len(os.listdir(artifact_dir)))
Code Example #18
File: jacoco.py Project: baroquebobcat/pants
  def report(self, output_dir, execution_failed_exception=None):
    if execution_failed_exception:
      self._settings.log.warn('Test failed: {}'.format(execution_failed_exception))
      if self._coverage_force:
        self._settings.log.warn('Generating report even though tests failed, because the '
                                'coverage-force flag is set.')
      else:
        return

    report_dir = os.path.join(output_dir, 'coverage', 'reports')
    safe_mkdir(report_dir, clean=True)

    datafiles = list(self._iter_datafiles(output_dir))
    if len(datafiles) == 1:
      datafile = datafiles[0]
    else:
      datafile = os.path.join(output_dir, '{}.merged'.format(self._DATAFILE_NAME))
      args = ['merge'] + datafiles + ['--destfile={}'.format(datafile)]
      self._execute_jacoco_cli(workunit_name='jacoco-merge', args=args)

    for report_format in ('xml', 'csv', 'html'):
      target_path = os.path.join(report_dir, report_format)
      args = (['report', datafile] +
              self._get_target_classpaths() +
              self._get_source_roots() +
              ['--{report_format}={target_path}'.format(report_format=report_format,
                                                        target_path=target_path)])
      self._execute_jacoco_cli(workunit_name='jacoco-report-' + report_format, args=args)

    if self._settings.coverage_open:
      return os.path.join(report_dir, 'html', 'index.html')
Code Example #19
  def store_and_use_artifact(self, cache_key, src, results_dir=None):
    """Store and then extract the artifact from the given `src` iterator for the given cache_key.

    :param cache_key: Cache key for the artifact.
    :param src: Iterator over binary data to store for the artifact.
    :param str results_dir: The path to the expected destination of the artifact extraction: will
      be cleared both before extraction, and after a failure to extract.
    """
    with self._tmpfile(cache_key, 'read') as tmp:
      for chunk in src:
        tmp.write(chunk)
      tmp.close()
      tarball = self._store_tarball(cache_key, tmp.name)
      artifact = self._artifact(tarball)

      if results_dir is not None:
        safe_mkdir(results_dir, clean=True)

      try:
        artifact.extract()
      except Exception:
        # Do our best to clean up after a failed artifact extraction. If a results_dir has been
        # specified, it is "expected" to represent the output destination of the extracted
        # artifact, and so removing it should clear any partially extracted state.
        if results_dir is not None:
          safe_mkdir(results_dir, clean=True)
        safe_delete(tarball)
        raise

      return True
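
A hypothetical call, with made-up names (`cache` is an instance of the class defining this method): stream an artifact's bytes into the cache and extract them, letting the method clear `results_dir` both before extraction and after a failed one.

chunks = iter([b'...'])  # any iterator over the artifact's tarball bytes
cache.store_and_use_artifact(cache_key, chunks,
                             results_dir='/tmp/results/my_target')
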
Code Example #20
File: jvm_compile.py Project: cheister/pants
  def compile_chunk(self,
                    invalidation_check,
                    compile_contexts,
                    invalid_targets,
                    extra_compile_time_classpath_elements):
    """Executes compilations for the invalid targets contained in a single chunk."""
    assert invalid_targets, "compile_chunk should only be invoked if there are invalid targets."

    # Prepare the output directory for each invalid target, and confirm that analysis is valid.
    for target in invalid_targets:
      cc = compile_contexts[target]
      safe_mkdir(cc.classes_dir)
      self.validate_analysis(cc.analysis_file)

    # Get the classpath generated by upstream JVM tasks and our own prepare_compile().
    classpath_products = self.context.products.get_data('runtime_classpath')

    extra_compile_time_classpath = self._compute_extra_classpath(
        extra_compile_time_classpath_elements)

    # Now create compile jobs for each invalid target one by one.
    jobs = self._create_compile_jobs(classpath_products,
                                     compile_contexts,
                                     extra_compile_time_classpath,
                                     invalid_targets,
                                     invalidation_check.invalid_vts)

    exec_graph = ExecutionGraph(jobs)
    try:
      exec_graph.execute(self._worker_pool, self.context.log)
    except ExecutionFailure as e:
      raise TaskError("Compilation failure: {}".format(e))
Code Example #21
  def run_thrifts(self):
    """
    Generate Python thrift code.

    Thrift fields conflicting with Python keywords are suffixed with a trailing
    underscore (e.g.: from_).
    """

    def is_py_thrift(target):
      return isinstance(target, PythonThriftLibrary)

    all_thrifts = set()

    def collect_sources(target):
      abs_target_base = os.path.join(get_buildroot(), target.target_base)
      for source in target.payload.sources.relative_to_buildroot():
        source_root_relative_source = os.path.relpath(source, abs_target_base)
        all_thrifts.add((target.target_base, source_root_relative_source))

    self.target.walk(collect_sources, predicate=is_py_thrift)

    copied_sources = set()
    for base, relative_source in all_thrifts:
      abs_source = os.path.join(base, relative_source)
      copied_source = os.path.join(self._workdir, relative_source)

      safe_mkdir(os.path.dirname(copied_source))
      shutil.copyfile(abs_source, copied_source)
      copied_sources.add(copied_source)

    for src in copied_sources:
      if not self._run_thrift(src):
        raise PythonThriftBuilder.CodeGenerationException(
          "Could not generate .py from {}!".format(src))
Code Example #22
File: artifact_cache_stats.py Project: pythorn/pants
    def __init__(self, dir=None):
        def init_stat():
            return CacheStat([], [])

        self.stats_per_cache = defaultdict(init_stat)
        self._dir = dir
        safe_mkdir(self._dir)
Code Example #23
File: junit_run.py Project: tsdeng/pants
  def instrument(self, targets, tests, compute_junit_classpath):
    junit_classpath = compute_junit_classpath()
    safe_mkdir(self._coverage_instrument_dir, clean=True)
    self._emma_classpath = self._task_exports.tool_classpath('emma')
    with binary_util.safe_args(self.get_coverage_patterns(targets),
                               self._task_exports.task_options) as patterns:
      args = [
        'instr',
        '-out', self._coverage_metadata_file,
        '-d', self._coverage_instrument_dir,
        '-cp', os.pathsep.join(junit_classpath),
        '-exit',
      ]
      for pattern in patterns:
        args.extend(['-filter', pattern])
      main = 'emma'
      execute_java = self.preferred_jvm_distribution_for_targets(targets).execute_java
      result = execute_java(classpath=self._emma_classpath,
                            main=main,
                            jvm_options=self._coverage_jvm_options,
                            args=args,
                            workunit_factory=self._context.new_workunit,
                            workunit_name='emma-instrument')
      if result != 0:
        raise TaskError("java {0} ... exited non-zero ({1})"
                        " 'failed to instrument'".format(main, result))
Code Example #24
  def _rearrange_output_for_package(self, target_workdir, java_package):
    """Rearrange the output files to match a standard Java structure.

    Antlr emits a directory structure based on the relative path provided
    for the grammar file. If the source root of the file is different from
    the Pants build root, then the Java files end up with undesired parent
    directories.
    """
    package_dir_rel = java_package.replace('.', os.path.sep)
    package_dir = os.path.join(target_workdir, package_dir_rel)
    safe_mkdir(package_dir)
    for root, dirs, files in safe_walk(target_workdir):
      if root == package_dir:
        # This path is already in the correct location
        continue
      for f in files:
        os.rename(
          os.path.join(root, f),
          os.path.join(package_dir, f)
        )

    # Remove any empty directories that were left behind
    for root, dirs, files in safe_walk(target_workdir, topdown=False):
      for d in dirs:
        full_dir = os.path.join(root, d)
        if not os.listdir(full_dir):
          os.rmdir(full_dir)
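
A worked illustration with hypothetical paths: Antlr mirrors the grammar file's source-root-relative path, and this method collapses that onto the package path.

# Before: <target_workdir>/src/antlr/com/example/Lexer.java
#         (Antlr mirrored the grammar's relative path)
# After:  <target_workdir>/com/example/Lexer.java
#         (moved under the directory derived from java_package='com.example',
#          with the now-empty 'src/antlr/...' directories removed)
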
Code Example #25
File: cpp_compile.py Project: MathewJennings/pants
  def _compile(self, target, source):
    """Compile given source to an object file."""
    obj = self._objpath(target, source)
    safe_mkdir(os.path.dirname(obj))

    abs_source = os.path.join(get_buildroot(), source)

    # TODO: include dir should include dependent work dir when headers are copied there.
    include_dirs = []
    for dep in target.dependencies:
      if self.is_library(dep):
        include_dirs.extend([os.path.join(get_buildroot(), dep.target_base)])

    cmd = [self.cpp_toolchain.compiler]
    cmd.extend(['-c'])
    cmd.extend(('-I{0}'.format(i) for i in include_dirs))
    cmd.extend(['-o' + obj, abs_source])
    if self.get_options().cc_options is not None:
      cmd.extend([self.get_options().cc_options])

    # TODO: submit_async_work with self.run_command, [(cmd)] as a Work object.
    with self.context.new_workunit(name='cpp-compile', labels=[WorkUnit.COMPILER]) as workunit:
      self.run_command(cmd, workunit)

    self.context.log.info('Built c++ object: {0}'.format(obj))
Code Example #26
  def test_local_backed_remote_cache_corrupt_artifact(self):
    """Ensure that a combined cache clears outputs after a failure to extract an artifact."""
    with temporary_dir() as remote_cache_dir:
      with self.setup_server(cache_root=remote_cache_dir) as server:
        with self.setup_local_cache() as local:
          tmp = TempLocalArtifactCache(local.artifact_root, compression=1)
          remote = RESTfulArtifactCache(local.artifact_root, BestUrlSelector([server.url]), tmp)
          combined = RESTfulArtifactCache(local.artifact_root, BestUrlSelector([server.url]), local)

          key = CacheKey('muppet_key', 'fake_hash')

          results_dir = os.path.join(local.artifact_root, 'a/sub/dir')
          safe_mkdir(results_dir)
          self.assertTrue(os.path.exists(results_dir))

          with self.setup_test_file(results_dir) as path:
            # Add to only the remote cache.
            remote.insert(key, [path])

            # Corrupt the artifact in the remote storage.
            self.assertTrue(server.corrupt_artifacts(r'.*muppet_key.*') == 1)

            # An attempt to read the corrupt artifact should fail.
            self.assertFalse(combined.use_cached_files(key, results_dir=results_dir))

            # The local artifact should not have been stored, and the results_dir should exist,
            # but be empty.
            self.assertFalse(local.has(key))
            self.assertTrue(os.path.exists(results_dir))
            self.assertTrue(len(os.listdir(results_dir)) == 0)
Code Example #27
File: errorprone.py Project: JieGhost/pants
  def errorprone(self, target):
    runtime_classpaths = self.context.products.get_data('runtime_classpath')
    runtime_classpath = [jar for conf, jar in runtime_classpaths.get_for_targets(target.closure(bfs=True))]

    output_dir = os.path.join(self.workdir, target.id)
    safe_mkdir(output_dir)
    runtime_classpath.append(output_dir)

    args = [
      '-classpath', ':'.join(runtime_classpath),
      '-d', output_dir,
    ]

    for opt in self.get_options().command_line_options:
      args.extend(safe_shlex_split(opt))

    args.extend(self.calculate_sources(target))

    result = self.runjava(classpath=self.tool_classpath('errorprone'),
                          main=self._ERRORPRONE_MAIN,
                          jvm_options=self.get_options().jvm_options,
                          args=args,
                          workunit_name='errorprone',
                          workunit_labels=[WorkUnitLabel.LINT])

    self.context.log.debug('java {main} ... exited with result ({result})'.format(
                           main=self._ERRORPRONE_MAIN, result=result))

    return result
Code Example #28
File: go_thrift_gen.py Project: JieGhost/pants
  def _generate_thrift(self, target, target_workdir):
    target_cmd = self._thrift_cmd[:]

    bases = OrderedSet(tgt.target_base for tgt in target.closure() if self.is_gentarget(tgt))
    for base in bases:
      target_cmd.extend(('-I', base))

    target_cmd.extend(('-o', target_workdir))

    all_sources = list(target.sources_relative_to_buildroot())
    if len(all_sources) != 1:
      raise TaskError(
        'go_thrift_library only supports a single .thrift source file for {}.'.format(target))

    source = all_sources[0]
    target_cmd.append(os.path.join(get_buildroot(), source))
    with self.context.new_workunit(name=source,
                                   labels=[WorkUnitLabel.TOOL],
                                   cmd=' '.join(target_cmd)) as workunit:
      result = subprocess.call(target_cmd,
                               stdout=workunit.output('stdout'),
                               stderr=workunit.output('stderr'))
      if result != 0:
        raise TaskError('{} ... exited non-zero ({})'.format(self._thrift_binary, result))

    gen_dir = os.path.join(target_workdir, 'gen-go')
    src_dir = os.path.join(target_workdir, 'src')
    safe_mkdir(src_dir)
    go_dir = os.path.join(target_workdir, 'src', 'go')
    os.rename(gen_dir, go_dir)
Code Example #29
File: jvm_compile.py Project: aaronmitchell/pants
  def _record_compile_classpath(self, classpath, targets, outdir):
    text = '\n'.join(classpath)
    for target in targets:
      path = os.path.join(outdir, 'compile_classpath', '{}.txt'.format(target.id))
      safe_mkdir(os.path.dirname(path), clean=False)
      with open(path, 'w') as f:
        f.write(text)
Code Example #30
File: cobertura.py Project: foursquare/pants
  def report(self, output_dir, execution_failed_exception=None):
    if self.should_report(execution_failed_exception):
      datafiles = list(self._iter_datafiles(output_dir))
      if len(datafiles) == 1:
        datafile = datafiles[0]
      else:
        datafile = os.path.join(output_dir, '{}.merged'.format(self._DATAFILE_NAME))
        self._execute_cobertura(workunit_name='cobertura-merge',
                                tool_classpath='cobertura-merge',
                                main='net.sourceforge.cobertura.merge.MergeMain',
                                args=['--datafile', datafile] + datafiles)

      base_report_dir = os.path.join(output_dir, 'coverage', 'reports')
      safe_mkdir(base_report_dir, clean=True)

      source_roots = {t.target_base for t in self._targets if Cobertura.is_coverage_target(t)}
      base_args = list(source_roots) + ['--datafile', datafile]
      for report_format in ('xml', 'html'):
        report_dir = os.path.join(base_report_dir, report_format)
        safe_mkdir(report_dir, clean=True)
        self._execute_cobertura(workunit_name='cobertura-report-{}'.format(report_format),
                                tool_classpath='cobertura-report',
                                main='net.sourceforge.cobertura.reporting.ReportMain',
                                args=base_args + ['--destination', report_dir,
                                                  '--format', report_format])
      if self._settings.coverage_open:
        return os.path.join(base_report_dir, 'html', 'index.html')
Code Example #31
File: reporting.py Project: jdh339/pants
    def update_reporting(self, global_options, is_quiet, run_tracker):
        """Updates reporting config once we've parsed cmd-line flags."""

        # Get any output silently buffered in the old console reporter, and remove it.
        removed_reporter = run_tracker.report.remove_reporter('capturing')
        buffered_out = self._consume_stringio(
            removed_reporter.settings.outfile)
        buffered_err = self._consume_stringio(
            removed_reporter.settings.errfile)

        log_level = Report.log_level_from_string(global_options.level
                                                 or 'info')
        # Ideally, we'd use terminfo or somesuch to discover whether a
        # terminal truly supports color, but most that don't set TERM=dumb.
        color = global_options.colors and (os.getenv('TERM') != 'dumb')
        timing = global_options.time
        cache_stats = global_options.time  # TODO: Separate flag for this?

        if is_quiet:
            console_reporter = QuietReporter(
                run_tracker,
                QuietReporter.Settings(log_level=log_level,
                                       color=color,
                                       timing=timing,
                                       cache_stats=cache_stats))
        else:
            # Set up the new console reporter.
            stdout = sys.stdout.buffer if PY3 else sys.stdout
            stderr = sys.stderr.buffer if PY3 else sys.stderr
            settings = PlainTextReporter.Settings(
                log_level=log_level,
                outfile=stdout,
                errfile=stderr,
                color=color,
                indent=True,
                timing=timing,
                cache_stats=cache_stats,
                label_format=self.get_options().console_label_format,
                tool_output_format=self.get_options(
                ).console_tool_output_format)
            console_reporter = PlainTextReporter(run_tracker, settings)
            console_reporter.emit(buffered_out, dest=ReporterDestination.OUT)
            console_reporter.emit(buffered_err, dest=ReporterDestination.ERR)
            console_reporter.flush()
        run_tracker.report.add_reporter('console', console_reporter)

        if global_options.logdir:
            # Also write plaintext logs to a file. This is completely separate from the html reports.
            safe_mkdir(global_options.logdir)
            run_id = run_tracker.run_info.get_info('id')
            outfile = open(
                os.path.join(global_options.logdir, '{}.log'.format(run_id)),
                'wb')
            errfile = open(
                os.path.join(global_options.logdir,
                             '{}.err.log'.format(run_id)), 'wb')
            settings = PlainTextReporter.Settings(
                log_level=log_level,
                outfile=outfile,
                errfile=errfile,
                color=False,
                indent=True,
                timing=True,
                cache_stats=True,
                label_format=self.get_options().console_label_format,
                tool_output_format=self.get_options(
                ).console_tool_output_format)
            logfile_reporter = PlainTextReporter(run_tracker, settings)
            logfile_reporter.emit(buffered_out, dest=ReporterDestination.OUT)
            logfile_reporter.emit(buffered_err, dest=ReporterDestination.ERR)
            logfile_reporter.flush()
            run_tracker.report.add_reporter('logfile', logfile_reporter)

        invalidation_report = self._get_invalidation_report()
        if invalidation_report:
            run_id = run_tracker.run_info.get_info('id')
            outfile = os.path.join(self.get_options().reports_dir, run_id,
                                   'invalidation-report.csv')
            invalidation_report.set_filename(outfile)

        return invalidation_report
Code Example #32
  def write(package, name, content):
    package_path = os.path.join(td, SetupPy.SOURCE_ROOT, to_path(package))
    safe_mkdir(os.path.dirname(os.path.join(package_path, name)))
    with open(os.path.join(package_path, name), 'w') as fp:
      fp.write(content)
Code Example #33
File: watchman.py Project: zvikihouzz/pants
  def _maybe_init_metadata(self):
    safe_mkdir(self._watchman_work_dir)
    # Initialize watchman with an empty, but valid statefile so it doesn't complain on startup.
    safe_file_dump(self._state_file, b'{}', mode='wb')
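
`safe_file_dump` (also used in code example #12 above) is not shown on this page; a minimal self-contained sketch, assuming it creates the parent directory and writes the payload in the given mode:

import os


def safe_file_dump(filename, payload='', mode='w'):
  """Write payload to filename, creating its parent directory first (sketch)."""
  parent = os.path.dirname(filename)
  if parent and not os.path.isdir(parent):
    os.makedirs(parent)
  with open(filename, mode) as f:
    f.write(payload)
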
Code Example #34
  def create_results_dir(self, dir):
    safe_mkdir(dir)
    self._results_dir = dir
Code Example #35
File: interpreter_cache.py Project: lgirault/pants
  def _cache_dir(self):
    cache_dir = self.python_setup.interpreter_cache_dir
    safe_mkdir(cache_dir)
    return cache_dir
Code Example #36
File: jvm_compile.py Project: omerzach/pants
  def _capture_logs(self, workunit, destination):
    safe_mkdir(destination, clean=True)
    for idx, name, output_name, path in self._find_logs(workunit):
      os.link(path, os.path.join(destination, '{}-{}-{}.log'.format(name, idx, output_name)))
Code Example #37
    def execute(self):
        if self.context.products.is_required_data('resources_by_target'):
            self.context.products.safe_create_data(
                'resources_by_target',
                lambda: defaultdict(MultipleRootedProducts))

        # `targets` contains the transitive subgraph in pre-order, which is approximately how
        # we want them ordered on the classpath. Thus, we preserve ordering here.
        targets = self.context.targets()
        if len(targets) == 0:
            return

        def extract_resources(target):
            return target.resources if target.has_resources else ()

        all_resources_tgts = OrderedSet()
        for resources_tgts in map(extract_resources, targets):
            all_resources_tgts.update(resources_tgts)

        def compute_target_dir(tgt):
            # Sources are all relative to their roots: relativize directories as well
            # to avoid breaking filesystem limits.
            return relativize_path(os.path.join(self.workdir, tgt.id),
                                   self._buildroot)

        with self.invalidated(all_resources_tgts) as invalidation_check:
            invalid_targets = set()
            for vt in invalidation_check.invalid_vts:
                invalid_targets.update(vt.targets)

            for resources_tgt in invalid_targets:
                target_dir = compute_target_dir(resources_tgt)
                safe_mkdir(target_dir, clean=True)
                for resource_file_from_source_root in resources_tgt.sources_relative_to_source_root(
                ):
                    basedir = os.path.dirname(resource_file_from_source_root)
                    destdir = os.path.join(target_dir, basedir)
                    safe_mkdir(destdir)
                    # TODO: Symlink instead?
                    shutil.copy(
                        os.path.join(resources_tgt.target_base,
                                     resource_file_from_source_root),
                        os.path.join(target_dir,
                                     resource_file_from_source_root))

            resources_by_target = self.context.products.get_data(
                'resources_by_target')
            compile_classpath = self.context.products.get_data(
                'compile_classpath')

            for resources_tgt in all_resources_tgts:
                target_dir = compute_target_dir(resources_tgt)
                for conf in self.confs:
                    # TODO(John Sirois): Introduce the notion of RuntimeClasspath and populate that product
                    # instead of mutating the compile_classpath.
                    compile_classpath.add_for_targets(targets,
                                                      [(conf, target_dir)])
                if resources_by_target is not None:
                    resources_by_target[resources_tgt].add_rel_paths(
                        target_dir,
                        resources_tgt.sources_relative_to_source_root())
Code Example #38
    def _maybe_emit_coverage_data(self, targets, chroot, pex, workunit):
        coverage = self.get_options().coverage
        if coverage is None:
            yield []
            return

        def read_coverage_list(prefix):
            return coverage[len(prefix):].split(',')

        coverage_modules = None
        if coverage.startswith('modules:'):
            # NB: pytest-cov maps these modules to the `[run] sources` config.  So for
            # `modules:pants.base,pants.util` the config emitted has:
            # [run]
            # source =
            #   pants.base
            #   pants.util
            #
            # Now even though these are not paths, coverage sees the dots and switches to a module
            # prefix-matching mode.  Unfortunately, neither wildcards nor top-level module prefixes
            # like `pants.` serve to engage this module prefix-matching as one might hope.  It
            # appears that `pants.` is treated as a path and `pants.*` is treated as a literal
            # module prefix name.
            coverage_modules = read_coverage_list('modules:')
        elif coverage.startswith('paths:'):
            coverage_modules = []
            for path in read_coverage_list('paths:'):
                if not os.path.exists(path) and not os.path.isabs(path):
                    # Look for the source in the PEX chroot since it's not available from CWD.
                    path = os.path.join(chroot, path)
                coverage_modules.append(path)

        with self._cov_setup(
                targets, chroot,
                coverage_modules=coverage_modules) as (args, coverage_rc):
            try:
                yield args
            finally:
                with environment_as(PEX_MODULE='coverage.cmdline:main'):

                    def pex_run(args):
                        return self._pex_run(pex, workunit, args=args)

                    # On failures or timeouts, the .coverage file won't be written.
                    if not os.path.exists('.coverage'):
                        self.context.log.warn(
                            'No .coverage file was found! Skipping coverage reporting.'
                        )
                    else:
                        # Normalize .coverage.raw paths using combine and `paths` config in the rc file.
                        # This swaps the /tmp pex chroot source paths for the local original source paths
                        # the pex was generated from and which the user understands.
                        shutil.move('.coverage', '.coverage.raw')
                        pex_run(args=['combine', '--rcfile', coverage_rc])
                        pex_run(args=['report', '-i', '--rcfile', coverage_rc])

                        # TODO(wickman): If coverage is enabled and we are not using fast mode, write an
                        # intermediate .html that points to each of the coverage reports generated and
                        # webbrowser.open to that page.
                        # TODO(John Sirois): Possibly apply the same logic to the console report.  In fact,
                        # consider combining coverage files from all runs in this Tasks's execute and then
                        # producing just 1 console and 1 html report whether or not the tests are run in fast
                        # mode.
                        if self.get_options().coverage_output_dir:
                            target_dir = self.get_options().coverage_output_dir
                        else:
                            relpath = Target.maybe_readable_identify(targets)
                            pants_distdir = self.context.options.for_global_scope(
                            ).pants_distdir
                            target_dir = os.path.join(pants_distdir,
                                                      'coverage', relpath)
                        safe_mkdir(target_dir)
                        pex_run(args=[
                            'html', '-i', '--rcfile', coverage_rc, '-d',
                            target_dir
                        ])
                        coverage_xml = os.path.join(target_dir, 'coverage.xml')
                        pex_run(args=[
                            'xml', '-i', '--rcfile', coverage_rc, '-o',
                            coverage_xml
                        ])
Code Example #39
File: scrooge_gen.py Project: Gabriel439/pants
  def gen(self, partial_cmd, targets):
    fp_strategy = JavaThriftLibraryFingerprintStrategy(self._thrift_defaults)
    with self.invalidated(targets,
                          fingerprint_strategy=fp_strategy,
                          invalidate_dependents=True) as invalidation_check:
      invalid_targets = []
      for vt in invalidation_check.invalid_vts:
        invalid_targets.extend(vt.targets)
      import_paths, changed_srcs = calculate_compile_sources(invalid_targets, self.is_scroogetarget)
      outdir = self._outdir(partial_cmd)
      if changed_srcs:
        args = []

        for import_path in import_paths:
          args.extend(['--import-path', import_path])

        args.extend(['--language', partial_cmd.language])

        for lhs, rhs in partial_cmd.namespace_map:
          args.extend(['--namespace-map', '%s=%s' % (lhs, rhs)])

        if partial_cmd.rpc_style == 'ostrich':
          args.append('--finagle')
          args.append('--ostrich')
        elif partial_cmd.rpc_style == 'finagle':
          args.append('--finagle')

        args.extend(['--dest', outdir])
        safe_mkdir(outdir)

        if not self.get_options().strict:
          args.append('--disable-strict')

        if self.get_options().verbose:
          args.append('--verbose')

        gen_file_map_path = os.path.relpath(self._tempname())
        args.extend(['--gen-file-map', gen_file_map_path])

        args.extend(changed_srcs)

        classpath = self.tool_classpath('scrooge-gen')
        jvm_options = list(self.get_options().jvm_options)
        jvm_options.append('-Dfile.encoding=UTF-8')
        returncode = self.runjava(classpath=classpath,
                                  main='com.twitter.scrooge.Main',
                                  jvm_options=jvm_options,
                                  args=args,
                                  workunit_name='scrooge-gen')
        try:
          if 0 == returncode:
            gen_files_for_source = self.parse_gen_file_map(gen_file_map_path, outdir)
          else:
            gen_files_for_source = None
        finally:
          os.remove(gen_file_map_path)

        if 0 != returncode:
          raise TaskError('Scrooge compiler exited non-zero ({0})'.format(returncode))
        self.write_gen_file_map(gen_files_for_source, invalid_targets, outdir)

    return self.gen_file_map(targets, outdir)
Code Example #40
File: python_eval.py Project: guymarom/pants
    def _compile_target(self, vt):
        """'Compiles' a python target.

    'Compiling' means forming an isolated chroot of its sources and transitive deps and then
    attempting to import each of the target's sources in the case of a python library or else the
    entry point in the case of a python binary.

    For a library with sources lib/core.py and lib/util.py a "compiler" main file would look like:

      if __name__ == '__main__':
        import lib.core
        import lib.util

    For a binary with entry point lib.bin:main the "compiler" main file would look like:

      if __name__ == '__main__':
        from lib.bin import main

    In either case the main file is executed within the target chroot to reveal missing BUILD
    dependencies.
    """
        target = vt.target
        with self.context.new_workunit(name=target.address.spec):
            modules = self._get_modules(target)
            if not modules:
                # Nothing to eval, so a trivial compile success.
                return 0

            interpreter = self._get_interpreter_for_target_closure(target)
            reqs_pex = self._resolve_requirements_for_versioned_target_closure(
                interpreter, vt)
            srcs_pex = self._source_pex_for_versioned_target_closure(
                interpreter, vt)

            # Create the executable pex.
            exec_pex_parent = os.path.join(self.workdir, 'executable_pex')
            executable_file_content = self._get_executable_file_content(
                exec_pex_parent, modules)
            hasher = hashlib.sha1()
            hasher.update(executable_file_content)
            exec_file_hash = hasher.hexdigest()
            exec_pex_path = os.path.realpath(
                os.path.join(exec_pex_parent, exec_file_hash))
            if not os.path.isdir(exec_pex_path):
                with safe_concurrent_creation(exec_pex_path) as safe_path:
                    # Write the entry point.
                    safe_mkdir(safe_path)
                    with open(
                            os.path.join(safe_path,
                                         '{}.py'.format(self._EXEC_NAME)),
                            'w') as outfile:
                        outfile.write(executable_file_content)
                    pex_info = (target.pexinfo if isinstance(
                        target, PythonBinary) else None) or PexInfo()
                    # Override any user-specified entry point, under the assumption that the
                    # executable_file_content does what the user intends (including, probably, calling that
                    # underlying entry point).
                    pex_info.entry_point = self._EXEC_NAME
                    builder = PEXBuilder(safe_path,
                                         interpreter,
                                         pex_info=pex_info)
                    builder.freeze()

            exec_pex = PEX(exec_pex_path, interpreter)
            extra_pex_paths = [
                pex.path()
                for pex in [_f for _f in [reqs_pex, srcs_pex] if _f]
            ]
            pex = WrappedPEX(exec_pex, extra_pex_paths)

            with self.context.new_workunit(
                    name='eval',
                    labels=[
                        WorkUnitLabel.COMPILER, WorkUnitLabel.RUN,
                        WorkUnitLabel.TOOL
                    ],
                    cmd=' '.join(exec_pex.cmdline())) as workunit:
                returncode = pex.run(stdout=workunit.output('stdout'),
                                     stderr=workunit.output('stderr'))
                workunit.set_outcome(WorkUnit.SUCCESS if returncode ==
                                     0 else WorkUnit.FAILURE)
                if returncode != 0:
                    self.context.log.error('Failed to eval {}'.format(
                        target.address.spec))
                return returncode
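
To make the docstring above concrete, here is a hedged sketch of how the "compiler" main-file content could be assembled from a library target's modules; `make_eval_entry_point` is a hypothetical stand-in for the real `_get_executable_file_content`, which also handles the binary entry-point case.

def make_eval_entry_point(modules):
    # Importing each module inside the chroot surfaces missing BUILD deps.
    body = '\n'.join('    import {}'.format(m) for m in modules)
    return "if __name__ == '__main__':\n" + body + '\n'


# make_eval_entry_point(['lib.core', 'lib.util']) returns:
# if __name__ == '__main__':
#     import lib.core
#     import lib.util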
Code example #41
  def test_workdir_stale_builds_cleanup(self):
    """Ensure that current and previous build result_dirs and the newest `--workdir-max-build-entries` number of dirs
    will be kept, and the rest will be purged.
    """

    with temporary_dir() as tmp_dir:
      workdir = os.path.join(tmp_dir, '.pants.d')

      self.assert_success(self.run_pants_with_workdir([
        'compile',
        'export-classpath',
        'testprojects/src/java/org/pantsbuild/testproject/unicode/main',
      ], workdir))

      # Use the static exported classpath symlink to access the artifact in workdir
      # in order to avoid computing hashed task version used in workdir.
      classpath = 'dist/export-classpath/testprojects.src.java.org.pantsbuild.testproject.unicode.main.main-0.jar'

      # <workdir>/compile/zinc/d4600a981d5d/testprojects.src.java.org.pantsbuild.testproject.unicode.main.main/1a317a2504f6/z.jar'
      jar_path_in_pantsd = os.path.realpath(classpath)

      # <workdir>/compile/zinc/d4600a981d5d/testprojects.src.java.org.pantsbuild.testproject.unicode.main.main/
      target_dir_in_pantsd = os.path.dirname(os.path.dirname(jar_path_in_pantsd))

      old_cache_dirnames = set([
        'old_cache_test1_dir/',
        'old_cache_test2_dir/',
        'old_cache_test3_dir/',
      ])
      new_cache_dirnames = set([
        'old_cache_test4_dir/',
        'old_cache_test5_dir/',
      ])
      old_cache_entries = {os.path.join(target_dir_in_pantsd, subdir) for subdir in old_cache_dirnames}
      new_cache_entries = {os.path.join(target_dir_in_pantsd, subdir) for subdir in new_cache_dirnames}
      for old_entry in old_cache_entries:
        safe_mkdir(old_entry)
      # sleep for a bit so these files are all newer than the other ones
      time.sleep(1.1)
      for new_entry in new_cache_entries:
        safe_mkdir(new_entry)
      expected_dirs = set([os.path.join(target_dir_in_pantsd, 'current/')]) | old_cache_entries | new_cache_entries

      # stable symlink, current version directory, and synthetically created directories.
      remaining_cache_dir_fingerprinted = self.get_cache_subdir(target_dir_in_pantsd, other_dirs=expected_dirs)
      fingerprinted_realdir = os.path.realpath(os.path.join(target_dir_in_pantsd, 'current'))
      self.assertEqual(
        fingerprinted_realdir,
        remaining_cache_dir_fingerprinted.rstrip('/'))

      max_entries_per_target = 2
      self.assert_success(self.run_pants_with_workdir([
        'compile',
        'export-classpath',
        'testprojects/src/java/org/pantsbuild/testproject/unicode/main',
        '--workdir-max-build-entries={}'.format(max_entries_per_target)
      ], workdir))

      # stable (same as before), current, and 2 newest dirs
      self.assertEqual(os.path.dirname(os.path.dirname(os.path.realpath(classpath))), target_dir_in_pantsd)
      newest_expected_dirs = expected_dirs - old_cache_entries
      other_cache_dir_fingerprinted = self.get_cache_subdir(target_dir_in_pantsd, other_dirs=newest_expected_dirs)
      self.assertEqual(other_cache_dir_fingerprinted, remaining_cache_dir_fingerprinted)
      self.assertEqual(
        os.path.realpath(os.path.join(target_dir_in_pantsd, 'current')),
        fingerprinted_realdir)

      self.assert_success(self.run_pants_with_workdir([
        'compile',
        'export-classpath',
        'testprojects/src/java/org/pantsbuild/testproject/unicode/main',
        '--compile-zinc-debug-symbols',
        '--workdir-max-build-entries={}'.format(max_entries_per_target)
      ], workdir))

      # stable, current, and 2 newest dirs
      self.assertEqual(os.path.dirname(os.path.dirname(os.path.realpath(classpath))), target_dir_in_pantsd)
      new_cache_dir_fingerprinted = self.get_cache_subdir(target_dir_in_pantsd, other_dirs=newest_expected_dirs)
      # subsequent run with --compile-zinc-debug-symbols will invalidate previous build thus triggering the clean up.
      self.assertNotEqual(new_cache_dir_fingerprinted, remaining_cache_dir_fingerprinted)
      new_fingerprinted_realdir = os.path.realpath(os.path.join(target_dir_in_pantsd, 'current'))
      self.assertEqual(new_fingerprinted_realdir,
                       new_cache_dir_fingerprinted.rstrip('/'))
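
The behavior under test -- keep the stable `current` symlink plus the newest `--workdir-max-build-entries` dirs and purge the rest -- can be sketched with a hypothetical helper (the real task additionally protects the live build dir that `current` resolves to):

import os
import shutil


def purge_stale_build_dirs(target_dir, max_entries):
    # Sketch: keep the 'current' symlink and the `max_entries` newest entries.
    entries = [os.path.join(target_dir, e)
               for e in os.listdir(target_dir) if e != 'current']
    entries.sort(key=os.path.getmtime, reverse=True)
    for stale in entries[max_entries:]:
        shutil.rmtree(stale, ignore_errors=True)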
Code example #42
    def _generate_ivy_report(self, result):
        def make_empty_report(report, organisation, module, conf):
            no_deps_xml_template = dedent("""
                <?xml version="1.0" encoding="UTF-8"?>
                <?xml-stylesheet type="text/xsl" href="ivy-report.xsl"?>
                <ivy-report version="1.0">
                  <info
                    organisation="{organisation}"
                    module="{module}"
                    revision="latest.integration"
                    conf="{conf}"
                    confs="{conf}"
                    date="{timestamp}"/>
                </ivy-report>
                """).format(
                organisation=organisation,
                module=module,
                conf=conf,
                timestamp=time.strftime("%Y%m%d%H%M%S"),
            )
            with open(report, "w") as report_handle:
                print(no_deps_xml_template, file=report_handle)

        tool_classpath = self.tool_classpath("xalan")

        report = None
        org = IvyUtils.INTERNAL_ORG_NAME
        name = result.resolve_hash_name
        xsl = os.path.join(self.ivy_resolution_cache_dir, "ivy-report.xsl")

        # Xalan needs this dir to exist - ensure that, but do no more - we have no clue where this
        # points.
        safe_mkdir(self._outdir, clean=False)

        for conf in self.get_options().confs:
            xml_path = result.report_for_conf(conf)
            if not os.path.exists(xml_path):
                # Make it clear that this is not the original report from Ivy by changing its name.
                xml_path = xml_path[:-4] + "-empty.xml"
                make_empty_report(xml_path, org, name, conf)
            out = os.path.join(self._outdir, f"{org}-{name}-{conf}.html")
            args = ["-IN", xml_path, "-XSL", xsl, "-OUT", out]

            # The ivy-report.xsl generates tab links to files with extension 'xml' by default, we
            # override that to point to the html files we generate.
            args.extend(["-param", "extension", "html"])

            if 0 != self.runjava(
                    classpath=tool_classpath,
                    main="org.apache.xalan.xslt.Process",
                    args=args,
                    workunit_name="report",
            ):
                raise self.Error(
                    "Failed to create html report from xml ivy report.")

            # The ivy-report.xsl is already smart enough to generate an html page with tab links to all
            # confs for a given report coordinate (org, name).  We need only display 1 of the generated
            # htmls and the user can then navigate to the others via the tab links.
            if report is None:
                report = out

        css = os.path.join(self._outdir, "ivy-report.css")
        if os.path.exists(css):
            os.unlink(css)
        shutil.copy(
            os.path.join(self.ivy_resolution_cache_dir, "ivy-report.css"),
            self._outdir)

        if self._open and report:
            try:
                desktop.ui_open(report)
            except desktop.OpenError as e:
                raise TaskError(e)
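
A quick, self-contained illustration of the `dedent(...).format(...)` pattern used by `make_empty_report` (all values illustrative). Note the `"""\` line continuation, which keeps the `<?xml ...?>` declaration as the very first output line, as XML requires:

import time
from textwrap import dedent

snippet = dedent("""\
    <?xml version="1.0" encoding="UTF-8"?>
    <ivy-report version="1.0">
      <info organisation="{organisation}" module="{module}" date="{timestamp}"/>
    </ivy-report>
    """).format(organisation='internal', module='demo',
                timestamp=time.strftime("%Y%m%d%H%M%S"))
print(snippet)  # the XML declaration is the first character of the output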
Code example #43
 def setUp(self):
     super().setUp()
     safe_mkdir(self.push_db_basedir, clean=True)
Code example #44
File: util.py  Project: nadeemnazeer/pants
 def create_symlink_to_clean_workdir():
     # Executed when no link exists. We treat this as equivalent to a request to have deleted
     # this state. Operations like `clean-all` will already have purged the destination, but in
     # cases like manual removal of the symlink, we want to treat the case as equivalent.
     safe_mkdir(workdir_dst, clean=True)
     absolute_symlink(workdir_dst, workdir_src)
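
`absolute_symlink` is another `pants.util.dirutil` helper; roughly, as a sketch rather than the exact implementation (the real helper also copes with an existing directory at the link path):

import os


def absolute_symlink(source_path, target_path):
    # Sketch: force target_path to be a symlink to the absolute source path.
    source_path = os.path.abspath(source_path)
    if os.path.lexists(target_path):
        os.remove(target_path)  # a real directory here would need rmtree
    os.symlink(source_path, target_path)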
Code example #45
 def _setup_interpreter(self, interpreter):
   interpreter_dir = os.path.join(self._path, str(interpreter.identity))
   safe_mkdir(interpreter_dir)
   _safe_link(interpreter.binary, os.path.join(interpreter_dir, 'python'))
   return _resolve(self._config, interpreter, logger=self._logger)
Code example #46
    def test_workdir_stale_builds_cleanup(self):
        """Ensure that current and previous build result_dirs and the newest
        `--workdir-max-build-entries` number of dirs will be kept, and the rest will be purged.
        """

        with temporary_dir() as tmp_dir:
            workdir = os.path.join(tmp_dir, '.pants.d')
            pants_run = self.run_pants_with_workdir([
                'compile',
                'export-classpath',
                'testprojects/src/java/org/pantsbuild/testproject/unicode/main',
            ], workdir)
            self.assert_success(pants_run)

            # Use the static exported classpath symlink to access the artifact in workdir
            # in order to avoid computing hashed task version used in workdir.
            classpath = 'dist/export-classpath/testprojects.src.java.org.pantsbuild.testproject.unicode.main.main-0.jar'

            # <workdir>/compile/zinc/d4600a981d5d/testprojects.src.java.org.pantsbuild.testproject.unicode.main.main/1a317a2504f6/z.jar'
            jar_path_in_pantsd = os.path.realpath(classpath)

            # <workdir>/compile/zinc/d4600a981d5d/testprojects.src.java.org.pantsbuild.testproject.unicode.main.main/
            target_dir_in_pantsd = os.path.dirname(
                os.path.dirname(jar_path_in_pantsd))

            safe_mkdir(
                os.path.join(target_dir_in_pantsd, 'old_cache_test1_dir'))
            safe_mkdir(
                os.path.join(target_dir_in_pantsd, 'old_cache_test2_dir'))
            safe_mkdir(
                os.path.join(target_dir_in_pantsd, 'old_cache_test3_dir'))
            time.sleep(1.1)
            safe_mkdir(
                os.path.join(target_dir_in_pantsd, 'old_cache_test4_dir'))
            safe_mkdir(
                os.path.join(target_dir_in_pantsd, 'old_cache_test5_dir'))

            # stable symlink, current version directory, and synthetically created directories.
            self.assertTrue(
                os.path.exists(os.path.join(target_dir_in_pantsd, 'current')))
            self.assertEqual(len(os.listdir(target_dir_in_pantsd)), 7)

            max_entries_per_target = 2
            # 2nd run with --compile-zinc-debug-symbols will invalidate previous build thus triggering the clean up.
            pants_run_2 = self.run_pants_with_workdir([
                'compile', 'export-classpath',
                'testprojects/src/java/org/pantsbuild/testproject/unicode/main',
                '--compile-zinc-debug-symbols',
                '--workdir-max-build-entries={}'.format(max_entries_per_target)
            ], workdir)
            self.assert_success(pants_run_2)
            # stable, current, previous builds stay, and 2 newest dirs
            self.assertEqual(len(os.listdir(target_dir_in_pantsd)), 5)
            self.assertTrue(
                os.path.exists(os.path.join(target_dir_in_pantsd, 'current')))
            self.assertTrue(
                os.path.exists(
                    os.path.join(target_dir_in_pantsd, 'old_cache_test4_dir')))
            self.assertTrue(
                os.path.exists(
                    os.path.join(target_dir_in_pantsd, 'old_cache_test5_dir')))

            self.assertFalse(
                os.path.exists(
                    os.path.join(target_dir_in_pantsd, 'old_cache_test1_dir')))
            self.assertFalse(
                os.path.exists(
                    os.path.join(target_dir_in_pantsd, 'old_cache_test2_dir')))
            self.assertFalse(
                os.path.exists(
                    os.path.join(target_dir_in_pantsd, 'old_cache_test3_dir')))
Code example #47
 def _maybe_init_metadata_dir(self):
   safe_mkdir(self.get_metadata_dir())
Code example #48
    def bundle(self, app):
        """Create a self-contained application bundle.

    The bundle will contain the target classes, dependencies and resources.
    """
        assert (isinstance(app, BundleCreate.App))

        def verbose_symlink(src, dst):
            try:
                os.symlink(src, dst)
            except OSError as e:
                self.context.log.error(
                    "Unable to create symlink: {0} -> {1}".format(src, dst))
                raise e

        bundle_dir = os.path.join(self._outdir, '%s-bundle' % app.basename)
        self.context.log.info('creating %s' %
                              os.path.relpath(bundle_dir, get_buildroot()))

        safe_mkdir(bundle_dir, clean=True)

        classpath = OrderedSet()
        # If creating a deployjar, we add the external dependencies to the bundle as
        # loose classes, and have no classpath. Otherwise we add the external dependencies
        # to the bundle as jars in a libs directory.
        if not self._create_deployjar:
            lib_dir = os.path.join(bundle_dir, 'libs')
            os.mkdir(lib_dir)

            jarmap = self.context.products.get('jars')

            def add_jars(target):
                generated = jarmap.get(target)
                if generated:
                    for base_dir, internal_jars in generated.items():
                        for internal_jar in internal_jars:
                            verbose_symlink(
                                os.path.join(base_dir, internal_jar),
                                os.path.join(lib_dir, internal_jar))
                            classpath.add(internal_jar)

            app.binary.walk(add_jars, lambda t: t != app.binary)

            # Add external dependencies to the bundle.
            for basedir, external_jar in self.list_external_jar_dependencies(
                    app.binary):
                path = os.path.join(basedir, external_jar)
                verbose_symlink(path, os.path.join(lib_dir, external_jar))
                classpath.add(external_jar)

        bundle_jar = os.path.join(bundle_dir, '%s.jar' % app.binary.basename)

        with self.monolithic_jar(
                app.binary, bundle_jar,
                with_external_deps=self._create_deployjar) as jar:
            self.add_main_manifest_entry(jar, app.binary)
            if classpath:
                jar.classpath([os.path.join('libs', jar) for jar in classpath])

        for bundle in app.bundles:
            for path, relpath in bundle.filemap.items():
                bundle_path = os.path.join(bundle_dir, relpath)
                safe_mkdir(os.path.dirname(bundle_path))
                verbose_symlink(path, bundle_path)

        return bundle_dir
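
For orientation, the resulting bundle layout when `_create_deployjar` is false might look like this (all names illustrative):

# <outdir>/hello-bundle/        <- returned bundle_dir
#   hello.jar                   <- thin jar; its manifest Class-Path lists libs/*
#   libs/
#     guava-18.0.jar            <- symlink to an external dependency jar
#     internal-lib.jar          <- symlink to a generated internal jar
#   <bundle files...>           <- app.bundles filemap entries, symlinked in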
Code example #49
File: rsc_compile.py  Project: wiwa/pants
 def ensure_output_dirs_exist(self):
     safe_mkdir(os.path.dirname(self.rsc_jar_file.path))
Code example #50
 def ensure_output_dirs_exist(self):
   safe_mkdir(os.path.dirname(self.rsc_mjar_file))
   safe_mkdir(self.rsc_index_dir)
Code example #51
 def clobber_symlink(vt):
     # Munge the state to mimic a common error found before we added the cleanup: it accidentally clobbers the symlink!
     # Commonly caused by safe_mkdir(vt.results_dir, clean=True), broken up here to keep the tests from being brittle.
     safe_rmtree(vt.results_dir)
     safe_mkdir(vt.results_dir)
Code example #52
 def open(self):
     """Implementation of Reporter callback."""
     safe_mkdir(os.path.dirname(self._html_dir))
     self._report_file = open(self.report_path(), 'w')
Code example #53
File: coursier_resolve.py  Project: zvikihouzz/pants
    def _map_coord_to_resolved_jars(cls, result, coursier_cache_path,
                                    pants_jar_path_base):
        """
    Map resolved files to each org:name:version

    Example:
    {
      "conflict_resolution": {},
      "dependencies": [
        {
          "coord": "a",
          "dependencies": ["b", "c"],
          "file": "a.jar"
        },
        {
          "coord": "b",
          "dependencies": [],
          "file": "b.jar"
        },
        {
          "coord": "c",
          "dependencies": [],
          "file": "c.jar"
        },
        {
          "coord": "a:sources",
          "dependencies": ["b", "c"],
          "file": "a-sources.jar"
        },
      ]
    }

    Should return:
    {
      M2Coordinate("a", ...):                             ResolvedJar(classifier='', path/cache_path="a.jar"),
      M2Coordinate("a", ..., classifier="sources"):       ResolvedJar(classifier='sources', path/cache_path="a-sources.jar"),
      M2Coordinate("b", ...):                             ResolvedJar(classifier='', path/cache_path="b.jar"),
      M2Coordinate("c", ...):                             ResolvedJar(classifier='', path/cache_path="c.jar"),
    }

    :param result: coursier json output
    :param coursier_cache_path: coursier cache location
    :param pants_jar_path_base: location under pants workdir to store the hardlink to the coursier cache
    :return: a map from maven coordinate to a resolved jar.
    """

        coord_to_resolved_jars = dict()

        for dep in result['dependencies']:
            coord = dep['coord']
            jar_path = dep.get('file', None)
            if not jar_path:
                # NB: Not all coordinates will have associated files.
                #     This is fine. Some coordinates will just have dependencies.
                continue

            if not os.path.exists(jar_path):
                raise CoursierResultNotFound(
                    "Jar path not found: {}".format(jar_path))

            pants_path = cls._get_path_to_jar(coursier_cache_path,
                                              pants_jar_path_base, jar_path)

            if not os.path.exists(pants_path):
                safe_mkdir(os.path.dirname(pants_path))
                safe_hardlink_or_copy(jar_path, pants_path)

            coord = cls.to_m2_coord(coord)
            resolved_jar = ResolvedJar(coord,
                                       cache_path=jar_path,
                                       pants_path=pants_path)
            coord_to_resolved_jars[coord] = resolved_jar
        return coord_to_resolved_jars
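
Fed the docstring's example input, the loop above reduces to something like this simplified, dependency-free version (real `M2Coordinate`/`ResolvedJar` construction and cache hardlinking omitted):

result = {
    "conflict_resolution": {},
    "dependencies": [
        {"coord": "a", "dependencies": ["b", "c"], "file": "a.jar"},
        {"coord": "b", "dependencies": [], "file": "b.jar"},
        {"coord": "a:sources", "dependencies": ["b", "c"], "file": "a-sources.jar"},
    ],
}

coord_to_jar = {dep["coord"]: dep["file"]
                for dep in result["dependencies"] if dep.get("file")}
# {'a': 'a.jar', 'b': 'b.jar', 'a:sources': 'a-sources.jar'}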
Code example #54
File: junit_run.py  Project: sikopet/pants
  def report(self, targets, tests, junit_classpath):
    # Link files in the real source tree to files named using the classname.
    # Do not include class file names containing '$', as these will always have
    # a corresponding $-less class file, and they all point back to the same
    # source.
    # Put all these links to sources under self._coverage_dir/src
    all_classes = set()
    for basedir, classes in self._rootdirs.items():
      all_classes.update([cls for cls in classes if '$' not in cls])
    sources_by_class = self._build_sources_by_class()
    coverage_source_root_dir = os.path.join(self._coverage_dir, 'src')
    safe_rmtree(coverage_source_root_dir)
    for cls in all_classes:
      source_file = sources_by_class.get(cls)
      if source_file:
        # the class in @cls
        #    (e.g., 'com/pants/example/hello/welcome/WelcomeEverybody.class')
        # was compiled from the file in @source_file
        #    (e.g., 'src/scala/com/pants/example/hello/welcome/Welcome.scala')
        # Note that, in the case of scala files, the path leading up to Welcome.scala does not
        # have to match the path in the corresponding .class file AT ALL. In this example,
        # @source_file could very well have been 'src/hello-kitty/Welcome.scala'.
        # However, cobertura expects the class file path to match the corresponding source
        # file path below the source base directory(ies) (passed as (a) positional argument(s)),
        # while it still gets the source file basename from the .class file.
        # Here we create a fake hierarchy under coverage_dir/src to mimic what cobertura expects.

        class_dir = os.path.dirname(cls)   # e.g., 'com/pants/example/hello/welcome'
        fake_source_directory = os.path.join(coverage_source_root_dir, class_dir)
        safe_mkdir(fake_source_directory)
        fake_source_file = os.path.join(fake_source_directory, os.path.basename(source_file))
        try:
          os.symlink(os.path.relpath(source_file, fake_source_directory),
                     fake_source_file)
        except OSError as e:
          # These warnings appear when source files contain multiple classes.
          self._context.log.warn(
            'Could not symlink %s to %s: %s' %
            (source_file, fake_source_file, e))
      else:
        self._context.log.error('class %s does not exist in a source file!' % cls)
    report_formats = []
    if self._coverage_report_xml:
      report_formats.append('xml')
    if self._coverage_report_html:
      report_formats.append('html')
    for report_format in report_formats:
      report_dir = os.path.join(self._coverage_dir, report_format)
      safe_mkdir(report_dir, clean=True)
      args = [
        coverage_source_root_dir,
        '--datafile',
        self._coverage_datafile,
        '--destination',
        report_dir,
        '--format',
        report_format,
        ]
      main = 'net.sourceforge.cobertura.reporting.ReportMain'
      result = execute_java(classpath=self._cobertura_classpath,
                            main=main,
                            args=args,
                            workunit_factory=self._context.new_workunit,
                            workunit_name='cobertura-report-' + report_format)
      if result != 0:
        raise TaskError("java %s ... exited non-zero (%i)"
                        " 'failed to report'" % (main, result))
Code example #55
File: coursier_resolve.py  Project: zvikihouzz/pants
 def _prepare_workdir(self):
     """Prepare the location in our task workdir to store all the hardlinks to coursier cache dir."""
     pants_jar_base_dir = os.path.join(self.versioned_workdir, 'cache')
     safe_mkdir(pants_jar_base_dir)
     return pants_jar_base_dir
Code example #56
 def resolution_lock(self):
     safe_mkdir(self._ivy_resolution_cache_dir)
     with self._lock:
         yield
Code example #57
File: build_invalidator.py  Project: sid-kap/pants
 def __init__(self, root):
     self._root = os.path.join(root, GLOBAL_CACHE_KEY_GEN_VERSION)
     safe_mkdir(self._root)
Code example #58
File: coursier_resolve.py  Project: zvikihouzz/pants
    def _get_result_from_coursier(self, jars_to_resolve, global_excludes,
                                  pinned_coords, coursier_cache_path, sources,
                                  javadoc, executor):
        """
    Calling coursier and return the result per invocation.

    If coursier was called once for classifier '' and once for classifier 'tests', then the return value
    would be: {'default': [<first coursier output>, <second coursier output>]}

    :param jars_to_resolve: List of `JarDependency`s to resolve
    :param global_excludes: List of `M2Coordinate`s to exclude globally
    :param pinned_coords: List of `M2Coordinate`s that need to be pinned.
    :param coursier_cache_path: path to where coursier cache is stored.
    :param executor: An instance of `pants.java.executor.Executor`

    :return: The aggregation of results by conf from coursier. Each coursier call could return
    the following:
        {
          "conflict_resolution": {
            "org:name:version" (requested): "org:name:version" (reconciled)
          },
          "dependencies": [
            {
              "coord": "orgA:nameA:versionA",
              "file": <path>,
              "dependencies": [ // coodinates for its transitive dependencies
                <orgX:nameX:versionX>,
                <orgY:nameY:versionY>,
              ]
            },
            {
              "coord": "orgB:nameB:jar:classifier:versionB",
              "file": <path>,
              "dependencies": [ // coodinates for its transitive dependencies
                <orgX:nameX:versionX>,
                <orgZ:nameZ:versionZ>,
              ]
            },
            ... // more about orgX:nameX:versionX, orgY:nameY:versionY, orgZ:nameZ:versionZ
          ]
        }
    Hence the aggregation of the results will be in the following format, for example when default classifier
    and sources are fetched:
    {
      'default': [<result from coursier call with default conf with classifier X>,
                  <result from coursier call with default conf with classifier Y>],
      'src_doc': [<result from coursier call with --sources and/or --javadoc>],
    }
    """
        # Prepare coursier args
        coursier_subsystem_instance = CoursierSubsystem.global_instance()
        coursier_jar = coursier_subsystem_instance.bootstrap_coursier(
            self.context.new_workunit)

        repos = coursier_subsystem_instance.get_options().repos
        # make [repoX, repoY] -> ['-r', repoX, '-r', repoY]
        repo_args = list(
            itertools.chain(*list(zip(['-r'] * len(repos), repos))))
        artifact_types_arg = [
            '-A',
            ','.join(coursier_subsystem_instance.get_options().artifact_types)
        ]
        advanced_options = coursier_subsystem_instance.get_options(
        ).fetch_options
        common_args = [
            'fetch',
            # Print the resolution tree
            '-t',
            '--cache',
            coursier_cache_path
        ] + repo_args + artifact_types_arg + advanced_options

        coursier_work_temp_dir = os.path.join(self.versioned_workdir, 'tmp')
        safe_mkdir(coursier_work_temp_dir)

        results_by_conf = self._get_default_conf_results(
            common_args, coursier_jar, global_excludes, jars_to_resolve,
            coursier_work_temp_dir, pinned_coords, executor)
        if sources or javadoc:
            non_default_conf_results = self._get_non_default_conf_results(
                common_args, coursier_jar, global_excludes, jars_to_resolve,
                coursier_work_temp_dir, pinned_coords, sources, javadoc,
                executor)
            results_by_conf.update(non_default_conf_results)

        return results_by_conf
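
The `zip`/`chain` idiom used above to build `repo_args` interleaves a `-r` flag before each repo; a quick demonstration (URLs illustrative):

import itertools

repos = ['https://repo1.maven.org/maven2', 'https://example.com/maven']
repo_args = list(itertools.chain(*zip(['-r'] * len(repos), repos)))
# ['-r', 'https://repo1.maven.org/maven2', '-r', 'https://example.com/maven']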
Code example #59
File: logging.py  Project: rkstap/pants
def setup_logging(level, console_stream=None, log_dir=None, scope=None, log_name=None):
  """Configures logging for a given scope, by default the global scope.

  :param str level: The logging level to enable, must be one of the level names listed here:
                    https://docs.python.org/2/library/logging.html#levels
  :param file console_stream: The stream to use for default (console) logging. If None (default),
                              this will disable console logging.
  :param str log_dir: An optional directory to emit logs files in.  If unspecified, no disk logging
                      will occur.  If supplied, the directory will be created if it does not already
                      exist and all logs will be tee'd to a rolling set of log files in that
                      directory.
  :param str scope: A logging scope to configure.  The scopes are hierarchical logger names, with
                    the '.' separator providing the scope hierarchy.  By default the root logger is
                    configured.
  :param str log_name: The base name of the log file (defaults to 'pants.log').
  :returns: The full path to the main log file if file logging is configured or else `None`.
  :rtype: str
  """

  # TODO(John Sirois): Consider moving to straight python logging.  The divide between the
  # context/work-unit logging and standard python logging doesn't buy us anything.

  # TODO(John Sirois): Support logging.config.fileConfig so a site can setup fine-grained
  # logging control and we don't need to be the middleman plumbing an option for each python
  # standard logging knob.

  log_filename = None
  file_handler = None

  # A custom log method, patched onto Logger below, for sub-debug trace logging.
  def trace(self, message, *args, **kwargs):
    if self.isEnabledFor(TRACE):
      self._log(TRACE, message, args, **kwargs)

  logging.Logger.trace = trace

  logger = logging.getLogger(scope)
  for handler in logger.handlers:
    logger.removeHandler(handler)

  if console_stream:
    console_handler = StreamHandler(stream=console_stream)
    console_handler.setFormatter(Formatter(fmt='%(levelname)s] %(message)s'))
    console_handler.setLevel(level)
    logger.addHandler(console_handler)

  if log_dir:
    safe_mkdir(log_dir)
    log_filename = os.path.join(log_dir, log_name or 'pants.log')
    file_handler = FileHandler(log_filename)

    class GlogFormatter(Formatter):
      LEVEL_MAP = {
        logging.FATAL: 'F',
        logging.ERROR: 'E',
        logging.WARN: 'W',
        logging.INFO: 'I',
        logging.DEBUG: 'D',
        TRACE: 'T'
      }

      def format(self, record):
        datetime = time.strftime('%m%d %H:%M:%S', time.localtime(record.created))
        micros = int((record.created - int(record.created)) * 1e6)
        return '{levelchar}{datetime}.{micros:06d} {process} {filename}:{lineno}] {msg}'.format(
          levelchar=self.LEVEL_MAP[record.levelno],
          datetime=datetime,
          micros=micros,
          process=record.process,
          filename=record.filename,
          lineno=record.lineno,
          msg=record.getMessage()
        )

    file_handler.setFormatter(GlogFormatter())
    file_handler.setLevel(level)
    logger.addHandler(file_handler)

  logger.setLevel(level)

  # This routes warnings through our loggers instead of straight to raw stderr.
  logging.captureWarnings(True)

  _maybe_configure_extended_logging(logger)

  return LoggingSetupResult(log_filename, file_handler)
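
A hedged usage example, assuming `LoggingSetupResult` exposes the `log_filename` field seen in the return statement:

import logging
import sys

result = setup_logging('INFO', console_stream=sys.stderr, log_dir='/tmp/demo-logs')
logging.getLogger().info('hello')   # tee'd to stderr and /tmp/demo-logs/pants.log
logging.getLogger().trace('noisy')  # patched TRACE method; silent at INFO level
print(result.log_filename)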
Code example #60
File: build_invalidator.py  Project: sid-kap/pants
 def force_invalidate_all(self):
     """Force-invalidates all cached items."""
     safe_mkdir(self._root, clean=True)