Example 1
 def __init__(self, *args, **kwargs):
   super(JvmRun, self).__init__(*args, **kwargs)
   self.jvm_args = self.context.config.getlist('jvm-run', 'jvm_args', default=[])
   if self.context.options.run_jvmargs:
     for arg in self.context.options.run_jvmargs:
       self.jvm_args.extend(safe_shlex_split(arg))
   self.args = []
   if self.context.options.run_args:
     for arg in self.context.options.run_args:
       self.args.extend(safe_shlex_split(arg))
   if self.context.options.run_debug:
     self.jvm_args.extend(JvmDebugConfig.debug_args(self.context.config))
   self.confs = self.context.config.getlist('jvm-run', 'confs', default=['default'])
   self.only_write_cmd_line = self.context.options.only_write_cmd_line
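Every example on this page follows the same pattern: an option value arrives as one or more shell-quoted strings and is expanded into individual argument tokens with safe_shlex_split before being appended to a JVM, pytest, or pex command line. A minimal stand-alone sketch of that pattern, assuming safe_shlex_split behaves like the standard library's shlex.split (split_flag_strings is a hypothetical helper used only for illustration):

import shlex

def split_flag_strings(flag_strings):
  # Expand shell-quoted option strings into a flat argument list,
  # mirroring the extend(safe_shlex_split(...)) loops in the examples below.
  args = []
  for flag_string in flag_strings:
    args.extend(shlex.split(flag_string))
  return args

# A single option value such as '-Xmx1g -Dfoo="a b"' becomes separate argv tokens.
print(split_flag_strings(['-Xmx1g -Dfoo="a b"', '-verbose:gc']))
# ['-Xmx1g', '-Dfoo=a b', '-verbose:gc']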
Example 2
  def __init__(self, *args, **kwargs):
    super(JvmTask, self).__init__(*args, **kwargs)

    self.jvm_options = []
    for jvm_option in self.get_options().jvm_options:
      self.jvm_options.extend(safe_shlex_split(jvm_option))

    if self.get_options().debug:
      self.jvm_options.extend(JvmDebugConfig.debug_args(self.context.config))

    self.args = []
    for arg in self.get_options().args:
      self.args.extend(safe_shlex_split(arg))

    self.confs = self.get_options().confs
Example 3
  def execute(self):
    def is_python_test(target):
      # Note that we ignore PythonTestSuite, because we'll see the PythonTests targets
      # it depends on anyway, so if we don't ignore it we'll end up running the tests twice.
      # TODO(benjy): Once we're off the 'build' command we can get rid of python_test_suite,
      # or make it an alias of dependencies().
      return isinstance(target, PythonTests)

    test_targets = list(filter(is_python_test, self.context.targets()))
    if test_targets:
      self.context.release_lock()

      debug = self.get_options().level == 'debug'

      args = [] if self.get_options().no_colors else ['--color', 'yes']
      for options in self.get_options().options + self.get_passthru_args():
        args.extend(safe_shlex_split(options))
      test_builder = PythonTestBuilder(context=self.context,
                                       targets=test_targets,
                                       args=args,
                                       interpreter=self.interpreter,
                                       fast=self.get_options().fast,
                                       debug=debug)
      with self.context.new_workunit(name='run',
                                     labels=[WorkUnit.TOOL, WorkUnit.TEST]) as workunit:
        # pytest uses py.io.terminalwriter for output. That class detects the terminal
        # width and attempts to use all of it. However we capture and indent the console
        # output, leading to weird-looking line wraps. So we trick the detection code
        # into thinking the terminal window is narrower than it is.
        cols = os.environ.get('COLUMNS', 80)
        with environment_as(COLUMNS=str(int(cols) - 30)):
          stdout = workunit.output('stdout') if workunit else None
          stderr = workunit.output('stderr') if workunit else None
          if test_builder.run(stdout=stdout, stderr=stderr):
            raise TaskError()
Example 4
  def errorprone(self, target):
    runtime_classpaths = self.context.products.get_data('runtime_classpath')
    runtime_classpath = [jar for conf, jar in runtime_classpaths.get_for_targets(target.closure(bfs=True))]

    output_dir = os.path.join(self.workdir, target.id)
    safe_mkdir(output_dir)
    runtime_classpath.append(output_dir)

    args = [
      '-classpath', ':'.join(runtime_classpath),
      '-d', output_dir,
    ]

    for opt in self.get_options().command_line_options:
      args.extend(safe_shlex_split(opt))

    args.extend(self.calculate_sources(target))

    result = self.runjava(classpath=self.tool_classpath('errorprone'),
                          main=self._ERRORPRONE_MAIN,
                          jvm_options=self.get_options().jvm_options,
                          args=args,
                          workunit_name='errorprone',
                          workunit_labels=[WorkUnitLabel.LINT])

    self.context.log.debug('java {main} ... exited with result ({result})'.format(
                           main=self._ERRORPRONE_MAIN, result=result))

    return result
Example 5
  def __init__(self, *args, **kwargs):
    super(JavaCompile, self).__init__(*args, **kwargs)
    self.set_distribution(jdk=True)

    self._buildroot = get_buildroot()

    self._depfile = os.path.join(self._analysis_dir, 'global_depfile')

    self._jmake_bootstrap_key = 'jmake'
    external_tools = self.context.config.getlist('java-compile',
                                                 'jmake-bootstrap-tools',
                                                 default=['//:jmake'])
    self.register_jvm_tool(self._jmake_bootstrap_key, external_tools)

    self._compiler_bootstrap_key = 'java-compiler'
    compiler_bootstrap_tools = self.context.config.getlist('java-compile',
                                                           'compiler-bootstrap-tools',
                                                           default=['//:java-compiler'])
    self.register_jvm_tool(self._compiler_bootstrap_key, compiler_bootstrap_tools)

    self.configure_args(args_defaults=_JAVA_COMPILE_ARGS_DEFAULT,
                        warning_defaults=_JAVA_COMPILE_WARNING_ARGS_DEFAULT,
                        no_warning_defaults=_JAVA_COMPILE_WARNING_ARGS_DEFAULT)

    self._javac_opts = []
    if self.get_options().args:
      for arg in self.get_options().args:
        self._javac_opts.extend(safe_shlex_split(arg))
    else:
      self._javac_opts.extend(self.context.config.getlist('java-compile',
                                                          'javac_args', default=[]))
Example 6
  def _do_run_tests(self, targets, workunit):
    if not targets:
      return PythonTestResult.rc(0)

    sources = list(itertools.chain(*[t.sources_relative_to_buildroot() for t in targets]))
    if not sources:
      return PythonTestResult.rc(0)

    with self._test_runner(targets, workunit) as (pex, test_args):

      def run_and_analyze(resultlog_path):
        result = self._do_run_tests_with_args(pex, workunit, args)
        failed_targets = self._get_failed_targets_from_resultlogs(resultlog_path, targets)
        return result.with_failed_targets(failed_targets)

      args = []
      if self._debug:
        args.extend(['-s'])
      if self.get_options().colors:
        args.extend(['--color', 'yes'])
      for options in self.get_options().options + self.get_passthru_args():
        args.extend(safe_shlex_split(options))
      args.extend(test_args)
      args.extend(sources)

      # The user might have already specified the resultlog option. In such case, reuse it.
      resultlogs = [arg.split('=', 1)[-1] for arg in args if arg.startswith('--resultlog=')]

      if resultlogs:
        return run_and_analyze(resultlogs[-1])
      else:
        with temporary_file_path() as resultlog_path:
          args.append('--resultlog={0}'.format(resultlog_path))
          return run_and_analyze(resultlog_path)
Example 7
  def __init__(self, task_exports, context):
    super(_Coverage, self).__init__(task_exports, context)
    options = task_exports.task_options
    self._coverage = options.coverage
    self._coverage_filters = options.coverage_patterns or []

    self._coverage_jvm_options = []
    for jvm_option in options.coverage_jvm_options:
      self._coverage_jvm_options.extend(safe_shlex_split(jvm_option))

    self._coverage_dir = os.path.join(task_exports.workdir, 'coverage')
    self._coverage_instrument_dir = os.path.join(self._coverage_dir, 'classes')
    # TODO(ji): These may need to be transferred down to the Emma class, as the suffixes
    # may be emma-specific. Resolve when we also provide cobertura support.
    self._coverage_metadata_file = os.path.join(self._coverage_dir, 'coverage.em')
    self._coverage_file = os.path.join(self._coverage_dir, 'coverage.ec')
    self._coverage_report_console = options.coverage_console
    self._coverage_console_file = os.path.join(self._coverage_dir, 'coverage.txt')

    self._coverage_report_xml = options.coverage_xml
    self._coverage_xml_file = os.path.join(self._coverage_dir, 'coverage.xml')

    self._coverage_report_html_open = options.coverage_html_open
    self._coverage_report_html = self._coverage_report_html_open or options.coverage_html
    self._coverage_html_file = os.path.join(self._coverage_dir, 'html', 'index.html')
    self._coverage_force = options.coverage_force
Example 8
 def execute(self):
     binary = self.require_single_root_target()
     if isinstance(binary, PythonBinary):
         # We can't throw if binary isn't a PythonBinary, because perhaps we were called on a
         # jvm_binary, in which case we have to no-op and let jvm_run do its thing.
         # TODO(benjy): Some more elegant way to coordinate how tasks claim targets.
         interpreter = self.select_interpreter_for_targets(binary.closure())
         with self.cached_chroot(
             interpreter=interpreter, pex_info=binary.pexinfo, targets=[binary], platforms=binary.platforms
         ) as chroot:
             pex = chroot.pex()
             self.context.release_lock()
             with self.context.new_workunit(name="run", labels=[WorkUnitLabel.RUN]):
                 args = []
                 for arg in self.get_options().args:
                     args.extend(safe_shlex_split(arg))
                 args += self.get_passthru_args()
                 po = pex.run(blocking=False, args=args)
                 try:
                     result = po.wait()
                     if result != 0:
                         msg = "{interpreter} {entry_point} {args} ... exited non-zero ({code})".format(
                             interpreter=interpreter.binary,
                             entry_point=binary.entry_point,
                             args=" ".join(args),
                             code=result,
                         )
                         raise TaskError(msg, exit_code=result)
                 except KeyboardInterrupt:
                     po.send_signal(signal.SIGINT)
                     raise
Example 9
  def execute(self):
    binary = self.require_single_root_target()
    if isinstance(binary, PythonBinary):
      # We can't throw if binary isn't a PythonBinary, because perhaps we were called on a
      # jvm_binary, in which case we have to no-op and let jvm_run do its thing.
      # TODO(benjy): Use MutexTask to coordinate this.

      pex = self.create_pex(binary.pexinfo)
      args = []
      for arg in self.get_options().args:
        args.extend(safe_shlex_split(arg))
      args += self.get_passthru_args()

      self.context.release_lock()
      with self.context.new_workunit(name='run',
                                     cmd=pex.cmdline(args),
                                     labels=[WorkUnitLabel.TOOL, WorkUnitLabel.RUN]):
        po = pex.run(blocking=False, args=args, env=os.environ.copy())
        try:
          result = po.wait()
          if result != 0:
            msg = '{interpreter} {entry_point} {args} ... exited non-zero ({code})'.format(
                interpreter=pex.interpreter.binary,
                entry_point=binary.entry_point,
                args=' '.join(args),
                code=result)
            raise TaskError(msg, exit_code=result)
        except KeyboardInterrupt:
          po.send_signal(signal.SIGINT)
          raise
Example 10
 def __init__(self, *args, **kwargs):
   super(ScalaRepl, self).__init__(*args, **kwargs)
   self.jvm_args = self.context.config.getlist('scala-repl', 'jvm_args', default=[])
   if self.context.options.run_jvmargs:
     for arg in self.context.options.run_jvmargs:
       self.jvm_args.extend(safe_shlex_split(arg))
   self.confs = self.context.config.getlist('scala-repl', 'confs', default=['default'])
   self._bootstrap_key = 'scala-repl'
   bootstrap_tools = self.context.config.getlist('scala-repl', 'bootstrap-tools',
                                                 default=['//:scala-repl-2.9.3'])
   self.register_jvm_tool(self._bootstrap_key, bootstrap_tools)
   self.main = self.context.config.get('scala-repl', 'main')
   self.args = self.context.config.getlist('scala-repl', 'args', default=[])
   if self.context.options.run_args:
     for arg in self.context.options.run_args:
       self.args.extend(safe_shlex_split(arg))
Example 11
  def __init__(self, *args, **kwargs):
    super(JvmTask, self).__init__(*args, **kwargs)

    self.jvm_options = []
    for jvm_option in self.get_options().jvm_options:
      self.jvm_options.extend(safe_shlex_split(jvm_option))

    if self.get_options().debug:
      debug_port = self.get_options().debug_port
      self.jvm_options.extend(
        arg.format(debug_port=debug_port) for arg in self.get_options().debug_args)

    self.args = []
    for arg in self.get_options().args:
      self.args.extend(safe_shlex_split(arg))

    self.confs = self.get_options().confs
Example 12
  def errorprone(self, target):
    runtime_classpaths = self.context.products.get_data('runtime_classpath')
    runtime_classpath = [jar for conf, jar in runtime_classpaths.get_for_targets(target.closure(bfs=True))]

    output_dir = os.path.join(self.workdir, target.id)
    safe_mkdir(output_dir)
    runtime_classpath.append(output_dir)

    # Try to run errorprone with the same java version as the target
    # The minimum JDK for errorprone is JDK 1.8
    min_jdk_version = max(target.platform.target_level, Revision.lenient('1.8'))
    if min_jdk_version.components[0] == 1:
      max_jdk_version = Revision(min_jdk_version.components[0], min_jdk_version.components[1], '9999')
    else:
      max_jdk_version = Revision(min_jdk_version.components[0], '9999')
    self.set_distribution(minimum_version=min_jdk_version, maximum_version=max_jdk_version, jdk=True)

    jvm_options = self.get_options().jvm_options[:]
    if self.dist.version < Revision.lenient('9'):
      # For Java 8 we need to add the errorprone javac jar to the bootclasspath to
      # avoid the "java.lang.NoSuchFieldError: ANNOTATION_PROCESSOR_MODULE_PATH" error
      # See https://github.com/google/error-prone/issues/653 for more information
      jvm_options.extend(['-Xbootclasspath/p:{}'.format(self.tool_classpath('errorprone-javac')[0])])

    args = [
      '-d', output_dir,
    ]

    # Errorprone does not recognize source or target 10 yet
    if target.platform.source_level < Revision.lenient('10'):
      args.extend(['-source', str(target.platform.source_level)])

    if target.platform.target_level < Revision.lenient('10'):
      args.extend(['-target', str(target.platform.target_level)])

    errorprone_classpath_file = os.path.join(self.workdir, '{}.classpath'.format(os.path.basename(output_dir)))
    with open(errorprone_classpath_file, 'w') as f:
      f.write('-classpath ')
      f.write(':'.join(runtime_classpath))
    args.append('@{}'.format(errorprone_classpath_file))

    for opt in self.get_options().command_line_options:
      args.extend(safe_shlex_split(opt))

    with argfile.safe_args(self.calculate_sources(target), self.get_options()) as batched_sources:
      args.extend(batched_sources)
      result = self.runjava(classpath=self.tool_classpath('errorprone'),
                            main=self._ERRORPRONE_MAIN,
                            jvm_options=jvm_options,
                            args=args,
                            workunit_name='errorprone',
                            workunit_labels=[WorkUnitLabel.LINT])

      self.context.log.debug('java {main} ... exited with result ({result})'.format(
        main=self._ERRORPRONE_MAIN, result=result))

    return result
Example 13
File: jvm.py Project: Gointer/pants
  def get_program_args(self):
    """Get the program args to run this JVM with.

    These are the arguments passed to main() and are program-specific.
    """
    ret = []
    for arg in self.get_options().program_args:
      ret.extend(safe_shlex_split(arg))
    return ret
Example 14
  def _do_run_tests(self, targets):
    if not targets:
      return PythonTestResult.rc(0)

    buildroot = get_buildroot()
    source_chroot = os.path.relpath(
      self.context.products.get_data(GatherSources.PYTHON_SOURCES).path(), buildroot)
    sources_map = {}  # Path from chroot -> Path from buildroot.
    for t in targets:
      for p in t.sources_relative_to_source_root():
        sources_map[os.path.join(source_chroot, p)] = os.path.join(t.target_base, p)

    if not sources_map:
      return PythonTestResult.rc(0)

    with self._test_runner(targets, sources_map) as (pex, test_args):
      # Validate that the user didn't provide any passthru args that conflict
      # with those we must set ourselves.
      for arg in self.get_passthru_args():
        if arg.startswith('--junitxml') or arg.startswith('--confcutdir'):
          raise TaskError('Cannot pass this arg through to pytest: {}'.format(arg))

      junitxml_path = self._get_junit_xml_path(targets)
      # N.B. the `--confcutdir` here instructs pytest to stop scanning for conftest.py files at the
      # top of the buildroot. This prevents conftest.py files from outside (e.g. in users home dirs)
      # from leaking into pants test runs. See: https://github.com/pantsbuild/pants/issues/2726
      args = ['--junitxml', junitxml_path, '--confcutdir', get_buildroot(),
              '--continue-on-collection-errors']
      if self.get_options().fail_fast:
        args.extend(['-x'])
      if self._debug:
        args.extend(['-s'])
      if self.get_options().colors:
        args.extend(['--color', 'yes'])
      for options in self.get_options().options + self.get_passthru_args():
        args.extend(safe_shlex_split(options))
      args.extend(test_args)
      args.extend(sources_map.keys())

      result = self._do_run_tests_with_args(pex, args)
      external_junit_xml_dir = self.get_options().junit_xml_dir
      if external_junit_xml_dir:
        safe_mkdir(external_junit_xml_dir)
        shutil.copy(junitxml_path, external_junit_xml_dir)
      failed_targets = self._get_failed_targets_from_junitxml(junitxml_path, targets)

      def parse_error_handler(parse_error):
        # Simple error handler to pass to xml parsing function.
        raise TaskError('Error parsing xml file at {}: {}'
          .format(parse_error.xml_path, parse_error.cause))

      all_tests_info = self.parse_test_info(junitxml_path, parse_error_handler, ['file', 'name'])
      for test_name, test_info in all_tests_info.items():
        test_target = self._get_target_from_test(test_info, targets)
        self.report_all_info_for_single_test(self.options_scope, test_target, test_name, test_info)

      return result.with_failed_targets(failed_targets)
Example 15
  def execute(self):
    # We drive creation of setup.py distributions from the original target graph, grabbing codegen'd
    # sources when needed. We ignore PythonDistribution targets.
    def is_exported_python_target(t):
      return t.is_original and self.has_provides(t) and not is_local_python_dist(t)

    exported_python_targets = OrderedSet(t for t in self.context.target_roots
                                         if is_exported_python_target(t))
    if not exported_python_targets:
      raise TaskError('setup-py target(s) must provide an artifact.')

    dist_dir = self.get_options().pants_distdir

    # NB: We have to create and then run in 2 steps so that we can discover all exported targets
    # in-play in the creation phase which then allows a tsort of these exported targets in the run
    # phase to ensure an exported target is, for example (--run="sdist upload"), uploaded before any
    # exported target that depends on it is uploaded.

    created = {}

    def create(exported_python_target):
      if exported_python_target not in created:
        self.context.log.info('Creating setup.py project for {}'.format(exported_python_target))
        subject = self.derived_by_original.get(exported_python_target, exported_python_target)
        setup_dir, dependencies = self.create_setup_py(subject, dist_dir)
        created[exported_python_target] = setup_dir
        if self._recursive:
          for dep in dependencies:
            if is_exported_python_target(dep):
              create(dep)

    for exported_python_target in exported_python_targets:
      create(exported_python_target)

    interpreter = self.context.products.get_data(PythonInterpreter)
    python_dists = self.context.products.register_data(self.PYTHON_DISTS_PRODUCT, {})
    for exported_python_target in reversed(sort_targets(list(created.keys()))):
      setup_dir = created.get(exported_python_target)
      if setup_dir:
        if not self._run:
          self.context.log.info('Running packager against {}'.format(setup_dir))
          setup_runner = Packager(setup_dir, interpreter=interpreter)
          tgz_name = os.path.basename(setup_runner.sdist())
          sdist_path = os.path.join(dist_dir, tgz_name)
          self.context.log.info('Writing {}'.format(sdist_path))
          shutil.move(setup_runner.sdist(), sdist_path)
          safe_rmtree(setup_dir)
          python_dists[exported_python_target] = sdist_path
        else:
          self.context.log.info('Running {} against {}'.format(self._run, setup_dir))
          split_command = safe_shlex_split(self._run)
          setup_runner = SetupPyRunner(setup_dir, split_command, interpreter=interpreter)
          installed = setup_runner.run()
          if not installed:
            raise TaskError('Install failed.')
          python_dists[exported_python_target] = setup_dir
Example 16
 def _go_install(self, target, gopath):
   build_flags = re.sub(r'^"|"$', '', self.get_options().build_flags)
   args = safe_shlex_split(build_flags) + [target.import_path]
   result, go_cmd = self.go_dist.execute_go_cmd(
     'install', gopath=gopath, args=args,
     workunit_factory=self.context.new_workunit,
     workunit_name='install {}'.format(target.address.spec),
     workunit_labels=[WorkUnitLabel.COMPILER])
   if result != 0:
     raise TaskError('{} failed with exit code {}'.format(go_cmd, result))
Example 17
  def __init__(self, *args, **kwargs):
    super(IvyResolve, self).__init__(*args, **kwargs)

    self._outdir = self.get_options().outdir or os.path.join(self.workdir, 'reports')
    self._open = self.get_options().open
    self._report = self._open or self.get_options().report

    self._args = []
    for arg in self.get_options().args:
      self._args.extend(safe_shlex_split(arg))
Example 18
  def _do_run_tests(self, targets, workunit):

    def _extract_resultlog_filename(args):
      resultlogs = [arg[arg.find('=') + 1:] for arg in args if arg.startswith('--resultlog=')]
      if resultlogs:
        return resultlogs[0]
      else:
        try:
          return args[args.index('--resultlog') + 1]
        except IndexError:
          self.context.log.error('--resultlog specified without an argument')
          return None
        except ValueError:
          return None

    if not targets:
      return PythonTestResult.rc(0)

    rel_sources = list(itertools.chain(*[t.sources_relative_to_source_root() for t in targets]))
    if not rel_sources:
      return PythonTestResult.rc(0)
    source_root = self.context.products.get_data(GatherSources.PYTHON_SOURCES).path()
    sources = [os.path.join(source_root, p) for p in rel_sources]

    with self._test_runner(targets, workunit) as (pex, test_args):
      def run_and_analyze(resultlog_path):
        result = self._do_run_tests_with_args(pex, workunit, args)
        failed_targets = self._get_failed_targets_from_resultlogs(resultlog_path, targets)
        return result.with_failed_targets(failed_targets)

      # N.B. the `--confcutdir` here instructs pytest to stop scanning for conftest.py files at the
      # top of the buildroot. This prevents conftest.py files from outside (e.g. in users home dirs)
      # from leaking into pants test runs. See: https://github.com/pantsbuild/pants/issues/2726
      args = ['--confcutdir', get_buildroot()]
      if self.get_options().fail_fast:
        args.extend(['-x'])
      if self._debug:
        args.extend(['-s'])
      if self.get_options().colors:
        args.extend(['--color', 'yes'])
      for options in self.get_options().options + self.get_passthru_args():
        args.extend(safe_shlex_split(options))
      args.extend(test_args)
      args.extend(sources)

      # The user might have already specified the resultlog option. In such case, reuse it.
      resultlog_arg = _extract_resultlog_filename(args)

      if resultlog_arg:
        return run_and_analyze(resultlog_arg)
      else:
        with temporary_file_path() as resultlog_path:
          args.insert(0, '--resultlog={0}'.format(resultlog_path))
          return run_and_analyze(resultlog_path)
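The _extract_resultlog_filename helper above accepts both the --resultlog=path and the space-separated --resultlog path spellings, and returns None when the flag is absent or has no value. A small self-contained sketch of that parsing behavior (error logging replaced by a plain print, purely for illustration):

def extract_resultlog_filename(args):
  # '--resultlog=path' form: take everything after the first '='.
  resultlogs = [arg[arg.find('=') + 1:] for arg in args if arg.startswith('--resultlog=')]
  if resultlogs:
    return resultlogs[0]
  # '--resultlog path' form: the value is the next argument, if present.
  try:
    return args[args.index('--resultlog') + 1]
  except IndexError:
    print('--resultlog specified without an argument')
    return None
  except ValueError:
    return None

assert extract_resultlog_filename(['-s', '--resultlog=/tmp/pytest.log']) == '/tmp/pytest.log'
assert extract_resultlog_filename(['--resultlog', '/tmp/pytest.log']) == '/tmp/pytest.log'
assert extract_resultlog_filename(['--color', 'yes']) is None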
Example 19
File: base.py Project: jduan/pants
  def __init__(self, settings):
    self._settings = settings
    options = settings.options
    self._context = settings.context
    self._coverage = options.coverage

    self._coverage_jvm_options = []
    for jvm_option in options.coverage_jvm_options:
      self._coverage_jvm_options.extend(safe_shlex_split(jvm_option))

    self._coverage_open = options.coverage_open
    self._coverage_force = options.coverage_force
Example 20
  def __init__(self, *args, **kwargs):
    super(IvyResolve, self).__init__(*args, **kwargs)

    self._cachedir = IvySubsystem.global_instance().get_options().cache_dir
    self._classpath_dir = os.path.join(self.workdir, 'mapped')
    self._outdir = self.get_options().outdir or os.path.join(self.workdir, 'reports')
    self._open = self.get_options().open
    self._report = self._open or self.get_options().report
    self._confs = None

    self._args = []
    for arg in self.get_options().args:
      self._args.extend(safe_shlex_split(arg))
Example 21
  def _do_run_tests(self, targets, workunit):

    def _extract_resultlog_filename(args):
      resultlogs = [arg[arg.find('=') + 1:] for arg in args if arg.startswith('--resultlog=')]
      if resultlogs:
        return resultlogs[0]
      else:
        try:
          return args[args.index('--resultlog') + 1]
        except IndexError:
          self.context.log.error('--resultlog specified without an argument')
          return None
        except ValueError:
          return None

    if not targets:
      return PythonTestResult.rc(0)

    sources = list(itertools.chain(*[t.sources_relative_to_buildroot() for t in targets]))
    if not sources:
      return PythonTestResult.rc(0)

    with self._test_runner(targets, workunit) as (pex, test_args):

      def run_and_analyze(resultlog_path):
        result = self._do_run_tests_with_args(pex, workunit, args)
        failed_targets = self._get_failed_targets_from_resultlogs(resultlog_path, targets)
        return result.with_failed_targets(failed_targets)

      # N.B. the `--confcutdir` here instructs pytest to stop scanning for conftest.py files at the
      # top of the buildroot. This prevents conftest.py files from outside (e.g. in users home dirs)
      # from leaking into pants test runs. See: https://github.com/pantsbuild/pants/issues/2726
      args = ['--confcutdir', get_buildroot()]
      if self._debug:
        args.extend(['-s'])
      if self.get_options().colors:
        args.extend(['--color', 'yes'])
      for options in self.get_options().options + self.get_passthru_args():
        args.extend(safe_shlex_split(options))
      args.extend(test_args)
      args.extend(sources)

      # The user might have already specified the resultlog option. In such case, reuse it.
      resultlog_arg = _extract_resultlog_filename(args)

      if resultlog_arg:
        return run_and_analyze(resultlog_arg)
      else:
        with temporary_file_path() as resultlog_path:
          args.insert(0, '--resultlog={0}'.format(resultlog_path))
          return run_and_analyze(resultlog_path)
Example 22
    def execute(self):
        binary = self.require_single_root_target()
        if isinstance(binary, PythonBinary):
            # We can't throw if binary isn't a PythonBinary, because perhaps we were called on a
            # jvm_binary, in which case we have to no-op and let jvm_run do its thing.
            # TODO(benjy): Use MutexTask to coordinate this.
            interpreter = self.context.products.get_data(PythonInterpreter)

            with temporary_dir() as tmpdir:
                # Create a wrapper pex to "merge" the other pexes into via PEX_PATH.
                builder = PEXBuilder(tmpdir,
                                     interpreter,
                                     pex_info=binary.pexinfo)
                builder.freeze()

                pexes = [
                    self.context.products.get_data(
                        ResolveRequirements.REQUIREMENTS_PEX),
                    self.context.products.get_data(
                        GatherSources.PYTHON_SOURCES)
                ]

                # TODO: Expose the path as a property in pex, instead of relying on
                # fishing it out of the cmdline.
                pex_path = os.pathsep.join([pex.cmdline()[1] for pex in pexes])

                pex = PEX(tmpdir, interpreter)

                self.context.release_lock()
                with self.context.new_workunit(name='run',
                                               labels=[WorkUnitLabel.RUN]):
                    args = []
                    for arg in self.get_options().args:
                        args.extend(safe_shlex_split(arg))
                    args += self.get_passthru_args()
                    po = pex.run(blocking=False,
                                 args=args,
                                 env={'PEX_PATH': pex_path})
                    try:
                        result = po.wait()
                        if result != 0:
                            msg = '{interpreter} {entry_point} {args} ... exited non-zero ({code})'.format(
                                interpreter=interpreter.binary,
                                entry_point=binary.entry_point,
                                args=' '.join(args),
                                code=result)
                            raise TaskError(msg, exit_code=result)
                    except KeyboardInterrupt:
                        po.send_signal(signal.SIGINT)
                        raise
Example 23
    def __init__(self, *args, **kwargs):
        super(IvyResolve, self).__init__(*args, **kwargs)

        self._cachedir = IvySubsystem.global_instance().get_options().cache_dir
        self._classpath_dir = os.path.join(self.workdir, 'mapped')
        self._outdir = self.get_options().outdir or os.path.join(
            self.workdir, 'reports')
        self._open = self.get_options().open
        self._report = self._open or self.get_options().report
        self._confs = None

        self._args = []
        for arg in self.get_options().args:
            self._args.extend(safe_shlex_split(arg))
Example 24
File: jvm.py Project: pcurry/pants
  def get_jvm_options(self):
    """Return the options to run this JVM with.

    These are options to the JVM itself, such as -Dfoo=bar, -Xmx=1g, -XX:-UseParallelGC and so on.

    Thus named because get_options() already exists (and returns this object's Pants options).
    """
    ret = []
    for opt in self.get_options().options:
      ret.extend(safe_shlex_split(opt))

    if self.get_options().debug:
      debug_port = self.get_options().debug_port
      ret.extend(arg.format(debug_port=debug_port) for arg in self.get_options().debug_args)
    return ret
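The debug branch above assumes each entry of --debug-args carries a {debug_port} placeholder that str.format fills in with the configured port. A hypothetical sketch of that expansion (the debug_args value shown is illustrative, not necessarily the default registered by the task):

debug_port = 5005
# Hypothetical --debug-args entries with a {debug_port} placeholder.
debug_args = [
  '-Xdebug',
  '-Xrunjdwp:transport=dt_socket,server=y,suspend=y,address={debug_port}',
]
jvm_options = [arg.format(debug_port=debug_port) for arg in debug_args]
print(jvm_options)
# ['-Xdebug', '-Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=5005']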
Example 25
  def _do_run_tests(self, targets, workunit):

    def _extract_resultlog_filename(args):
      resultlogs = [arg[arg.find('=') + 1:] for arg in args if arg.startswith('--resultlog=')]
      if resultlogs:
        return resultlogs[0]
      else:
        try:
          return args[args.index('--resultlog') + 1]
        except IndexError:
          self.context.log.error('--resultlog specified without an argument')
          return None
        except ValueError:
          return None

    if not targets:
      return PythonTestResult.rc(0)

    sources = list(itertools.chain(*[t.sources_relative_to_buildroot() for t in targets]))
    if not sources:
      return PythonTestResult.rc(0)

    with self._test_runner(targets, workunit) as (pex, test_args):

      def run_and_analyze(resultlog_path):
        result = self._do_run_tests_with_args(pex, workunit, args)
        failed_targets = self._get_failed_targets_from_resultlogs(resultlog_path, targets)
        return result.with_failed_targets(failed_targets)

      args = []
      if self._debug:
        args.extend(['-s'])
      if self.get_options().colors:
        args.extend(['--color', 'yes'])
      for options in self.get_options().options + self.get_passthru_args():
        args.extend(safe_shlex_split(options))
      args.extend(test_args)
      args.extend(sources)

      # The user might have already specified the resultlog option. In such case, reuse it.
      resultlog_arg = _extract_resultlog_filename(args)

      if resultlog_arg:
        return run_and_analyze(resultlog_arg)
      else:
        with temporary_file_path() as resultlog_path:
          args.insert(0, '--resultlog={0}'.format(resultlog_path))
          return run_and_analyze(resultlog_path)
Example 26
  def _do_run_tests(self, targets, workunit):

    def _extract_resultlog_filename(args):
      resultlogs = [arg[arg.find('=') + 1:] for arg in args if arg.startswith('--resultlog=')]
      if resultlogs:
        return resultlogs[0]
      else:
        try:
          return args[args.index('--resultlog') + 1]
        except IndexError:
          self.context.log.error('--resultlog specified without an argument')
          return None
        except ValueError:
          return None

    if not targets:
      return PythonTestResult.rc(0)

    sources = list(itertools.chain(*[t.sources_relative_to_buildroot() for t in targets]))
    if not sources:
      return PythonTestResult.rc(0)

    with self._test_runner(targets, workunit) as (pex, test_args):

      def run_and_analyze(resultlog_path):
        result = self._do_run_tests_with_args(pex, workunit, args)
        failed_targets = self._get_failed_targets_from_resultlogs(resultlog_path, targets)
        return result.with_failed_targets(failed_targets)

      args = []
      if self._debug:
        args.extend(['-s'])
      if self.get_options().colors:
        args.extend(['--color', 'yes'])
      for options in self.get_options().options + self.get_passthru_args():
        args.extend(safe_shlex_split(options))
      args.extend(test_args)
      args.extend(sources)

      # The user might have already specified the resultlog option. In such case, reuse it.
      resultlog_arg = _extract_resultlog_filename(args)

      if resultlog_arg:
        return run_and_analyze(resultlog_arg)
      else:
        with temporary_file_path() as resultlog_path:
          args.insert(0, '--resultlog={0}'.format(resultlog_path))
          return run_and_analyze(resultlog_path)
Example 27
  def __init__(self, *args, **kwargs):
    super(IvyResolve, self).__init__(*args, **kwargs)

    self._cachedir = IvySubsystem.global_instance().get_options().cache_dir
    self._classpath_dir = os.path.join(self.workdir, 'mapped')
    self._outdir = self.get_options().outdir or os.path.join(self.workdir, 'reports')
    self._open = self.get_options().open
    self._report = self._open or self.get_options().report
    self._confs = None

    self._args = []
    for arg in self.get_options().args:
      self._args.extend(safe_shlex_split(arg))

    # Typically this should be a local cache only, since classpaths aren't portable.
    self.setup_artifact_cache()
Example 28
  def _do_run_tests(self, targets, workunit):
    if not targets:
      return PythonTestResult.rc(0)

    buildroot = get_buildroot()
    source_chroot = os.path.relpath(
      self.context.products.get_data(GatherSources.PYTHON_SOURCES).path(), buildroot)
    sources_map = {}  # Path from chroot -> Path from buildroot.
    for t in targets:
      for p in t.sources_relative_to_source_root():
        sources_map[os.path.join(source_chroot, p)] = os.path.join(t.target_base, p)

    if not sources_map:
      return PythonTestResult.rc(0)

    with self._test_runner(targets, sources_map, workunit) as (pex, test_args):
      # Validate that the user didn't provide any passthru args that conflict
      # with those we must set ourselves.
      for arg in self.get_passthru_args():
        if arg.startswith('--junitxml') or arg.startswith('--confcutdir'):
          raise TaskError('Cannot pass this arg through to pytest: {}'.format(arg))

      junitxml_path = self._get_junit_xml_path(targets)
      # N.B. the `--confcutdir` here instructs pytest to stop scanning for conftest.py files at the
      # top of the buildroot. This prevents conftest.py files from outside (e.g. in users home dirs)
      # from leaking into pants test runs. See: https://github.com/pantsbuild/pants/issues/2726
      args = ['--junitxml', junitxml_path, '--confcutdir', get_buildroot(),
              '--continue-on-collection-errors']
      if self.get_options().fail_fast:
        args.extend(['-x'])
      if self._debug:
        args.extend(['-s'])
      if self.get_options().colors:
        args.extend(['--color', 'yes'])
      for options in self.get_options().options + self.get_passthru_args():
        args.extend(safe_shlex_split(options))
      args.extend(test_args)
      args.extend(sources_map.keys())

      result = self._do_run_tests_with_args(pex, workunit, args)
      external_junit_xml_dir = self.get_options().junit_xml_dir
      if external_junit_xml_dir:
        safe_mkdir(external_junit_xml_dir)
        shutil.copy(junitxml_path, external_junit_xml_dir)
      failed_targets = self._get_failed_targets_from_junitxml(junitxml_path, targets)
      return result.with_failed_targets(failed_targets)
Example 29
  def get_jvm_options(self):
    """Return the options to run this JVM with.

    These are options to the JVM itself, such as -Dfoo=bar, -Xmx=1g, -XX:-UseParallelGC and so on.

    Thus named because get_options() already exists (and returns this object's Pants options).
    """
    ret = []
    for opt in self.get_options().options:
      ret.extend(safe_shlex_split(opt))

    if (self.get_options().debug or
        self.get_options().is_flagged('debug_port') or
        self.get_options().is_flagged('debug_args')):
      debug_port = self.get_options().debug_port
      ret.extend(arg.format(debug_port=debug_port) for arg in self.get_options().debug_args)
    return ret
Example 30
    def __init__(self, *args, **kwargs):
        super(IvyResolve, self).__init__(*args, **kwargs)

        self._ivy_bootstrapper = Bootstrapper.instance()
        self._cachedir = self._ivy_bootstrapper.ivy_cache_dir
        self._classpath_dir = os.path.join(self.workdir, 'mapped')
        self._outdir = self.get_options().outdir or os.path.join(
            self.workdir, 'reports')
        self._open = self.get_options().open
        self._report = self._open or self.get_options().report
        self._confs = None

        self._args = []
        for arg in self.get_options().args:
            self._args.extend(safe_shlex_split(arg))

        # Typically this should be a local cache only, since classpaths aren't portable.
        self.setup_artifact_cache()
Example 31
    def execute(self):
        binary = self.require_single_root_target()
        if isinstance(binary, PythonBinary):
            # We can't throw if binary isn't a PythonBinary, because perhaps we were called on a
            # jvm_binary, in which case we have to no-op and let jvm_run do its thing.
            # TODO(benjy): Use MutexTask to coordinate this.
            interpreter = self.context.products.get_data(PythonInterpreter)

            with temporary_dir() as tmpdir:
                # Create a wrapper pex to "merge" the other pexes into via PEX_PATH.
                builder = PEXBuilder(tmpdir, interpreter, pex_info=binary.pexinfo)
                builder.freeze()

                pexes = [
                    self.context.products.get_data(ResolveRequirements.REQUIREMENTS_PEX),
                    self.context.products.get_data(GatherSources.PYTHON_SOURCES),
                ]

                # TODO: Expose the path as a property in pex, instead of relying on
                # fishing it out of the cmdline.
                pex_path = os.pathsep.join([pex.cmdline()[1] for pex in pexes])

                pex = PEX(tmpdir, interpreter)

                self.context.release_lock()
                with self.context.new_workunit(name="run", labels=[WorkUnitLabel.RUN]):
                    args = []
                    for arg in self.get_options().args:
                        args.extend(safe_shlex_split(arg))
                    args += self.get_passthru_args()
                    po = pex.run(blocking=False, args=args, env={"PEX_PATH": pex_path})
                    try:
                        result = po.wait()
                        if result != 0:
                            msg = "{interpreter} {entry_point} {args} ... exited non-zero ({code})".format(
                                interpreter=interpreter.binary,
                                entry_point=binary.entry_point,
                                args=" ".join(args),
                                code=result,
                            )
                            raise TaskError(msg, exit_code=result)
                    except KeyboardInterrupt:
                        po.send_signal(signal.SIGINT)
                        raise
Example 32
  def __init__(self, settings, copy2=shutil.copy2, copytree=shutil.copytree, is_file=os.path.isfile,
               safe_md=safe_mkdir):
    self._settings = settings
    options = settings.options
    self._context = settings.context
    self._coverage = options.coverage

    self._coverage_jvm_options = []
    for jvm_option in options.coverage_jvm_options:
      self._coverage_jvm_options.extend(safe_shlex_split(jvm_option))

    self._coverage_open = options.coverage_open
    self._coverage_force = options.coverage_force

    # Injecting these methods to make testing cleaner.
    self._copy2 = copy2
    self._copytree = copytree
    self._is_file = is_file
    self._safe_makedir = safe_md
Example 33
  def __init__(self, settings, copy2=shutil.copy2, copytree=shutil.copytree, is_file=os.path.isfile,
               safe_md=safe_mkdir):
    self._settings = settings
    options = settings.options
    self._context = settings.context
    self._coverage = options.coverage

    self._coverage_jvm_options = []
    for jvm_option in options.coverage_jvm_options:
      self._coverage_jvm_options.extend(safe_shlex_split(jvm_option))

    self._coverage_open = options.coverage_open
    self._coverage_force = options.coverage_force

    # Injecting these methods to make testing cleaner.
    self._copy2 = copy2
    self._copytree = copytree
    self._is_file = is_file
    self._safe_makedir = safe_md
Example 34
  def __init__(self, task_exports, context):
    super(_Coverage, self).__init__(task_exports, context)
    options = task_exports.task_options
    self._coverage = options.coverage
    self._coverage_filters = options.coverage_patterns or []

    self._coverage_jvm_options = []
    for jvm_option in options.coverage_jvm_options:
      self._coverage_jvm_options.extend(safe_shlex_split(jvm_option))

    self._coverage_dir = os.path.join(task_exports.workdir, 'coverage')
    self._coverage_instrument_dir = os.path.join(self._coverage_dir, 'classes')
    # TODO(ji): These may need to be transferred down to the Emma class, as the suffixes
    # may be emma-specific. Resolve when we also provide cobertura support.
    self._coverage_metadata_file = os.path.join(self._coverage_dir, 'coverage.em')
    self._coverage_file = os.path.join(self._coverage_dir, 'coverage.ec')
    self._coverage_console_file = os.path.join(self._coverage_dir, 'coverage.txt')
    self._coverage_xml_file = os.path.join(self._coverage_dir, 'coverage.xml')
    self._coverage_html_file = os.path.join(self._coverage_dir, 'html', 'index.html')
    self._coverage_open = options.coverage_open
    self._coverage_force = options.coverage_force
Example 35
    def __init__(self,
                 options,
                 context,
                 workdir,
                 tool_classpath,
                 confs,
                 log,
                 copy2=shutil.copy2,
                 copytree=shutil.copytree,
                 is_file=os.path.isfile,
                 safe_md=safe_mkdir):
        self.options = options
        self.context = context
        self.workdir = workdir
        self.tool_classpath = tool_classpath
        self.confs = confs
        self.log = log

        self.coverage_dir = os.path.join(self.workdir, 'coverage')
        self.coverage_instrument_dir = os.path.join(self.coverage_dir,
                                                    'classes')
        self.coverage_console_file = os.path.join(self.coverage_dir,
                                                  'coverage.txt')
        self.coverage_xml_file = os.path.join(self.coverage_dir,
                                              'coverage.xml')
        self.coverage_html_file = os.path.join(self.coverage_dir, 'html',
                                               'index.html')

        self.coverage_jvm_options = []
        for jvm_option in options.coverage_jvm_options:
            self.coverage_jvm_options.extend(safe_shlex_split(jvm_option))

        self.coverage_open = options.coverage_open
        self.coverage_force = options.coverage_force

        # Injecting these methods to make unit testing cleaner.
        self.copy2 = copy2
        self.copytree = copytree
        self.is_file = is_file
        self.safe_makedir = safe_md
Example 36
    def execute(self):
        binary = self.require_single_root_target()
        if isinstance(binary, PythonBinary):
            # We can't throw if binary isn't a PythonBinary, because perhaps we were called on a
            # jvm_binary, in which case we have to no-op and let jvm_run do its thing.
            # TODO(benjy): Some more elegant way to coordinate how tasks claim targets.
            interpreter = self.select_interpreter_for_targets(
                self.context.targets())
            with self.temporary_pex_builder(
                    interpreter=interpreter,
                    pex_info=binary.pexinfo) as builder:
                chroot = PythonChroot(context=self.context,
                                      targets=[binary],
                                      builder=builder,
                                      platforms=binary.platforms,
                                      interpreter=interpreter)

                chroot.dump()
                builder.freeze()
                pex = PEX(builder.path(), interpreter=interpreter)
                self.context.release_lock()
                with self.context.new_workunit(name='run',
                                               labels=[WorkUnit.RUN]):
                    args = []
                    for arg in self.get_options().args:
                        args.extend(safe_shlex_split(arg))
                    args += self.get_passthru_args()
                    po = pex.run(blocking=False, args=args)
                    try:
                        result = po.wait()
                        if result != 0:
                            msg = '{interpreter} {entry_point} {args} ... exited non-zero ({code})'.format(
                                interpreter=interpreter.binary,
                                entry_point=binary.entry_point,
                                args=' '.join(args),
                                code=result)
                            raise TaskError(msg, exit_code=result)
                    except KeyboardInterrupt:
                        po.send_signal(signal.SIGINT)
                        raise
Example 37
    def execute(self):
        def is_python_test(target):
            # Note that we ignore PythonTestSuite, because we'll see the PythonTests targets
            # it depends on anyway, so if we don't ignore it we'll end up running the tests twice.
            # TODO(benjy): Once we're off the 'build' command we can get rid of python_test_suite,
            # or make it an alias of dependencies().
            return isinstance(target, PythonTests)

        test_targets = list(filter(is_python_test, self.context.targets()))
        if test_targets:
            self.context.release_lock()

            debug = self.get_options().level == 'debug'

            args = ['--color', 'yes'] if self.get_options().colors else []
            for options in self.get_options().options + self.get_passthru_args():
                args.extend(safe_shlex_split(options))
            test_builder = PythonTestBuilder(context=self.context,
                                             targets=test_targets,
                                             args=args,
                                             interpreter=self.interpreter,
                                             fast=self.get_options().fast,
                                             debug=debug)
            with self.context.new_workunit(
                    name='run', labels=[WorkUnit.TOOL,
                                        WorkUnit.TEST]) as workunit:
                # pytest uses py.io.terminalwriter for output. That class detects the terminal
                # width and attempts to use all of it. However we capture and indent the console
                # output, leading to weird-looking line wraps. So we trick the detection code
                # into thinking the terminal window is narrower than it is.
                cols = os.environ.get('COLUMNS', 80)
                with environment_as(COLUMNS=str(int(cols) - 30)):
                    stdout = workunit.output('stdout') if workunit else None
                    stderr = workunit.output('stderr') if workunit else None
                    if test_builder.run(stdout=stdout, stderr=stderr):
                        raise TaskError()
Example 38
    def execute(self):
        binary = self.require_single_root_target()
        if not self.source_target_constraint.satisfied_by(binary):
            return

        extra_args = []
        for arg in self.get_options().args:
            extra_args.extend(safe_shlex_split(arg))
        passthru_args = self.get_passthru_args()
        full_argv = extra_args + [binary.script_path, '--'] + passthru_args

        perl6_env = self.context.products.get_data(CollectPerl6Env.Perl6Env)

        self.context.release_lock()

        try:
            self._perl6.invoke_perl6(
                full_argv,
                perl6_env,
                workunit_factory=self._run_workunit_factory)
        except Perl6.Perl6InvocationError as e:
            raise self.Perl6RunError("Error running perl 6: {}".format(e),
                                     e,
                                     exit_code=e.exit_code)
Example 39
    def errorprone(self, target):
        runtime_classpaths = self.context.products.get_data(
            'runtime_classpath')
        runtime_classpath = [
            jar for conf, jar in runtime_classpaths.get_for_targets(
                target.closure(bfs=True))
        ]

        output_dir = os.path.join(self.workdir, target.id)
        safe_mkdir(output_dir)
        runtime_classpath.append(output_dir)

        args = [
            '-classpath',
            ':'.join(runtime_classpath),
            '-d',
            output_dir,
        ]

        for opt in self.get_options().command_line_options:
            args.extend(safe_shlex_split(opt))

        args.extend(self.calculate_sources(target))

        result = self.runjava(classpath=self.tool_classpath('errorprone'),
                              main=self._ERRORPRONE_MAIN,
                              jvm_options=self.get_options().jvm_options,
                              args=args,
                              workunit_name='errorprone',
                              workunit_labels=[WorkUnitLabel.LINT])

        self.context.log.debug(
            'java {main} ... exited with result ({result})'.format(
                main=self._ERRORPRONE_MAIN, result=result))

        return result
Example 40
  def __init__(self, options, context, workdir, tool_classpath, confs, log,
               copy2=shutil.copy2, copytree=shutil.copytree, is_file=os.path.isfile,
               safe_md=safe_mkdir):
    self.options = options
    self.context = context
    self.workdir = workdir
    self.tool_classpath = tool_classpath
    self.confs = confs
    self.log = log

    self.coverage_dir = os.path.join(self.workdir, 'coverage')

    self.coverage_jvm_options = []
    for jvm_option in options.coverage_jvm_options:
      self.coverage_jvm_options.extend(safe_shlex_split(jvm_option))

    self.coverage_open = options.coverage_open
    self.coverage_force = options.coverage_force

    # Injecting these methods to make unit testing cleaner.
    self.copy2 = copy2
    self.copytree = copytree
    self.is_file = is_file
    self.safe_makedir = safe_md
Example 41
 def get_rcopts(command, key):
     return safe_shlex_split(config.get(
         command, key)) if config.has_option(command, key) else []
Example 42
    def _run_pytest(self, fail_fast, test_targets, workdirs):
        if not test_targets:
            return PytestResult.rc(0)

        # Absolute path to chrooted test file -> Path to original test file relative to the buildroot.
        sources_map = OrderedDict()
        for t in test_targets:
            for p in t.sources_relative_to_source_root():
                sources_map[os.path.join(self._source_chroot_path,
                                         p)] = os.path.join(t.target_base, p)

        if not sources_map:
            return PytestResult.rc(0)

        with self._test_runner(workdirs, test_targets,
                               sources_map) as (pytest_binary, test_args,
                                                get_pytest_rootdir):
            # Validate that the user didn't provide any passthru args that conflict
            # with those we must set ourselves.
            for arg in self.get_passthru_args():
                if arg.startswith('--junitxml') or arg.startswith(
                        '--confcutdir'):
                    raise TaskError(
                        'Cannot pass this arg through to pytest: {}'.format(
                            arg))

            junitxml_path = workdirs.junitxml_path(*test_targets)

            # N.B. the `--confcutdir` here instructs pytest to stop scanning for conftest.py files at the
            # top of the buildroot. This prevents conftest.py files from outside (e.g. in users home dirs)
            # from leaking into pants test runs. See: https://github.com/pantsbuild/pants/issues/2726
            args = [
                '-c', pytest_binary.config_path, '--junitxml', junitxml_path,
                '--confcutdir',
                get_buildroot(), '--continue-on-collection-errors'
            ]
            if fail_fast:
                args.extend(['-x'])
            if self._debug:
                args.extend(['-s'])
            if self.get_options().colors:
                args.extend(['--color', 'yes'])

            if self.get_options().options:
                for opt in self.get_options().options:
                    args.extend(safe_shlex_split(opt))
            args.extend(self.get_passthru_args())

            args.extend(test_args)
            args.extend(sources_map.keys())

            # We want to ensure our reporting based off junit xml is from this run so kill results from
            # prior runs.
            if os.path.exists(junitxml_path):
                os.unlink(junitxml_path)

            with self._maybe_run_in_chroot():
                result = self._do_run_tests_with_args(pytest_binary.pex, args)

            # There was a problem prior to test execution preventing junit xml file creation so just let
            # the failure result bubble.
            if not os.path.exists(junitxml_path):
                return result

            pytest_rootdir = get_pytest_rootdir()
            failed_targets = self._get_failed_targets_from_junitxml(
                junitxml_path, test_targets, pytest_rootdir)

            def parse_error_handler(parse_error):
                # Simple error handler to pass to xml parsing function.
                raise TaskError('Error parsing xml file at {}: {}'.format(
                    parse_error.xml_path, parse_error.cause))

            all_tests_info = self.parse_test_info(
                junitxml_path, parse_error_handler,
                ['file', 'name', 'classname'])
            for test_name, test_info in all_tests_info.items():
                test_target = self._get_target_from_test(
                    test_info, test_targets, pytest_rootdir)
                self.report_all_info_for_single_test(self.options_scope,
                                                     test_target, test_name,
                                                     test_info)

            return result.with_failed_targets(failed_targets)
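
Before building the pytest command line, the code above rejects passthrough args that would clash with flags the task must set itself (--junitxml and --confcutdir). A standalone sketch of just that screening step; validate_passthru_args is a name invented here, not part of the task:

def validate_passthru_args(passthru_args, reserved_prefixes=('--junitxml', '--confcutdir')):
  # Same check as above: refuse user-supplied args that collide with flags the
  # test task needs to control itself.
  for arg in passthru_args:
    if any(arg.startswith(prefix) for prefix in reserved_prefixes):
      raise ValueError('Cannot pass this arg through to pytest: {}'.format(arg))


validate_passthru_args(['-k', 'smoke', '--maxfail=3'])  # accepted
try:
  validate_passthru_args(['--junitxml=/tmp/out.xml'])
except ValueError as e:
  print(e)  # Cannot pass this arg through to pytest: --junitxml=/tmp/out.xml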
Example no. 43
0
 def _build_and_test_flags(self):
     return [
         single_flag for flags_section in
         self.get_options().shlexed_build_and_test_flags
         for single_flag in safe_shlex_split(flags_section)
     ]
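
Each entry of shlexed_build_and_test_flags may itself contain several shell-quoted flags, so the comprehension flattens a list of strings into a flat list of flags. A quick, self-contained illustration with made-up flag values (shlex.split standing in for safe_shlex_split):

import shlex

shlexed_build_and_test_flags = ['--verbose "--define=profile=ci"', '--keep_going']
flags = [single_flag
         for flags_section in shlexed_build_and_test_flags
         for single_flag in shlex.split(flags_section)]
print(flags)  # ['--verbose', '--define=profile=ci', '--keep_going']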
Example no. 44
0
 def _split_build_flags(build_flags):
   return safe_shlex_split(build_flags)  # Visible for testing
Example no. 45
0
 def get_rcopts(command, key):
   return safe_shlex_split(config.get(command, key)) if config.has_option(command, key) else []
Example no. 46
0
    def execute(self):
        # We drive creation of setup.py distributions from the original target graph, grabbing codegen'd
        # sources when needed. We ignore PythonDistribution targets.
        def is_exported_python_target(t):
            return t.is_original and self.has_provides(
                t) and not is_local_python_dist(t)

        exported_python_targets = OrderedSet(t
                                             for t in self.context.target_roots
                                             if is_exported_python_target(t))

        dist_dir = self.get_options().pants_distdir

        # NB: We have to create and then run in 2 steps so that the creation phase can discover
        # every exported target in play. The run phase can then process those exported targets in
        # dependency order, ensuring an exported target is published (e.g. with --run="sdist upload")
        # before any exported target that depends on it. (A toy illustration of this ordering
        # follows this example.)

        created: Dict[PythonTarget, Path] = {}

        def create(exported_python_target):
            if exported_python_target not in created:
                self.context.log.info(
                    "Creating setup.py project for {}".format(
                        exported_python_target))
                subject = self.derived_by_original.get(exported_python_target,
                                                       exported_python_target)
                setup_dir, dependencies = self.create_setup_py(
                    subject, dist_dir)
                created[exported_python_target] = Path(setup_dir)
                if self._recursive:
                    for dep in dependencies:
                        if is_exported_python_target(dep):
                            create(dep)

        for exported_python_target in exported_python_targets:
            create(exported_python_target)

        interpreter = self.context.products.get_data(PythonInterpreter)
        python_dists = self.context.products.register_data(
            self.PYTHON_DISTS_PRODUCT, {})

        setup_runner = SetupPyRunner.Factory.create(
            scope=self,
            interpreter=interpreter,
            pex_file_path=os.path.join(self.workdir, self.fingerprint,
                                       "setup-py-runner.pex"),
        )
        for exported_python_target in reversed(
                sort_targets(list(created.keys()))):
            setup_dir = created.get(exported_python_target)
            if setup_dir:
                if not self._run:
                    self.context.log.info(
                        "Running sdist against {}".format(setup_dir))
                    sdist = setup_runner.sdist(setup_dir)
                    tgz_name = sdist.name
                    sdist_path = os.path.join(dist_dir, tgz_name)
                    self.context.log.info("Writing {}".format(sdist_path))
                    shutil.move(sdist, sdist_path)
                    safe_rmtree(str(setup_dir))
                    python_dists[exported_python_target] = sdist_path
                else:
                    self.context.log.info("Running {} against {}".format(
                        self._run, setup_dir))
                    split_command = safe_shlex_split(self._run)
                    try:
                        setup_runner.run_setup_command(
                            source_dir=setup_dir, setup_command=split_command)
                    except SetupPyRunner.CommandFailure as e:
                        raise TaskError(f"Install failed: {e}")
                    python_dists[exported_python_target] = setup_dir
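
The NB comment above is the key design point: every setup.py project is created first, and only then run in dependency order, so nothing is published before the things it depends on. A toy, self-contained sketch of that ordering property, using a hypothetical dependency map rather than Pants' own sort_targets:

# Toy dependency graph: each key depends on the listed values.
deps = {
  'app': ['lib'],
  'lib': ['core'],
  'core': [],
}

def publish_order(graph):
  # Post-order DFS: a node's dependencies are emitted before the node itself,
  # which is the property the reversed sort above relies on for publishing.
  seen, order = set(), []
  def visit(node):
    if node in seen:
      return
    seen.add(node)
    for dep in graph[node]:
      visit(dep)
    order.append(node)
  for node in graph:
    visit(node)
  return order

print(publish_order(deps))  # ['core', 'lib', 'app']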
Example no. 47
0
 def _split_build_flags(build_flags):
     return safe_shlex_split(build_flags)  # Visible for testing
Example no. 48
0
 def _build_and_test_flags(self):
     return safe_shlex_split(self.get_options().build_and_test_flags)
Example no. 49
0
    def execute(self):
        # We drive creation of setup.py distributions from the original target graph, grabbing codegen'd
        # sources when needed. We ignore PythonDistribution targets.
        def is_exported_python_target(t):
            return t.is_original and self.has_provides(
                t) and not is_local_python_dist(t)

        exported_python_targets = OrderedSet(t
                                             for t in self.context.target_roots
                                             if is_exported_python_target(t))
        if not exported_python_targets:
            raise TaskError('setup-py target(s) must provide an artifact.')

        dist_dir = self.get_options().pants_distdir

        # NB: We have to create and then run in 2 steps so that the creation phase can discover
        # every exported target in play. The run phase can then process those exported targets in
        # dependency order, ensuring an exported target is published (e.g. with --run="sdist upload")
        # before any exported target that depends on it.

        created = {}

        def create(exported_python_target):
            if exported_python_target not in created:
                self.context.log.info(
                    'Creating setup.py project for {}'.format(
                        exported_python_target))
                subject = self.derived_by_original.get(exported_python_target,
                                                       exported_python_target)
                setup_dir, dependencies = self.create_setup_py(
                    subject, dist_dir)
                created[exported_python_target] = setup_dir
                if self._recursive:
                    for dep in dependencies:
                        if is_exported_python_target(dep):
                            create(dep)

        for exported_python_target in exported_python_targets:
            create(exported_python_target)

        interpreter = self.context.products.get_data(PythonInterpreter)
        python_dists = self.context.products.register_data(
            self.PYTHON_DISTS_PRODUCT, {})
        for exported_python_target in reversed(
                sort_targets(list(created.keys()))):
            setup_dir = created.get(exported_python_target)
            if setup_dir:
                if not self._run:
                    self.context.log.info(
                        'Running packager against {}'.format(setup_dir))
                    setup_runner = Packager(setup_dir, interpreter=interpreter)
                    tgz_name = os.path.basename(setup_runner.sdist())
                    sdist_path = os.path.join(dist_dir, tgz_name)
                    self.context.log.info('Writing {}'.format(sdist_path))
                    shutil.move(setup_runner.sdist(), sdist_path)
                    safe_rmtree(setup_dir)
                    python_dists[exported_python_target] = sdist_path
                else:
                    self.context.log.info('Running {} against {}'.format(
                        self._run, setup_dir))
                    split_command = safe_shlex_split(self._run)
                    setup_runner = SetupPyRunner(setup_dir,
                                                 split_command,
                                                 interpreter=interpreter)
                    setup_runner.run()
                    python_dists[exported_python_target] = setup_dir
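
In both versions of execute, the --run value is shell-split before being handed to the setup.py runner, so quoting behaves as it does on a shell command line. For example (shlex.split again standing in for safe_shlex_split):

import shlex

print(shlex.split('sdist upload'))               # ['sdist', 'upload']
print(shlex.split('upload -r "internal pypi"'))  # ['upload', '-r', 'internal pypi']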
Example no. 50
0
    def errorprone(self, target):
        runtime_classpaths = self.context.products.get_data(
            'runtime_classpath')
        runtime_classpath = [
            jar for conf, jar in runtime_classpaths.get_for_targets(
                target.closure(bfs=True))
        ]

        output_dir = os.path.join(self.workdir, target.id)
        safe_mkdir(output_dir)
        runtime_classpath.append(output_dir)

        # Try to run errorprone with the same java version as the target
        # The minimum JDK for errorprone is JDK 1.8
        min_jdk_version = max(target.platform.target_level,
                              Revision.lenient('1.8'))
        if min_jdk_version.components[0] == 1:
            max_jdk_version = Revision(min_jdk_version.components[0],
                                       min_jdk_version.components[1], '9999')
        else:
            max_jdk_version = Revision(min_jdk_version.components[0], '9999')
        self.set_distribution(minimum_version=min_jdk_version,
                              maximum_version=max_jdk_version,
                              jdk=True)

        jvm_options = self.get_options().jvm_options[:]
        if self.dist.version < Revision.lenient('9'):
            # For Java 8 we need to add the errorprone javac jar to the bootclasspath to
            # avoid the "java.lang.NoSuchFieldError: ANNOTATION_PROCESSOR_MODULE_PATH" error
            # See https://github.com/google/error-prone/issues/653 for more information
            jvm_options.extend([
                '-Xbootclasspath/p:{}'.format(
                    self.tool_classpath('errorprone-javac')[0])
            ])

        args = [
            '-d',
            output_dir,
        ]

        # Errorprone does not recognize source or target 10 yet
        if target.platform.source_level < Revision.lenient('10'):
            args.extend(['-source', str(target.platform.source_level)])

        if target.platform.target_level < Revision.lenient('10'):
            args.extend(['-target', str(target.platform.target_level)])

        errorprone_classpath_file = os.path.join(
            self.workdir, '{}.classpath'.format(os.path.basename(output_dir)))
        with open(errorprone_classpath_file, 'w') as f:
            f.write('-classpath ')
            f.write(':'.join(runtime_classpath))
        args.append('@{}'.format(errorprone_classpath_file))

        for opt in self.get_options().command_line_options:
            args.extend(safe_shlex_split(opt))

        with argfile.safe_args(self.calculate_sources(target),
                               self.get_options()) as batched_sources:
            args.extend(batched_sources)
            result = self.runjava(classpath=self.tool_classpath('errorprone'),
                                  main=self._ERRORPRONE_MAIN,
                                  jvm_options=jvm_options,
                                  args=args,
                                  workunit_name='errorprone',
                                  workunit_labels=[WorkUnitLabel.LINT])

            self.context.log.debug(
                'java {main} ... exited with result ({result})'.format(
                    main=self._ERRORPRONE_MAIN, result=result))

        return result
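
The classpath above is handed to Error Prone through an @-file rather than inline; javac-style tools expand '@path' by reading extra options from that file, which keeps very long classpaths off the command line itself. A minimal sketch of that convention; write_classpath_argfile is a name made up here for illustration:

import os
import tempfile

def write_classpath_argfile(workdir, name, classpath_entries):
  # Write '-classpath a.jar:b.jar:...' into a file and return the '@file' arg.
  path = os.path.join(workdir, '{}.classpath'.format(name))
  with open(path, 'w') as f:
    f.write('-classpath ')
    f.write(':'.join(classpath_entries))  # ':' mirrors the Unix-style join above
  return '@{}'.format(path)

arg = write_classpath_argfile(tempfile.mkdtemp(), 'example', ['a.jar', 'b.jar', 'c.jar'])
print(arg)  # e.g. @/tmp/tmpXXXX/example.classpath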
Example no. 51
0
  def _run_pytest(self, fail_fast, test_targets, workdirs):
    if not test_targets:
      return PytestResult.rc(0)

    # Absolute path to chrooted test file -> Path to original test file relative to the buildroot.
    sources_map = OrderedDict()
    for t in test_targets:
      for p in t.sources_relative_to_source_root():
        sources_map[os.path.join(self._source_chroot_path, p)] = os.path.join(t.target_base, p)

    if not sources_map:
      return PytestResult.rc(0)

    with self._test_runner(workdirs, test_targets, sources_map) as (pytest_binary,
                                                                    test_args,
                                                                    get_pytest_rootdir):
      # Validate that the user didn't provide any passthru args that conflict
      # with those we must set ourselves.
      for arg in self.get_passthru_args():
        if arg.startswith('--junitxml') or arg.startswith('--confcutdir'):
          raise TaskError('Cannot pass this arg through to pytest: {}'.format(arg))

      junitxml_path = workdirs.junitxml_path(*test_targets)

      # N.B. the `--confcutdir` here instructs pytest to stop scanning for conftest.py files at the
      # top of the buildroot. This prevents conftest.py files from outside the buildroot (e.g. in
      # users' home dirs) from leaking into pants test runs. See: https://github.com/pantsbuild/pants/issues/2726
      args = ['-c', pytest_binary.config_path,
              '--junitxml', junitxml_path,
              '--confcutdir', get_buildroot(),
              '--continue-on-collection-errors']
      if fail_fast:
        args.extend(['-x'])
      if self._debug:
        args.extend(['-s'])
      if self.get_options().colors:
        args.extend(['--color', 'yes'])

      if self.get_options().options:
        for opt in self.get_options().options:
          args.extend(safe_shlex_split(opt))
      args.extend(self.get_passthru_args())

      args.extend(test_args)
      args.extend(sources_map.keys())

      # We want to ensure our junit-xml-based reporting reflects this run, so remove results
      # from prior runs.
      if os.path.exists(junitxml_path):
        os.unlink(junitxml_path)

      with self._maybe_run_in_chroot():
        result = self._do_run_tests_with_args(pytest_binary.pex, args)

      # There was a problem prior to test execution preventing junit xml file creation so just let
      # the failure result bubble.
      if not os.path.exists(junitxml_path):
        return result

      pytest_rootdir = get_pytest_rootdir()
      failed_targets = self._get_failed_targets_from_junitxml(junitxml_path,
                                                              test_targets,
                                                              pytest_rootdir)

      def parse_error_handler(parse_error):
        # Simple error handler to pass to xml parsing function.
        raise TaskError('Error parsing xml file at {}: {}'
                        .format(parse_error.xml_path, parse_error.cause))

      all_tests_info = self.parse_test_info(junitxml_path, parse_error_handler,
                                            ['file', 'name', 'classname'])
      for test_name, test_info in all_tests_info.items():
        test_target = self._get_target_from_test(test_info, test_targets, pytest_rootdir)
        self.report_all_info_for_single_test(self.options_scope, test_target, test_name, test_info)

      return result.with_failed_targets(failed_targets)