def _isolation(self, all_targets):
  """Create an isolated per-run output dir and yield it alongside a report callback.

  Yields ``(output_dir, do_report, coverage)``; since this method yields it is
  presumably wrapped with ``@contextmanager`` — decorator not visible here, TODO confirm.
  On exit, the isolated run's contents are symlinked back into the stable workdir.

  :param all_targets: the targets for this test run; their identity names the run dir.
  :raises TaskError: if an unrecognized coverage processor is configured.
  """
  run_dir = '_runs'
  # One isolated directory per unique target set, wiped clean each run.
  output_dir = os.path.join(self.workdir, run_dir, Target.identify(all_targets))
  safe_mkdir(output_dir, clean=True)
  coverage = None
  options = self.get_options()
  # --coverage-open implies coverage even when --coverage itself is unset.
  if options.coverage or options.is_flagged('coverage_open'):
    coverage_processor = options.coverage_processor
    if coverage_processor == 'cobertura':
      settings = CoberturaTaskSettings.from_task(self, workdir=output_dir)
      coverage = Cobertura(settings)
    else:
      raise TaskError('unknown coverage processor {0}'.format(coverage_processor))
  # Other pants tasks may proceed while the (long-running) tests execute.
  self.context.release_lock()
  if coverage:
    # Classpath computation is deferred via lambda so it only runs if the
    # instrumenter actually needs it.
    coverage.instrument(
      targets=all_targets,
      compute_junit_classpath=lambda: self.classpath(all_targets),
      execute_java_for_targets=self.execute_java_for_coverage)

  def do_report(exc=None):
    # Emit coverage and/or HTML reports; `exc` carries a test-failure exception
    # (if any) through to the coverage reporter.
    if coverage:
      coverage.report(all_targets, self.execute_java_for_coverage, tests_failed_exception=exc)
    if self._html_report:
      html_file_path = JUnitHtmlReport().report(output_dir, os.path.join(output_dir, 'reports'))
      if self._open:
        desktop.ui_open(html_file_path)

  try:
    yield output_dir, do_report, coverage
  finally:
    # NB: Deposit of the "current" test output in the root workdir (.pants.d/test/junit) is a
    # defacto public API and so we implement that behavior here to maintain backwards
    # compatibility for non-pants report file consumers.
    # TODO(John Sirois): Deprecate this ~API and provide a stable directory solution for test
    # output: https://github.com/pantsbuild/pants/issues/3879
    lock_file = '.file_lock'
    # Serialize workdir mutation across concurrent pants processes.
    with OwnerPrintingInterProcessFileLock(os.path.join(self.workdir, lock_file)):
      # Kill everything except the isolated runs/ dir.
      for name in os.listdir(self.workdir):
        path = os.path.join(self.workdir, name)
        if name not in (run_dir, lock_file):
          if os.path.isdir(path):
            safe_rmtree(path)
          else:
            os.unlink(path)
      # Link all the isolated run/ dir contents back up to the stable workdir
      for name in os.listdir(output_dir):
        path = os.path.join(output_dir, name)
        os.symlink(path, os.path.join(self.workdir, name))
def __init__(self, *args, **kwargs):
  """Translate task options into the junit-runner command-line argument list.

  Builds ``self._args`` incrementally; append order matters since it becomes
  the runner's CLI. Also sets up the optional Cobertura coverage processor.

  :raises TaskError: if an unrecognized coverage processor is configured.
  :raises NotImplementedError: for --default-concurrency=PARALLEL_METHODS,
    which this version of the runner does not support.
  """
  super(JUnitRun, self).__init__(*args, **kwargs)
  options = self.get_options()
  self._coverage = None
  # --coverage-open implies coverage even when --coverage itself is unset.
  if options.coverage or options.is_flagged('coverage_open'):
    coverage_processor = options.coverage_processor
    if coverage_processor == 'cobertura':
      settings = CoberturaTaskSettings.from_task(self)
      self._coverage = Cobertura(settings)
    else:
      raise TaskError('unknown coverage processor {0}'.format(coverage_processor))
  self._tests_to_run = options.test
  self._batch_size = options.batch_size
  self._fail_fast = options.fail_fast
  self._working_dir = options.cwd or get_buildroot()
  self._strict_jvm_version = options.strict_jvm_version
  # Copy so option-derived flags don't mutate the task's base args.
  self._args = copy.copy(self.args)
  self._failure_summary = options.failure_summary
  if options.output_mode == 'ALL':
    self._args.append('-output-mode=ALL')
  elif options.output_mode == 'FAILURE_ONLY':
    self._args.append('-output-mode=FAILURE_ONLY')
  else:
    self._args.append('-output-mode=NONE')
  if self._fail_fast:
    self._args.append('-fail-fast')
  self._args.append('-outdir')
  self._args.append(self.workdir)
  if options.per_test_timer:
    self._args.append('-per-test-timer')
  # TODO(zundel): Simply remove when --default_parallel finishes deprecation
  if options.default_parallel:
    self._args.append('-default-parallel')
  if options.default_concurrency == junit_tests.CONCURRENCY_PARALLEL_BOTH:
    self.context.log.warn('--default-concurrency=PARALLEL_BOTH is experimental.')
    self._args.append('-default-parallel')
    self._args.append('-parallel-methods')
  elif options.default_concurrency == junit_tests.CONCURRENCY_PARALLEL_CLASSES:
    self._args.append('-default-parallel')
  elif options.default_concurrency == junit_tests.CONCURRENCY_PARALLEL_METHODS:
    self.context.log.warn('--default-concurrency=PARALLEL_METHODS is not implemented.')
    raise NotImplementedError()
  elif options.default_concurrency == junit_tests.CONCURRENCY_SERIAL:
    # TODO(zundel): we can't do anything here yet while the --default-parallel
    # option is in deprecation mode.
    pass
  self._args.append('-parallel-threads')
  self._args.append(str(options.parallel_threads))
  if options.test_shard:
    self._args.append('-test-shard')
    self._args.append(options.test_shard)
def _isolation(self, all_targets):
  """Create an isolated per-run output dir and yield it with a report callback.

  Yields ``(output_dir, do_report, coverage)``; the yield implies this is used
  as a ``@contextmanager`` — decorator not visible here, TODO confirm. On exit
  the isolated run's contents are symlinked back into the stable workdir.

  :param all_targets: targets for this run; their identity keys the run dir.
  :raises TaskError: if an unrecognized coverage processor is configured.
  """
  run_dir = '_runs'
  # One isolated directory per unique target set, wiped clean each run.
  output_dir = os.path.join(self.workdir, run_dir, Target.identify(all_targets))
  safe_mkdir(output_dir, clean=True)
  coverage = None
  options = self.get_options()
  # --coverage-open implies coverage even when --coverage itself is unset.
  if options.coverage or options.is_flagged('coverage_open'):
    coverage_processor = options.coverage_processor
    if coverage_processor == 'cobertura':
      settings = CoberturaTaskSettings.from_task(self, workdir=output_dir)
      coverage = Cobertura(settings)
    else:
      raise TaskError('unknown coverage processor {0}'.format(coverage_processor))
  # Allow other pants tasks to proceed while the tests run.
  self.context.release_lock()
  if coverage:
    # Classpath computation is deferred via lambda so it only runs if needed.
    coverage.instrument(targets=all_targets,
                        compute_junit_classpath=lambda: self.classpath(all_targets),
                        execute_java_for_targets=self.execute_java_for_coverage)

  def do_report(exc=None):
    # Emit coverage and/or HTML reports; `exc` forwards any test-failure
    # exception to the coverage reporter.
    if coverage:
      coverage.report(all_targets, self.execute_java_for_coverage, tests_failed_exception=exc)
    if self._html_report:
      html_file_path = JUnitHtmlReport().report(output_dir, os.path.join(output_dir, 'reports'))
      if self._open:
        desktop.ui_open(html_file_path)

  try:
    yield output_dir, do_report, coverage
  finally:
    # NB: Deposit of the "current" test output in the root workdir (.pants.d/test/junit) is a
    # defacto public API and so we implement that behavior here to maintain backwards
    # compatibility for non-pants report file consumers.
    # TODO(John Sirois): Deprecate this ~API and provide a stable directory solution for test
    # output: https://github.com/pantsbuild/pants/issues/3879
    lock_file = '.file_lock'
    # Serialize workdir mutation across concurrent pants processes.
    with OwnerPrintingInterProcessFileLock(os.path.join(self.workdir, lock_file)):
      # Kill everything except the isolated runs/ dir.
      for name in os.listdir(self.workdir):
        path = os.path.join(self.workdir, name)
        if name not in (run_dir, lock_file):
          if os.path.isdir(path):
            safe_rmtree(path)
          else:
            os.unlink(path)
      # Link all the isolated run/ dir contents back up to the stable workdir
      for name in os.listdir(output_dir):
        path = os.path.join(output_dir, name)
        os.symlink(path, os.path.join(self.workdir, name))
def _isolation(self, all_targets):
  """Create an isolated per-run output dir and yield it with report objects.

  Yields ``(output_dir, reports, coverage)`` where ``reports`` bundles the HTML
  and coverage reporters; null-object implementations (``NoJunitHtmlReport``,
  ``NoCoverage``) stand in when the corresponding feature is disabled. The
  yield implies ``@contextmanager`` usage — decorator not visible, TODO confirm.

  :param all_targets: targets for this run; their identity keys the run dir.
  """
  run_dir = '_runs'
  output_dir = os.path.join(self.workdir, run_dir, Target.identify(all_targets))
  # clean=False: reuse prior contents of this target-set's run dir.
  safe_mkdir(output_dir, clean=False)
  if self._html_report:
    junit_html_report = JUnitHtmlReport.create(output_dir, self.context.log)
  else:
    junit_html_report = NoJunitHtmlReport()
  # --coverage-open implies coverage even when --coverage itself is unset.
  if self.get_options().coverage or self.get_options().is_flagged('coverage_open'):
    settings = CoberturaTaskSettings.from_task(self, workdir=output_dir)
    coverage = Cobertura(settings, all_targets, self.execute_java_for_coverage)
  else:
    coverage = NoCoverage()
  reports = self.Reports(junit_html_report, coverage)
  # Allow other pants tasks to proceed while the tests run.
  self.context.release_lock()
  try:
    yield output_dir, reports, coverage
  finally:
    # NB: Deposit of the "current" test output in the root workdir (.pants.d/test/junit) is a
    # defacto public API and so we implement that behavior here to maintain backwards
    # compatibility for non-pants report file consumers.
    # TODO(John Sirois): Deprecate this ~API and provide a stable directory solution for test
    # output: https://github.com/pantsbuild/pants/issues/3879
    lock_file = '.file_lock'
    # Serialize workdir mutation across concurrent pants processes.
    with OwnerPrintingInterProcessFileLock(os.path.join(self.workdir, lock_file)):
      # Kill everything except the isolated `_runs/` dir.
      for name in os.listdir(self.workdir):
        path = os.path.join(self.workdir, name)
        if name not in (run_dir, lock_file):
          if os.path.isdir(path):
            safe_rmtree(path)
          else:
            os.unlink(path)
      # Link all the isolated run/ dir contents back up to the stable workdir
      for name in os.listdir(output_dir):
        path = os.path.join(output_dir, name)
        os.symlink(path, os.path.join(self.workdir, name))
def __init__(self, *args, **kwargs):
  """Translate task options into the junit-runner command-line argument list.

  Builds ``self._args`` incrementally; append order matters since it becomes
  the runner's CLI. Also sets up the optional Cobertura coverage processor.

  NOTE(review): the first output-mode branch fires whenever --suppress-output
  is false, so FAILURE_ONLY/NONE are reachable only while --suppress-output is
  set — presumably the deprecation-transition behavior; confirm before changing.

  :raises TaskError: if an unrecognized coverage processor is configured.
  """
  super(JUnitRun, self).__init__(*args, **kwargs)
  options = self.get_options()
  self._coverage = None
  # --coverage-open implies coverage even when --coverage itself is unset.
  if options.coverage or options.is_flagged('coverage_open'):
    coverage_processor = options.coverage_processor
    if coverage_processor == 'cobertura':
      # Use the from_task factory for consistency with the other settings
      # construction sites in this file, rather than the raw constructor.
      settings = CoberturaTaskSettings.from_task(self)
      self._coverage = Cobertura(settings)
    else:
      raise TaskError('unknown coverage processor {0}'.format(coverage_processor))
  self._tests_to_run = options.test
  self._batch_size = options.batch_size
  self._fail_fast = options.fail_fast
  self._working_dir = options.cwd or get_buildroot()
  self._strict_jvm_version = options.strict_jvm_version
  # Copy so option-derived flags don't mutate the task's base args.
  self._args = copy.copy(self.args)
  self._failure_summary = options.failure_summary
  if (not options.suppress_output) or options.output_mode == 'ALL':
    self._args.append('-output-mode=ALL')
  elif options.output_mode == 'FAILURE_ONLY':
    self._args.append('-output-mode=FAILURE_ONLY')
  else:
    self._args.append('-output-mode=NONE')
  if self._fail_fast:
    self._args.append('-fail-fast')
  self._args.append('-outdir')
  self._args.append(self.workdir)
  if options.per_test_timer:
    self._args.append('-per-test-timer')
  if options.default_parallel:
    self._args.append('-default-parallel')
  self._args.append('-parallel-threads')
  self._args.append(str(options.parallel_threads))
  if options.test_shard:
    self._args.append('-test-shard')
    self._args.append(options.test_shard)
  self._executor = None
def __init__(self, *args, **kwargs):
  """Translate task options into the junit-runner command-line argument list.

  Builds ``self._args`` incrementally; append order matters since it becomes
  the runner's CLI. Also sets up the optional Cobertura coverage processor.

  NOTE(review): the first output-mode branch fires whenever --suppress-output
  is false, so FAILURE_ONLY/NONE are reachable only while --suppress-output is
  set — presumably the deprecation-transition behavior; confirm before changing.

  :raises TaskError: if an unrecognized coverage processor is configured.
  """
  super(JUnitRun, self).__init__(*args, **kwargs)
  options = self.get_options()
  self._coverage = None
  # --coverage-open implies coverage even when --coverage itself is unset.
  if options.coverage or options.is_flagged('coverage_open'):
    coverage_processor = options.coverage_processor
    if coverage_processor == 'cobertura':
      settings = CoberturaTaskSettings.from_task(self)
      self._coverage = Cobertura(settings)
    else:
      raise TaskError('unknown coverage processor {0}'.format(coverage_processor))
  self._tests_to_run = options.test
  self._batch_size = options.batch_size
  self._fail_fast = options.fail_fast
  self._working_dir = options.cwd or get_buildroot()
  self._strict_jvm_version = options.strict_jvm_version
  # Copy so option-derived flags don't mutate the task's base args.
  self._args = copy.copy(self.args)
  self._failure_summary = options.failure_summary
  if (not options.suppress_output) or options.output_mode == 'ALL':
    self._args.append('-output-mode=ALL')
  elif options.output_mode == 'FAILURE_ONLY':
    self._args.append('-output-mode=FAILURE_ONLY')
  else:
    self._args.append('-output-mode=NONE')
  if self._fail_fast:
    self._args.append('-fail-fast')
  self._args.append('-outdir')
  self._args.append(self.workdir)
  if options.per_test_timer:
    self._args.append('-per-test-timer')
  if options.default_parallel:
    self._args.append('-default-parallel')
  self._args.append('-parallel-threads')
  self._args.append(str(options.parallel_threads))
  if options.test_shard:
    self._args.append('-test-shard')
    self._args.append(options.test_shard)
  self._executor = None
def __init__(self, *args, **kwargs):
  """Translate task options into the junit-runner command-line argument list.

  Builds ``self._args`` incrementally; append order matters since it becomes
  the runner's CLI. Also sets up the optional Cobertura coverage processor.

  :raises TaskError: if an unrecognized coverage processor is configured.
  """
  super(JUnitRun, self).__init__(*args, **kwargs)
  options = self.get_options()
  self._coverage = None
  # --coverage-open implies coverage even when --coverage itself is unset.
  if options.coverage or options.is_flagged("coverage_open"):
    coverage_processor = options.coverage_processor
    if coverage_processor == "cobertura":
      settings = CoberturaTaskSettings.from_task(self)
      self._coverage = Cobertura(settings)
    else:
      raise TaskError("unknown coverage processor {0}".format(coverage_processor))
  self._tests_to_run = options.test
  self._batch_size = options.batch_size
  self._fail_fast = options.fail_fast
  self._working_dir = options.cwd or get_buildroot()
  self._strict_jvm_version = options.strict_jvm_version
  # Copy so option-derived flags don't mutate the task's base args.
  self._args = copy.copy(self.args)
  self._failure_summary = options.failure_summary
  if options.output_mode == "ALL":
    self._args.append("-output-mode=ALL")
  elif options.output_mode == "FAILURE_ONLY":
    self._args.append("-output-mode=FAILURE_ONLY")
  else:
    self._args.append("-output-mode=NONE")
  if self._fail_fast:
    self._args.append("-fail-fast")
  self._args.append("-outdir")
  self._args.append(self.workdir)
  if options.per_test_timer:
    self._args.append("-per-test-timer")
  if options.default_parallel:
    self._args.append("-default-parallel")
  self._args.append("-parallel-threads")
  self._args.append(str(options.parallel_threads))
  if options.test_shard:
    self._args.append("-test-shard")
    self._args.append(options.test_shard)
def _isolation(self, all_targets):
  """Create an isolated per-run output dir and yield it with report objects.

  Yields ``(output_dir, reports, coverage)`` where ``reports`` bundles the HTML
  and coverage reporters; null-object implementations (``NoJunitHtmlReport``,
  ``NoCoverage``) stand in when the corresponding feature is disabled. The
  yield implies ``@contextmanager`` usage — decorator not visible, TODO confirm.

  :param all_targets: targets for this run; their identity keys the run dir.
  """
  run_dir = '_runs'
  output_dir = os.path.join(self.workdir, run_dir, Target.identify(all_targets))
  # clean=False: reuse prior contents of this target-set's run dir.
  safe_mkdir(output_dir, clean=False)
  if self._html_report:
    junit_html_report = JUnitHtmlReport.create(output_dir, self.context.log)
  else:
    junit_html_report = NoJunitHtmlReport()
  # --coverage-open implies coverage even when --coverage itself is unset.
  if self.get_options().coverage or self.get_options().is_flagged('coverage_open'):
    settings = CoberturaTaskSettings.from_task(self, workdir=output_dir)
    coverage = Cobertura(settings, all_targets, self.execute_java_for_coverage)
  else:
    coverage = NoCoverage()
  reports = self.Reports(junit_html_report, coverage)
  # Allow other pants tasks to proceed while the tests run.
  self.context.release_lock()
  try:
    yield output_dir, reports, coverage
  finally:
    # NB: Deposit of the "current" test output in the root workdir (.pants.d/test/junit) is a
    # defacto public API and so we implement that behavior here to maintain backwards
    # compatibility for non-pants report file consumers.
    # TODO(John Sirois): Deprecate this ~API and provide a stable directory solution for test
    # output: https://github.com/pantsbuild/pants/issues/3879
    lock_file = '.file_lock'
    # Serialize workdir mutation across concurrent pants processes.
    with OwnerPrintingInterProcessFileLock(os.path.join(self.workdir, lock_file)):
      # Kill everything except the isolated `_runs/` dir.
      for name in os.listdir(self.workdir):
        path = os.path.join(self.workdir, name)
        if name not in (run_dir, lock_file):
          if os.path.isdir(path):
            safe_rmtree(path)
          else:
            os.unlink(path)
      # Link all the isolated run/ dir contents back up to the stable workdir
      for name in os.listdir(output_dir):
        path = os.path.join(output_dir, name)
        os.symlink(path, os.path.join(self.workdir, name))
def __init__(self, *args, **kwargs):
  """Translate task options into the junit-runner command-line argument list.

  Builds ``self._args`` incrementally; append order matters since it becomes
  the runner's CLI. Handles the --default-parallel deprecation (it overrides
  --default-concurrency), the experimental runner flags, and optional
  Cobertura coverage.

  :raises TaskError: if an unrecognized coverage processor is configured.
  """
  super(JUnitRun, self).__init__(*args, **kwargs)
  options = self.get_options()
  self._coverage = None
  # --coverage-open implies coverage even when --coverage itself is unset.
  if options.coverage or options.is_flagged('coverage_open'):
    coverage_processor = options.coverage_processor
    if coverage_processor == 'cobertura':
      settings = CoberturaTaskSettings.from_task(self)
      self._coverage = Cobertura(settings)
    else:
      raise TaskError('unknown coverage processor {0}'.format(coverage_processor))
  self._tests_to_run = options.test
  self._batch_size = options.batch_size
  self._fail_fast = options.fail_fast
  self._working_dir = options.cwd or get_buildroot()
  self._strict_jvm_version = options.strict_jvm_version
  # Copy so option-derived flags don't mutate the task's base args.
  self._args = copy.copy(self.args)
  self._failure_summary = options.failure_summary
  # --open implies generating the HTML report it opens.
  self._open = options.open
  self._html_report = self._open or options.html_report
  if options.output_mode == 'ALL':
    self._args.append('-output-mode=ALL')
  elif options.output_mode == 'FAILURE_ONLY':
    self._args.append('-output-mode=FAILURE_ONLY')
  else:
    self._args.append('-output-mode=NONE')
  if self._fail_fast:
    self._args.append('-fail-fast')
  self._args.append('-outdir')
  self._args.append(self.workdir)
  if options.per_test_timer:
    self._args.append('-per-test-timer')
  if options.default_parallel:
    # TODO(zundel): Remove when --default_parallel finishes deprecation
    if options.default_concurrency != junit_tests.CONCURRENCY_SERIAL:
      self.context.log.warn('--default-parallel overrides --default-concurrency')
    # Deprecated flag maps to PARALLEL_CLASSES.
    self._args.append('-default-concurrency')
    self._args.append('PARALLEL_CLASSES')
  else:
    if options.default_concurrency == junit_tests.CONCURRENCY_PARALLEL_CLASSES_AND_METHODS:
      if not options.use_experimental_runner:
        self.context.log.warn(
          '--default-concurrency=PARALLEL_CLASSES_AND_METHODS is experimental, use --use-experimental-runner.')
      self._args.append('-default-concurrency')
      self._args.append('PARALLEL_CLASSES_AND_METHODS')
    elif options.default_concurrency == junit_tests.CONCURRENCY_PARALLEL_METHODS:
      if not options.use_experimental_runner:
        self.context.log.warn(
          '--default-concurrency=PARALLEL_METHODS is experimental, use --use-experimental-runner.')
      if options.test_shard:
        # NB(zundel): The experimental junit runner doesn't support test sharding natively. The
        # legacy junit runner allows both methods and classes to run in parallel with this option.
        self.context.log.warn(
          '--default-concurrency=PARALLEL_METHODS with test sharding will run classes in parallel too.')
      self._args.append('-default-concurrency')
      self._args.append('PARALLEL_METHODS')
    elif options.default_concurrency == junit_tests.CONCURRENCY_PARALLEL_CLASSES:
      self._args.append('-default-concurrency')
      self._args.append('PARALLEL_CLASSES')
    elif options.default_concurrency == junit_tests.CONCURRENCY_SERIAL:
      self._args.append('-default-concurrency')
      self._args.append('SERIAL')
  self._args.append('-parallel-threads')
  self._args.append(str(options.parallel_threads))
  if options.test_shard:
    self._args.append('-test-shard')
    self._args.append(options.test_shard)
  if options.use_experimental_runner:
    self.context.log.info('Using experimental junit-runner logic.')
    self._args.append('-use-experimental-runner')
def __init__(self, *args, **kwargs):
  """Translate task options into the junit-runner command-line argument list.

  Builds ``self._args`` incrementally; append order matters since it becomes
  the runner's CLI. Maps --default-concurrency values to -default-concurrency
  runner flags (PARALLEL_BOTH and PARALLEL_METHODS are experimental) and
  sets up optional Cobertura coverage.

  :raises TaskError: if an unrecognized coverage processor is configured.
  """
  super(JUnitRun, self).__init__(*args, **kwargs)
  options = self.get_options()
  self._coverage = None
  # --coverage-open implies coverage even when --coverage itself is unset.
  if options.coverage or options.is_flagged('coverage_open'):
    coverage_processor = options.coverage_processor
    if coverage_processor == 'cobertura':
      settings = CoberturaTaskSettings.from_task(self)
      self._coverage = Cobertura(settings)
    else:
      raise TaskError('unknown coverage processor {0}'.format(coverage_processor))
  self._tests_to_run = options.test
  self._batch_size = options.batch_size
  self._fail_fast = options.fail_fast
  self._working_dir = options.cwd or get_buildroot()
  self._strict_jvm_version = options.strict_jvm_version
  # Copy so option-derived flags don't mutate the task's base args.
  self._args = copy.copy(self.args)
  self._failure_summary = options.failure_summary
  if options.output_mode == 'ALL':
    self._args.append('-output-mode=ALL')
  elif options.output_mode == 'FAILURE_ONLY':
    self._args.append('-output-mode=FAILURE_ONLY')
  else:
    self._args.append('-output-mode=NONE')
  if self._fail_fast:
    self._args.append('-fail-fast')
  self._args.append('-outdir')
  self._args.append(self.workdir)
  if options.per_test_timer:
    self._args.append('-per-test-timer')
  # TODO(zundel): Simply remove when --default_parallel finishes deprecation
  if options.default_parallel:
    self._args.append('-default-parallel')
  if options.default_concurrency == junit_tests.CONCURRENCY_PARALLEL_BOTH:
    self.context.log.warn('--default-concurrency=PARALLEL_BOTH is experimental.')
    self._args.append('-default-concurrency')
    self._args.append('PARALLEL_BOTH')
  elif options.default_concurrency == junit_tests.CONCURRENCY_PARALLEL_CLASSES:
    self._args.append('-default-concurrency')
    self._args.append('PARALLEL_CLASSES')
  elif options.default_concurrency == junit_tests.CONCURRENCY_PARALLEL_METHODS:
    self.context.log.warn('--default-concurrency=PARALLEL_METHODS is experimental.')
    self._args.append('-default-concurrency')
    self._args.append('PARALLEL_METHODS')
  elif options.default_concurrency == junit_tests.CONCURRENCY_SERIAL:
    # TODO(zundel): we can't do anything here yet while the --default-parallel
    # option is in deprecation mode.
    pass
  self._args.append('-parallel-threads')
  self._args.append(str(options.parallel_threads))
  if options.test_shard:
    self._args.append('-test-shard')
    self._args.append(options.test_shard)
  if options.use_experimental_runner:
    self._args.append('-use-experimental-runner')
def __init__(self, *args, **kwargs):
  """Translate task options into the junit-runner command-line argument list.

  Builds ``self._args`` incrementally; append order matters since it becomes
  the runner's CLI. Handles the --default-parallel deprecation (it overrides
  --default-concurrency), the experimental runner flags, and optional
  Cobertura coverage.

  :raises TaskError: if an unrecognized coverage processor is configured.
  """
  super(JUnitRun, self).__init__(*args, **kwargs)
  options = self.get_options()
  self._coverage = None
  # --coverage-open implies coverage even when --coverage itself is unset.
  if options.coverage or options.is_flagged('coverage_open'):
    coverage_processor = options.coverage_processor
    if coverage_processor == 'cobertura':
      settings = CoberturaTaskSettings.from_task(self)
      self._coverage = Cobertura(settings)
    else:
      raise TaskError('unknown coverage processor {0}'.format(coverage_processor))
  self._tests_to_run = options.test
  self._batch_size = options.batch_size
  self._fail_fast = options.fail_fast
  self._working_dir = options.cwd or get_buildroot()
  self._strict_jvm_version = options.strict_jvm_version
  # Copy so option-derived flags don't mutate the task's base args.
  self._args = copy.copy(self.args)
  self._failure_summary = options.failure_summary
  # --open implies generating the HTML report it opens.
  self._open = options.open
  self._html_report = self._open or options.html_report
  if options.output_mode == 'ALL':
    self._args.append('-output-mode=ALL')
  elif options.output_mode == 'FAILURE_ONLY':
    self._args.append('-output-mode=FAILURE_ONLY')
  else:
    self._args.append('-output-mode=NONE')
  if self._fail_fast:
    self._args.append('-fail-fast')
  self._args.append('-outdir')
  self._args.append(self.workdir)
  if options.per_test_timer:
    self._args.append('-per-test-timer')
  if options.default_parallel:
    # TODO(zundel): Remove when --default_parallel finishes deprecation
    if options.default_concurrency != junit_tests.CONCURRENCY_SERIAL:
      self.context.log.warn('--default-parallel overrides --default-concurrency')
    # Deprecated flag maps to PARALLEL_CLASSES.
    self._args.append('-default-concurrency')
    self._args.append('PARALLEL_CLASSES')
  else:
    if options.default_concurrency == junit_tests.CONCURRENCY_PARALLEL_CLASSES_AND_METHODS:
      if not options.use_experimental_runner:
        self.context.log.warn(
          '--default-concurrency=PARALLEL_CLASSES_AND_METHODS is experimental, use --use-experimental-runner.'
        )
      self._args.append('-default-concurrency')
      self._args.append('PARALLEL_CLASSES_AND_METHODS')
    elif options.default_concurrency == junit_tests.CONCURRENCY_PARALLEL_METHODS:
      if not options.use_experimental_runner:
        self.context.log.warn(
          '--default-concurrency=PARALLEL_METHODS is experimental, use --use-experimental-runner.'
        )
      if options.test_shard:
        # NB(zundel): The experimental junit runner doesn't support test sharding natively. The
        # legacy junit runner allows both methods and classes to run in parallel with this option.
        self.context.log.warn(
          '--default-concurrency=PARALLEL_METHODS with test sharding will run classes in parallel too.'
        )
      self._args.append('-default-concurrency')
      self._args.append('PARALLEL_METHODS')
    elif options.default_concurrency == junit_tests.CONCURRENCY_PARALLEL_CLASSES:
      self._args.append('-default-concurrency')
      self._args.append('PARALLEL_CLASSES')
    elif options.default_concurrency == junit_tests.CONCURRENCY_SERIAL:
      self._args.append('-default-concurrency')
      self._args.append('SERIAL')
  self._args.append('-parallel-threads')
  self._args.append(str(options.parallel_threads))
  if options.test_shard:
    self._args.append('-test-shard')
    self._args.append(options.test_shard)
  if options.use_experimental_runner:
    self.context.log.info('Using experimental junit-runner logic.')
    self._args.append('-use-experimental-runner')