def __call__(self, args, executor: CommandExecutor):
    """Queue an eh-metrics investigation step on the given executor.

    With --compare-builds, diffs the recorded hits of two builds;
    otherwise diffs the local eh-metrics hits against a previous build
    (taken from the failed-metrics marker file or --build-number).
    """
    target_dir = self.fs.existing_dir(os.sep.join(['.', 'jmake_src', 'target']))

    def investigate_builds(log):
        # Parse the two build numbers to compare ("BN-FIRST,BN-SECOND").
        builds_to_compare = None if not args.compare_builds else args.compare_builds.split(',')
        # BUGFIX: also reject a None (falsy/empty --compare-builds) value --
        # previously that slipped past the guard and crashed on indexing below.
        if builds_to_compare is None or len(builds_to_compare) != 2:
            log.error('Argument compare_builds should be in format: BN-FIRST,BN-SECOND')
            log.error('Found: %s' % args.compare_builds)
            return Callable.failure
        ec, hits_dir_before = self.get_hits_for_build(log, target_dir, builds_to_compare[0])
        if ec != Callable.success:
            return ec
        ec, hits_dir_current = self.get_hits_for_build(log, target_dir, builds_to_compare[1])
        if ec != Callable.success:
            return ec
        metrics_to_compare = None if args.metrics is None else args.metrics.split(',')
        self.perform_diff(log, hits_dir_current, hits_dir_before, metrics_to_compare)
        return Callable.success

    def investigate_local(log):
        failed_metrics_file = os.sep.join(['target', '.jmake.eh-metrics.failed-metrics.txt'])
        if not self.fs.file_exists(failed_metrics_file):
            log.warn('The file %s doesn\'t exists. Did you run eh-metrics? Has it failed?' % failed_metrics_file)
        # With no explicit overrides, recover the metrics list and the build
        # number from the marker written by a failed eh-metrics run
        # (format per the split below: "commit:metrics-csv:build-number").
        if (not any([args.metrics, args.build_number, args.compare_builds])) and self.fs.file_exists(failed_metrics_file):
            commit_hash_unused, metrics_string, build_number_before = self.fs.read_lines(failed_metrics_file)[0].split(':')
            metrics_to_compare = metrics_string.split(',')
        else:
            metrics_to_compare = None if args.metrics is None else args.metrics.split(',')
            build_number_before = args.build_number
        if build_number_before is None:
            log.error('I don\'t know the build number to compare with, sorry :(. '
                      'Did you run eh-metrics? Has it failed? You can always give me a build '
                      'number using --build-number. But this message is unlikely to appear.')
            return Callable.failure
        hits_dir_current = os.sep.join([target_dir, 'eh-metrics-hits'])
        ec, hits_dir_before = self.get_hits_for_build(log, target_dir, build_number_before)
        if ec != Callable.success:
            return ec
        if not self.fs.dir_exists(hits_dir_current):
            log.error('Could not find current eh-metrics hits, did you run ./jmake eh-metrics?')
            return Callable.failure
        self.perform_diff(log, hits_dir_current, hits_dir_before, metrics_to_compare)
        return Callable.success

    # --compare-builds selects the two-build mode; anything else is local.
    if args.compare_builds is not None:
        executor.append(investigate_builds)
    else:
        executor.append(investigate_local)
class CommandExecutorTest(TestCase):
    """Tests ordering, failure propagation and post-execution behaviour of CommandExecutor."""

    def void(self, ret_code):
        # Executable that does nothing and reports ret_code.
        def run(logger):
            return ret_code
        return run

    def push(self, i, ret=0):
        # Executable that records its id in self.execs, then reports ret.
        def run(logger):
            self.execs.append(i)
            return ret
        return run

    def setUp(self):
        self.executor = CommandExecutor().set_logger(Logger().set_none())
        self.executor.perform_console_reset = False
        self.execs = []

    def test_empty_executables(self):
        self.executor.execute()

    def test_executables_should_be_executed_in_order(self):
        for ident in (1, 2, 3, 4, 5):
            self.executor.append(self.push(ident))
        self.assertEqual(Callable.success, self.executor.execute())
        self.assertListEqual([1, 2, 3, 4, 5], self.execs)

    def post_execution_test_with_return_code(self, ret_code):
        self.executor.append(self.void(Callable.success))
        self.executor.append(self.push(1, ret_code))
        self.executor.append_post(self.push(7, Callable.success))
        self.assertEqual(ret_code, self.executor.execute())
        self.assertListEqual([1, 7], self.execs)

    def test_post_execution_should_happen_when_zero_return_code(self):
        self.post_execution_test_with_return_code(Callable.success)

    def test_post_execution_should_happen_when_non_zero_return_code(self):
        self.post_execution_test_with_return_code(1)

    def test_post_execution_should_happen_always_do_not_proceed_return_code(self):
        self.post_execution_test_with_return_code(Callable.do_not_proceed)

    def test_execution_should_stop_on_error(self):
        # Main chain stops after the first failing executable (id 2);
        # all post-executables run until one of them fails (id 10).
        for ident, code in ((1, Callable.success), (2, 1), (3, Callable.success),
                            (4, 1), (5, Callable.success)):
            self.executor.append(self.push(ident, code))
        for ident, code in ((6, Callable.success), (8, Callable.success),
                            (10, 1), (12, Callable.success)):
            self.executor.append_post(self.push(ident, code))
        self.assertEqual(1, self.executor.execute())
        self.assertListEqual([1, 2, 6, 8, 10], self.execs)

    def test_execution_should_stop_on_do_not_proceed(self):
        for ident, code in ((4, Callable.success), (5, Callable.do_not_proceed),
                            (6, Callable.success)):
            self.executor.append_post(self.push(ident, code))
        for ident, code in ((1, Callable.success), (2, Callable.do_not_proceed),
                            (3, Callable.success)):
            self.executor.append(self.push(ident, code))
        self.assertEqual(Callable.do_not_proceed, self.executor.execute())
        self.assertListEqual([1, 2, 4, 5], self.execs)
def __call__(self, args, executor: CommandExecutor):
    """Queue an eh-metrics investigation step on the given executor.

    With --compare-builds, diffs the recorded hits of two builds;
    otherwise diffs the local eh-metrics hits against a previous build
    (taken from the failed-metrics marker file or --build-number).
    """
    target_dir = self.fs.existing_dir(os.sep.join(['.', 'jmake_src', 'target']))

    def investigate_builds(log):
        # Parse the two build numbers to compare ("BN-FIRST,BN-SECOND").
        builds_to_compare = None if not args.compare_builds else args.compare_builds.split(',')
        # BUGFIX: also reject a None (falsy/empty --compare-builds) value --
        # previously that slipped past the guard and crashed on indexing below.
        if builds_to_compare is None or len(builds_to_compare) != 2:
            log.error('Argument compare_builds should be in format: BN-FIRST,BN-SECOND')
            log.error('Found: %s' % args.compare_builds)
            return Callable.failure
        ec, hits_dir_before = self.get_hits_for_build(log, target_dir, builds_to_compare[0])
        if ec != Callable.success:
            return ec
        ec, hits_dir_current = self.get_hits_for_build(log, target_dir, builds_to_compare[1])
        if ec != Callable.success:
            return ec
        metrics_to_compare = None if args.metrics is None else args.metrics.split(',')
        self.perform_diff(log, hits_dir_current, hits_dir_before, metrics_to_compare)
        return Callable.success

    def investigate_local(log):
        failed_metrics_file = os.sep.join(['target', '.jmake.eh-metrics.failed-metrics.txt'])
        if not self.fs.file_exists(failed_metrics_file):
            log.warn('The file %s doesn\'t exists. Did you run eh-metrics? Has it failed?' % failed_metrics_file)
        # With no explicit overrides, recover the metrics list and the build
        # number from the marker written by a failed eh-metrics run
        # (format per the split below: "commit:metrics-csv:build-number").
        if (not any([args.metrics, args.build_number, args.compare_builds])) and self.fs.file_exists(failed_metrics_file):
            commit_hash_unused, metrics_string, build_number_before = self.fs.read_lines(failed_metrics_file)[0].split(':')
            metrics_to_compare = metrics_string.split(',')
        else:
            metrics_to_compare = None if args.metrics is None else args.metrics.split(',')
            build_number_before = args.build_number
        if build_number_before is None:
            log.error('I don\'t know the build number to compare with, sorry :(. '
                      'Did you run eh-metrics? Has it failed? You can always give me a build '
                      'number using --build-number. But this message is unlikely to appear.')
            return Callable.failure
        hits_dir_current = os.sep.join([target_dir, 'eh-metrics-hits'])
        ec, hits_dir_before = self.get_hits_for_build(log, target_dir, build_number_before)
        if ec != Callable.success:
            return ec
        if not self.fs.dir_exists(hits_dir_current):
            log.error('Could not find current eh-metrics hits, did you run ./jmake eh-metrics?')
            return Callable.failure
        self.perform_diff(log, hits_dir_current, hits_dir_before, metrics_to_compare)
        return Callable.success

    # --compare-builds selects the two-build mode; anything else is local.
    if args.compare_builds is not None:
        executor.append(investigate_builds)
    else:
        executor.append(investigate_local)
class CommandExecutorTest(TestCase):
    """Tests ordering, failure propagation and post-execution behaviour of CommandExecutor."""

    def void(self, ret_code):
        # Executable that does nothing and reports ret_code.
        def run(logger):
            return ret_code
        return run

    def push(self, i, ret=0):
        # Executable that records its id in self.execs, then reports ret.
        def run(logger):
            self.execs.append(i)
            return ret
        return run

    def setUp(self):
        self.executor = CommandExecutor().set_logger(Logger().set_none())
        self.executor.perform_console_reset = False
        self.execs = []

    def test_empty_executables(self):
        self.executor.execute()

    def test_executables_should_be_executed_in_order(self):
        for ident in (1, 2, 3, 4, 5):
            self.executor.append(self.push(ident))
        self.assertEqual(Callable.success, self.executor.execute())
        self.assertListEqual([1, 2, 3, 4, 5], self.execs)

    def post_execution_test_with_return_code(self, ret_code):
        self.executor.append(self.void(Callable.success))
        self.executor.append(self.push(1, ret_code))
        self.executor.append_post(self.push(7, Callable.success))
        self.assertEqual(ret_code, self.executor.execute())
        self.assertListEqual([1, 7], self.execs)

    def test_post_execution_should_happen_when_zero_return_code(self):
        self.post_execution_test_with_return_code(Callable.success)

    def test_post_execution_should_happen_when_non_zero_return_code(self):
        self.post_execution_test_with_return_code(1)

    def test_post_execution_should_happen_always_do_not_proceed_return_code(self):
        self.post_execution_test_with_return_code(Callable.do_not_proceed)

    def test_execution_should_stop_on_error(self):
        # Main chain stops after the first failing executable (id 2);
        # all post-executables run until one of them fails (id 10).
        for ident, code in ((1, Callable.success), (2, 1), (3, Callable.success),
                            (4, 1), (5, Callable.success)):
            self.executor.append(self.push(ident, code))
        for ident, code in ((6, Callable.success), (8, Callable.success),
                            (10, 1), (12, Callable.success)):
            self.executor.append_post(self.push(ident, code))
        self.assertEqual(1, self.executor.execute())
        self.assertListEqual([1, 2, 6, 8, 10], self.execs)

    def test_execution_should_stop_on_do_not_proceed(self):
        for ident, code in ((4, Callable.success), (5, Callable.do_not_proceed),
                            (6, Callable.success)):
            self.executor.append_post(self.push(ident, code))
        for ident, code in ((1, Callable.success), (2, Callable.do_not_proceed),
                            (3, Callable.success)):
            self.executor.append(self.push(ident, code))
        self.assertEqual(Callable.do_not_proceed, self.executor.execute())
        self.assertListEqual([1, 2, 4, 5], self.execs)
def __call__(self, args, executor: CommandExecutor):
    """Queue the eh-metrics pipeline: preflight git checks, log cleanup,
    commit recording, metric collection/reporting, and (with --note)
    pushing the results as git notes."""

    def check_remotes(log):
        # A fresh clone may have no remote configured yet.
        if not self.git.get_remotes():
            self.set_remote(log)
        return Callable.success

    executor.append(check_remotes)

    if not args.fast:
        def fetch_all_notes(log):
            if self.git.fetch_notes('*') == 0:
                return Callable.success
            log.error('FATAL: git: Failure to fetch notes from origin.')
            return Callable.do_not_proceed

        executor.append(fetch_all_notes)

    if args.branch:
        def branch_check(logger):
            # Refuse to run against the wrong branch.
            checked_out = self.git.current_branch()
            if checked_out == args.branch:
                return Callable.success
            logger.error('Branch check failed. You seem to be on "%s"; switch to "%s" first!'
                         % (checked_out, args.branch))
            return Callable.do_not_proceed

        executor.append(branch_check)

    def check_workspace(log: Logger):
        # Only inspect the workspace when notes are requested or the run is
        # interactive; short-circuit keeps the is_clean_workspace call order.
        if (args.note or not args.non_interactive) and not self.git.is_clean_workspace():
            if args.note:
                log.error('I cannot write notes with local changes. Commit your work first, so that notes can '
                          'be attached to your commit.')
                return Callable.do_not_proceed
            log.warn('You have uncommitted changes - if engineering health metrics are increased, you will '
                     'not be able to add an exclusion note for the build.')
        return Callable.success

    executor.append(check_workspace)

    def clean_logs(log: Logger):
        # Start each run from an empty metrics log directory.
        if self.fs.dir_exists(MetricsCollector.log_directory):
            log.debug('Removing directory: %s' % MetricsCollector.log_directory)
            self.fs.remove_dir(MetricsCollector.log_directory)
        return Callable.success

    executor.append(clean_logs)

    def record_commit(log: Logger):
        # Remember which commit this run measured.
        commit_marker = os.sep.join([self.fs.existing_dir(MetricsCollector.log_directory), '.commit'])
        self.fs.write_lines(commit_marker, [self.git.current_commit()])
        return Callable.success

    executor.append(record_commit)

    metrics = DataBean()
    modules_descriptions = [
        JIRADirectoryScanModulesDescription(args.fast, file_utils=self.fs),
        BundledPluginsModulesDescription(args.fast),
        JIRATestsModulesDescription(args.fast),
    ]
    executor.append(self.metrics_processor.process_metrics(args, modules_descriptions, metrics))
    executor.append(self.metrics_processor.generate_report(metrics, self.fs, self.git))
    executor.append(self.metrics_processor.check_values(args, metrics, self.git, self.fs))

    if args.note:
        executor.append(lambda log: self.git.set_user('jmake stats runner', '*****@*****.**'))
        executor.append(lambda log: self.git.put_notes(
            self.json_writer.as_str(metrics), STATS_REF_NAME, 'HEAD', True))
        executor.append(lambda log: self.git.push_notes(STATS_REF_NAME))
def __call__(self, args, executor: CommandExecutor):
    """Queue the eh-metrics pipeline: preflight git checks, log cleanup,
    commit recording, metric collection/reporting, and (with --note)
    pushing the results as git notes."""

    def check_remotes(log):
        # A fresh clone may have no remote configured yet.
        if not self.git.get_remotes():
            self.set_remote(log)
        return Callable.success

    executor.append(check_remotes)

    if not args.fast:
        def fetch_all_notes(log):
            if self.git.fetch_notes('*') == 0:
                return Callable.success
            log.error('FATAL: git: Failure to fetch notes from origin.')
            return Callable.do_not_proceed

        executor.append(fetch_all_notes)

    if args.branch:
        def branch_check(logger):
            # Refuse to run against the wrong branch.
            checked_out = self.git.current_branch()
            if checked_out == args.branch:
                return Callable.success
            logger.error('Branch check failed. You seem to be on "%s"; switch to "%s" first!'
                         % (checked_out, args.branch))
            return Callable.do_not_proceed

        executor.append(branch_check)

    def check_workspace(log: Logger):
        # Only inspect the workspace when notes are requested or the run is
        # interactive; short-circuit keeps the is_clean_workspace call order.
        if (args.note or not args.non_interactive) and not self.git.is_clean_workspace():
            if args.note:
                log.error('I cannot write notes with local changes. Commit your work first, so that notes can '
                          'be attached to your commit.')
                return Callable.do_not_proceed
            log.warn('You have uncommitted changes - if engineering health metrics are increased, you will '
                     'not be able to add an exclusion note for the build.')
        return Callable.success

    executor.append(check_workspace)

    def clean_logs(log: Logger):
        # Start each run from an empty metrics log directory.
        if self.fs.dir_exists(MetricsCollector.log_directory):
            log.debug('Removing directory: %s' % MetricsCollector.log_directory)
            self.fs.remove_dir(MetricsCollector.log_directory)
        return Callable.success

    executor.append(clean_logs)

    def record_commit(log: Logger):
        # Remember which commit this run measured.
        commit_marker = os.sep.join([self.fs.existing_dir(MetricsCollector.log_directory), '.commit'])
        self.fs.write_lines(commit_marker, [self.git.current_commit()])
        return Callable.success

    executor.append(record_commit)

    metrics = DataBean()
    modules_descriptions = [
        JIRADirectoryScanModulesDescription(args.fast, file_utils=self.fs),
        BundledPluginsModulesDescription(args.fast),
        JIRATestsModulesDescription(args.fast),
    ]
    executor.append(self.metrics_processor.process_metrics(args, modules_descriptions, metrics))
    executor.append(self.metrics_processor.generate_report(metrics, self.fs, self.git))
    executor.append(self.metrics_processor.check_values(args, metrics, self.git, self.fs))

    if args.note:
        executor.append(lambda log: self.git.set_user('jmake stats runner', '*****@*****.**'))
        executor.append(lambda log: self.git.put_notes(
            self.json_writer.as_str(metrics), STATS_REF_NAME, 'HEAD', True))
        executor.append(lambda log: self.git.push_notes(STATS_REF_NAME))