def test_sh_captures_output_in_correct_order_with_fixed_timing(self):
    """Verify that stdout and stderr lines arrive in the captured stream in the
    order the shell emitted them, with a fixed sleep between each print

    Notice: Test is interacting with shell, to reduce possibility of weird
    behavior it is retried multiple times
    """

    for attempt in range(1, 30):
        self.maxDiff = None  # unittest setting
        task = InitTask()
        task._io = IO()

        capture_io = IO()
        captured = StringIO()

        with capture_io.capture_descriptors(stream=captured, enable_standard_out=False):
            task.sh('''
                set +e;
                sleep 0.05;
                echo "FIRST";
                sleep 0.05;
                echo "SECOND" >&2;
                sleep 0.05;
                echo "THIRD";
            ''')

        self.assertEqual("FIRST\r\nSECOND\r\nTHIRD\r\n", captured.getvalue())
def test_sh_captures_output_in_correct_order_with_various_timing(self):
    """Verify that stdout and stderr lines are mixed into the captured output
    in exactly the order the shell script defines them

    Notice: Test is interacting with shell, to reduce possibility of weird
    behavior it is retried multiple times
    """

    for attempt in range(1, 100):
        self.maxDiff = None  # unittest setting
        task = InitTask()
        task._io = IO()

        capture_io = IO()
        captured = StringIO()

        with capture_io.capture_descriptors(stream=captured, enable_standard_out=False):
            task.sh('''
                set +e;
                sleep 0.05;
                echo "FIRST";
                sleep 0.05;
                echo "SECOND" >&2;
                echo "THIRD";
                echo "FOURTH" >&2;
                echo "FIFTH" >&2;
                echo "SIXTH";
                echo "SEVENTH" >&2;
                echo "NINETH";
                echo "TENTH";
            ''')

        self.assertEqual(
            "FIRST\r\nSECOND\r\nTHIRD\r\nFOURTH\r\nFIFTH\r\nSIXTH\r\nSEVENTH\r\nNINETH\r\nTENTH\r\n",
            captured.getvalue())
def test_full_command_is_shown_only_in_debug_output_level(self):
    """Check that sh() reveals the full generated bash script only when
    '-rl debug' is used; in normal mode only the user's command is shown

    :return:
    """

    task = InitTask()
    task._io = IO()

    capture_io = IO()
    buffer = StringIO()

    with capture_io.capture_descriptors(stream=buffer, enable_standard_out=False):
        # CASE 1: normal output level hides the generated wrapper script
        with self.subTest('NORMAL output level'):
            try:
                task.sh('python3 -m rkd :sh -c "exit 5"')
            except subprocess.CalledProcessError as e:
                self.assertIn(
                    "Command 'exit 5' returned non-zero exit status 5.",
                    e.output)

        # CASE 2: debug output level exposes the full bash preamble
        with self.subTest('DEBUG output level'):
            try:
                task.sh('python3 -m rkd -rl debug :sh -c "exit 5"')
            except subprocess.CalledProcessError as e:
                self.assertIn(
                    "Command '#!/bin/bash -eopipefail \r\nset -euo pipefail;" +
                    " exit 5' returned non-zero exit status 5.",
                    e.output)
def run_and_capture_output(self, argv: list, verbose: bool = False) -> Tuple[str, int]:
    """
    Run task(s) and capture output + exit code.
    Whole RKD from scratch will be bootstrapped there.

    Example usage:
        full_output, exit_code = self.run_and_capture_output([':tasks'])

    :param list argv: List of tasks, arguments, commandline switches
    :param bool verbose: Print all output also to stdout
    :return:
    """

    capture_io = IO()
    buffer = StringIO()
    exit_code = 0

    try:
        with capture_io.capture_descriptors(stream=buffer, enable_standard_out=verbose):
            application = RiotKitDoApplication()
            application.main(['test_functional.py'] + argv)
    except SystemExit as err:
        # the application signals its result via SystemExit; restore stdout first
        self._restore_standard_out()
        exit_code = err.code

    return buffer.getvalue(), exit_code
def test_sh_producing_large_outputs(self):
    """Process a few megabytes of output and assert that:

    - It will consume not more than 16 megabytes (assuming also output
      capturing in tests by io.capture_descriptors()) — matches the
      assertion below, the previous docstring said 10 MB while 16 MB was asserted
    - The whole output would be printed correctly
    """

    self.maxDiff = None  # unittest setting
    task = InitTask()
    task._io = IO()
    io = IO()
    out = StringIO()

    text = "History isn't made by kings and politicians, it is made by us."

    # RSS in megabytes before producing the output
    memory_before = psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024

    with io.capture_descriptors(stream=out, enable_standard_out=False):
        task.py('''
for i in range(0, 1024 * 128):
    print("''' + text + '''")
''')

    iterations = 1024 * 128
    text_with_newlines_length = len(text) + 2  # \r + \n
    memory_after = psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024

    # every line must have been captured in full
    self.assertEqual(iterations * text_with_newlines_length, len(out.getvalue()))
    self.assertLessEqual(memory_after - memory_before, 16,
                         msg='Expected less than 16 megabytes of memory usage')
def from_config(cls, name: str, config: dict, io: IO) -> 'ConfiguredCheck':
    """Build a ConfiguredCheck from a raw configuration dictionary

    :param str name: Check name (used in log messages and as identifier)
    :param dict config: Raw check configuration (type, input, hooks, quiet_periods, results_cache_time)
    :param IO io: Logger
    :raises ConfigurationException: When quiet_periods is not a list or has invalid entries
    """

    quiet_periods = config.get('quiet_periods', [])

    if not isinstance(quiet_periods, list):
        raise ConfigurationException.from_quiet_periods_should_be_a_list_error()

    if quiet_periods:
        for period in quiet_periods:
            period: dict

            # each quiet period must define both a start time and a duration
            if "starts" not in period or "duration" not in period:
                raise ConfigurationException.from_quiet_periods_invalid_structure()

    # cache life time is disabled when unset or falsy (None, 0, empty string)
    if "results_cache_time" not in config or not config.get('results_cache_time'):
        io.debug('results_cache_time not configured for {}'.format(name))

    # Bugfix: previously `int(config.get('results_cache_time'))` was called whenever
    # the key was present, raising TypeError for an explicit `results_cache_time: null`.
    # A falsy value now consistently means "cache disabled" (None), as the debug branch above implies.
    results_cache_time = int(config['results_cache_time']) if config.get('results_cache_time') else None

    return cls(name=name,
               check_type=config.get('type'),
               description=config.get('description', ''),
               input_variables=config.get('input', {}),
               hooks=config.get('hooks', {}),
               quiet_periods=quiet_periods,
               results_cache_time=results_cache_time,
               io=io)
def test_functional_hooks_are_executed_when_exists_and_files_with_extension_only_are_skipped(self):
    """Given we have an example hooks in pre-upgrade/whoami.sh and in post-upgrade/history.sh
    And we try to run those hooks using hooks_executed()
    Then we will see output produced by those scripts
    And .dotfiles will be ignored
    """

    self._prepare_test_data()

    buffer = StringIO()
    hooks_capturing_io = IO()

    task = TestTask()
    task._io = BufferedSystemIO()
    ctx = ExecutionContext(TaskDeclaration(task), args={}, env={})

    with hooks_capturing_io.capture_descriptors(stream=buffer, enable_standard_out=True):
        with task.hooks_executed(ctx, 'upgrade'):
            pass

    hooks_output = buffer.getvalue()
    task_log = task._io.get_value()

    self.assertIn('>> This is a whoami.sh hook, test:', hooks_output,
                  msg='Expected pre-upgrade hook to be ran')
    self.assertIn('25 June 1978 the rainbow flag was first flown', hooks_output,
                  msg='Expected post-upgrade hook to be ran')
    self.assertIn('pre-upgrade/whoami.sh', task_log)
    self.assertNotIn('.gitkeep', task_log)
def test_one_failed_step_is_preventing_next_steps_from_execution_and_result_is_marked_as_failure(self):
    """Check the correctness of error handling"""

    io = IO()
    str_io = StringIO()
    buffered = BufferedSystemIO()

    task_declaration = get_test_declaration()
    BasicTestingCase.satisfy_task_dependencies(task_declaration.get_task_to_execute(), io=buffered)

    ctx = ExecutionContext(task_declaration)
    executor = DeclarativeExecutor()

    # three steps: the middle one (bash) exits with 1 and must stop the chain
    executor.add_step('python', 'this.io().outln("Peter Kropotkin"); return True',
                      task_name=':first', rkd_path='', envs={})
    executor.add_step('bash', 'echo "Buenaventura Durruti"; exit 1',
                      task_name=':second', rkd_path='', envs={})
    executor.add_step('python', 'this.io().outln("This one will not show"); return True',
                      task_name=':third', rkd_path='', envs={})

    with io.capture_descriptors(target_files=[], stream=str_io, enable_standard_out=False):
        final_result = executor.execute_steps_one_by_one(ctx, task_declaration.get_task_to_execute())

    combined_output = str_io.getvalue() + buffered.get_value()

    self.assertIn('Peter Kropotkin', combined_output)
    self.assertIn('Buenaventura Durruti', combined_output)
    self.assertNotIn('This one will not show', combined_output)
    self.assertEqual(False, final_result)
def test_sh_rkd_in_rkd_shows_first_lines_on_error(self):
    """Bugfix: sh() was losing first line(s) of output, when exception was raised

    Notice: Test is interacting with shell, to reduce possibility of weird
    behavior it is retried multiple times
    """

    for attempt in range(1, 5):
        # exercise both plain stdout and redirection to stderr
        for std_redirect in ['', '>&2']:
            task = InitTask()
            task._io = IO()

            capture_io = IO()
            captured = StringIO()

            with capture_io.capture_descriptors(stream=captured, enable_standard_out=False):
                try:
                    task.sh('''
                        python3 -m rkd --silent :sh -c 'echo "Bartolomeo Vanzetti" ''' + std_redirect + '''; exit 127'
                    ''')
                except subprocess.CalledProcessError:
                    # exit 127 is expected — we only care about the captured output
                    pass

            self.assertIn(
                'Bartolomeo Vanzetti',
                captured.getvalue(),
                msg='Expected that output will be shown for std_redirect=%s' % std_redirect)
def test_bash_case_verify_env_variables_are_present(self):
    """Bash callable test: referencing an undeclared variable must surface
    bash's 'unbound variable' error in the captured output"""

    capture_io = IO()
    captured = StringIO()

    with capture_io.capture_descriptors(stream=captured, enable_standard_out=False):
        self._create_callable_tester('echo "Boolean: ${ARG_TEST}, Text: ${ARG_MESSAGE}"', language='bash')

    self.assertIn('ARG_TEST: unbound variable', captured.getvalue())
def test_get_log_level_raises_exception_on_unset_level(self):
    """Check DEFAULT error level and validation of not set error logging"""

    io = IO()

    # the default level is 'info'
    self.assertEqual('info', io.get_log_level())

    # clearing the level makes the getter raise
    io.log_level = None
    self.assertRaises(Exception, lambda: io.get_log_level())
def _create_runner() -> Runner:
    """Build a Runner wired to the example healthchecks and bundled infracheck directories"""

    search_paths = [
        TESTS_PATH + '/../example/healthchecks',
        TESTS_PATH + '/infracheck/'
    ]

    return Runner(
        search_paths,
        config_loader=ConfigLoader(search_paths, IO()),
        repository=Repository(search_paths),
        io=IO()
    )
def test_quotes_are_escaped_in_shell_commands(self):
    """Double quotes inside an environment variable value must survive
    the shell round-trip unmodified"""

    task = InitTask()
    task._io = IO()

    capture_io = IO()
    captured = StringIO()

    with capture_io.capture_descriptors(stream=captured, enable_standard_out=False):
        task.sh('echo ${NAME}', env={'NAME': 'Ferdinando "Nicola" Sacco'})

    self.assertIn('Ferdinando "Nicola" Sacco', captured.getvalue())
def test_inherit_silent(self):
    """Silent mode inheritance from SystemIO"""

    parent_io = SystemIO()
    parent_io.silent = True

    child_io = IO()
    child_io.inherit_silent(parent_io)

    self.assertTrue(child_io.is_silent())
def execute_mocked_task_and_get_output(self, task: TaskInterface, args=None, env=None) -> str:
    """
    Run a single task, capturing it's output in a simplified way.
    There is no whole RKD bootstrapped in this operation.

    :param TaskInterface task:
    :param dict args:
    :param dict env:
    :return:
    """

    args = {} if args is None else args
    env = {} if env is None else env

    ctx = ApplicationContext([], [], '')
    ctx.io = BufferedSystemIO()

    task.internal_inject_dependencies(
        io=ctx.io,
        ctx=ctx,
        executor=OneByOneTaskExecutor(ctx=ctx),
        temp_manager=TempManager()
    )

    # the task runs with the process environment extended by the caller's overrides
    merged_env = deepcopy(os.environ)
    merged_env.update(env)

    r_io = IO()
    str_io = StringIO()

    # every provided argument is registered with an empty-string default
    defined_args = {name: {'default': ''} for name in args}

    with r_io.capture_descriptors(enable_standard_out=True, stream=str_io):
        try:
            # noinspection PyTypeChecker
            result = task.execute(ExecutionContext(
                TaskDeclaration(task),
                args=args,
                env=merged_env,
                defined_args=defined_args
            ))
        except Exception:
            # on failure: dump everything captured so far, then re-raise
            self._restore_standard_out()
            print(ctx.io.get_value() + "\n" + str_io.getvalue())
            raise

    return ctx.io.get_value() + "\n" + str_io.getvalue() + "\nTASK_EXIT_RESULT=" + str(result)
def test_bash_successful_case(self):
    """ Bash callable test: Successful case """

    capture_io = IO()
    captured = StringIO()

    with capture_io.capture_descriptors(stream=captured, enable_standard_out=False):
        self._create_callable_tester('python --version', language='bash')

    output = captured.getvalue()

    self.assertIn("Python", output)
    self.assertTrue(output, msg='python --version should result with a True')
def test_ps(self):
    """Simply check if docker-compose ps can be executed, if the standard switches are correct"""

    buffer = StringIO()
    capture_io = IO()

    with capture_io.capture_descriptors(stream=buffer, enable_standard_out=False):
        driver = self._get_prepared_compose_driver()
        driver.ps([])

    output = buffer.getvalue()

    # docker-compose ps table header contains these columns
    self.assertIn('Name', output)
    self.assertIn('Ports', output)
def _notify_hooks(hooks: dict, exit_status: bool, io: IO) -> str:
    """Execute configured shell hooks for a check's up/down transition

    :param dict hooks: Hook configuration; 'on_each_up' / 'on_each_down' keys map to lists of shell commands
    :param bool exit_status: True selects 'on_each_up' hooks, False selects 'on_each_down'
    :param IO io: Logger
    :return: Concatenated, stripped stdout of all successfully executed hook commands
    :raises RunnerException: When the configured hooks entry is not a list
    """

    mapping = {True: 'on_each_up', False: 'on_each_down'}
    out = ""

    if exit_status in mapping and mapping[exit_status] in hooks:
        commands = hooks[mapping[exit_status]]

        # idiomatic type check (was: type(commands).__name__ != 'list');
        # isinstance() also accepts list subclasses, which is backward-compatible
        if not isinstance(commands, list):
            raise RunnerException.from_expected_list_of_hooks(mapping[exit_status])

        for command in commands:
            io.debug('Triggering hook command "{}"'.format(command))

            try:
                # 30-minute ceiling per hook command
                out += subprocess.check_output(
                    command, shell=True, timeout=1800).decode('utf-8').strip()
            except subprocess.CalledProcessError as e:
                io.error(
                    'Cannot execute hook command "{cmd}". Error: {err}'.
                    format(cmd=command, err=str(e.output) + str(e.stderr)))
            except subprocess.TimeoutExpired:
                io.error(
                    'Cannot execute hook command "{cmd}. Timed out while executing command"'
                    .format(cmd=command))
            except Exception:
                # hooks are best-effort: any other failure is logged, not propagated
                io.error(
                    'Cannot execute hook command "{cmd}. Unknown error"'.
                    format(cmd=command))

    return out
def test_io_output_processing_does_not_break_on_exception_in_processing_method_when_error_level_is_not_debug(self):
    """
    Verify error handling - when level is not "debug", then no any error should be present from processors
    because we cannot mess with the I/O that is written to the console
    """

    mocked_output = []

    io = IO()
    io.set_log_level('info')
    io._stderr = io._stdout = lambda txt: mocked_output.append(txt)

    def failing_processor(txt, origin):
        raise Exception('Hello')

    io.add_output_processor(failing_processor)

    # despite the raising processor, the message must still reach the output
    io.info(
        '26 Jan 1932 4000 mainly Jewish tenants in New York attacked police reserve forces who were trying ' +
        'to evict 17 tenants. The mob was led by women on rooftops who directed the action with megaphones ' +
        'and hurled missiles at police.')

    self.assertIn('were trying to evict 17 tenants', str(mocked_output))
def test_io_output_processing_is_raising_exception_when_invalid_type_returned_in_debug_mode(self):
    """
    Error handling - when level is "debug", then we should be raising exceptions

    Variant: returned invalid type (not a STR - returned INT)
    """

    mocked_output = []

    io = IO()
    io.set_log_level('debug')
    io._stderr = io._stdout = lambda txt: mocked_output.append(txt)

    def int_returning_processor(txt, origin):
        return 123456  # wrong type on purpose: processors must return str

    # noinspection PyTypeChecker
    io.add_output_processor(int_returning_processor)

    with self.assertRaises(Exception):
        io.info(
            'Face the facts, no thanks, "Your passport lacks stamps Please go back for war, ' +
            'torture and the death camps" Join the ranks, labeled as illegal people, Cursed by those who ' +
            'suck blood from golden calf’s nipple')
def test_io_capturing_is_restoring_both_stdout_and_stderr_to_previous_state(self):
    """Assert that capture_descriptors() restores sys.stdout and sys.stderr to
    original state after mocking them for output capturing"""

    io = IO()

    original_stdout = sys.stdout
    original_stderr = sys.stderr

    with io.capture_descriptors(target_files=None):
        pass

    # both descriptors must point at the exact same objects as before
    self.assertEqual(original_stdout, sys.stdout)
    self.assertEqual(original_stderr, sys.stderr)
def test_creates_grouped_arguments_into_tasks__no_task_defined_goes_to_rkd_initialization(self):
    """When only switches and no task are given, arguments are routed to rkd:initialize"""

    grouped = CommandlineParsingHelper(IO()).create_grouped_arguments(['--help'])

    self.assertEqual("[TaskCall<rkd:initialize (['--help'])>]",
                     str(grouped[0].tasks()))
def test_load_does_not_find_file(self):
    """Loading a check that exists in none of the search paths raises FileNotFoundError"""

    search_paths = [
        TESTS_PATH + '/example/healthchecks',
        TESTS_PATH + '/infracheck'
    ]
    loader = ConfigLoader(search_paths, IO())

    self.assertRaises(FileNotFoundError, lambda: loader.load('not-existing'))
def run_check(check_type: str, input_data: dict, hooks: dict) -> ExecutedCheckResult:
    """Build a Runner against the example/bundled check directories and execute
    a single ad-hoc check named 'example-check'"""

    project_dirs = [
        path + '/../../example/healthchecks',
        path + '/../infracheck/'
    ]

    runner = Runner(
        project_dirs,
        config_loader=ConfigLoader(project_dirs, IO()),
        repository=Repository(project_dirs),
        io=IO()
    )

    configured = ConfiguredCheck.from_config(
        'example-check',
        {
            'type': check_type,
            'input': input_data,
            'hooks': hooks
        },
        IO()
    )

    return runner.run_single_check(configured)
def test_parse_env_preserves_variables_order(self):
    """Make sure that the environment variables are loaded in order they were defined"""

    yaml_content = '''
environment:
    FIRST: "Jolanta Brzeska"
    SECOND: "Maxwell Itoya"
    THIRD: "August Spies"
    FOURTH: "Samuel Fielden"
'''

    expected_order = [
        "Jolanta Brzeska",
        "Maxwell Itoya",
        "August Spies",
        "Samuel Fielden"
    ]

    # repeated many times: dict ordering bugs are often intermittent
    for i in range(1, 10000):
        parsed = yaml.load(yaml_content, yaml.FullLoader)

        io = IO()
        factory = YamlSyntaxInterpreter(io, YamlFileLoader([]))
        envs = factory.parse_env(parsed, 'makefile.yaml')

        # Fix: removed the dead `names_in_order` accumulator — it was built
        # from envs.keys() every iteration but never asserted against
        self.assertEqual(expected_order, list(envs.values()))
def test_resolves_aliased_task(self):
    """Checks 'alias groups' feature about to resolve some group name to other group name

    Example: :bella-ciao:sh -> :sh
    """

    context = ApplicationContext(
        tasks=[TaskDeclaration(ShellCommandTask())],
        aliases=[],
        directory='')
    context.io = IO()
    context.compile()

    result_tasks = []

    # Fix: `args: list = []` was a mutable default argument (shared between
    # calls); `None` default is behavior-identical here since args is never read
    def assertion_callback(declaration: TaskDeclaration,
                           task_num: int,
                           parent: Union[GroupDeclaration, None] = None,
                           args: list = None):
        result_tasks.append(declaration.to_full_name())

    resolver = TaskResolver(context, parse_alias_groups_from_env(':bella-ciao->'))
    resolver.resolve([
        ArgumentBlock([':bella-ciao:sh']).clone_with_tasks(
            [TaskArguments(':bella-ciao:sh', [])])
    ], assertion_callback)

    self.assertEqual([':sh'], result_tasks)
def test_environment_is_passed_and_system_environment_still_available(self) -> None:
    """A custom env passed to check_call() must be merged with, not replace,
    the inherited process environment"""

    os.environ['COMING_FROM_PARENT_CONTEXT'] = 'Buenaventura Durruti'

    capture_io = IO()
    captured = StringIO()

    try:
        with capture_io.capture_descriptors(stream=captured, enable_standard_out=False):
            check_call('env', env={'PROTEST_TYPE': 'Sabotage'})
    finally:
        # always clean up the variable injected into the test process
        del os.environ['COMING_FROM_PARENT_CONTEXT']

    env_dump = captured.getvalue()

    self.assertIn('PROTEST_TYPE=Sabotage', env_dump)
    self.assertIn('COMING_FROM_PARENT_CONTEXT=Buenaventura Durruti', env_dump)
def test_factory_checks_quiet_period_type(self):
    """A non-list 'quiet_periods' value must be rejected with a clear message"""

    with self.assertRaises(ConfigurationException) as exc:
        ConfiguredCheck.from_config(name='test',
                                    config={'quiet_periods': ''},
                                    io=IO())

    self.assertEqual('"quiet_periods" should be a list', str(exc.exception))
def test_waits_for_output_that_shows_immediately(self):
    """A pattern that the command prints right away must be matched with exit code 0"""

    io = IO()
    app = WaitForOutputApp(container='',
                           command='/bin/bash -c "echo hello"',
                           pattern='hello',
                           timeout=10,
                           io=io)

    signal = execute_app(app)

    self.assertEqual(0, signal.exit_code)
    self.assertEqual('Match found', signal.message)
def test_find_matching_services_by_names(self):
    """The selector expression name.startswith("web") must match only web_* services"""

    selector = ServiceSelector('name.startswith("web")', IO())
    matched = selector.find_matching_services(self._provide_test_data())
    names = [service.get_name() for service in matched]

    self.assertEqual(['web_abc_international', 'web_phillyabc', 'web_iwa_ait'], names)