def test_passed(self, m_boto):
    cwd = os.getcwd()
    try:
        config_path = Path(
            build_test_case("/tmp/lint_test_output/", test_cases[0])
        ).resolve()
        project_root = config_path.parent.parent
        config = Config.create(
            project_config_path=config_path, project_root=project_root
        )
        templates = config.get_templates()
        lint = Lint(config=config, templates=templates)
        self.assertEqual(lint.passed, True)
        lint_key = list(lint.lints[0])[0]
        result_key = list(lint.lints[0][lint_key]["results"])[0]
        test = lint.lints[0][lint_key]["results"][result_key]
        rule = mock.Mock(return_val="[E0001] some error")
        rule.rule.id = "E0001"
        rule.linenumber = 123
        rule.rule.shortdesc = "short error"
        rule.message = "some error"
        test.append(rule)
        lint.strict = True
        self.assertEqual(lint.passed, False)
    finally:
        shutil.rmtree("/tmp/lint_test_output/")
        os.chdir(cwd)
def test_passed(self):
    cwd = os.getcwd()
    try:
        lint = Lint(
            config=Config(
                project_config_path=str(
                    build_test_case("/tmp/lint_test_output/", test_cases[0])
                ),
                project_root="../",
                create_clients=False,
            )
        )
        self.assertEqual(lint.passed, True)
        lint_key = [t for t in lint.lints[0]][0]
        result_key = [t for t in lint.lints[0][lint_key]["results"]][0]
        test = lint.lints[0][lint_key]["results"][result_key]
        rule = mock.Mock(return_val="[E0001] some error")
        rule.rule.id = "E0001"
        rule.linenumber = 123
        rule.rule.shortdesc = "short error"
        rule.message = "some error"
        test.append(rule)
        lint.strict = True
        self.assertEqual(lint.passed, False)
    finally:
        shutil.rmtree("/tmp/lint_test_output/")
        os.chdir(cwd)
def run(self) -> None:
    """Deploys the required Test resources in AWS.

    Raises:
        TaskCatException: If skip_upload is set without specifying s3_bucket in config.
        TaskCatException: If linting fails with errors.
    """
    _trim_regions(self.regions, self.config)
    _trim_tests(self.test_names, self.config)
    boto3_cache = Boto3Cache()
    templates = self.config.get_templates()

    if self.skip_upload and not self.config.config.project.s3_bucket:
        raise TaskCatException(
            "cannot skip_buckets without specifying s3_bucket in config"
        )

    buckets = self.config.get_buckets(boto3_cache)

    if not self.skip_upload:
        # 1. lint
        if not self.lint_disable:
            lint = TaskCatLint(self.config, templates)
            errors = lint.lints[1]
            lint.output_results()
            if errors or not lint.passed:
                raise TaskCatException("Lint failed with errors")
        # 2. build lambdas
        if self.config.config.project.package_lambda:
            LambdaBuild(self.config, self.config.project_root)
        # 3. s3 sync
        stage_in_s3(
            buckets, self.config.config.project.name, self.config.project_root
        )

    regions = self.config.get_regions(boto3_cache)
    parameters = self.config.get_rendered_parameters(buckets, regions, templates)
    tests = self.config.get_tests(templates, regions, buckets, parameters)

    # pre-hooks
    execute_hooks("prehooks", self.config, tests, parameters)

    self.test_definition = Stacker(
        self.config.config.project.name,
        tests,
        shorten_stack_name=self.config.config.project.shorten_stack_name,
        tags=self._extra_tags,
    )
    self.test_definition.create_stacks()

    # post-hooks
    # TODO: pass in outputs, once there is a standard interface for a test_definition
    execute_hooks("posthooks", self.config, tests, parameters)

    self.printer.report_test_progress(stacker=self.test_definition)
    self.passed = True
    self.result = self.test_definition.stacks
def test_filter_unsupported_regions(self):
    mock_cf = mock.MagicMock()
    regions = [
        AWSRegionObject(region_name="us-east-1", client_factory=mock_cf),
        AWSRegionObject(region_name="us-east-2", client_factory=mock_cf),
        AWSRegionObject(region_name="eu-central-1", client_factory=mock_cf),
    ]
    supported = Lint._filter_unsupported_regions(regions)
    expected = [r.name for r in regions]
    self.assertCountEqual(supported, expected)
    supported = Lint._filter_unsupported_regions(
        regions + [AWSRegionObject(region_name="non-exist-1", client_factory=mock_cf)]
    )
    self.assertCountEqual(supported, expected)
def test_output_results(
    self, m_boto, mock_log_error, mock_log_warning, mock_log_info
):
    cwd = os.getcwd()
    try:
        config_path = Path(
            build_test_case("/tmp/lint_test_output/", test_cases[0])
        ).resolve()
        project_root = config_path.parent.parent
        config = Config.create(
            project_config_path=config_path, project_root=project_root
        )
        templates = config.get_templates()
        lint = Lint(config=config, templates=templates)
        lint.output_results()
        self.assertTrue(
            mock_log_info.call_args[0][0].startswith(
                f"Linting passed for file: {str(templates['test1'].template_path)}"
            )
        )
        self.assertEqual(mock_log_error.called, False)
        self.assertEqual(mock_log_warning.called, False)
        mock_log_info.reset_mock()
        lint_key = list(lint.lints[0])[0]
        result_key = list(lint.lints[0][lint_key]["results"])[0]
        test = lint.lints[0][lint_key]["results"][result_key]
        rule = mock.Mock(return_val="[W0001] some warning")
        rule.rule.id = "W0001"
        rule.linenumber = 123
        rule.rule.shortdesc = "short warning"
        rule.message = "some warning"
        test.append(rule)
        lint.output_results()
        self.assertTrue(
            mock_log_warning.call_args_list[1][0][0].startswith(
                f"Linting detected issues in: {str(templates['test1'].template_path)}"
            )
        )
        mock_log_warning.assert_has_calls(
            [mock.call(" line 123 [0001] [short warning] some warning")]
        )
        self.assertEqual(mock_log_info.called, False)
        self.assertEqual(mock_log_error.called, False)
        mock_log_warning.reset_mock()
        test.pop(0)
        rule = mock.Mock(return_val="[E0001] some error")
        rule.rule.id = "E0001"
        rule.linenumber = 123
        rule.rule.shortdesc = "short error"
        rule.message = "some error"
        test.append(rule)
        lint.output_results()
        self.assertTrue(
            mock_log_warning.call_args[0][0].startswith(
                f"Linting detected issues in: {str(templates['test1'].template_path)}"
            )
        )
        mock_log_error.assert_called_once_with(
            " line 123 [0001] [short error] some error"
        )
        self.assertEqual(mock_log_info.called, False)
    finally:
        shutil.rmtree("/tmp/lint_test_output/")
        os.chdir(cwd)
def __init__(self, input_file: str, project_root: str = "./", strict: bool = False):
    """
    :param input_file: path to project config or CloudFormation template
    :param project_root: base path for project
    :param strict: fail on lint warnings as well as errors
    """
    config = Config(
        project_config_path=input_file,
        project_root=project_root,
        create_clients=False,
    )
    lint = TaskCatLint(config, strict)
    errors = lint.lints[1]
    lint.output_results()
    if errors or not lint.passed:
        raise TaskCatException("Lint failed with errors")
def __init__(
    self,
    input_file: str = ".taskcat.yml",
    project_root: str = "./",
    strict: bool = False,
):
    """
    :param input_file: path to project config or CloudFormation template
    :param project_root: base path for project
    :param strict: fail on lint warnings as well as errors
    """
    project_root_path: Path = Path(project_root).expanduser().resolve()
    input_file_path: Path = project_root_path / input_file
    config = Config.create(
        project_root=project_root_path, project_config_path=input_file_path
    )
    templates = config.get_templates()
    lint = TaskCatLint(config, templates, strict)
    errors = lint.lints[1]
    lint.output_results()
    if errors or not lint.passed:
        raise TaskCatException("Lint failed with errors")
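# Illustrative usage sketch (not from the source): calling the lint CLI
# entrypoint above directly from Python, assuming the enclosing class is
# named Lint and that "./my-project" is a hypothetical project path. Per the
# docstring, construction raises TaskCatException when lint errors are found,
# or on warnings as well when strict=True.
try:
    Lint(input_file=".taskcat.yml", project_root="./my-project", strict=True)
except TaskCatException as err:
    print(f"lint failed: {err}")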
def test_lint(self, m_boto):
    cwd = os.getcwd()
    base_path = "/tmp/lint_test/"
    mkdir(base_path)
    try:
        for test_case in test_cases:
            config_path = Path(build_test_case(base_path, test_case)).resolve()
            project_root = config_path.parent.parent
            config = Config.create(
                project_config_path=config_path, project_root=project_root
            )
            templates = config.get_templates()
            lint = Lint(config=config, templates=templates)
            self.assertEqual(
                test_case["expected_lints"], flatten_rule(lint.lints[0])
            )
    finally:
        shutil.rmtree(base_path)
        os.chdir(cwd)
def test_lint(self):
    cwd = os.getcwd()
    base_path = "/tmp/lint_test/"
    mkdir(base_path)
    try:
        for test_case in test_cases:
            config = Config(
                project_config_path=str(build_test_case(base_path, test_case)),
                project_root="../",
                create_clients=False,
            )
            lint = Lint(config=config)
            self.assertEqual(
                test_case["expected_lints"], flatten_rule(lint.lints[0])
            )
    finally:
        shutil.rmtree(base_path)
        os.chdir(cwd)
def run(
    input_file: str = "./.taskcat.yml",
    project_root: str = "./",
    no_delete: bool = False,
    lint_disable: bool = False,
    enable_sig_v2: bool = False,
    keep_failed: bool = False,
):
    """tests whether CloudFormation templates are able to successfully launch

    :param input_file: path to either a taskcat project config file or a
        CloudFormation template
    :param project_root: root path of the project relative to input_file
    :param no_delete: don't delete stacks after test is complete
    :param lint_disable: disable cfn-lint checks
    :param enable_sig_v2: enable legacy sigv2 requests for auto-created buckets
    :param keep_failed: do not delete failed stacks
    """
    project_root_path: Path = Path(project_root).expanduser().resolve()
    input_file_path: Path = project_root_path / input_file
    config = Config.create(
        project_root=project_root_path, project_config_path=input_file_path
        # TODO: detect if input file is taskcat config or CloudFormation template
    )
    if enable_sig_v2:
        config = Config.create(
            project_root=project_root_path,
            project_config_path=input_file_path,
            args={"project": {"s3_enable_sig_v2": enable_sig_v2}},
        )
    boto3_cache = Boto3Cache()
    templates = config.get_templates(project_root_path)
    # 1. lint
    if not lint_disable:
        lint = TaskCatLint(config, templates)
        errors = lint.lints[1]
        lint.output_results()
        if errors or not lint.passed:
            raise TaskCatException("Lint failed with errors")
    # 2. build lambdas
    LambdaBuild(config, project_root_path)
    # 3. s3 sync
    buckets = config.get_buckets(boto3_cache)
    stage_in_s3(buckets, config.config.project.name, project_root_path)
    # 4. launch stacks
    regions = config.get_regions(boto3_cache)
    parameters = config.get_rendered_parameters(buckets, regions, templates)
    tests = config.get_tests(
        project_root_path, templates, regions, buckets, parameters
    )
    test_definition = Stacker(
        config.config.project.name,
        tests,
        shorten_stack_name=config.config.project.shorten_stack_name,
    )
    test_definition.create_stacks()
    terminal_printer = TerminalPrinter()
    # 5. wait for completion
    terminal_printer.report_test_progress(stacker=test_definition)
    status = test_definition.status()
    # 6. create report
    report_path = Path("./taskcat_outputs/").resolve()
    report_path.mkdir(exist_ok=True)
    cfn_logs = _CfnLogTools()
    cfn_logs.createcfnlogs(test_definition, report_path)
    ReportBuilder(test_definition, report_path / "index.html").generate_report()
    # 7. delete stacks
    if no_delete:
        LOG.info("Skipping delete due to cli argument")
    elif keep_failed:
        if len(status["COMPLETE"]) > 0:
            LOG.info("deleting successful stacks")
            test_definition.delete_stacks({"status": "CREATE_COMPLETE"})
            terminal_printer.report_test_progress(stacker=test_definition)
    else:
        test_definition.delete_stacks()
        terminal_printer.report_test_progress(stacker=test_definition)
    # TODO: summarise stack statuses (did they complete/delete ok) and print any
    #  error events
    # 8. delete buckets
    if not no_delete or (keep_failed is True and len(status["FAILED"]) == 0):
        deleted: ListType[str] = []
        for test in buckets.values():
            for bucket in test.values():
                if bucket.name not in deleted:
                    bucket.delete(delete_objects=True)
                    deleted.append(bucket.name)
    # 9. raise if something failed
    if len(status["FAILED"]) > 0:
        raise TaskCatException(f'One or more stacks failed tests: {status["FAILED"]}')
def test_filter_unsupported_regions(self):
    regions = ["us-east-1", "us-east-2", "eu-central-1"]
    supported = Lint._filter_unsupported_regions(regions)
    self.assertCountEqual(supported, regions)
    supported = Lint._filter_unsupported_regions(regions + ["non-exist-1"])
    self.assertCountEqual(supported, regions)
def run(  # noqa: C901
    test_names: str = "ALL",
    regions: str = "ALL",
    input_file: str = "./.taskcat.yml",
    project_root: str = "./",
    no_delete: bool = False,
    lint_disable: bool = False,
    enable_sig_v2: bool = False,
    keep_failed: bool = False,
    output_directory: str = "./taskcat_outputs",
    minimal_output: bool = False,
    dont_wait_for_delete: bool = False,
):
    """tests whether CloudFormation templates are able to successfully launch

    :param test_names: comma separated list of tests to run
    :param regions: comma separated list of regions to test in
    :param input_file: path to either a taskcat project config file or a
        CloudFormation template
    :param project_root: root path of the project relative to input_file
    :param no_delete: don't delete stacks after test is complete
    :param lint_disable: disable cfn-lint checks
    :param enable_sig_v2: enable legacy sigv2 requests for auto-created buckets
    :param keep_failed: do not delete failed stacks
    :param output_directory: where to store generated logfiles
    :param minimal_output: reduces output during test runs
    :param dont_wait_for_delete: exits immediately after calling stack_delete
    """
    project_root_path: Path = Path(project_root).expanduser().resolve()
    input_file_path: Path = project_root_path / input_file
    # pylint: disable=too-many-arguments
    args = _build_args(enable_sig_v2, regions, GLOBAL_ARGS.profile)
    config = Config.create(
        project_root=project_root_path,
        project_config_path=input_file_path,
        args=args
        # TODO: detect if input file is taskcat config or CloudFormation template
    )
    _trim_regions(regions, config)
    _trim_tests(test_names, config)
    boto3_cache = Boto3Cache()
    templates = config.get_templates()
    # 1. lint
    if not lint_disable:
        lint = TaskCatLint(config, templates)
        errors = lint.lints[1]
        lint.output_results()
        if errors or not lint.passed:
            raise TaskCatException("Lint failed with errors")
    # 2. build lambdas
    if config.config.project.package_lambda:
        LambdaBuild(config, project_root_path)
    # 3. s3 sync
    buckets = config.get_buckets(boto3_cache)
    stage_in_s3(buckets, config.config.project.name, config.project_root)
    # 4. launch stacks
    regions = config.get_regions(boto3_cache)
    parameters = config.get_rendered_parameters(buckets, regions, templates)
    tests = config.get_tests(templates, regions, buckets, parameters)
    test_definition = Stacker(
        config.config.project.name,
        tests,
        shorten_stack_name=config.config.project.shorten_stack_name,
    )
    test_definition.create_stacks()
    terminal_printer = TerminalPrinter(minimalist=minimal_output)
    # 5. wait for completion
    terminal_printer.report_test_progress(stacker=test_definition)
    status = test_definition.status()
    # 6. create report
    report_path = Path(output_directory).resolve()
    report_path.mkdir(exist_ok=True)
    cfn_logs = _CfnLogTools()
    cfn_logs.createcfnlogs(test_definition, report_path)
    ReportBuilder(test_definition, report_path / "index.html").generate_report()
    # 7. delete stacks
    if no_delete:
        LOG.info("Skipping delete due to cli argument")
    elif keep_failed:
        if len(status["COMPLETE"]) > 0:
            LOG.info("deleting successful stacks")
            test_definition.delete_stacks({"status": "CREATE_COMPLETE"})
            if not dont_wait_for_delete:
                terminal_printer.report_test_progress(stacker=test_definition)
    else:
        test_definition.delete_stacks()
        if not dont_wait_for_delete:
            terminal_printer.report_test_progress(stacker=test_definition)
    # TODO: summarise stack statuses (did they complete/delete ok) and print any
    #  error events
    # 8. delete buckets
    if not no_delete or (keep_failed is True and len(status["FAILED"]) == 0):
        deleted: ListType[str] = []
        for test in buckets.values():
            for bucket in test.values():
                if (bucket.name not in deleted) and not bucket.regional_buckets:
                    bucket.delete(delete_objects=True)
                    deleted.append(bucket.name)
    # 9. raise if something failed
    if len(status["FAILED"]) > 0:
        raise TaskCatException(f'One or more stacks failed tests: {status["FAILED"]}')
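# Illustrative usage sketch (not from the source): a direct call to the run()
# command above. "./my-project" is a hypothetical path; the keyword arguments
# are the real parameters of run() with example values only.
run(
    test_names="ALL",
    regions="us-east-1,eu-central-1",
    input_file="./.taskcat.yml",
    project_root="./my-project",
    keep_failed=True,  # leave failed stacks up for debugging
    output_directory="./taskcat_outputs",
    minimal_output=True,
)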
def test_output_results(self, mock_log_error, mock_log_warning, mock_log_info):
    cwd = os.getcwd()
    try:
        lint = Lint(
            config=Config(
                project_config_path=str(
                    build_test_case("/tmp/lint_test_output/", test_cases[0])
                ),
                project_root="../",
                create_clients=False,
            )
        )
        lint.output_results()
        self.assertTrue(
            mock_log_info.call_args[0][0].startswith(
                "Lint passed for test test1 on template "
            )
        )
        self.assertEqual(mock_log_error.called, False)
        self.assertEqual(mock_log_warning.called, False)
        mock_log_info.reset_mock()
        lint_key = [t for t in lint.lints[0]][0]
        result_key = [t for t in lint.lints[0][lint_key]["results"]][0]
        test = lint.lints[0][lint_key]["results"][result_key]
        rule = mock.Mock(return_val="[W0001] some warning")
        rule.rule.id = "W0001"
        rule.linenumber = 123
        rule.rule.shortdesc = "short warning"
        rule.message = "some warning"
        test.append(rule)
        lint.output_results()
        self.assertTrue(
            mock_log_warning.call_args_list[0][0][0].startswith(
                "Lint detected issues for test test1 on template "
            )
        )
        mock_log_warning.assert_has_calls(
            [mock.call(" line 123 [0001] [short warning] some warning")]
        )
        self.assertEqual(mock_log_info.called, False)
        self.assertEqual(mock_log_error.called, False)
        mock_log_warning.reset_mock()
        test.pop(0)
        rule = mock.Mock(return_val="[E0001] some error")
        rule.rule.id = "E0001"
        rule.linenumber = 123
        rule.rule.shortdesc = "short error"
        rule.message = "some error"
        test.append(rule)
        lint.output_results()
        self.assertTrue(
            mock_log_warning.call_args[0][0].startswith(
                "Lint detected issues for test test1 on template "
            )
        )
        mock_log_error.assert_called_once_with(
            " line 123 [0001] [short error] some error"
        )
        self.assertEqual(mock_log_info.called, False)
    finally:
        shutil.rmtree("/tmp/lint_test_output/")
        os.chdir(cwd)