def run(self) -> None:
    """Deploys the required Test resources in AWS.

    Raises:
        TaskCatException: If skip_upload is set without specifying s3_bucket in config.
        TaskCatException: If linting fails with errors.
    """
    _trim_regions(self.regions, self.config)
    _trim_tests(self.test_names, self.config)
    boto3_cache = Boto3Cache()
    templates = self.config.get_templates()

    if self.skip_upload and not self.config.config.project.s3_bucket:
        raise TaskCatException(
            "cannot skip_upload without specifying s3_bucket in config"
        )

    buckets = self.config.get_buckets(boto3_cache)

    if not self.skip_upload:
        # 1. lint
        if not self.lint_disable:
            lint = TaskCatLint(self.config, templates)
            errors = lint.lints[1]
            lint.output_results()
            if errors or not lint.passed:
                raise TaskCatException("Lint failed with errors")
        # 2. build lambdas
        if self.config.config.project.package_lambda:
            LambdaBuild(self.config, self.config.project_root)
        # 3. s3 sync
        stage_in_s3(
            buckets, self.config.config.project.name, self.config.project_root
        )

    regions = self.config.get_regions(boto3_cache)
    parameters = self.config.get_rendered_parameters(buckets, regions, templates)
    tests = self.config.get_tests(templates, regions, buckets, parameters)

    # pre-hooks
    execute_hooks("prehooks", self.config, tests, parameters)

    self.test_definition = Stacker(
        self.config.config.project.name,
        tests,
        shorten_stack_name=self.config.config.project.shorten_stack_name,
        tags=self._extra_tags,
    )
    self.test_definition.create_stacks()

    # post-hooks
    # TODO: pass in outputs, once there is a standard interface for a test_definition
    execute_hooks("posthooks", self.config, tests, parameters)

    self.printer.report_test_progress(stacker=self.test_definition)

    self.passed = True
    self.result = self.test_definition.stacks
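# Usage sketch (assumption, not part of this snippet): run() is an instance
# method, so the enclosing test class is expected to be constructed first and
# cleaned up afterwards, roughly:
#
#     test = CFNTest.from_file(project_root="./my-project")  # hypothetical constructor
#     try:
#         test.run()
#         print(f"Passed: {test.passed}")
#     finally:
#         test.clean_up()  # hypothetical cleanup; deletes stacks/buckets as configured
#
# The class name, constructor, and cleanup method above are assumptions; see the
# enclosing class definition for the authoritative interface.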
def __init__(
    self, input_file: str, project_root: str = "./", strict: bool = False
):
    """
    :param input_file: path to project config or CloudFormation template
    :param project_root: base path for project
    :param strict: fail on lint warnings as well as errors
    """
    config = Config(
        project_config_path=input_file,
        project_root=project_root,
        create_clients=False,
    )
    lint = TaskCatLint(config, strict)
    errors = lint.lints[1]
    lint.output_results()
    if errors or not lint.passed:
        raise TaskCatException("Lint failed with errors")
def __init__(
    self,
    input_file: str = ".taskcat.yml",
    project_root: str = "./",
    strict: bool = False,
):
    """
    :param input_file: path to project config or CloudFormation template
    :param project_root: base path for project
    :param strict: fail on lint warnings as well as errors
    """
    project_root_path: Path = Path(project_root).expanduser().resolve()
    input_file_path: Path = project_root_path / input_file
    config = Config.create(
        project_root=project_root_path, project_config_path=input_file_path
    )
    templates = config.get_templates()
    lint = TaskCatLint(config, templates, strict)
    errors = lint.lints[1]
    lint.output_results()
    if errors or not lint.passed:
        raise TaskCatException("Lint failed with errors")
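# Usage sketch (assumption): this __init__ backs the `taskcat lint` CLI action,
# so constructing the CLI class is itself the lint run; a programmatic caller
# would do roughly:
#
#     try:
#         Lint(input_file=".taskcat.yml", project_root="./my-project", strict=True)
#     except TaskCatException as err:
#         print(f"lint failed: {err}")
#
# The enclosing class name `Lint` is assumed from the CLI naming convention.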
def run(
    input_file: str = "./.taskcat.yml",
    project_root: str = "./",
    no_delete: bool = False,
    lint_disable: bool = False,
    enable_sig_v2: bool = False,
    keep_failed: bool = False,
):
    """tests whether CloudFormation templates are able to successfully launch

    :param input_file: path to either a taskcat project config file or a
        CloudFormation template
    :param project_root: root path of the project relative to input_file
    :param no_delete: don't delete stacks after test is complete
    :param lint_disable: disable cfn-lint checks
    :param enable_sig_v2: enable legacy sigv2 requests for auto-created buckets
    :param keep_failed: do not delete failed stacks
    """
    project_root_path: Path = Path(project_root).expanduser().resolve()
    input_file_path: Path = project_root_path / input_file
    config = Config.create(
        project_root=project_root_path,
        project_config_path=input_file_path
        # TODO: detect if input file is taskcat config or CloudFormation template
    )
    if enable_sig_v2:
        config = Config.create(
            project_root=project_root_path,
            project_config_path=input_file_path,
            args={"project": {"s3_enable_sig_v2": enable_sig_v2}},
        )
    boto3_cache = Boto3Cache()
    templates = config.get_templates(project_root_path)
    # 1. lint
    if not lint_disable:
        lint = TaskCatLint(config, templates)
        errors = lint.lints[1]
        lint.output_results()
        if errors or not lint.passed:
            raise TaskCatException("Lint failed with errors")
    # 2. build lambdas
    LambdaBuild(config, project_root_path)
    # 3. s3 sync
    buckets = config.get_buckets(boto3_cache)
    stage_in_s3(buckets, config.config.project.name, project_root_path)
    # 4. launch stacks
    regions = config.get_regions(boto3_cache)
    parameters = config.get_rendered_parameters(buckets, regions, templates)
    tests = config.get_tests(
        project_root_path, templates, regions, buckets, parameters
    )
    test_definition = Stacker(
        config.config.project.name,
        tests,
        shorten_stack_name=config.config.project.shorten_stack_name,
    )
    test_definition.create_stacks()
    terminal_printer = TerminalPrinter()
    # 5. wait for completion
    terminal_printer.report_test_progress(stacker=test_definition)
    status = test_definition.status()
    # 6. create report
    report_path = Path("./taskcat_outputs/").resolve()
    report_path.mkdir(exist_ok=True)
    cfn_logs = _CfnLogTools()
    cfn_logs.createcfnlogs(test_definition, report_path)
    ReportBuilder(test_definition, report_path / "index.html").generate_report()
    # 7. delete stacks
    if no_delete:
        LOG.info("Skipping delete due to cli argument")
    elif keep_failed:
        if len(status["COMPLETE"]) > 0:
            LOG.info("deleting successful stacks")
            test_definition.delete_stacks({"status": "CREATE_COMPLETE"})
            terminal_printer.report_test_progress(stacker=test_definition)
    else:
        test_definition.delete_stacks()
        terminal_printer.report_test_progress(stacker=test_definition)
    # TODO: summarise stack statuses (did they complete/delete ok) and print any
    # error events
    # 8. delete buckets
    if not no_delete or (keep_failed is True and len(status["FAILED"]) == 0):
        deleted: ListType[str] = []
        for test in buckets.values():
            for bucket in test.values():
                if bucket.name not in deleted:
                    bucket.delete(delete_objects=True)
                    deleted.append(bucket.name)
    # 9. raise if something failed
    if len(status["FAILED"]) > 0:
        raise TaskCatException(
            f'One or more stacks failed tests: {status["FAILED"]}'
        )
def run(  # noqa: C901
    test_names: str = "ALL",
    regions: str = "ALL",
    input_file: str = "./.taskcat.yml",
    project_root: str = "./",
    no_delete: bool = False,
    lint_disable: bool = False,
    enable_sig_v2: bool = False,
    keep_failed: bool = False,
    output_directory: str = "./taskcat_outputs",
    minimal_output: bool = False,
    dont_wait_for_delete: bool = False,
):
    """tests whether CloudFormation templates are able to successfully launch

    :param test_names: comma separated list of tests to run
    :param regions: comma separated list of regions to test in
    :param input_file: path to either a taskcat project config file or a
        CloudFormation template
    :param project_root: root path of the project relative to input_file
    :param no_delete: don't delete stacks after test is complete
    :param lint_disable: disable cfn-lint checks
    :param enable_sig_v2: enable legacy sigv2 requests for auto-created buckets
    :param keep_failed: do not delete failed stacks
    :param output_directory: where to store generated logfiles
    :param minimal_output: reduces output during test runs
    :param dont_wait_for_delete: exits immediately after calling stack delete
    """
    project_root_path: Path = Path(project_root).expanduser().resolve()
    input_file_path: Path = project_root_path / input_file
    # pylint: disable=too-many-arguments
    args = _build_args(enable_sig_v2, regions, GLOBAL_ARGS.profile)
    config = Config.create(
        project_root=project_root_path,
        project_config_path=input_file_path,
        args=args
        # TODO: detect if input file is taskcat config or CloudFormation template
    )
    _trim_regions(regions, config)
    _trim_tests(test_names, config)
    boto3_cache = Boto3Cache()
    templates = config.get_templates()
    # 1. lint
    if not lint_disable:
        lint = TaskCatLint(config, templates)
        errors = lint.lints[1]
        lint.output_results()
        if errors or not lint.passed:
            raise TaskCatException("Lint failed with errors")
    # 2. build lambdas
    if config.config.project.package_lambda:
        LambdaBuild(config, project_root_path)
    # 3. s3 sync
    buckets = config.get_buckets(boto3_cache)
    stage_in_s3(buckets, config.config.project.name, config.project_root)
    # 4. launch stacks
    regions = config.get_regions(boto3_cache)
    parameters = config.get_rendered_parameters(buckets, regions, templates)
    tests = config.get_tests(templates, regions, buckets, parameters)
    test_definition = Stacker(
        config.config.project.name,
        tests,
        shorten_stack_name=config.config.project.shorten_stack_name,
    )
    test_definition.create_stacks()
    terminal_printer = TerminalPrinter(minimalist=minimal_output)
    # 5. wait for completion
    terminal_printer.report_test_progress(stacker=test_definition)
    status = test_definition.status()
    # 6. create report
    report_path = Path(output_directory).resolve()
    report_path.mkdir(exist_ok=True)
    cfn_logs = _CfnLogTools()
    cfn_logs.createcfnlogs(test_definition, report_path)
    ReportBuilder(test_definition, report_path / "index.html").generate_report()
    # 7. delete stacks
    if no_delete:
        LOG.info("Skipping delete due to cli argument")
    elif keep_failed:
        if len(status["COMPLETE"]) > 0:
            LOG.info("deleting successful stacks")
            test_definition.delete_stacks({"status": "CREATE_COMPLETE"})
            if not dont_wait_for_delete:
                terminal_printer.report_test_progress(stacker=test_definition)
    else:
        test_definition.delete_stacks()
        if not dont_wait_for_delete:
            terminal_printer.report_test_progress(stacker=test_definition)
    # TODO: summarise stack statuses (did they complete/delete ok) and print any
    # error events
    # 8. delete buckets
    if not no_delete or (keep_failed is True and len(status["FAILED"]) == 0):
        deleted: ListType[str] = []
        for test in buckets.values():
            for bucket in test.values():
                if (bucket.name not in deleted) and not bucket.regional_buckets:
                    bucket.delete(delete_objects=True)
                    deleted.append(bucket.name)
    # 9. raise if something failed
    if len(status["FAILED"]) > 0:
        raise TaskCatException(
            f'One or more stacks failed tests: {status["FAILED"]}'
        )
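# Usage sketch (assumption): this function backs the `taskcat test run` CLI
# action; a direct call would look roughly like:
#
#     run(
#         test_names="default",
#         regions="us-east-1,us-west-2",
#         input_file="./.taskcat.yml",
#         project_root="./my-project",
#         keep_failed=True,
#         minimal_output=True,
#     )
#
# It raises TaskCatException if linting fails or if any stack ends the run in
# status["FAILED"]; reports are written under output_directory (index.html plus
# per-stack CloudFormation logs).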