def test_status(self):
    """status() should bucket stacks into COMPLETE / FAILED / IN_PROGRESS."""
    project_dir = (Path(__file__).parent / "./data/nested-fail").resolve()
    config = Config(
        project_config_path=project_dir / "ci" / "taskcat.yml",
        project_root=project_dir,
        create_clients=False,
    )
    stacker = Stacker(config)
    stacker.create_stacks()
    first, second = stacker.stacks[0], stacker.stacks[1]
    first.id = "stack-id"
    first.status_reason = ""
    first.status = "CREATE_COMPLETE"
    second.id = "stack-id2"
    second.status_reason = ""
    second.status = "CREATE_IN_PROGRESS"
    expected = {
        "COMPLETE": {"stack-id": ""},
        "FAILED": {},
        "IN_PROGRESS": {"stack-id2": ""},
    }
    self.assertEqual(expected, stacker.status())
def test_resources(self):
    """The two created stacks should yield two resources."""
    project_dir = (Path(__file__).parent / "./data/nested-fail").resolve()
    name, tests = get_tests(project_dir)
    stacker = Stacker(project_name=name, tests=tests)
    stacker.create_stacks()
    self.assertEqual(2, len(stacker.resources()))
def minimalist_progress(self, stacker: TaskcatStacker, poll_interval):
    """Poll stack statuses and print a minimal tree for each stack until
    no test is in progress, sleeping ``poll_interval`` seconds per cycle."""
    history: dict = {}
    statuses = stacker.status()
    while self._is_test_in_progress(statuses):
        statuses = stacker.status()
        for stack in stacker.stacks:
            self._print_tree_minimal(stack, history)
        time.sleep(poll_interval)
def run(self) -> None:
    """Deploys the required Test resources in AWS.

    Raises:
        TaskCatException: If skip_upload is set without specifying s3_bucket
            in config.
        TaskCatException: If linting fails with errors.
    """
    # Narrow the configured regions/tests to what the caller asked for.
    _trim_regions(self.regions, self.config)
    _trim_tests(self.test_names, self.config)
    boto3_cache = Boto3Cache()
    templates = self.config.get_templates()
    # skip_upload requires a pre-existing bucket to read templates from.
    if self.skip_upload and not self.config.config.project.s3_bucket:
        raise TaskCatException(
            "cannot skip_buckets without specifying s3_bucket in config")
    buckets = self.config.get_buckets(boto3_cache)
    if not self.skip_upload:
        # 1. lint
        if not self.lint_disable:
            lint = TaskCatLint(self.config, templates)
            errors = lint.lints[1]
            lint.output_results()
            if errors or not lint.passed:
                raise TaskCatException("Lint failed with errors")
        # 2. build lambdas
        if self.config.config.project.package_lambda:
            LambdaBuild(self.config, self.config.project_root)
        # 3. s3 sync
        stage_in_s3(buckets, self.config.config.project.name,
                    self.config.project_root)
    regions = self.config.get_regions(boto3_cache)
    parameters = self.config.get_rendered_parameters(
        buckets, regions, templates)
    tests = self.config.get_tests(templates, regions, buckets, parameters)
    # pre-hooks
    execute_hooks("prehooks", self.config, tests, parameters)
    self.test_definition = Stacker(
        self.config.config.project.name,
        tests,
        shorten_stack_name=self.config.config.project.shorten_stack_name,
        tags=self._extra_tags,
    )
    self.test_definition.create_stacks()
    # post-hooks
    # TODO: pass in outputs, once there is a standard interface for a test_definition
    execute_hooks("posthooks", self.config, tests, parameters)
    # Blocks until the printer reports the stacks as finished.
    self.printer.report_test_progress(stacker=self.test_definition)
    self.passed = True
    self.result = self.test_definition.stacks
def test_create_stacks(self):
    """create_stacks() should produce two stacks for the nested-fail project."""
    project_dir = (Path(__file__).parent / "./data/nested-fail").resolve()
    stacker = Stacker(
        Config(
            project_config_path=project_dir / "ci" / "taskcat.yml",
            project_root=project_dir,
            create_clients=False,
        )
    )
    stacker.create_stacks()
    self.assertEqual(2, len(stacker.stacks))
def report_test_progress(self, stacker: TaskcatStacker, poll_interval=10):
    """Print each stack's tree until every test finishes, then show the
    final status summary. Polls every ``poll_interval`` seconds."""
    statuses = stacker.status()
    while self._is_test_in_progress(statuses):
        for stack in stacker.stacks:
            self._print_stack_tree(stack, buffer=self.buffer)
        time.sleep(poll_interval)
        self.buffer.clear()
        statuses = stacker.status()
    self._display_final_status(stacker)
def test_from_existing(self, m_import):
    """from_existing() with a zero uid should import no stacks."""
    project_dir = (Path(__file__).parent / "./data/nested-fail").resolve()
    name, tests = get_tests(project_dir)
    stacker = Stacker.from_existing(
        uid=uuid.UUID(int=0), tests=tests, project_name=name
    )
    self.assertEqual([], stacker.stacks)
def test_from_existing(self, m_import):
    """from_existing() with a zero uid and a Config should import no stacks."""
    project_dir = (Path(__file__).parent / "./data/nested-fail").resolve()
    config = Config(
        project_config_path=project_dir / "ci" / "taskcat.yml",
        project_root=project_dir,
        create_clients=False,
    )
    stacker = Stacker.from_existing(uuid.UUID(int=0), config)
    self.assertEqual([], stacker.stacks)
def test_delete_stacks(self):
    """delete_stacks() should call delete on each created stack."""
    project_dir = (Path(__file__).parent / "./data/nested-fail").resolve()
    name, tests = get_tests(project_dir)
    stacker = Stacker(project_name=name, tests=tests)
    stacker.create_stacks()
    stacker.delete_stacks()
    stacker.stacks[0].delete.assert_called_once()
def test_status(self, *args, **kwargs):
    """status() buckets stacks by phase, and flags a DELETE_COMPLETE stack
    with no prior COMPLETE event as FAILED."""
    project_dir = (Path(__file__).parent / "./data/nested-fail").resolve()
    name, tests = get_tests(project_dir)
    stacker = Stacker(project_name=name, tests=tests)
    stacker.create_stacks()
    first, second = stacker.stacks[0], stacker.stacks[1]

    # Phase 1: one stack created, the other still creating.
    first.id = "stack-id"
    first.status_reason = ""
    first.status = "CREATE_COMPLETE"
    second.id = "stack-id2"
    second.status_reason = ""
    second.status = "CREATE_IN_PROGRESS"
    self.assertEqual(
        {
            "COMPLETE": {"stack-id": ""},
            "FAILED": {},
            "IN_PROGRESS": {"stack-id2": ""},
        },
        stacker.status(),
    )

    # Phase 2: deletes in flight; DELETE_COMPLETE without a COMPLETE event
    # is reported as FAILED.
    first.id = "stack-id"
    first.status_reason = ""
    first.status = "DELETE_IN_PROGRESS"
    second.id = "stack-id2"
    second.status_reason = ""
    second.status = "DELETE_COMPLETE"
    reason = "COMPLETE event not detected. Potential out-of-band action against the stack."
    self.assertEqual(
        {
            "COMPLETE": {},
            "FAILED": {"stack-id2": reason},
            "IN_PROGRESS": {"stack-id": ""},
        },
        stacker.status(),
    )
def test_status(self):
    """status() should bucket stacks into COMPLETE / FAILED / IN_PROGRESS."""
    project_dir = (Path(__file__).parent / "./data/nested-fail").resolve()
    name, tests = get_tests(project_dir)
    stacker = Stacker(project_name=name, tests=tests)
    stacker.create_stacks()
    first, second = stacker.stacks[0], stacker.stacks[1]
    first.id = "stack-id"
    first.status_reason = ""
    first.status = "CREATE_COMPLETE"
    second.id = "stack-id2"
    second.status_reason = ""
    second.status = "CREATE_IN_PROGRESS"
    expected = {
        "COMPLETE": {"stack-id": ""},
        "FAILED": {},
        "IN_PROGRESS": {"stack-id2": ""},
    }
    self.assertEqual(expected, stacker.status())
def test_delete_stacks(self):
    """delete_stacks() should call delete on each created stack."""
    project_dir = (Path(__file__).parent / "./data/nested-fail").resolve()
    config = Config(
        project_config_path=project_dir / "ci" / "taskcat.yml",
        project_root=project_dir,
        create_clients=False,
    )
    stacker = Stacker(config)
    stacker.create_stacks()
    stacker.delete_stacks()
    stacker.stacks[0].delete.assert_called_once()
def __init__(
    self,
    package: str,
    aws_profile: str = "default",
    region="default",
    _stack_type="package",
):
    """
    :param package: installed package to delete, can be an install name or uuid
    :param aws_profile: aws profile to use for deletion
    :param region: region to delete from, default will use aws cli configured
    default
    """
    LOG.warning("delete is in alpha feature, use with caution")
    boto3_cache = Boto3Cache()
    if region == "default":
        region = boto3_cache.get_default_region(aws_profile)
    # Normalize to a list so list_stacks can iterate regions uniformly.
    if isinstance(region, str):
        region = [region]
    stacks = Stacker.list_stacks([aws_profile], region)
    jobs = []
    for stack in stacks:
        # The taskcat-installer tag marks package installs; fall back to the
        # project name for plain test stacks.
        name = stack.get("taskcat-installer", stack["taskcat-project-name"])
        job = {
            "name": name,
            "project_name": stack["taskcat-project-name"],
            "test_name": stack["taskcat-test-name"],
            "taskcat_id": stack["taskcat-id"].hex,
            "region": stack["region"],
            "type": "package" if stack.get("taskcat-installer") else "test",
            "stack_id": stack["stack-id"],
        }
        # Only delete stacks of the requested type that match by name,
        # id, or the "ALL" wildcard.
        if _stack_type == job["type"]:
            if package in [job["name"], job["taskcat_id"], "ALL"]:
                jobs.append(job)
    # TODO: concurrency and wait for complete
    for job in jobs:
        client = boto3_cache.client("cloudformation",
                                    profile=aws_profile,
                                    region=job["region"])
        Stack.delete(client=client, stack_id=job["stack_id"])
def test_import_stacks_per_client(self, m_stack_import):
    """Only the stack whose id, test name, and project name all match should
    be imported; child stacks (with a ParentId) and tag mismatches are skipped."""
    clients = (mock.Mock(), "us-east-1")

    # Fake paginator returning one page with: a child stack, one full match,
    # and three near-misses (wrong id, wrong test name, wrong project name).
    class Paging:
        @staticmethod
        def paginate(**kwargs):
            return [{
                "Stacks": [
                    {"ParentId": "skipme"},  # nested child -> skipped
                    {  # matches all three tags -> imported
                        "Tags": [
                            {
                                "Key": "taskcat-id",
                                "Value": "00000000000000000000000000000000",
                            },
                            {
                                "Key": "taskcat-test-name",
                                "Value": "taskcat-json",
                            },
                            {
                                "Key": "taskcat-project-name",
                                "Value": "nested-fail",
                            },
                        ]
                    },
                    {  # wrong taskcat-id -> skipped
                        "Tags": [
                            {"Key": "taskcat-id", "Value": "nope"},
                            {
                                "Key": "taskcat-test-name",
                                "Value": "taskcat-json",
                            },
                            {
                                "Key": "taskcat-project-name",
                                "Value": "nested-fail",
                            },
                        ]
                    },
                    {  # wrong test name -> skipped
                        "Tags": [
                            {
                                "Key": "taskcat-id",
                                "Value": "00000000000000000000000000000000",
                            },
                            {"Key": "taskcat-test-name", "Value": "nope"},
                            {
                                "Key": "taskcat-project-name",
                                "Value": "nested-fail",
                            },
                        ]
                    },
                    {  # wrong project name -> skipped
                        "Tags": [
                            {
                                "Key": "taskcat-id",
                                "Value": "00000000000000000000000000000000",
                            },
                            {
                                "Key": "taskcat-test-name",
                                "Value": "taskcat-json",
                            },
                            {"Key": "taskcat-project-name", "Value": "nope"},
                        ]
                    },
                ]
            }]

    clients[0].get_paginator.return_value = Paging()
    s = Stacker._import_stacks_per_client(clients, uuid.UUID(int=0),
                                          "nested-fail",
                                          {"taskcat-json": mock.Mock()})
    self.assertEqual(1, len(s))
def __init__(  # noqa: C901
    self,
    profiles: Union[str, ListType[str]] = "default",
    regions="ALL",
    _stack_type="package",
):
    """
    :param profiles: comma separated list of aws profiles to search
    :param regions: comma separated list of regions to search, default is to
    check all commercial regions
    """
    LOG.warning("list is in alpha feature, use with caution")
    if isinstance(profiles, str):
        profiles = profiles.split(",")
    if regions == "ALL":
        # Union of every commercial region each profile can see.
        region_set: set = set()
        for profile in profiles:
            region_set = region_set.union(
                set(
                    boto3.Session(profile_name=profile).
                    get_available_regions("cloudformation")))
        regions = list(region_set)
    else:
        regions = regions.split(",")
    stacks = Stacker.list_stacks(profiles, regions)
    # One job per (taskcat id, region); extra stacks only bump the count.
    jobs: dict = {}
    for stack in stacks:
        stack_key = stack["taskcat-id"].hex + "-" + stack["region"]
        if stack_key not in jobs:
            name = stack.get("taskcat-installer")
            if _stack_type == "test" and not name:
                name = stack["taskcat-project-name"]
                jobs[stack_key] = {
                    "name": name,
                    "id": stack["taskcat-id"].hex,
                    "project_name": stack["taskcat-project-name"],
                    "active_stacks": 1,
                    "region": stack["region"],
                }
            elif name and _stack_type == "package":
                jobs[stack_key] = {
                    "name": name,
                    "id": stack["taskcat-id"].hex,
                    "project_name": stack["taskcat-project-name"],
                    "active_stacks": 1,
                    "region": stack["region"],
                }
        else:
            jobs[stack_key]["active_stacks"] += 1

    # Column-layout helpers for the plain-text table below.
    def longest(things: list):
        lengths = [len(thing) for thing in things]
        return sorted(lengths)[-1] if lengths else 0

    def spaces(number):
        ret = ""
        for _ in range(number):
            ret += " "
        return ret

    def pad(string, length):
        while len(string) < length:
            string += " "
        return string

    longest_name = longest([v["name"] for _, v in jobs.items()])
    longest_project_name = longest(
        [v["project_name"] for _, v in jobs.items()])
    if not jobs:
        LOG.info("no stacks found")
        return
    if _stack_type != "test":
        header = (
            f"NAME{spaces(longest_name)}PROJECT{spaces(longest_project_name)}"
            f"ID{spaces(34)}REGION")
        column = "{} {} {} {}"
    else:
        header = f"NAME{spaces(longest_name)}ID{spaces(34)}REGION"
        column = "{} {} {}"
    # LOG.error is used for output so the table prints regardless of log level.
    LOG.error(header, extra={"nametag": ""})
    for job in jobs.values():
        args = [
            pad(job["name"], longest_name),
            pad(job["project_name"], longest_project_name),
            job["id"],
            job["region"],
        ]
        if _stack_type == "test":
            args = [
                pad(job["name"], longest_name), job["id"], job["region"]
            ]
        LOG.error(column.format(*args), extra={"nametag": ""})
def run(  # noqa: C901
    test_names: str = "ALL",
    regions: str = "ALL",
    input_file: str = "./.taskcat.yml",
    project_root: str = "./",
    no_delete: bool = False,
    lint_disable: bool = False,
    enable_sig_v2: bool = False,
    keep_failed: bool = False,
    output_directory: str = "./taskcat_outputs",
    minimal_output: bool = False,
    dont_wait_for_delete: bool = False,
):
    """tests whether CloudFormation templates are able to successfully launch

    :param test_names: comma separated list of tests to run
    :param regions: comma separated list of regions to test in
    :param input_file: path to either a taskat project config file or a
    CloudFormation template
    :param project_root_path: root path of the project relative to input_file
    :param no_delete: don't delete stacks after test is complete
    :param lint_disable: disable cfn-lint checks
    :param enable_sig_v2: enable legacy sigv2 requests for auto-created buckets
    :param keep_failed: do not delete failed stacks
    :param output_directory: Where to store generated logfiles
    :param minimal_output: Reduces output during test runs
    :param dont_wait_for_delete: Exits immediately after calling stack_delete
    """
    project_root_path: Path = Path(project_root).expanduser().resolve()
    input_file_path: Path = project_root_path / input_file
    # pylint: disable=too-many-arguments
    args = _build_args(enable_sig_v2, regions, GLOBAL_ARGS.profile)
    config = Config.create(
        project_root=project_root_path,
        project_config_path=input_file_path,
        args=args
        # TODO: detect if input file is taskcat config or CloudFormation template
    )
    # Narrow configured regions/tests to the caller's selection.
    _trim_regions(regions, config)
    _trim_tests(test_names, config)
    boto3_cache = Boto3Cache()
    templates = config.get_templates()
    # 1. lint
    if not lint_disable:
        lint = TaskCatLint(config, templates)
        errors = lint.lints[1]
        lint.output_results()
        if errors or not lint.passed:
            raise TaskCatException("Lint failed with errors")
    # 2. build lambdas
    if config.config.project.package_lambda:
        LambdaBuild(config, project_root_path)
    # 3. s3 sync
    buckets = config.get_buckets(boto3_cache)
    stage_in_s3(buckets, config.config.project.name, config.project_root)
    # 4. launch stacks
    regions = config.get_regions(boto3_cache)
    parameters = config.get_rendered_parameters(buckets, regions, templates)
    tests = config.get_tests(templates, regions, buckets, parameters)
    test_definition = Stacker(
        config.config.project.name,
        tests,
        shorten_stack_name=config.config.project.shorten_stack_name,
    )
    test_definition.create_stacks()
    terminal_printer = TerminalPrinter(minimalist=minimal_output)
    # 5. wait for completion
    terminal_printer.report_test_progress(stacker=test_definition)
    status = test_definition.status()
    # 6. create report
    report_path = Path(output_directory).resolve()
    report_path.mkdir(exist_ok=True)
    cfn_logs = _CfnLogTools()
    cfn_logs.createcfnlogs(test_definition, report_path)
    ReportBuilder(test_definition,
                  report_path / "index.html").generate_report()
    # 7. delete stacks
    if no_delete:
        LOG.info("Skipping delete due to cli argument")
    elif keep_failed:
        # Only the successful stacks are removed; failures stay for debugging.
        if len(status["COMPLETE"]) > 0:
            LOG.info("deleting successful stacks")
            test_definition.delete_stacks({"status": "CREATE_COMPLETE"})
            if not dont_wait_for_delete:
                terminal_printer.report_test_progress(
                    stacker=test_definition)
    else:
        test_definition.delete_stacks()
        if not dont_wait_for_delete:
            terminal_printer.report_test_progress(stacker=test_definition)
    # TODO: summarise stack statusses (did they complete/delete ok) and print any
    # error events
    # 8. delete buckets
    if not no_delete or (keep_failed is True and len(status["FAILED"]) == 0):
        deleted: ListType[str] = []
        for test in buckets.values():
            for bucket in test.values():
                if (bucket.name
                        not in deleted) and not bucket.regional_buckets:
                    bucket.delete(delete_objects=True)
                    deleted.append(bucket.name)
    # 9. raise if something failed
    if len(status["FAILED"]) > 0:
        raise TaskCatException(
            f'One or more stacks failed tests: {status["FAILED"]}')
def __init__(  # noqa: C901
    self,
    profiles: Union[str, ListType[str]] = "default",
    regions="ALL",
    stack_type="ALL",
):
    """
    :param profiles: comma separated list of aws profiles to search
    :param regions: comma separated list of regions to search, default is to check \
all commercial regions
    :param stack_type: type of stacks to check, options are 'test', 'project', or 'ALL'. \
default is 'ALL'
    """
    if isinstance(profiles, str):
        profiles = profiles.split(",")
    if regions == "ALL":
        # Union of every commercial region each profile can see.
        region_set: set = set()
        for profile in profiles:
            region_set = region_set.union(
                set(
                    boto3.Session(profile_name=profile).
                    get_available_regions("cloudformation")))
        regions = list(region_set)
    else:
        regions = regions.split(",")
    stacks = Stacker.list_stacks(profiles, regions)
    # One job per (taskcat id, region); extra stacks only bump the count.
    jobs: dict = {}
    for stack in stacks:
        stack_key = stack["taskcat-id"].hex + "-" + stack["region"]
        if stack_key not in jobs:
            # taskcat-installer tag is present only on project installs.
            name = stack.get("taskcat-installer")
            if stack_type == "ALL":
                if not name:
                    name = stack["taskcat-project-name"]
                jobs[stack_key] = {
                    "name": name,
                    "id": stack["taskcat-id"].hex,
                    "project_name": stack["taskcat-project-name"],
                    "active_stacks": 1,
                    "region": stack["region"],
                }
            elif stack_type == "test" and not name:
                name = stack["taskcat-project-name"]
                jobs[stack_key] = {
                    "name": name,
                    "id": stack["taskcat-id"].hex,
                    "project_name": stack["taskcat-project-name"],
                    "active_stacks": 1,
                    "region": stack["region"],
                }
            elif name and stack_type == "project":
                jobs[stack_key] = {
                    "name": name,
                    "id": stack["taskcat-id"].hex,
                    "project_name": stack["taskcat-project-name"],
                    "active_stacks": 1,
                    "region": stack["region"],
                }
        else:
            jobs[stack_key]["active_stacks"] += 1
    longest_name = List._longest([v["name"] for _, v in jobs.items()])
    longest_project_name = List._longest(
        [v["project_name"] for _, v in jobs.items()])
    if not jobs:
        LOG.info("no stacks found")
        return
    if stack_type != "test":
        header = (
            f"NAME{List._spaces(longest_name)}PROJECT{List._spaces(longest_project_name)}"
            f"ID{List._spaces(34)}REGION")
        column = "{} {} {} {}"
    else:
        header = f"NAME{List._spaces(longest_name)}ID{List._spaces(34)}REGION"
        column = "{} {} {}"
    # LOG.error is used for output so the table prints regardless of log level.
    LOG.error(header, extra={"nametag": ""})
    for job in jobs.values():
        args = [
            List._pad(job["name"], longest_name),
            List._pad(job["project_name"], longest_project_name),
            job["id"],
            job["region"],
        ]
        if stack_type == "test":
            args = [
                List._pad(job["name"], longest_name), job["id"],
                job["region"]
            ]
        LOG.error(column.format(*args), extra={"nametag": ""})
def __init__(
    self,
    project: str,
    aws_profile: str = "default",
    region="ALL",
    no_verify: bool = False,
    stack_type: str = "ALL",
):
    """
    :param project: installed project to delete, can be an install name, uuid,
    or project name
    :param aws_profile: aws profile to use for deletion
    :param region: region(s) to delete from, by default, will delete all applicable\
 stacks, supply a csv "us-east-1,us-west-1" to override this default
    :param no_verify: ignore region verification, delete will not error if an invalid\
 region is detected
    :param stack_type: type of stacks to delete, allowable options are
    ["project","test","ALL"]
    """
    boto3_cache = Boto3Cache()
    # NOTE(review): if `region` is neither "default"/"ALL" nor a str (e.g. a
    # list), `regions` stays unbound and list_stacks below raises NameError —
    # confirm intended input types.
    if region == "default":
        regions = boto3_cache.get_default_region(aws_profile)
    elif region == "ALL":
        region_set: set = set()
        region_set = region_set.union(
            # pylint: disable=duplicate-code
            set(
                boto3.Session(profile_name=aws_profile).
                get_available_regions("cloudformation")))
        regions = list(region_set)
    elif isinstance(region, str):
        # Validate the csv of regions unless verification is disabled.
        regions = (self._validate_regions(region)
                   if not no_verify else region.split(","))
    stacks = Stacker.list_stacks([aws_profile], regions)
    jobs = []
    for stack in stacks:
        # taskcat-installer tag marks installs; fall back to project name.
        name = stack.get("taskcat-installer",
                         stack["taskcat-project-name"])
        job = {
            "name": name,
            "project_name": stack["taskcat-project-name"],
            "test_name": stack["taskcat-test-name"],
            "taskcat_id": stack["taskcat-id"].hex,
            "region": stack["region"],
            "stack_id": stack["stack-id"],
        }
        # NOTE(review): with stack_type == "ALL" a stack can satisfy both
        # conditions and be appended twice (duplicate delete submission) —
        # confirm whether that is intended.
        if stack_type in ["project", "ALL"] and project in [
                job["name"],
                job["taskcat_id"],
                "ALL",
        ]:
            jobs.append(job)
        if stack_type in ["test", "ALL"] and project in [
                job["project_name"],
                "ALL",
        ]:
            jobs.append(job)
    # Delete all matched stacks concurrently, logging per-stack outcome.
    with ThreadPoolExecutor() as executor:
        stack_futures = {
            executor.submit(
                self._delete_stack,
                boto3_cache=boto3_cache,
                job=job,
                aws_profile=aws_profile,
            ): [job["name"], job["region"]]
            for job in jobs
        }
        for stack_future in as_completed(stack_futures):
            name_and_region = stack_futures[stack_future]
            try:
                stack_future.result()
            # pylint: disable=broad-except
            except Exception:
                LOG.error(
                    f"{name_and_region[0]} failed in {name_and_region[1]}")
            else:
                LOG.info(
                    f"{name_and_region[0]} deleted in {name_and_region[1]}"
                )
class CFNTest(BaseTest):  # pylint: disable=too-many-instance-attributes
    """
    Tests Cloudformation template by making sure the stack can properly deploy
    in the specified regions.
    """

    def __init__(
        self,
        config: Config,
        printer: Union[TerminalPrinter, None] = None,
        test_names: str = "ALL",
        regions: str = "ALL",
        skip_upload: bool = False,
        lint_disable: bool = False,
        no_delete: bool = False,
        keep_failed: bool = False,
        dont_wait_for_delete: bool = True,
        _extra_tags: list = None,
    ):
        """The constructor creates a test from the given Config object.

        Args:
            config (Config): A pre-configured Taskcat Config instance.
            printer (Union[TerminalPrinter, None], optional): A printer object that will handle Test output. Defaults to TerminalPrinter.
            test_names (str, optional): A comma separated list of tests to run. Defaults to "ALL".
            regions (str, optional): A comma separated list of regions to test in. Defaults to "ALL".
            skip_upload (bool, optional): Use templates in an existing cloudformation bucket. Defaults to False.
            lint_disable (bool, optional): Disable linting with cfn-lint. Defaults to False.
            no_delete (bool, optional): Don't delete stacks after test is complete. Defaults to False.
            keep_failed (bool, optional): Don't delete failed stacks. Defaults to False.
            dont_wait_for_delete (bool, optional): Exits immediately after calling stack_delete. Defaults to True.
        """  # noqa: B950
        super().__init__(config)
        # Set by run(); clean_up()/report() require run() to have succeeded.
        self.test_definition: Stacker
        self.test_names = test_names
        self.regions = regions
        self.skip_upload = skip_upload
        self.lint_disable = lint_disable
        self.no_delete = no_delete
        self.keep_failed = keep_failed
        self.dont_wait_for_delete = dont_wait_for_delete
        # NOTE: mutable default avoided by substituting a fresh list here.
        self._extra_tags = _extra_tags if _extra_tags else []
        if printer is None:
            self.printer = TerminalPrinter(minimalist=True)
        else:
            self.printer = printer

    def run(self) -> None:
        """Deploys the required Test resources in AWS.

        Raises:
            TaskCatException: If skip_upload is set without specifying
                s3_bucket in config.
            TaskCatException: If linting fails with errors.
        """
        _trim_regions(self.regions, self.config)
        _trim_tests(self.test_names, self.config)
        boto3_cache = Boto3Cache()
        templates = self.config.get_templates()
        # skip_upload requires a pre-existing bucket to read templates from.
        if self.skip_upload and not self.config.config.project.s3_bucket:
            raise TaskCatException(
                "cannot skip_buckets without specifying s3_bucket in config")
        buckets = self.config.get_buckets(boto3_cache)
        if not self.skip_upload:
            # 1. lint
            if not self.lint_disable:
                lint = TaskCatLint(self.config, templates)
                errors = lint.lints[1]
                lint.output_results()
                if errors or not lint.passed:
                    raise TaskCatException("Lint failed with errors")
            # 2. build lambdas
            if self.config.config.project.package_lambda:
                LambdaBuild(self.config, self.config.project_root)
            # 3. s3 sync
            stage_in_s3(buckets, self.config.config.project.name,
                        self.config.project_root)
        regions = self.config.get_regions(boto3_cache)
        parameters = self.config.get_rendered_parameters(
            buckets, regions, templates)
        tests = self.config.get_tests(templates, regions, buckets, parameters)
        # pre-hooks
        execute_hooks("prehooks", self.config, tests, parameters)
        self.test_definition = Stacker(
            self.config.config.project.name,
            tests,
            shorten_stack_name=self.config.config.project.shorten_stack_name,
            tags=self._extra_tags,
        )
        self.test_definition.create_stacks()
        # post-hooks
        # TODO: pass in outputs, once there is a standard interface for a test_definition
        execute_hooks("posthooks", self.config, tests, parameters)
        self.printer.report_test_progress(stacker=self.test_definition)
        self.passed = True
        self.result = self.test_definition.stacks

    def clean_up(self) -> None:  # noqa: C901
        """Deletes the Test related resources in AWS.

        Raises:
            TaskCatException: If one or more stacks failed to create.
        """
        # run() may have raised before creating any stacks; nothing to clean.
        if not hasattr(self, "test_definition"):
            LOG.warning("No stacks were created... skipping cleanup.")
            return
        status = self.test_definition.status()
        # Delete Stacks
        if self.no_delete:
            LOG.info("Skipping delete due to cli argument")
        elif self.keep_failed:
            # Only successful stacks are removed; failures stay for debugging.
            if len(status["COMPLETE"]) > 0:
                LOG.info("deleting successful stacks")
                self.test_definition.delete_stacks(
                    {"status": "CREATE_COMPLETE"})
        else:
            self.test_definition.delete_stacks()
        if not self.dont_wait_for_delete:
            self.printer.report_test_progress(stacker=self.test_definition)
        # TODO: summarise stack statusses (did they complete/delete ok) and print any
        # error events
        # Delete Templates and Buckets
        buckets = self.config.get_buckets()
        if not self.no_delete or (self.keep_failed is True
                                  and len(status["FAILED"]) == 0):
            deleted: ListType[str] = []
            for test in buckets.values():
                for bucket in test.values():
                    if (bucket.name
                            not in deleted) and not bucket.regional_buckets:
                        bucket.delete(delete_objects=True)
                        deleted.append(bucket.name)
        # 9. raise if something failed
        # - grabbing the status again to ensure everything deleted OK.
        status = self.test_definition.status()
        if len(status["FAILED"]) > 0:
            raise TaskCatException(
                f'One or more stacks failed to create: {status["FAILED"]}')

    def report(
        self,
        output_directory: str = "./taskcat_outputs",
    ):
        """Generates a report of the status of Cloudformation stacks.

        Args:
            output_directory (str, optional): The directory to save the report in. Defaults to "./taskcat_outputs".
        """  # noqa: B950
        report_path = Path(output_directory).resolve()
        report_path.mkdir(exist_ok=True)
        cfn_logs = _CfnLogTools()
        cfn_logs.createcfnlogs(self.test_definition, report_path)
        ReportBuilder(self.test_definition,
                      report_path / "index.html").generate_report()
def run(
    input_file: str = "./.taskcat.yml",
    project_root: str = "./",
    no_delete: bool = False,
    lint_disable: bool = False,
    enable_sig_v2: bool = False,
    keep_failed: bool = False,
):
    """tests whether CloudFormation templates are able to successfully launch

    :param input_file: path to either a taskat project config file or a
    CloudFormation template
    :param project_root_path: root path of the project relative to input_file
    :param no_delete: don't delete stacks after test is complete
    :param lint_disable: disable cfn-lint checks
    :param enable_sig_v2: enable legacy sigv2 requests for auto-created buckets
    :param keep_failed: do not delete failed stacks
    """
    project_root_path: Path = Path(project_root).expanduser().resolve()
    input_file_path: Path = project_root_path / input_file
    config = Config.create(
        project_root=project_root_path,
        project_config_path=input_file_path
        # TODO: detect if input file is taskcat config or CloudFormation template
    )
    # Re-create the config with the sigv2 override when requested.
    if enable_sig_v2:
        config = Config.create(
            project_root=project_root_path,
            project_config_path=input_file_path,
            args={"project": {
                "s3_enable_sig_v2": enable_sig_v2
            }},
        )
    boto3_cache = Boto3Cache()
    templates = config.get_templates(project_root_path)
    # 1. lint
    if not lint_disable:
        lint = TaskCatLint(config, templates)
        errors = lint.lints[1]
        lint.output_results()
        if errors or not lint.passed:
            raise TaskCatException("Lint failed with errors")
    # 2. build lambdas
    LambdaBuild(config, project_root_path)
    # 3. s3 sync
    buckets = config.get_buckets(boto3_cache)
    stage_in_s3(buckets, config.config.project.name, project_root_path)
    # 4. launch stacks
    regions = config.get_regions(boto3_cache)
    parameters = config.get_rendered_parameters(buckets, regions, templates)
    tests = config.get_tests(project_root_path, templates, regions, buckets,
                             parameters)
    test_definition = Stacker(
        config.config.project.name,
        tests,
        shorten_stack_name=config.config.project.shorten_stack_name,
    )
    test_definition.create_stacks()
    terminal_printer = TerminalPrinter()
    # 5. wait for completion
    terminal_printer.report_test_progress(stacker=test_definition)
    status = test_definition.status()
    # 6. create report
    report_path = Path("./taskcat_outputs/").resolve()
    report_path.mkdir(exist_ok=True)
    cfn_logs = _CfnLogTools()
    cfn_logs.createcfnlogs(test_definition, report_path)
    ReportBuilder(test_definition,
                  report_path / "index.html").generate_report()
    # 7. delete stacks
    if no_delete:
        LOG.info("Skipping delete due to cli argument")
    elif keep_failed:
        # Only successful stacks are removed; failures stay for debugging.
        if len(status["COMPLETE"]) > 0:
            LOG.info("deleting successful stacks")
            test_definition.delete_stacks({"status": "CREATE_COMPLETE"})
            terminal_printer.report_test_progress(stacker=test_definition)
    else:
        test_definition.delete_stacks()
        terminal_printer.report_test_progress(stacker=test_definition)
    # TODO: summarise stack statusses (did they complete/delete ok) and print any
    # error events
    # 8. delete buckets
    if not no_delete or (keep_failed is True and len(status["FAILED"]) == 0):
        deleted: ListType[str] = []
        for test in buckets.values():
            for bucket in test.values():
                if bucket.name not in deleted:
                    bucket.delete(delete_objects=True)
                    deleted.append(bucket.name)
    # 9. raise if something failed
    if len(status["FAILED"]) > 0:
        raise TaskCatException(
            f'One or more stacks failed tests: {status["FAILED"]}')
def __init__(  # noqa: C901
    self,
    package: str,
    aws_profile: str = "default",
    region="default",
    parameters="",
    name="",
    wait=False,
):
    """
    :param package: name of package to install can be a path to a local package,
    a github org/repo, or an AWS Quick Start name
    :param aws_profile: aws profile to use for installation
    :param region: regions to install into, default will use aws cli configured
    default
    :param parameters: parameters to pass to the stack, in the format
    Key=Value,AnotherKey=AnotherValue or providing a path to a json or yaml file
    containing the parameters
    :param name: stack name to use, if not specified one will be automatically
    generated
    :param wait: if enabled, taskcat will wait for stack to complete before
    exiting
    """
    LOG.warning("deploy is in alpha feature, use with caution")
    boto3_cache = Boto3Cache()
    if not name:
        name = generate_name()
    if region == "default":
        region = boto3_cache.get_default_region(profile_name=aws_profile)
    # Resolve the package source: local dir, explicit org/repo, or a bare
    # name assumed to be an AWS Quick Start repo.
    path = Path(package).resolve()
    if Path(package).resolve().is_dir():
        package_type = "local"
    elif "/" in package:
        package_type = "github"
    else:  # assuming it's an AWS Quick Start
        package_type = "github"
        package = f"aws-quickstart/quickstart-{package}"
    if package_type == "github":
        if package.startswith("https://") or package.startswith("git@"):
            url = package
            org, repo = (package.replace(".git",
                                         "").replace(":",
                                                     "/").split("/")[-2:])
        else:
            org, repo = package.split("/")
            url = f"https://github.com/{org}/{repo}.git"
        path = Deploy.PKG_CACHE_PATH / org / repo
        LOG.info(f"fetching git repo {url}")
        self._git_clone(url, path)
        self._recurse_submodules(path, url)
    config = Config.create(
        args={"project": {
            "regions": [region]
        }},
        project_config_path=(path / ".taskcat.yml"),
        project_root=path,
    )
    # only use one region
    for test_name in config.config.tests:
        config.config.tests[
            test_name].regions = config.config.project.regions
    # if there's no test called default, take the 1st in the list
    if "default" not in config.config.tests:
        config.config.tests["default"] = config.config.tests[list(
            config.config.tests.keys())[0]]
    # until install offers a way to run different "plans" we only need one test
    for test_name in list(config.config.tests.keys()):
        if test_name != "default":
            del config.config.tests[test_name]
    buckets = config.get_buckets(boto3_cache)
    stage_in_s3(buckets, config.config.project.name, path)
    regions = config.get_regions(boto3_cache)
    templates = config.get_templates(project_root=path)
    # NOTE: rebinds the `parameters` argument with the rendered parameters.
    parameters = config.get_rendered_parameters(buckets, regions, templates)
    tests = config.get_tests(path, templates, regions, buckets, parameters)
    # The taskcat-installer tag is what later identifies this install.
    tags = [Tag({"Key": "taskcat-installer", "Value": name})]
    stacks = Stacker(config.config.project.name, tests, tags=tags)
    stacks.create_stacks()
    # LOG.error is used for output so the install id/name always print.
    LOG.error(
        f" {stacks.uid.hex}",
        extra={"nametag": "\x1b[0;30;47m[INSTALL_ID ]\x1b[0m"},
    )
    LOG.error(f" {name}",
              extra={"nametag": "\x1b[0;30;47m[INSTALL_NAME]\x1b[0m"})
    if wait:
        LOG.info(
            f"waiting for stack {stacks.stacks[0].name} to complete in "
            f"{stacks.stacks[0].region_name}")
        while stacks.status()["IN_PROGRESS"]:
            sleep(5)
    if stacks.status()["FAILED"]:
        LOG.error("Install failed:")
        for error in stacks.stacks[0].error_events():
            LOG.error(f"{error.logical_id}: {error.status_reason}")
        raise TaskCatException("Stack creation failed")