Example #1
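 # Hook __init__: raises a TaskCatException immediately when the hook config sets "generate_failure"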
 def __init__(
     self,
     hook_config: dict,
     config: Config,
     tests: Mapping[str, TestObj],
     parameters: dict,
     outputs: Optional[dict],
 ):
     super().__init__(hook_config, config, tests, parameters, outputs)
     if hook_config.get("generate_failure"):
         raise TaskCatException("generated failure from hook")
Example #2
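 # Reject invalid argument combinations: stack_name_prefix with stack_name_suffix, or stack_name combined with either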
 def _assert_param_combo(self):
     throw = False
     if self._stack_name_prefix and self._stack_name_suffix:
         throw = True
     if self._stack_name and (self._stack_name_prefix
                              or self._stack_name_suffix):
         throw = True
     if throw:
         raise TaskCatException(
             "Please provide only *ONE* of stack_name, stack_name_prefix, "
             "or stack_name_suffix")
Example #3
 def create_stacks(self, threads: int = 8):
     if self.stacks:
         raise TaskCatException(
             "Stacker already initialised with stack objects")
     tests = self._tests_to_list(self.config.tests)
     tags = [Tag({"Key": "taskcat-id", "Value": self.uid.hex})]
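     # carry over user-supplied tags, excluding the reserved taskcat-* keys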
     tags += [
         Tag(t) for t in self.tags if t.key not in
         ["taskcat-project-name", "taskcat-test-name", "taskcat-id"]
     ]
     fan_out(self._create_stacks_for_test, {"tags": tags}, tests, threads)
Example #4
def param_list_to_dict(original_keys):
    # Set up a list index dictionary.
    # - Used to give a Parameter => Index mapping for replacement.
    param_index = {}
    if not isinstance(original_keys, list):
        raise TaskCatException(
            'Invalid parameter file, outermost json element must be a list ("[]")'
        )
    for (idx, param_dict) in enumerate(original_keys):
        if not isinstance(param_dict, dict):
            raise TaskCatException(
                'Invalid parameter %s parameters must be of type dict ("{}")' %
                param_dict)
        if "ParameterKey" not in param_dict or "ParameterValue" not in param_dict:
            raise TaskCatException(
                f"Invalid parameter {param_dict} all items must "
                f"have both ParameterKey and ParameterValue keys")
        key = param_dict["ParameterKey"]
        param_index[key] = idx
    return param_index
Example #5
def param_list_to_dict(original_keys):
    # Set up a list index dictionary.
    # - Used to give a Parameter => Index mapping for replacement.
    param_index = {}
    if type(original_keys) != list:
        raise TaskCatException(
            'Invalid parameter file, outermost json element must be a list ("[]")'
        )
    for (idx, param_dict) in enumerate(original_keys):
        if type(param_dict) != dict:
            raise TaskCatException(
                'Invalid parameter %s parameters must be of type dict ("{}")' %
                param_dict)
        if 'ParameterKey' not in param_dict or 'ParameterValue' not in param_dict:
            raise TaskCatException(
                'Invalid parameter %s all items must have both ParameterKey and ParameterValue keys'
                % param_dict)
        key = param_dict['ParameterKey']
        param_index[key] = idx
    return param_index
Example #6
 def _get_account_info(self, profile):
     partition, region = self._get_partition(profile)
     session = self.session(profile, region)
     sts_client = session.client("sts", region_name=region)
     try:
         account_id = sts_client.get_caller_identity()["Account"]
     except ClientError as e:
         if e.response["Error"]["Code"] == "AccessDenied":
             raise TaskCatException(
                 f"Not able to fetch account number from {region} using profile "
                 f"{profile}. {str(e)}")
         raise
     except NoCredentialsError as e:
         raise TaskCatException(
             f"Not able to fetch account number from {region} using profile "
             f"{profile}. {str(e)}")
     except ProfileNotFound as e:
         raise TaskCatException(
             f"Not able to fetch account number from {region} using profile "
             f"{profile}. {str(e)}")
     return {"partition": partition, "account_id": account_id}
Example #7
 def validate_output_dir(directory):
     if os.path.isfile(directory):
         directory = os.path.split(directory)[0]
     if not os.path.isdir(directory):
         LOG.info(
             "Directory [{}] does not exist. Trying to create it.".format(
                 directory))
         os.makedirs(directory)
     elif not os.access(directory, os.W_OK):
         raise TaskCatException(
             f"No write access allowed to output directory "
             f"[{directory}]. Aborting.")
Example #8
 def _sync(  # noqa: C901
     self, local_list, s3_list, bucket, prefix, acl, threads=16
 ):  # pylint: disable=too-many-locals
     # determine which files to remove from S3
     remove_from_s3 = []
     for s3_file in s3_list.keys():
         if s3_file not in local_list.keys() and not self._exclude_remote(s3_file):
             if self.dry_run:
                 LOG.info(
                     f"[DRY RUN] s3://{bucket}/{prefix + prefix + s3_file}",
                     extra={"nametag": PrintMsg.S3DELETE},
                 )
             else:
                 LOG.info(
                     f"s3://{bucket}/{prefix + prefix + s3_file}",
                     extra={"nametag": PrintMsg.S3DELETE},
                 )
             remove_from_s3.append({"Key": prefix + s3_file})
     # deleting objects, max 1k objects per s3 delete_objects call
     if not self.dry_run:
         for objects in [
             remove_from_s3[i : i + 1000]
             for i in range(0, len(remove_from_s3), 1000)
         ]:
             response = self.s3_client.delete_objects(
                 Bucket=bucket, Delete={"Objects": objects}
             )
             if "Errors" in response.keys():
                 for error in response["Errors"]:
                     LOG.error("S3 delete error: %s" % str(error))
                 raise TaskCatException("Failed to delete one or more files from S3")
     # build list of files to upload
     upload_to_s3 = []
     for local_file in local_list:
         upload = False
         # If file is not present in S3
         if local_file not in s3_list.keys():
             upload = True
         # If checksum is different
         elif local_list[local_file][1] != s3_list[local_file]:
             upload = True
         if upload:
             absolute_path = local_list[local_file][0]
             s3_path = local_file
             upload_to_s3.append([absolute_path, bucket, s3_path])
     # multithread the uploading of files
     pool = ThreadPool(threads)
     func = partial(
         self._s3_upload_file, prefix=prefix, s3_client=self.s3_client, acl=acl
     )
     pool.map(func, upload_to_s3)
     pool.close()
     pool.join()
Example #9
def parse_legacy_config(project_root: Path):
    config_file = (project_root / "ci/taskcat.yml").expanduser().resolve()
    if not config_file.is_file():
        raise TaskCatException(f"No config_file at {config_file}")
    with open(str(config_file), "r") as file_handle:
        config_dict = yaml.safe_load(file_handle)
    # need to rename global key, as it's a python keyword
    config_dict["global_"] = config_dict.pop("global")
    legacy_config = LegacyConfig.from_dict(config_dict)
    tests = {}
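    # convert each legacy test: inline its parameter file and point at the template under templates/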
    for test_name, test_data in legacy_config.tests.items():
        parameters = {}
        parameter_file = project_root / "ci/" / test_data.parameter_input
        parameter_file = parameter_file.expanduser().resolve()
        with open(str(parameter_file), "r") as file_handle:
            for param in yaml.safe_load(file_handle):
                parameters[param["ParameterKey"]] = param["ParameterValue"]
        tests[test_name] = {
            "template": "templates/" + test_data.template_file,
            "parameters": parameters,
            "regions": test_data.regions,
        }
        if not tests[test_name]["regions"]:
            del tests[test_name]["regions"]
    new_config_dict = {
        "project": {
            "name": legacy_config.global_.qsname,
            "owner": legacy_config.global_.owner,
            "s3_bucket": legacy_config.global_.s3bucket,
            "package_lambda": legacy_config.global_.lambda_build,
            "regions": legacy_config.global_.regions,
        },
        "tests": tests,
    }
    new_config = BaseConfig.from_dict(new_config_dict)
    LOG.warning(
        "config is in a legacy format, support for which will be dropped in a "
        "future version. a new format config (.taskcat.yml) will been placed "
        "in your project_root"
    )
    new_config_path = project_root / ".taskcat.yml"
    if new_config_path.exists():
        LOG.warning(
            f"skipping new config file creation, file already exits at "
            f"{new_config_path}"
        )
    else:
        with open(str(new_config_path), "w") as file_handle:
            config_dict = new_config.to_dict()
            config_dict.pop("general")
            yaml.dump(config_dict, file_handle, default_flow_style=False)
    return new_config
Example #10
 def _absolute_path(self, path: Optional[Union[str,
                                               Path]]) -> Optional[Path]:
     if path is None:
         return path
     path = Path(path)
     abs_path = absolute_path(path)
     if self.project_root and not abs_path:
         abs_path = absolute_path(Path(self.project_root) / Path(path))
     if not abs_path:
         raise TaskCatException(
             f"Unable to resolve path {path}, with project_root "
             f"{self.project_root}")
     return abs_path
Example #11
    def clean_up(self) -> None:  # noqa: C901
        """Deletes the Test related resources in AWS.

        Raises:
            TaskCatException: If one or more stacks failed to create.
        """

        if not hasattr(self, "test_definition"):
            LOG.warning("No stacks were created... skipping cleanup.")
            return

        status = self.test_definition.status()

        # Delete Stacks
        if self.no_delete:
            LOG.info("Skipping delete due to cli argument")
        elif self.keep_failed:
            if len(status["COMPLETE"]) > 0:
                LOG.info("deleting successful stacks")
                self.test_definition.delete_stacks(
                    {"status": "CREATE_COMPLETE"})
        else:
            self.test_definition.delete_stacks()

        if not self.dont_wait_for_delete:
            self.printer.report_test_progress(stacker=self.test_definition)

        # TODO: summarise stack statuses (did they complete/delete ok) and print any
        #  error events

        # Delete Templates and Buckets
        buckets = self.config.get_buckets()

        if not self.no_delete or (self.keep_failed is True
                                  and len(status["FAILED"]) == 0):
            deleted: ListType[str] = []
            for test in buckets.values():
                for bucket in test.values():
                    if (bucket.name
                            not in deleted) and not bucket.regional_buckets:
                        bucket.delete(delete_objects=True)
                        deleted.append(bucket.name)

        # 9. raise if something failed
        # - grabbing the status again to ensure everything deleted OK.

        status = self.test_definition.status()
        if len(status["FAILED"]) > 0:
            raise TaskCatException(
                f'One or more stacks failed to create: {status["FAILED"]}')
Example #12
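 # Verify that an existing bucket is in the expected region and carries a matching taskcat-id tag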
 def _bucket_matches_existing(self):
     try:
         location = self.s3_client.get_bucket_location(
             Bucket=self.name)["LocationConstraint"]
         location = location if location else "us-east-1"
     except self.s3_client.exceptions.NoSuchBucket:
         location = None
     if location != self.region and location is not None:
         raise TaskCatException(
             f"bucket {self.name} already exists, but is not in "
             f"the expected region {self.region}, expected {location}")
     if location:
         tags = self.s3_client.get_bucket_tagging(
             Bucket=self.name)["TagSet"]
         tags = {t["Key"]: t["Value"] for t in tags}
         uid = tags.get("taskcat-id")
         uid = uuid.UUID(uid) if uid else uid
         if uid != self.taskcat_id:
             raise TaskCatException(
                 f"bucket {self.name} already exists, but does not have a matching"
                 f" uuid")
         return True
     return False
Example #13
 def __init__(self, input_file: str, project_root: str = "./", strict: bool = False):
     """
     :param input_file: path to project config or CloudFormation template
     :param project_root: base path for project
     :param strict: fail on lint warnings as well as errors
     """
     config = Config(
         project_config_path=input_file,
         project_root=project_root,
         create_clients=False,
     )
     lint = TaskCatLint(config, strict)
     errors = lint.lints[1]
     lint.output_results()
     if errors or not lint.passed:
         raise TaskCatException("Lint failed with errors")
Example #14
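 # Build (and cache) the list of files under input_path, skipping extensions in _GIT_EXT and pruning _EXCLUDED_DIRS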
 def _get_file_list(self, input_path):
     if not self._file_list:
         _file_list = []
         if os.path.isfile(input_path):
             _file_list.append(input_path)
         elif os.path.isdir(input_path):
             for root, dirs, files in os.walk(input_path):
                 for _current_file in files:
                     if not _current_file.endswith(tuple(self._GIT_EXT)):
                         _file_list.append(os.path.join(root, _current_file))
                 for directory in self._EXCLUDED_DIRS:
                     if directory in dirs:
                         dirs.remove(directory)
         else:
             raise TaskCatException("Directory/File is non-existent. Aborting.")
         self._file_list = _file_list
     return self._file_list
Example #15
 def _sync(self, local_list, s3_list, bucket, prefix, acl, threads=16):
     # determine which files to remove from S3
     remove_from_s3 = []
     for s3_file in s3_list.keys():
         if s3_file not in local_list.keys(
         ) and not self._exclude_remote(s3_file):
             print("{}[S3: DELETE ]{} s3://{}/{}".format(
                 PrintMsg.white, PrintMsg.rst_color, bucket,
                 prefix + s3_file))
             remove_from_s3.append({"Key": prefix + s3_file})
     # deleting objects, max 1k objects per s3 delete_objects call
     for d in [
             remove_from_s3[i:i + 1000]
             for i in range(0, len(remove_from_s3), 1000)
     ]:
         response = self.s3_client.delete_objects(Bucket=bucket,
                                                  Delete={'Objects': d})
         if "Errors" in response.keys():
             for error in response["Errors"]:
                 print(PrintMsg.ERROR + "S3 delete error: %s" % str(error))
             raise TaskCatException(
                 "Failed to delete one or more files from S3")
     # build list of files to upload
     upload_to_s3 = []
     for local_file in local_list:
         upload = False
         # If file is not present in S3
         if local_file not in s3_list.keys():
             upload = True
         # If checksum is different
         elif local_list[local_file][1] != s3_list[local_file]:
             upload = True
         if upload:
             absolute_path = local_list[local_file][0]
             s3_path = local_file
             upload_to_s3.append([absolute_path, bucket, s3_path])
     # multithread the uploading of files
     pool = ThreadPool(threads)
     func = partial(self._s3_upload_file,
                    prefix=prefix,
                    s3_client=self.s3_client,
                    acl=acl)
     pool.map(func, upload_to_s3)
     pool.close()
     pool.join()
Example #16
def validate_all_templates(config: Config) -> None:
    _validated_templates: List[Path] = []

    for test in config.tests.values():
        if test.template_file in _validated_templates:
            continue
        try:
            region = test.regions[0]
            test.template.validate(region.name,
                                   region.bucket.name)  # type: ignore
            _validated_templates.append(test.template_file)
            LOG.info(
                f"Validated template: {str(test.template_file)}",
                extra={"nametag": PrintMsg.PASS},
            )
        except Exception as e:
            LOG.critical(f"Exception: {str(e)}")
            raise TaskCatException(
                f"Unable to validate {test.template_file.name}")
Example #17
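    # Recursively walk a parsed template, rewriting string nodes via _string_rewriter and leaving other scalars untouched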
    def _recurse_nodes(self, current_node):
        if type(current_node) in [OrderedDict, dict]:
            for key in current_node.keys():
                self.logger.debug("Key: ")
                self.logger.debug(key)
                self.logger.debug("Type: ")
                self.logger.debug(type(current_node[key]))
                self.logger.debug("Value: ")
                self.logger.debug(current_node[key])
                current_node[key] = self._recurse_nodes(current_node[key])
        elif type(current_node) is list:
            for _index, item in enumerate(current_node):
                self.logger.debug("Type: ")
                self.logger.debug(type(item))
                self.logger.debug("Value: ")
                self.logger.debug(item)
                current_node[_index] = self._recurse_nodes(item)
            return current_node
        elif type(current_node) is str:
            return self._string_rewriter(current_node,
                                         self._target_bucket_name)
        elif type(current_node) is bool:
            self.logger.debug("Not much we can do with booleans. Skipping.")
        elif type(current_node) in [int, float]:
            self.logger.debug("Not much we can do with numbers. Skipping.")
        elif type(current_node) in [
                datetime.date, datetime.time, datetime.datetime,
                datetime.timedelta
        ]:
            self.logger.debug("Not much we can do with datetime. Skipping.")
        elif current_node is None:
            self.logger.debug("Not much we can do with nulls. Skipping.")
        else:
            self.logger.error("Unsupported type.")
            self.logger.error("Failing Type: ")
            self.logger.error(type(current_node))
            self.logger.error("Failing Value: ")
            self.logger.error(current_node)
            raise TaskCatException("Unsupported type.")

        self.logger.debug("PARSED!")

        return current_node
Example #18
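 # Fill in missing test regions from the project-level regions, falling back to the AWS CLI default region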
 def _propagate_regions(self, test: Test):
     # TODO: Better way to handle default_region
     default_region = test.client_factory.get_default_region(
         None, None, None, None)
     if not test.regions and not default_region and not self.regions:
         raise TaskCatException(
             f"unable to define region for test {test.name}, you must define "
             f"regions "
             f"or set a default region in the aws cli")
     if not test.regions:
         if self.regions:
             test.regions = [
                 AWSRegionObject(region, self._client_factory_instance)
                 for region in self.regions
             ]
         else:
             test.regions = [
                 AWSRegionObject(default_region,
                                 self._client_factory_instance)
             ]
Example #19
 def _s3_upload_file(self, paths, prefix, s3_client, acl):
     local_filename, bucket, s3_path = paths
     retry = 0
     # backoff and retry
     while retry < 5:
         print("{}[S3: -> ]{} s3://{}/{}".format(PrintMsg.white,
                                                 PrintMsg.rst_color, bucket,
                                                 prefix + s3_path))
         try:
             s3_client.upload_file(local_filename,
                                   bucket,
                                   prefix + s3_path,
                                   ExtraArgs={'ACL': acl})
             break
         except Exception as e:
             retry += 1
             print(PrintMsg.ERROR + "S3 upload error: %s" % e)
             # give up if we've exhausted retries, or if the error is not-retryable (ie AccessDenied)
             if retry == 5 or (type(e) == S3UploadFailedError
                               and '(AccessDenied)' in str(e)):
                 raise TaskCatException("Failed to upload to S3")
             time.sleep(retry * 2)
Example #20
 def _s3_upload_file(paths, prefix, s3_client, acl):
     local_filename, bucket, s3_path = paths
     retry = 0
     # backoff and retry
     while retry < 5:
         LOG.info(f"s3://{bucket}/{prefix + s3_path}",
                  extra={"nametag": PrintMsg.S3})
         try:
             s3_client.upload_file(local_filename,
                                   bucket,
                                   prefix + s3_path,
                                   ExtraArgs={"ACL": acl})
             break
         except Exception as e:  # pylint: disable=broad-except
             retry += 1
             LOG.error("S3 upload error: %s" % e)
             # give up if we've exhausted retries, or if the error is not-retryable
             # ie. AccessDenied
             if retry == 5 or (isinstance(e, S3UploadFailedError)
                               and "(AccessDenied)" in str(e)):
                 raise TaskCatException("Failed to upload to S3")
             time.sleep(retry * 2)
Example #21
    def __init__(
        self,
        input_file: str = ".taskcat.yml",
        project_root: str = "./",
        strict: bool = False,
    ):
        """
        :param input_file: path to project config or CloudFormation template
        :param project_root: base path for project
        :param strict: fail on lint warnings as well as errors
        """

        project_root_path: Path = Path(project_root).expanduser().resolve()
        input_file_path: Path = project_root_path / input_file
        config = Config.create(project_root=project_root_path,
                               project_config_path=input_file_path)

        templates = config.get_templates()
        lint = TaskCatLint(config, templates, strict)
        errors = lint.lints[1]
        lint.output_results()
        if errors or not lint.passed:
            raise TaskCatException("Lint failed with errors")
Example #22
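 # Replace a $[taskcat_presignedurl],bucket,key[,timeout] token with a presigned S3 GET URL (default expiry 3600 seconds)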
 def _get_presigned_url_wrapper(self, presigned_url_regex):
     if presigned_url_regex.search(self.param_value):
         if len(self.param_value) < 2:
             LOG.error("Syntax: $[taskcat_presignedurl],bucket,key,OPTIONAL_TIMEOUT")
             raise TaskCatException(
                 "Syntax error when using $[taskcat_getpresignedurl]; Not "
                 "enough parameters."
             )
         paramsplit = self.regxfind(presigned_url_regex, self.param_value).split(
             ","
         )[1:]
         url_bucket, url_key = paramsplit[:2]
         if len(paramsplit) == 3:
             url_expire_seconds = paramsplit[2]
         else:
             url_expire_seconds = 3600
         s3_client = self._boto_client("s3")
         param_value = s3_client.generate_presigned_url(
             "get_object",
             Params={"Bucket": url_bucket, "Key": url_key},
             ExpiresIn=int(url_expire_seconds),
         )
         self._regex_replace_param_value(re.compile("^.*$"), param_value)
         self._regex_replace_param_value(re.compile("^.*$"), param_value)
Example #23
 def _find_children(self) -> None:  # noqa: C901
     children = set()
     if "Resources" not in self.template:
         raise TaskCatException(
             f"did not receive a valid template: {self.template_path} does not "
             f"have a Resources section"
         )
     for resource in self.template["Resources"].keys():
         resource = self.template["Resources"][resource]
         if resource["Type"] == "AWS::CloudFormation::Stack":
             child_name = self._template_url_to_path(
                 template_url=resource["Properties"]["TemplateURL"],
             )
             # print(child_name)
             if child_name:
                 # for child_url in child_name:
                 children.add(child_name)
     for child in children:
         child_template_instance = None
         for descendent in self.descendents:
             if str(descendent.template_path) == str(child):
                 child_template_instance = descendent
         if not child_template_instance:
             try:
                 child_template_instance = Template(
                     child,
                     self.project_root,
                     self._get_relative_url(child),
                     self._s3_key_prefix,
                     tcat_template_cache,
                 )
             except Exception:  # pylint: disable=broad-except
                 LOG.debug("Traceback:", exc_info=True)
                 LOG.error(f"Failed to add child template {child}")
         if isinstance(child_template_instance, Template):
             self.children.append(child_template_instance)
Example #24
    def run(
        input_file: str = "./.taskcat.yml",
        project_root: str = "./",
        no_delete: bool = False,
        lint_disable: bool = False,
        enable_sig_v2: bool = False,
        keep_failed: bool = False,
    ):
        """tests whether CloudFormation templates are able to successfully launch

        :param input_file: path to either a taskcat project config file or a
        CloudFormation template
        :param project_root: root path of the project relative to input_file
        :param no_delete: don't delete stacks after test is complete
        :param lint_disable: disable cfn-lint checks
        :param enable_sig_v2: enable legacy sigv2 requests for auto-created buckets
        :param keep_failed: do not delete failed stacks
        """
        project_root_path: Path = Path(project_root).expanduser().resolve()
        input_file_path: Path = project_root_path / input_file
        config = Config.create(
            project_root=project_root_path,
            project_config_path=input_file_path
            # TODO: detect if input file is taskcat config or CloudFormation template
        )

        if enable_sig_v2:
            config = Config.create(
                project_root=project_root_path,
                project_config_path=input_file_path,
                args={"project": {
                    "s3_enable_sig_v2": enable_sig_v2
                }},
            )

        boto3_cache = Boto3Cache()
        templates = config.get_templates(project_root_path)
        # 1. lint
        if not lint_disable:
            lint = TaskCatLint(config, templates)
            errors = lint.lints[1]
            lint.output_results()
            if errors or not lint.passed:
                raise TaskCatException("Lint failed with errors")
        # 2. build lambdas
        LambdaBuild(config, project_root_path)
        # 3. s3 sync
        buckets = config.get_buckets(boto3_cache)
        stage_in_s3(buckets, config.config.project.name, project_root_path)
        # 4. launch stacks
        regions = config.get_regions(boto3_cache)
        parameters = config.get_rendered_parameters(buckets, regions,
                                                    templates)
        tests = config.get_tests(project_root_path, templates, regions,
                                 buckets, parameters)
        test_definition = Stacker(
            config.config.project.name,
            tests,
            shorten_stack_name=config.config.project.shorten_stack_name,
        )
        test_definition.create_stacks()
        terminal_printer = TerminalPrinter()
        # 5. wait for completion
        terminal_printer.report_test_progress(stacker=test_definition)
        status = test_definition.status()
        # 6. create report
        report_path = Path("./taskcat_outputs/").resolve()
        report_path.mkdir(exist_ok=True)
        cfn_logs = _CfnLogTools()
        cfn_logs.createcfnlogs(test_definition, report_path)
        ReportBuilder(test_definition,
                      report_path / "index.html").generate_report()
        # 7. delete stacks
        if no_delete:
            LOG.info("Skipping delete due to cli argument")
        elif keep_failed:
            if len(status["COMPLETE"]) > 0:
                LOG.info("deleting successful stacks")
                test_definition.delete_stacks({"status": "CREATE_COMPLETE"})
                terminal_printer.report_test_progress(stacker=test_definition)
        else:
            test_definition.delete_stacks()
            terminal_printer.report_test_progress(stacker=test_definition)
        # TODO: summarise stack statuses (did they complete/delete ok) and print any
        #  error events
        # 8. delete buckets
        if not no_delete or (keep_failed is True
                             and len(status["FAILED"]) == 0):
            deleted: ListType[str] = []
            for test in buckets.values():
                for bucket in test.values():
                    if bucket.name not in deleted:
                        bucket.delete(delete_objects=True)
                        deleted.append(bucket.name)
        # 9. raise if something failed
        if len(status["FAILED"]) > 0:
            raise TaskCatException(
                f'One or more stacks failed tests: {status["FAILED"]}')
Example #25
 def __init__(  # noqa: C901
     self,
     package: str,
     aws_profile: str = "default",
     region="default",
     parameters="",
     name="",
     wait=False,
 ):
     """
     :param package: name of the package to install; can be a path to a local package,
     a github org/repo, or an AWS Quick Start name
     :param aws_profile: aws profile to use for installation
     :param region: region to install into, default will use aws cli configured
     default
     :param parameters: parameters to pass to the stack, in the format
     Key=Value,AnotherKey=AnotherValue or providing a path to a json or yaml file
     containing the parameters
     :param name: stack name to use, if not specified one will be automatically
     generated
     :param wait: if enabled, taskcat will wait for stack to complete before exiting
     """
     LOG.warning("deploy is in alpha feature, use with caution")
     boto3_cache = Boto3Cache()
     if not name:
         name = generate_name()
     if region == "default":
         region = boto3_cache.get_default_region(profile_name=aws_profile)
     path = Path(package).resolve()
     if Path(package).resolve().is_dir():
         package_type = "local"
     elif "/" in package:
         package_type = "github"
     else:  # assuming it's an AWS Quick Start
         package_type = "github"
         package = f"aws-quickstart/quickstart-{package}"
     if package_type == "github":
         if package.startswith("https://") or package.startswith("git@"):
             url = package
             org, repo = (package.replace(".git",
                                          "").replace(":",
                                                      "/").split("/")[-2:])
         else:
             org, repo = package.split("/")
             url = f"https://github.com/{org}/{repo}.git"
         path = Deploy.PKG_CACHE_PATH / org / repo
         LOG.info(f"fetching git repo {url}")
         self._git_clone(url, path)
         self._recurse_submodules(path, url)
     config = Config.create(
         args={"project": {
             "regions": [region]
         }},
         project_config_path=(path / ".taskcat.yml"),
         project_root=path,
     )
     # only use one region
     for test_name in config.config.tests:
         config.config.tests[
             test_name].regions = config.config.project.regions
     # if there's no test called default, take the 1st in the list
     if "default" not in config.config.tests:
         config.config.tests["default"] = config.config.tests[list(
             config.config.tests.keys())[0]]
     # until install offers a way to run different "plans" we only need one test
     for test_name in list(config.config.tests.keys()):
         if test_name != "default":
             del config.config.tests[test_name]
     buckets = config.get_buckets(boto3_cache)
     stage_in_s3(buckets, config.config.project.name, path)
     regions = config.get_regions(boto3_cache)
     templates = config.get_templates(project_root=path)
     parameters = config.get_rendered_parameters(buckets, regions,
                                                 templates)
     tests = config.get_tests(path, templates, regions, buckets, parameters)
     tags = [Tag({"Key": "taskcat-installer", "Value": name})]
     stacks = Stacker(config.config.project.name, tests, tags=tags)
     stacks.create_stacks()
     LOG.error(
         f" {stacks.uid.hex}",
         extra={"nametag": "\x1b[0;30;47m[INSTALL_ID  ]\x1b[0m"},
     )
     LOG.error(f" {name}",
               extra={"nametag": "\x1b[0;30;47m[INSTALL_NAME]\x1b[0m"})
     if wait:
         LOG.info(
             f"waiting for stack {stacks.stacks[0].name} to complete in "
             f"{stacks.stacks[0].region_name}")
         while stacks.status()["IN_PROGRESS"]:
             sleep(5)
     if stacks.status()["FAILED"]:
         LOG.error("Install failed:")
         for error in stacks.stacks[0].error_events():
             LOG.error(f"{error.logical_id}: {error.status_reason}")
         raise TaskCatException("Stack creation failed")
Example #26
    def run(  # noqa: C901
        test_names: str = "ALL",
        regions: str = "ALL",
        input_file: str = "./.taskcat.yml",
        project_root: str = "./",
        no_delete: bool = False,
        lint_disable: bool = False,
        enable_sig_v2: bool = False,
        keep_failed: bool = False,
        output_directory: str = "./taskcat_outputs",
        minimal_output: bool = False,
        dont_wait_for_delete: bool = False,
    ):
        """tests whether CloudFormation templates are able to successfully launch

        :param test_names: comma separated list of tests to run
        :param regions: comma separated list of regions to test in
        :param input_file: path to either a taskcat project config file or a
        CloudFormation template
        :param project_root: root path of the project relative to input_file
        :param no_delete: don't delete stacks after test is complete
        :param lint_disable: disable cfn-lint checks
        :param enable_sig_v2: enable legacy sigv2 requests for auto-created buckets
        :param keep_failed: do not delete failed stacks
        :param output_directory: Where to store generated logfiles
        :param minimal_output: Reduces output during test runs
        :param dont_wait_for_delete: Exits immediately after calling stack_delete
        """
        project_root_path: Path = Path(project_root).expanduser().resolve()
        input_file_path: Path = project_root_path / input_file
        # pylint: disable=too-many-arguments
        args = _build_args(enable_sig_v2, regions, GLOBAL_ARGS.profile)
        config = Config.create(
            project_root=project_root_path,
            project_config_path=input_file_path,
            args=args
            # TODO: detect if input file is taskcat config or CloudFormation template
        )
        _trim_regions(regions, config)
        _trim_tests(test_names, config)
        boto3_cache = Boto3Cache()
        templates = config.get_templates()
        # 1. lint
        if not lint_disable:
            lint = TaskCatLint(config, templates)
            errors = lint.lints[1]
            lint.output_results()
            if errors or not lint.passed:
                raise TaskCatException("Lint failed with errors")
        # 2. build lambdas
        if config.config.project.package_lambda:
            LambdaBuild(config, project_root_path)
        # 3. s3 sync
        buckets = config.get_buckets(boto3_cache)
        stage_in_s3(buckets, config.config.project.name, config.project_root)
        # 4. launch stacks
        regions = config.get_regions(boto3_cache)
        parameters = config.get_rendered_parameters(buckets, regions,
                                                    templates)
        tests = config.get_tests(templates, regions, buckets, parameters)
        test_definition = Stacker(
            config.config.project.name,
            tests,
            shorten_stack_name=config.config.project.shorten_stack_name,
        )
        test_definition.create_stacks()
        terminal_printer = TerminalPrinter(minimalist=minimal_output)
        # 5. wait for completion
        terminal_printer.report_test_progress(stacker=test_definition)
        status = test_definition.status()
        # 6. create report
        report_path = Path(output_directory).resolve()
        report_path.mkdir(exist_ok=True)
        cfn_logs = _CfnLogTools()
        cfn_logs.createcfnlogs(test_definition, report_path)
        ReportBuilder(test_definition,
                      report_path / "index.html").generate_report()
        # 7. delete stacks
        if no_delete:
            LOG.info("Skipping delete due to cli argument")
        elif keep_failed:
            if len(status["COMPLETE"]) > 0:
                LOG.info("deleting successful stacks")
                test_definition.delete_stacks({"status": "CREATE_COMPLETE"})
                if not dont_wait_for_delete:
                    terminal_printer.report_test_progress(
                        stacker=test_definition)
        else:
            test_definition.delete_stacks()
            if not dont_wait_for_delete:
                terminal_printer.report_test_progress(stacker=test_definition)
        # TODO: summarise stack statuses (did they complete/delete ok) and print any
        #  error events
        # 8. delete buckets

        if not no_delete or (keep_failed is True
                             and len(status["FAILED"]) == 0):
            deleted: ListType[str] = []
            for test in buckets.values():
                for bucket in test.values():
                    if (bucket.name
                            not in deleted) and not bucket.regional_buckets:
                        bucket.delete(delete_objects=True)
                        deleted.append(bucket.name)
        # 9. raise if something failed
        if len(status["FAILED"]) > 0:
            raise TaskCatException(
                f'One or more stacks failed tests: {status["FAILED"]}')
Example #27
    def __init__(  # noqa: C901
        self,
        args: Optional[dict] = None,
        global_config_path: str = "~/.taskcat.yml",
        project_config_path: Optional[Union[Path, str]] = None,
        project_root: str = "./",
        override_file: str = None,  # pylint: disable=unused-argument
        all_env_vars: Optional[List[dict]] = None,
        create_clients: bool = True,
    ):  # #pylint: disable=too-many-arguments
        # #pylint: disable=too-many-statements
        # inputs
        if absolute_path(
                project_config_path) and not Path(project_root).is_absolute():
            project_root = absolute_path(
                project_config_path).parent / project_root
        self.project_root: Union[Path, str] = absolute_path(project_root)
        if not self.project_root:
            raise TaskCatException(
                f"project_root {project_root} is not a valid path")
        self.args: dict = args if args else {}
        self.global_config_path: Optional[Path] = absolute_path(
            global_config_path)
        self._client_factory_instance = ClientFactory()
        # Used only in initial client configuration, then set to None

        # general config
        self.profile_name: str = ""
        self.aws_access_key: str = ""
        self.aws_secret_key: str = ""
        self.no_cleanup: bool = False
        self.no_cleanup_failed: bool = False
        self.verbosity: str = "DEBUG"
        self.tags: dict = {}
        self.stack_prefix: str = ""
        self.lint: bool = False
        self.upload_only: bool = False
        self.lambda_build_only: bool = False
        self.exclude: str = ""
        self.enable_sig_v2: bool = False
        self.auth: Dict[str, dict] = {}

        # project config
        self.name: str = ""
        self.owner: str = ""
        self.package_lambda: bool = True
        self.s3bucket: S3BucketConfig = S3BucketConfig()
        self.tests: Dict[str, Test] = {}
        self.regions: Set[str] = set()
        self.env_vars: Dict[str, str] = {}
        self.project_config_path: Optional[Path] = None
        self.template_path: Optional[Path] = None
        self.lambda_source_path: Path = (Path(self.project_root) /
                                         "functions/source/").resolve()
        self.lambda_zip_path: Path = (Path(self.project_root) /
                                      "functions/packages/").resolve()
        self.build_submodules = True
        self._harvest_env_vars(
            all_env_vars if all_env_vars else os.environ.items())
        self._process_global_config()

        if not self._absolute_path(project_config_path):
            for path in Config.DEFAULT_PROJECT_PATHS:
                try:
                    project_config_path = self._absolute_path(path)
                    LOG.debug("found project config in default location %s",
                              path)
                    break
                except TaskCatException:
                    LOG.debug("didn't find project config in %s", path)
        if not self._absolute_path(project_config_path):
            raise TaskCatException(
                f"failed to load project config file {project_config_path}. file "
                f"does not exist")

        if self._is_template(self._absolute_path(project_config_path)):
            self.template_path = self._absolute_path(project_config_path)
            self._process_template_config()
        else:
            self.project_config_path = self._absolute_path(project_config_path)
            self._process_project_config()

        self._process_env_vars()
        self._process_args()
        if not self.template_path and not self.tests:
            raise TaskCatException(
                "minimal config requires at least one test or a "
                "template_path to be defined")

        # Add test/region specific credential sets to ClientFactory instance.
        self._add_granular_credsets_to_cf()

        # Used where full Regional/S3Bucket properties are needed.
        # - ie: 'test' subcommand, etc.
        if create_clients:
            # Assign regional and test-specific client-factories
            self._enable_regional_creds()

            # Assign account-based buckets.
            self._assign_account_buckets()

            # generate regional-based parameters
            self._generate_regional_parameters()

        # add `template` attribute to Test objects
        for _, test_obj in self.tests.items():
            test_obj.template = None  # type: ignore

        # build and attach template objects
        self._get_templates()
Example #28
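 # Map a stack's status to one of the COMPLETE, IN_PROGRESS or FAILED groups defined on StackStatus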
 def _status(stack: Stack):
     for status_group in ["COMPLETE", "IN_PROGRESS", "FAILED"]:
         if stack.status in getattr(StackStatus, status_group):
             return stack.id, status_group, stack.status_reason
     raise TaskCatException(f"Invalid stack {stack}")
Example #29
    def upload_only(self):
        """
        This function uploads all assets to the target S3 bucket name using the target S3 key prefix for each object.
          A comparison of checksums is done for all objects as well to avoid reuploading files that have not changed (this
          checksum comparison is only effective on non-multipart uploads).
        """
        if self._target_key_prefix is None:
            raise TaskCatException('target_key_prefix cannot be None')
        # TODO: FIGURE OUT BOTO SESSION HANDLING DETAILS CURRENTLY USING ClientFactory's get_session from utils.py
        '''
        # Use a profile
        if args.profile:
            boto_session = boto3.Session(profile_name=args.profile)
            s3_resource = boto_session.resource('s3')
        # Use explicit credentials
        elif args.access_key_id and args.secret_access_key:
            boto_session = boto3.Session(aws_access_key_id=args.access_key_id,
                                         aws_secret_access_key=args.secret_access_key)
            s3_resource = boto_session.resource('s3')
        # Attempt to use IAM role from instance profile
        else:
            boto_session = boto3.Session()
            s3_resource = boto_session.resource('s3')
        '''
        boto_session = self._boto_clients.get_session(
            credential_set='alchemist',
            region=self.get_default_region()
        )
        s3_resource = boto_session.resource('s3')
        upload_bucket = s3_resource.Bucket(self._target_bucket_name)

        self.logger.info("Gathering remote S3 bucket keys {}*".format(self._target_key_prefix))
        remote_key_dict = {}
        for obj in upload_bucket.objects.filter(Prefix='{}'.format(self._target_key_prefix)):
            if any(x not in obj.key for x in self._get_excluded_key_prefixes()):
                remote_key_dict[obj.key] = obj
        self.logger.debug(remote_key_dict.keys())

        # Gather file list
        # NOTE: We only use the output directory if it's been set (that is, a rewrite was expected to have happened to
        #       an output directory. We ensure that is not the case when parsing the args, but care must be taken
        #       when initializing all the properties of this class. If it's only an upload that's meant to happen
        #       without a previous rewrite, then output directory should never be set.
        self.logger.info("Gathering local keys {}*".format(self._target_key_prefix))
        if self._file_list:
            file_list = self._file_list
        else:
            file_list = self._get_file_list(self._input_path)

        local_key_dict = {}
        for current_file in file_list:
            local_key_dict[os.path.join(self._target_key_prefix, current_file.replace(self._input_path, '', 1).lstrip('\/')).replace('\\', '/')] = \
                os.path.join(self._output_directory if self._output_directory and not self._dry_run else self._input_path, current_file.replace(self._input_path, '', 1).lstrip('\/'))
        self.logger.debug(local_key_dict.keys())

        remote_to_local_diff = list(set(remote_key_dict.keys()) - set(local_key_dict.keys()))
        self.logger.info("Keys in remote S3 bucket but not in local:")
        self.logger.info(remote_to_local_diff)

        local_to_remote_diff = list(set(local_key_dict.keys()) - set(remote_key_dict.keys()))
        self.logger.info("Keys in local but not in remote S3 bucket:")
        self.logger.info(local_to_remote_diff)

        self.logger.info("Syncing objects to S3 bucket [{}]".format(self._target_bucket_name))
        for _key in local_key_dict.keys():
            if _key in remote_key_dict:
                self.logger.debug("File [{0}] exists in S3 bucket [{1}]. Verifying MD5 checksum for difference.".format(_key, self._target_bucket_name))
                s3_hash = remote_key_dict[_key].e_tag.strip('"')
                local_hash = hashlib.md5(open(local_key_dict[_key], 'rb').read()).hexdigest()
                self.logger.debug("S3 MD5 checksum (etag) [{0}]=>[{1}]".format(s3_hash, remote_key_dict[_key]))
                self.logger.debug("Local MD5 checksum     [{0}]=>[{1}]".format(local_hash, local_key_dict[_key]))
                if s3_hash != local_hash:
                    if self._dry_run:
                        self.logger.info("[WHAT IF DRY RUN]: UPDATE [{0}]".format(_key))
                    else:
                        self.logger.info("UPDATE [{0}]".format(_key))
                        s3_resource.Object(self._target_bucket_name, _key).upload_file(local_key_dict[_key])
                else:
                    self.logger.debug("MD5 checksums are the same. Skipping [{}]".format(_key))
            else:
                if self._dry_run:
                    self.logger.info("[WHAT IF DRY RUN]: CREATE [{0}]".format(_key))
                else:
                    self.logger.info("CREATE [{0}]".format(_key))
                    # Upload local file not present in S3 bucket
                    s3_resource.Object(self._target_bucket_name, _key).upload_file(local_key_dict[_key])

        # clean up/remove remote keys that are not in local keys
        for _key in remote_to_local_diff:
            if not any(x in _key for x in self._get_excluded_key_prefixes()):
                if self._dry_run:
                    self.logger.info("[WHAT IF DRY RUN]: DELETE [{0}]".format(_key))
                else:
                    self.logger.info("DELETE [{0}]".format(_key))
                    remote_key_dict[_key].delete()
Example #30
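# Resolve the S3 endpoint domain for a region via its partition; unknown regions raise TaskCatException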
def get_s3_domain(region):
    try:
        return S3_PARTITION_MAP[REGIONS[region]]
    except KeyError:
        raise TaskCatException(
            f"cannot find the S3 hostname for region {region}")