Example #1
    def collect_skip_comments(resource):
        skipped_checks = []
        bc_id_mapping = bc_integration.get_id_mapping()
        ckv_to_bc_id_mapping = bc_integration.get_ckv_to_bc_id_mapping()
        if "metadata" in resource:
            if "checkov" in resource["metadata"]:
                for index, item in enumerate(force_list(resource["metadata"]["checkov"])):
                    skip_search = re.search(COMMENT_REGEX, str(item))
                    if skip_search:
                        skipped_check = {
                            "id": skip_search.group(1),
                            "suppress_comment": skip_search.group(2)[1:] if skip_search.group(2) else "No comment provided",
                        }
                        # No matter which ID was used to skip, save the pair of IDs in the appropriate fields
                        if bc_id_mapping and skipped_check["id"] in bc_id_mapping:
                            skipped_check["bc_id"] = skipped_check["id"]
                            skipped_check["id"] = bc_id_mapping[skipped_check["id"]]
                        elif ckv_to_bc_id_mapping:
                            skipped_check["bc_id"] = ckv_to_bc_id_mapping.get(skipped_check["id"])

                        skipped_checks.append(skipped_check)

        return skipped_checks
Example #2
    def collect_skip_comments(entity_code_lines: List[Tuple[int, str]]) -> List[_SkippedCheck]:
        skipped_checks = []
        bc_id_mapping = bc_integration.get_id_mapping()
        ckv_to_bc_id_mapping = bc_integration.get_ckv_to_bc_id_mapping()
        for line in entity_code_lines:
            skip_search = re.search(COMMENT_REGEX, str(line))
            if skip_search:
                skipped_check: _SkippedCheck = {
                    "id": skip_search.group(2),
                    "suppress_comment": skip_search.group(3)[1:] if skip_search.group(3) else "No comment provided",
                }
                # No matter which ID was used to skip, save the pair of IDs in the appropriate fields
                if bc_id_mapping and skipped_check["id"] in bc_id_mapping:
                    skipped_check["bc_id"] = skipped_check["id"]
                    skipped_check["id"] = bc_id_mapping[skipped_check["id"]]
                elif ckv_to_bc_id_mapping:
                    skipped_check["bc_id"] = ckv_to_bc_id_mapping.get(
                        skipped_check["id"])

                skipped_checks.append(skipped_check)
        return skipped_checks
Example #3
    def collect_skip_comments(
            entity_code_lines: List[Tuple[int, str]],
            resource_config: Optional[DictNode] = None) -> List[_SkippedCheck]:
        skipped_checks = []
        bc_id_mapping = bc_integration.get_id_mapping()
        ckv_to_bc_id_mapping = bc_integration.get_ckv_to_bc_id_mapping()
        for line in entity_code_lines:
            skip_search = re.search(COMMENT_REGEX, str(line))
            if skip_search:
                skipped_check: _SkippedCheck = {
                    "id": skip_search.group(2),
                    "suppress_comment": skip_search.group(3)[1:] if skip_search.group(3) else "No comment provided",
                }
                # No matter which ID was used to skip, save the pair of IDs in the appropriate fields
                if bc_id_mapping and skipped_check["id"] in bc_id_mapping:
                    skipped_check["bc_id"] = skipped_check["id"]
                    skipped_check["id"] = bc_id_mapping[skipped_check["id"]]
                elif ckv_to_bc_id_mapping:
                    skipped_check["bc_id"] = ckv_to_bc_id_mapping.get(
                        skipped_check["id"])

                skipped_checks.append(skipped_check)
        if resource_config:
            metadata = resource_config.get("Metadata")
            if metadata:
                ckv_skip = metadata.get("checkov", {}).get("skip", [])
                bc_skip = metadata.get("bridgecrew", {}).get("skip", [])
                if ckv_skip or bc_skip:
                    for skip in itertools.chain(ckv_skip, bc_skip):
                        skip_id = skip.get("id")
                        skip_comment = skip.get("comment",
                                                "No comment provided")
                        if skip_id is None:
                            logging.warning("Check suppression is missing key 'id'")
                            continue

                        skipped_check = {
                            "id": skip_id,
                            "suppress_comment": skip_comment
                        }
                        if bc_id_mapping and skipped_check["id"] in bc_id_mapping:
                            skipped_check["bc_id"] = skipped_check["id"]
                            skipped_check["id"] = bc_id_mapping[skipped_check["id"]]
                        elif ckv_to_bc_id_mapping:
                            skipped_check["bc_id"] = ckv_to_bc_id_mapping.get(skipped_check["id"])

                        skipped_checks.append(skipped_check)

        return skipped_checks
Example #4
    def _collect_skip_comments(self, definition_blocks: List[Dict[str, Any]]) -> Dict[str, Any]:
        """
        Collects checkov skip comments for all definition blocks
        :param definition_blocks: parsed definition blocks
        :return: context enriched with skipped checks per skipped entity
        """
        bc_id_mapping = bc_integration.get_id_mapping()
        ckv_to_bc_id_mapping = bc_integration.get_ckv_to_bc_id_mapping()
        parsed_file_lines = self.filtered_lines
        optional_comment_lines = [
            line for line in parsed_file_lines
            if self.is_optional_comment_line(line[1])
        ]
        # Map every skip comment line to (line number, parsed skip entry)
        comments = [
            (
                line_num,
                {
                    "id": match.group(2),
                    "suppress_comment": match.group(3)[1:] if match.group(3) else "No comment provided",
                },
            )
            for line_num, x in optional_comment_lines
            for match in [re.search(COMMENT_REGEX, x)]
            if match
        ]
        for entity_block in definition_blocks:
            skipped_checks = []
            entity_context_path = self.get_entity_context_path(entity_block)
            entity_context = self.context
            found = True
            for k in entity_context_path:
                if k in entity_context:
                    entity_context = entity_context[k]
                else:
                    logging.warning(f'Failed to find context for {".".join(entity_context_path)}')
                    found = False
                    break
            if not found:
                continue
            for skip_check_line_num, skip_check in comments:
                if entity_context["start_line"] < skip_check_line_num < entity_context["end_line"]:
                    # No matter which ID was used to skip, save the pair of IDs in the appropriate fields
                    if bc_id_mapping and skip_check["id"] in bc_id_mapping:
                        skip_check["bc_id"] = skip_check["id"]
                        skip_check["id"] = bc_id_mapping[skip_check["id"]]
                    elif ckv_to_bc_id_mapping:
                        skip_check["bc_id"] = ckv_to_bc_id_mapping.get(skip_check["id"])
                    skipped_checks.append(skip_check)
            dpath.new(self.context, entity_context_path + ["skipped_checks"], skipped_checks)
        return self.context
Example #5
def get_skipped_checks(entity_conf):
    skipped = []
    metadata = {}
    bc_id_mapping = bc_integration.get_id_mapping()
    ckv_to_bc_id_mapping = bc_integration.get_ckv_to_bc_id_mapping()
    if not isinstance(entity_conf, dict):
        return skipped
    if entity_conf["kind"] == "containers" or entity_conf[
            "kind"] == "initContainers":
        metadata = entity_conf["parent_metadata"]
    else:
        if "metadata" in entity_conf.keys():
            metadata = entity_conf["metadata"]
    if "annotations" in metadata.keys(
    ) and metadata["annotations"] is not None:
        if isinstance(metadata["annotations"], dict):
            metadata["annotations"] = force_list(metadata["annotations"])
        for annotation in metadata["annotations"]:
            if not isinstance(annotation, dict):
                logging.debug(
                    f"Parse of Annotation Failed for {annotation}: {entity_conf}"
                )
                continue
            for key in annotation:
                skipped_item = {}
                if "checkov.io/skip" in key or "bridgecrew.io/skip" in key:
                    if "CKV_K8S" in annotation[key] or "BC_K8S" in annotation[
                            key]:
                        if "=" in annotation[key]:
                            (skipped_item["id"],
                             skipped_item["suppress_comment"]
                             ) = annotation[key].split("=")
                        else:
                            skipped_item["id"] = annotation[key]
                            skipped_item["suppress_comment"] = "No comment provided"

                        # No matter which ID was used to skip, save the pair of IDs in the appropriate fields
                        if bc_id_mapping and skipped_item["id"] in bc_id_mapping:
                            skipped_item["bc_id"] = skipped_item["id"]
                            skipped_item["id"] = bc_id_mapping[skipped_item["id"]]
                        elif ckv_to_bc_id_mapping:
                            skipped_item["bc_id"] = ckv_to_bc_id_mapping.get(skipped_item["id"])
                        skipped.append(skipped_item)
                    else:
                        logging.debug(
                            f"Parse of Annotation Failed for {annotation[key]}: {entity_conf}"
                        )
                        continue
    return skipped
Example #6
def collect_skipped_checks(parse_result):
    skipped_checks = []
    bc_id_mapping = bc_integration.get_id_mapping()
    ckv_to_bc_id_mapping = bc_integration.get_ckv_to_bc_id_mapping()
    if COMMENT_INSTRUCTION in parse_result:
        for comment in parse_result[COMMENT_INSTRUCTION]:
            skip_search = re.search(COMMENT_REGEX, comment["value"])
            if skip_search:
                skipped_check = {
                    'id': skip_search.group(2),
                    'suppress_comment': skip_search.group(3)[1:] if skip_search.group(3) else "No comment provided"
                }
                # No matter which ID was used to skip, save the pair of IDs in the appropriate fields
                if bc_id_mapping and skipped_check["id"] in bc_id_mapping:
                    skipped_check["bc_id"] = skipped_check["id"]
                    skipped_check["id"] = bc_id_mapping[skipped_check["id"]]
                elif ckv_to_bc_id_mapping:
                    skipped_check["bc_id"] = ckv_to_bc_id_mapping.get(skipped_check["id"])
                skipped_checks.append(skipped_check)
    return skipped_checks
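
Example #1 through Example #6 all share the same parsing step: COMMENT_REGEX is matched against a line or metadata item, one capture group yields the check ID and another the optional ":comment" suffix (Examples #2-#6 read groups 2 and 3; Example #1 reads groups 1 and 2). The snippet below is a minimal, self-contained sketch of just that step; the regex here is an illustrative stand-in chosen to match that group layout, not the COMMENT_REGEX shipped with checkov.

import re

# Illustrative stand-in pattern (assumption, not checkov's real COMMENT_REGEX):
# group(2) captures the check ID, group(3) the optional ":comment" suffix.
COMMENT_REGEX = re.compile(r"(checkov:skip=|bridgecrew:skip=) *([A-Za-z_\d]+)(:[^\n]*)?")

line = "# checkov:skip=CKV_AWS_20:Public bucket is intentional"
match = re.search(COMMENT_REGEX, line)
if match:
    skipped_check = {
        "id": match.group(2),
        "suppress_comment": match.group(3)[1:] if match.group(3) else "No comment provided",
    }
    print(skipped_check)
    # {'id': 'CKV_AWS_20', 'suppress_comment': 'Public bucket is intentional'}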
Example #7
def run(banner=checkov_banner, argv=sys.argv[1:]):
    default_config_paths = get_default_config_paths(sys.argv[1:])
    parser = ExtArgumentParser(
        description='Infrastructure as code static analysis',
        default_config_files=default_config_paths,
        config_file_parser_class=configargparse.YAMLConfigFileParser,
        add_env_var_help=True)
    add_parser_args(parser)
    argcomplete.autocomplete(parser)
    config = parser.parse_args(argv)

    if config.add_check:
        resp = prompt.Prompt()
        check = prompt.Check(resp.responses)
        check.action()
        return

    # Check if --output value is None. If so, replace with ['cli'] for default cli output.
    if config.output is None:
        config.output = ['cli']

    logger.debug(f'Checkov version: {version}')
    logger.debug(f'Python executable: {sys.executable}')
    logger.debug(f'Python version: {sys.version}')
    logger.debug(f'Checkov executable (argv[0]): {sys.argv[0]}')
    logger.debug(parser.format_values(sanitize=True))

    # bridgecrew uses both the urllib3 and requests libraries, while checkov uses the requests library.
    # Allow the user to specify a CA bundle to be used by both libraries.
    bc_integration.setup_http_manager(config.ca_certificate)

    # if a repo is passed in it'll save it.  Otherwise a default will be created based on the file or dir
    config.repo_id = bc_integration.persist_repo_id(config)
    # if a bc_api_key is passed it'll save it.  Otherwise it will check ~/.bridgecrew/credentials
    config.bc_api_key = bc_integration.persist_bc_api_key(config)

    excluded_paths = config.skip_path or []

    if config.var_file:
        config.var_file = [os.path.abspath(f) for f in config.var_file]

    runner_filter = RunnerFilter(
        framework=config.framework,
        skip_framework=config.skip_framework,
        checks=config.check,
        skip_checks=config.skip_check,
        download_external_modules=convert_str_to_bool(
            config.download_external_modules),
        external_modules_download_path=config.external_modules_download_path,
        evaluate_variables=convert_str_to_bool(config.evaluate_variables),
        runners=checkov_runners,
        excluded_paths=excluded_paths,
        all_external=config.run_all_external_checks,
        var_files=config.var_file)
    if outer_registry:
        runner_registry = outer_registry
        runner_registry.runner_filter = runner_filter
    else:
        runner_registry = RunnerRegistry(banner, runner_filter,
                                         *DEFAULT_RUNNERS)

    runnerDependencyHandler = RunnerDependencyHandler(runner_registry)
    runnerDependencyHandler.validate_runner_deps()

    if config.show_config:
        print(parser.format_values())
        return

    if config.bc_api_key == '':
        parser.error(
            'The --bc-api-key flag was specified but the value was blank. If this value was passed as a '
            'secret, you may need to double check the mapping.')
    elif config.bc_api_key:
        logger.debug(f'Using API key ending with {config.bc_api_key[-8:]}')

        if config.repo_id is None and not config.list:
            # if you are only listing policies, then the API key will be used to fetch policies, but that's it,
            # so the repo is not required
            parser.error(
                "--repo-id argument is required when using --bc-api-key")
        elif config.repo_id:
            repo_id_sections = config.repo_id.split('/')
            if len(repo_id_sections) < 2 or any(
                    len(section) == 0 for section in repo_id_sections):
                parser.error(
                    "--repo-id argument format should be 'organization/repository_name' E.g "
                    "bridgecrewio/checkov")

        source_env_val = os.getenv('BC_SOURCE', 'cli')
        source = get_source_type(source_env_val)
        if source == SourceTypes[BCSourceType.DISABLED]:
            logger.warning(
                f'Received unexpected value for BC_SOURCE: {source_env_val}; Should be one of {{{",".join(SourceTypes.keys())}}} setting source to DISABLED'
            )
        source_version = os.getenv('BC_SOURCE_VERSION', version)
        logger.debug(f'BC_SOURCE = {source.name}, version = {source_version}')

        if config.list:
            # This speeds up execution by not setting up upload credentials (since we won't upload anything anyways)
            logger.debug('Using --list; setting source to DISABLED')
            source = SourceTypes[BCSourceType.DISABLED]

        try:
            bc_integration.bc_api_key = config.bc_api_key
            bc_integration.setup_bridgecrew_credentials(
                repo_id=config.repo_id,
                skip_fixes=config.skip_fixes,
                skip_suppressions=config.skip_suppressions,
                skip_policy_download=config.skip_policy_download,
                source=source,
                source_version=source_version,
                repo_branch=config.branch)
            platform_excluded_paths = bc_integration.get_excluded_paths() or []
            runner_filter.excluded_paths = runner_filter.excluded_paths + platform_excluded_paths
        except Exception:
            if bc_integration.prisma_url:
                message = 'An error occurred setting up the Bridgecrew platform integration. Please check your API ' \
                          'token and PRISMA_API_URL environment variable and try again. The PRISMA_API_URL value ' \
                          'should be similar to: `https://api0.prismacloud.io`'
            else:
                message = 'An error occurred setting up the Bridgecrew platform integration. Please check your API ' \
                          'token and try again.'
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug(message, exc_info=True)
            else:
                logger.error(message)
            return
    else:
        logger.debug('No API key found. Scanning locally only.')

    if config.check and config.skip_check:
        if any(item in runner_filter.checks
               for item in runner_filter.skip_checks):
            parser.error(
                "The check ids specified for '--check' and '--skip-check' must be mutually exclusive."
            )
            return

    integration_feature_registry.run_pre_scan()

    guidelines = {}
    BC_SKIP_MAPPING = os.getenv("BC_SKIP_MAPPING", "FALSE")
    if config.no_guide or BC_SKIP_MAPPING.upper() == "TRUE":
        bc_integration.bc_skip_mapping = True
    else:
        guidelines = bc_integration.get_guidelines()

        ckv_to_bc_mapping = bc_integration.get_ckv_to_bc_id_mapping()
        if ckv_to_bc_mapping:
            all_checks = BaseCheckRegistry.get_all_registered_checks()
            for check in all_checks:
                check.bc_id = ckv_to_bc_mapping.get(check.id)

    if config.list:
        print_checks(frameworks=config.framework,
                     use_bc_ids=config.output_bc_ids)
        return

    baseline = None
    if config.baseline:
        baseline = Baseline()
        baseline.from_json(config.baseline)

    external_checks_dir = get_external_checks_dir(config)
    url = None
    created_baseline_path = None

    if config.directory:
        exit_codes = []
        for root_folder in config.directory:
            file = config.file
            scan_reports = runner_registry.run(
                root_folder=root_folder,
                external_checks_dir=external_checks_dir,
                files=file,
                guidelines=guidelines)
            if baseline:
                baseline.compare_and_reduce_reports(scan_reports)
            if bc_integration.is_integration_configured():
                bc_integration.persist_repository(
                    root_folder, excluded_paths=runner_filter.excluded_paths)
                bc_integration.persist_scan_results(scan_reports)
                url = bc_integration.commit_repository(config.branch)

            if config.create_baseline:
                overall_baseline = Baseline()
                for report in scan_reports:
                    overall_baseline.add_findings_from_report(report)
                created_baseline_path = os.path.join(
                    os.path.abspath(root_folder), '.checkov.baseline')
                with open(created_baseline_path, 'w') as f:
                    json.dump(overall_baseline.to_dict(), f, indent=4)
            exit_codes.append(
                runner_registry.print_reports(
                    scan_reports,
                    config,
                    url=url,
                    created_baseline_path=created_baseline_path,
                    baseline=baseline))
        exit_code = 1 if 1 in exit_codes else 0
        return exit_code
    elif config.file:
        scan_reports = runner_registry.run(
            external_checks_dir=external_checks_dir,
            files=config.file,
            guidelines=guidelines,
            repo_root_for_plan_enrichment=config.repo_root_for_plan_enrichment)
        if baseline:
            baseline.compare_and_reduce_reports(scan_reports)
        if config.create_baseline:
            overall_baseline = Baseline()
            for report in scan_reports:
                overall_baseline.add_findings_from_report(report)
            created_baseline_path = os.path.join(
                os.path.abspath(os.path.commonprefix(config.file)),
                '.checkov.baseline')
            with open(created_baseline_path, 'w') as f:
                json.dump(overall_baseline.to_dict(), f, indent=4)

        if bc_integration.is_integration_configured():
            files = [os.path.abspath(file) for file in config.file]
            root_folder = os.path.split(os.path.commonprefix(files))[0]
            bc_integration.persist_repository(
                root_folder,
                files,
                excluded_paths=runner_filter.excluded_paths)
            bc_integration.persist_scan_results(scan_reports)
            url = bc_integration.commit_repository(config.branch)
        return runner_registry.print_reports(
            scan_reports,
            config,
            url=url,
            created_baseline_path=created_baseline_path,
            baseline=baseline)
    elif config.docker_image:
        if config.bc_api_key is None:
            parser.error(
                "--bc-api-key argument is required when using --docker-image")
            return
        if config.dockerfile_path is None:
            parser.error(
                "--dockerfile-path argument is required when using --docker-image"
            )
            return
        if config.branch is None:
            parser.error(
                "--branch argument is required when using --docker-image")
            return
        bc_integration.commit_repository(config.branch)
        image_scanner.scan(config.docker_image, config.dockerfile_path)
    elif not config.quiet:
        print(f"{banner}")

        bc_integration.onboarding()
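
Example #7 is the CLI driver itself. As a usage sketch, it can be invoked programmatically roughly as follows; the import path reflects where run() appears to live in the checkov versions these examples come from and may differ in other releases, and the argument list is only an illustration.

import sys

from checkov.main import run  # assumed module path; may vary by version

if __name__ == "__main__":
    # run() returns an exit code on the directory/file scan paths and None on
    # the others; sys.exit(None) is treated as a successful exit.
    sys.exit(run(argv=["--directory", "."]))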