Ejemplo n.º 1
0
    def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=None, collect_skip_comments=True):
        """Scan a Terraform root folder and/or individual .tf files and build a Report.

        :param root_folder: directory to parse recursively; may be None when `files` is given.
        :param external_checks_dir: optional list of directories with custom checks to load.
        :param files: optional list of .tf file paths to scan individually.
        :param runner_filter: RunnerFilter controlling which checks run; a fresh
            default instance is created per call.
        :param collect_skip_comments: honour inline skip comments when True.
        :return: Report with check results and parsing errors.
        """
        # Fix: the old signature used `runner_filter=RunnerFilter()`, a single
        # mutable default instance shared by every call; defaulting to None and
        # instantiating per call is behaviour-compatible and safe.
        if runner_filter is None:
            runner_filter = RunnerFilter()
        report = Report(self.check_type)
        self.tf_definitions = {}
        parsing_errors = {}
        if external_checks_dir:
            for directory in external_checks_dir:
                resource_registry.load_external_checks(directory, runner_filter)
        if root_folder:
            root_folder = os.path.abspath(root_folder)
            self.parser.parse_directory(directory=root_folder,
                                        out_definitions=self.tf_definitions,
                                        out_evaluations_context=self.evaluations_context,
                                        out_parsing_errors=parsing_errors,
                                        download_external_modules=runner_filter.download_external_modules,
                                        external_modules_download_path=runner_filter.external_modules_download_path,
                                        evaluate_variables=runner_filter.evaluate_variables)
            self.check_tf_definition(report, root_folder, runner_filter, collect_skip_comments)

        if files:
            files = [os.path.abspath(file) for file in files]
            # The common parent directory of all given files acts as the root.
            root_folder = os.path.split(os.path.commonprefix(files))[0]
            for file in files:
                if file.endswith(".tf"):
                    file_parsing_errors = {}
                    self.tf_definitions[file] = self.parser.parse_file(file=file, parsing_errors=file_parsing_errors)
                    if file_parsing_errors:
                        parsing_errors.update(file_parsing_errors)
                        continue  # do not run checks on a file that failed to parse
                    self.check_tf_definition(report, root_folder, runner_filter, collect_skip_comments)

        report.add_parsing_errors(parsing_errors.keys())

        return report
Ejemplo n.º 2
0
    def run(self, root_folder, external_checks_dir=None, files=None):
        """Parse Terraform sources (a directory and/or single files) and run checks.

        Returns a Report containing the check results plus any parsing errors.
        """
        report = Report(self.check_type)
        definitions = {}
        errors = {}

        # Register any user-supplied external checks first.
        if external_checks_dir:
            for checks_dir in external_checks_dir:
                resource_registry.load_external_checks(checks_dir)

        if root_folder:
            self.parser.hcl2(directory=root_folder,
                             tf_definitions=definitions,
                             parsing_errors=errors)
            self.check_tf_definition(report, root_folder, definitions)

        if files:
            # The common path prefix of all files is treated as the root.
            root_folder = os.path.commonprefix(files)
            for path in files:
                per_file_definitions = {}
                self.parser.parse_file(file=path,
                                       tf_definitions=per_file_definitions,
                                       parsing_errors=errors)
                self.check_tf_definition(report, root_folder,
                                         per_file_definitions)

        report.add_parsing_errors(errors.keys())

        return report
Ejemplo n.º 3
0
    def get_graph_checks_report(self, root_folder: str,
                                runner_filter: RunnerFilter) -> Report:
        """Run graph-based checks and convert each result into a Record.

        Code lines and the line range come from self.context, keyed by the
        entity's file path and id.
        """
        report = Report(self.check_type)
        graph_results = self.run_graph_checks_results(runner_filter)

        for check, results in graph_results.items():
            for result in results:
                entity = result["entity"]
                file_path = entity.get(CustomAttributes.FILE_PATH)
                abs_path = _get_entity_abs_path(root_folder, file_path)
                context = self.context[file_path][entity.get(CustomAttributes.ID)]
                line_range = [context.get("start_line"), context.get("end_line")]

                record = Record(check_id=check.id,
                                check_name=check.name,
                                check_result=result,
                                code_block=context.get("code_lines"),
                                file_path=file_path,
                                file_line_range=line_range,
                                resource=entity.get(CustomAttributes.ID),
                                evaluations={},
                                check_class=check.__class__.__module__,
                                file_abs_path=abs_path)
                record.set_guideline(check.guideline)
                report.add_record(record=record)
        return report
Ejemplo n.º 4
0
    def run(self,
            root_folder=None,
            external_checks_dir=None,
            files=None,
            runner_filter=None,
            collect_skip_comments=True):
        """Scan Terraform plan (.json) files and return a Report.

        Fix: the former `runner_filter=RunnerFilter()` default was a single
        mutable instance shared by every call; default to None and build a
        fresh filter per invocation instead.

        Note: `parsing_errors` is never populated in this runner; it is kept
        so the report still records an (empty) error set, matching prior
        behaviour.
        """
        if runner_filter is None:
            runner_filter = RunnerFilter()
        report = Report(self.check_type)
        self.tf_definitions = {}
        parsing_errors = {}
        if external_checks_dir:
            for directory in external_checks_dir:
                resource_registry.load_external_checks(directory,
                                                       runner_filter)
        if files:
            files = [os.path.abspath(file) for file in files]
            for file in files:
                # Only Terraform plan output (JSON) is handled by this runner.
                if file.endswith(".json"):
                    tf_definitions, template_lines = parse_tf_plan(file)
                    self.tf_definitions = tf_definitions
                    self.template_lines = template_lines
                    self.check_tf_definition(report, runner_filter)

        report.add_parsing_errors(parsing_errors.keys())

        return report
Ejemplo n.º 5
0
    def run(self,
            root_folder,
            external_checks_dir=None,
            files=None,
            runner_filter=None,
            collect_skip_comments=True):
        """Scan a Terraform directory and/or individual .tf files.

        :param root_folder: directory parsed recursively via the HCL2 parser.
        :param external_checks_dir: optional list of custom-check directories.
        :param files: optional explicit list of .tf files to scan.
        :param runner_filter: RunnerFilter selecting checks; defaults to a
            fresh instance per call (the old `RunnerFilter()` default was one
            mutable object shared across calls).
        :param collect_skip_comments: honour inline skip comments when True.
        :return: Report with results and parsing errors.
        """
        if runner_filter is None:
            runner_filter = RunnerFilter()
        report = Report(self.check_type)
        self.tf_definitions = {}
        parsing_errors = {}
        if external_checks_dir:
            for directory in external_checks_dir:
                resource_registry.load_external_checks(directory,
                                                       runner_filter)
        if root_folder:
            root_folder = os.path.abspath(root_folder)
            self.parser.hcl2(directory=root_folder,
                             tf_definitions=self.tf_definitions,
                             parsing_errors=parsing_errors)
            self.check_tf_definition(report, root_folder, runner_filter,
                                     collect_skip_comments)

        if files:
            files = [os.path.abspath(file) for file in files]
            # The common parent directory of all files acts as the root.
            root_folder = os.path.split(os.path.commonprefix(files))[0]
            for file in files:
                if file.endswith(".tf"):
                    self.tf_definitions[file] = self.parser.parse_file(
                        file=file, parsing_errors=parsing_errors)
                    self.check_tf_definition(report, root_folder,
                                             runner_filter,
                                             collect_skip_comments)

        report.add_parsing_errors(parsing_errors.keys())

        return report
Ejemplo n.º 6
0
    def run(self,
            root_folder,
            external_checks_dir=None,
            files=None,
            runner_filter=None,
            collect_skip_comments=True,
            helmChart=None):
        """Run Kubernetes checks over files/root_folder, including graph checks.

        Fix: `runner_filter` now defaults to None instead of a `RunnerFilter()`
        instance created once at import time and shared (mutable) across calls.
        """
        if runner_filter is None:
            runner_filter = RunnerFilter()
        report = Report(self.check_type)
        if self.context is None or self.definitions is None:
            # Nothing to scan without a folder or explicit files.
            if files or root_folder:
                self.definitions, self.definitions_raw = create_definitions(
                    root_folder, files, runner_filter)
            else:
                return report
            if external_checks_dir:
                for directory in external_checks_dir:
                    registry.load_external_checks(directory)
                    self.graph_registry.load_external_checks(directory)
            self.context = build_definitions_context(self.definitions,
                                                     self.definitions_raw)

            logging.info("creating kubernetes graph")
            local_graph = self.graph_manager.build_graph_from_definitions(
                self.definitions)
            # Register every vertex as a scanned resource before saving the graph.
            for vertex in local_graph.vertices:
                file_abs_path = _get_entity_abs_path(root_folder, vertex.path)
                report.add_resource(f'{file_abs_path}:{vertex.id}')
            self.graph_manager.save_graph(local_graph)
            self.definitions = local_graph.definitions

        report = self.check_definitions(root_folder, runner_filter, report)
        graph_report = self.get_graph_checks_report(root_folder, runner_filter)
        merge_reports(report, graph_report)

        return report
Ejemplo n.º 7
0
 def run(self,
         root_folder=None,
         external_checks_dir=None,
         files=None,
         guidelines=None,
         collect_skip_comments=True,
         repo_root_for_plan_enrichment=None):
     """Run every registered runner in sequence and collect its report.

     Each runner invocation is bracketed by the integration feature
     registry's pre/post scan hooks. Reports are optionally enriched with
     guidelines and, when a plan-enrichment repo root is given, with
     resources from the original repository before skip handling re-runs.

     :return: the accumulated list self.scan_reports.
     """
     for runner in self.runners:
         integration_feature_registry.run_pre_scan()
         scan_report = runner.run(
             root_folder,
             external_checks_dir=external_checks_dir,
             files=files,
             runner_filter=self.runner_filter,
             collect_skip_comments=collect_skip_comments)
         integration_feature_registry.run_post_scan(scan_report)
         if guidelines:
             RunnerRegistry.enrich_report_with_guidelines(
                 scan_report, guidelines)
         if repo_root_for_plan_enrichment:
             # Plan enrichment first merges resources from the source repo,
             # then re-applies skipped checks on the enriched report.
             enriched_resources = RunnerRegistry.get_enriched_resources(
                 repo_root_for_plan_enrichment)
             enriched_report = Report("terraform_plan").enrich_plan_report(
                 scan_report, enriched_resources)
             enriched_report_with_skipped = Report(
                 "terraform_plan").handle_skipped_checks(
                     enriched_report, enriched_resources)
             self.scan_reports.append(enriched_report_with_skipped)
         else:
             self.scan_reports.append(scan_report)
     return self.scan_reports
Ejemplo n.º 8
0
    def mutateKubernetesGraphResults(self,
                                     root_folder: str,
                                     runner_filter: RunnerFilter,
                                     report: Report,
                                     checks_results,
                                     reportMutatorData=None) -> Report:
        """Append a Record to `report` for every kubernetes graph check result.

        Kept separate from run() so child frameworks (Kustomize, Helm) can
        override just the report-generation step — where kubernetes CHECKS are
        still needed but file references must point at the other framework.
        """
        for check, results in checks_results.items():
            for result in results:
                entity = result["entity"]
                file_path = entity.get(CustomAttributes.FILE_PATH)
                abs_path = _get_entity_abs_path(root_folder, file_path)
                context = self.context[file_path][entity.get(CustomAttributes.ID)]

                record = Record(check_id=check.id,
                                check_name=check.name,
                                check_result=result,
                                code_block=context.get("code_lines"),
                                file_path=file_path,
                                file_line_range=[
                                    context.get("start_line"),
                                    context.get("end_line")
                                ],
                                resource=entity.get(CustomAttributes.ID),
                                evaluations={},
                                check_class=check.__class__.__module__,
                                file_abs_path=abs_path)
                record.set_guideline(check.guideline)
                report.add_record(record=record)
        return report
Ejemplo n.º 9
0
    def get_graph_checks_report(self, root_folder: str,
                                runner_filter: RunnerFilter) -> Report:
        """Build a Report from graph check results.

        File paths are reported relative to root_folder; code lines are taken
        from self.definitions_raw. For multi-line entities the closing line is
        excluded from both the reported range and the code block.
        """
        report = Report(self.check_type)
        results_by_check = self.run_graph_checks_results(runner_filter)

        for check, results in results_by_check.items():
            for result in results:
                entity = result["entity"]
                abs_path = entity.get(CustomAttributes.FILE_PATH)
                rel_path = f"/{os.path.relpath(abs_path, root_folder)}"
                start = entity['__startline__']
                end = entity['__endline__']

                # Single-line entities keep the full [start, end] span;
                # multi-line ones drop the trailing line.
                last = end if start == end else end - 1
                lines_range = [start, last]
                code_lines = self.definitions_raw[rel_path][start - 1:last]

                record = Record(check_id=check.id,
                                check_name=check.name,
                                check_result=result,
                                code_block=code_lines,
                                file_path=rel_path,
                                file_line_range=lines_range,
                                resource=entity.get(CustomAttributes.ID),
                                evaluations={},
                                check_class=check.__class__.__module__,
                                file_abs_path=abs_path)
                record.set_guideline(check.guideline)
                report.add_record(record=record)
        return report
Ejemplo n.º 10
0
    def run(self,
            root_folder,
            external_checks_dir=None,
            files=None,
            runner_filter=None,
            collect_skip_comments=True):
        """Graph-based Terraform scan over a root folder or explicit files.

        Builds (or reuses) tf_definitions and the graph, runs the plain checks
        and the graph checks, and merges both into one Report.

        Fixes: `runner_filter` previously defaulted to a shared mutable
        `RunnerFilter()` instance; it is now created per call. A no-placeholder
        f-string passed to logging.info was also removed.
        """
        if runner_filter is None:
            runner_filter = RunnerFilter()
        report = Report(self.check_type)
        parsing_errors = {}
        self.load_external_checks(external_checks_dir)

        if self.definitions_context is None or self.tf_definitions is None or self.breadcrumbs is None:
            self.tf_definitions = {}
            logging.info(
                "Scanning root folder and producing fresh tf_definitions and context"
            )
            if root_folder:
                root_folder = os.path.abspath(root_folder)

                local_graph, tf_definitions = \
                    self.graph_manager.build_graph_from_source_directory(root_folder,
                                                                         local_graph_class=self.graph_class,
                                                                         download_external_modules=runner_filter.download_external_modules,
                                                                         parsing_errors=parsing_errors, excluded_paths=runner_filter.excluded_paths)
            elif files:
                files = [os.path.abspath(file) for file in files]
                root_folder = os.path.split(os.path.commonprefix(files))[0]
                # Variable evaluation is skipped when scanning standalone files.
                self.parser.evaluate_variables = False
                for file in files:
                    if file.endswith(".tf"):
                        file_parsing_errors = {}
                        parse_result = self.parser.parse_file(
                            file=file, parsing_errors=file_parsing_errors)
                        if parse_result is not None:
                            self.tf_definitions[file] = parse_result
                        if file_parsing_errors:
                            parsing_errors.update(file_parsing_errors)
                            continue
                local_graph = self.graph_manager.build_graph_from_tf_definitions(
                    self.tf_definitions)
            else:
                raise Exception(
                    "Root directory was not specified, files were not specified"
                )

            self.graph_manager.save_graph(local_graph)
            self.tf_definitions, self.breadcrumbs = convert_graph_vertices_to_tf_definitions(
                local_graph.vertices, root_folder)
        else:
            logging.info("Scanning root folder using existing tf_definitions")

        self.check_tf_definition(report, root_folder, runner_filter,
                                 collect_skip_comments)

        report.add_parsing_errors(parsing_errors.keys())

        graph_report = self.get_graph_checks_report(root_folder, runner_filter)
        merge_reports(report, graph_report)

        return report
Ejemplo n.º 11
0
    def run(self, root_folder=None, external_checks_dir=None, files=None,
            runner_filter=None, collect_skip_comments=True) -> Report:
        """Scan JSON files with externally-defined checks and build a Report.

        Fix: `runner_filter` previously defaulted to a mutable `RunnerFilter()`
        instance shared across calls; a fresh instance is now created per
        invocation.
        """
        if runner_filter is None:
            runner_filter = RunnerFilter()
        registry = self.import_registry()

        definitions = {}
        definitions_raw = {}

        report = Report(self.check_type)

        if not files and not root_folder:
            logging.debug("No resources to scan.")
            return report

        # This runner only has checks when they are supplied externally.
        if not external_checks_dir and self.require_external_checks():
            logging.debug("The json runner requires that external checks are defined.")
            return report
        if external_checks_dir:
            for directory in external_checks_dir:
                registry.load_external_checks(directory)

        if files:
            self._load_files(files, definitions, definitions_raw)

        if root_folder:
            for root, d_names, f_names in os.walk(root_folder):
                # Respect excluded paths for both directories and files.
                filter_ignored_paths(root, d_names, runner_filter.excluded_paths)
                filter_ignored_paths(root, f_names, runner_filter.excluded_paths)
                self._load_files(
                    f_names,
                    definitions,
                    definitions_raw,
                    lambda f: os.path.join(root, f)
                )

        for json_file_path in definitions.keys():
            results = registry.scan(
                json_file_path, definitions[json_file_path], [], runner_filter
            )
            for check, result in results.items():
                result_config = result["results_configuration"]
                # Marks are 0-based; reported line ranges are 1-based.
                start = result_config.start_mark.line
                end = result_config.end_mark.line
                record = Record(
                    check_id=check.id,
                    bc_check_id=check.bc_id,
                    check_name=check.name,
                    check_result=result,
                    code_block=definitions_raw[json_file_path][start:end + 1],
                    file_path=json_file_path,
                    file_line_range=[start + 1, end + 1],
                    resource=f"{json_file_path}",
                    evaluations=None,
                    check_class=check.__class__.__module__,
                    file_abs_path=os.path.abspath(json_file_path),
                    entity_tags=None
                )
                report.add_record(record)

        return report
Ejemplo n.º 12
0
 def _handle_report(self, scan_report, guidelines, repo_root_for_plan_enrichment):
     """Post-process one runner report, then store it on self.scan_reports."""
     integration_feature_registry.run_post_runner(scan_report)
     if guidelines:
         RunnerRegistry.enrich_report_with_guidelines(scan_report, guidelines)
     if repo_root_for_plan_enrichment:
         # Enrich plan results from the original repo, then re-apply skips.
         enriched = RunnerRegistry.get_enriched_resources(repo_root_for_plan_enrichment)
         scan_report = Report("terraform_plan").enrich_plan_report(scan_report, enriched)
         scan_report = Report("terraform_plan").handle_skipped_checks(scan_report, enriched)
     self.scan_reports.append(scan_report)
Ejemplo n.º 13
0
    def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=None):
        """Scan CloudFormation templates under root_folder and/or explicit files.

        Fixes:
        - `runner_filter` no longer defaults to a shared mutable RunnerFilter().
        - `v.__contains__("Resources")` replaced with the idiomatic `in` test.
        - removed an `indent=2` kwarg silently ignored by str.format.
        """
        if runner_filter is None:
            runner_filter = RunnerFilter()
        report = Report(self.check_type)
        definitions = {}
        definitions_raw = {}
        parsing_errors = {}
        files_list = []
        if external_checks_dir:
            for directory in external_checks_dir:
                cfn_registry.load_external_checks(directory)

        if files:
            for file in files:
                (definitions[file], definitions_raw[file]) = parse(file)

        if root_folder:
            for root, d_names, f_names in os.walk(root_folder):
                filter_ignored_directories(d_names)
                for file in f_names:
                    file_ending = os.path.splitext(file)[1]
                    if file_ending in CF_POSSIBLE_ENDINGS:
                        files_list.append(os.path.join(root, file))

            for file in files_list:
                relative_file_path = f'/{os.path.relpath(file, os.path.commonprefix((root_folder, file)))}'
                (definitions[relative_file_path], definitions_raw[relative_file_path]) = parse(file)

        # Filter out empty files that have not been parsed successfully, and filter out non-CF template files
        definitions = {k: v for k, v in definitions.items() if v and "Resources" in v}
        definitions_raw = {k: v for k, v in definitions_raw.items() if k in definitions}

        for cf_file in definitions.keys():
            if isinstance(definitions[cf_file], dict_node) and 'Resources' in definitions[cf_file]:
                cf_context_parser = ContextParser(cf_file, definitions[cf_file], definitions_raw[cf_file])
                logging.debug("Template Dump for {}: {}".format(cf_file, definitions[cf_file]))
                cf_context_parser.evaluate_default_refs()
                for resource_name, resource in definitions[cf_file]['Resources'].items():
                    resource_id = cf_context_parser.extract_cf_resource_id(resource, resource_name)
                    # check that the resource can be parsed as a CF resource
                    if resource_id:
                        entity_lines_range, entity_code_lines = cf_context_parser.extract_cf_resource_code_lines(resource)
                        if entity_lines_range and entity_code_lines:
                            # TODO - Variable Eval Message!
                            variable_evaluations = {}

                            skipped_checks = ContextParser.collect_skip_comments(entity_code_lines)

                            results = cfn_registry.scan(cf_file, {resource_name: resource}, skipped_checks,
                                                        runner_filter)
                            for check, check_result in results.items():
                                record = Record(check_id=check.id, check_name=check.name, check_result=check_result,
                                                code_block=entity_code_lines, file_path=cf_file,
                                                file_line_range=entity_lines_range,
                                                resource=resource_id, evaluations=variable_evaluations,
                                                check_class=check.__class__.__module__)
                                report.add_record(record=record)
        return report
Ejemplo n.º 14
0
    def run(self,
            root_folder=None,
            external_checks_dir=None,
            files=None,
            runner_filter=None,
            collect_skip_comments=True):
        """Discover and scan Terraform plan JSON files.

        Fixes:
        - `runner_filter` no longer defaults to a shared mutable RunnerFilter().
        - discovered plan files are appended to a *copy* of `files`, so the
          caller's list argument is never mutated (the original appended
          directly into the list the caller passed in).
        """
        if runner_filter is None:
            runner_filter = RunnerFilter()
        report = Report(self.check_type)
        self.tf_definitions = {}
        parsing_errors = {}
        if external_checks_dir:
            for directory in external_checks_dir:
                resource_registry.load_external_checks(directory,
                                                       runner_filter)

        if root_folder:
            # Copy so the appends below cannot mutate the caller's list.
            files = list(files) if files else []
            for root, d_names, f_names in os.walk(root_folder):
                for file in f_names:
                    file_ending = os.path.splitext(file)[1]
                    if file_ending == '.json':
                        try:
                            with open(f'{root}/{file}') as f:
                                content = json.load(f)
                            # A plan file is identified by its terraform_version key.
                            if isinstance(
                                    content,
                                    dict) and content.get('terraform_version'):
                                files.append(os.path.join(root, file))
                        except Exception as e:
                            logging.debug(
                                f'Failed to load json file {root}/{file}, skipping'
                            )
                            logging.debug('Failure message:')
                            logging.debug(e, stack_info=True)

        if files:
            files = [os.path.abspath(file) for file in files]
            for file in files:
                if file.endswith(".json"):
                    tf_definitions, template_lines = parse_tf_plan(file)
                    if not tf_definitions:
                        continue
                    self.tf_definitions = tf_definitions
                    self.template_lines = template_lines
                    self.check_tf_definition(report, runner_filter)
                else:
                    logging.debug(
                        f'Failed to load {file} as is not a .json file, skipping'
                    )

        report.add_parsing_errors(parsing_errors.keys())

        return report
Ejemplo n.º 15
0
    def get_graph_checks_report(self, root_folder,
                                runner_filter: RunnerFilter):
        """Run all graph checks (external registries plus the built-in graph
        registry) and convert their results into Records on a Report.

        Skip comments recorded in the entity context downgrade a result to
        SKIPPED; records whose resource has variable-rendering breadcrumbs are
        wrapped in a GraphRecord.
        """
        report = Report(self.check_type)
        checks_results = {}
        # Merge results from every registry; on key collision the later
        # registry (the built-in graph_registry runs last) wins.
        for r in self.external_registries + [graph_registry]:
            r.load_checks()
            registry_results = r.run_checks(
                self.graph_manager.get_reader_traversal(), runner_filter)
            checks_results = {**checks_results, **registry_results}

        for check, check_results in checks_results.items():
            for check_result in check_results:
                entity = check_result['entity']
                entity_context, entity_evaluations = self.get_entity_context_and_evaluations(
                    entity)
                # Entities without a resolvable context are silently dropped.
                if entity_context:
                    full_file_path = entity[CustomAttributes.FILE_PATH]
                    # Deep copy so the skip/entity rewrites below do not leak
                    # into the shared check_result object.
                    copy_of_check_result = copy.deepcopy(check_result)
                    for skipped_check in entity_context.get(
                            'skipped_checks', []):
                        if skipped_check['id'] == check.id:
                            copy_of_check_result[
                                'result'] = CheckResult.SKIPPED
                            copy_of_check_result[
                                'suppress_comment'] = skipped_check[
                                    'suppress_comment']
                            break
                    copy_of_check_result['entity'] = entity.get(
                        CustomAttributes.CONFIG)
                    record = Record(
                        check_id=check.id,
                        check_name=check.name,
                        check_result=copy_of_check_result,
                        code_block=entity_context.get('code_lines'),
                        file_path=
                        f"/{os.path.relpath(full_file_path, root_folder)}",
                        file_line_range=[
                            entity_context.get('start_line'),
                            entity_context.get('end_line')
                        ],
                        resource=".".join(entity_context['definition_path']),
                        evaluations=entity_evaluations,
                        check_class=check.__class__.__module__,
                        file_abs_path=os.path.abspath(full_file_path))
                    # Attach variable-rendering breadcrumbs when available.
                    breadcrumb = self.breadcrumbs.get(record.file_path,
                                                      {}).get(record.resource)
                    if breadcrumb:
                        record = GraphRecord(record, breadcrumb)

                    report.add_record(record=record)
        return report
Ejemplo n.º 16
0
    def print_reports(self,
                      scan_reports,
                      config,
                      url=None,
                      created_baseline_path=None,
                      baseline=None):
        """Emit every scan report in the format selected by config.output and
        return the combined exit code (1 if any report signals failure, 0
        otherwise)."""
        if config.output == 'cli':
            print(f"{self.banner}\n")

        exit_codes = []
        report_jsons = []
        junit_reports = []

        for report in scan_reports:
            if not report.is_empty():
                if config.output == "json":
                    report_jsons.append(report.get_dict(is_quiet=config.quiet))
                elif config.output == "junitxml":
                    # Collect now; XML is emitted once all reports are merged.
                    junit_reports.append(report)
                elif config.output == 'github_failed_only':
                    report.print_failed_github_md()
                else:
                    report.print_console(
                        is_quiet=config.quiet,
                        is_compact=config.compact,
                        created_baseline_path=created_baseline_path,
                        baseline=baseline)
                    if url:
                        print("More details: {}".format(url))
            # Exit codes are gathered for every report, empty or not.
            exit_codes.append(
                report.get_exit_code(config.soft_fail, config.soft_fail_on,
                                     config.hard_fail_on))

        if config.output == "junitxml":
            if len(junit_reports) == 1:
                junit_reports[0].print_junit_xml()
            else:
                merged = Report(None)
                for report in junit_reports:
                    merged.skipped_checks += report.skipped_checks
                    merged.passed_checks += report.passed_checks
                    merged.failed_checks += report.failed_checks
                merged.print_junit_xml()

        if config.output == "json":
            payload = report_jsons[0] if len(report_jsons) == 1 else report_jsons
            print(json.dumps(payload, indent=4))

        return 1 if 1 in exit_codes else 0
Ejemplo n.º 17
0
 def create_report_from_graph_checks_results(self, checks_results, check):
     """Translate graph check results for a single check into a
     cloudformation Report, sorting records into passed/failed buckets."""
     report = Report("cloudformation")
     # Only the first key's result list is consumed, matching prior behaviour.
     first_key = next(iter(checks_results))
     for result in checks_results[first_key]:
         entity = result["entity"]
         record = Record(check_id=check['id'],
                         check_name=check['name'],
                         check_result=copy.deepcopy(result),
                         code_block="",
                         file_path=entity.get(CustomAttributes.FILE_PATH),
                         file_line_range=[
                             entity.get('__startline__'),
                             entity.get('__endline__')
                         ],
                         resource=entity.get(CustomAttributes.BLOCK_NAME),
                         entity_tags=entity.get('tags', {}),
                         evaluations=None,
                         check_class=None,
                         file_abs_path=entity.get(
                             CustomAttributes.FILE_PATH))
         if result["result"] == CheckResult.PASSED:
             report.passed_checks.append(record)
         if result["result"] == CheckResult.FAILED:
             report.failed_checks.append(record)
     return report
    def test_post_runner_with_cloned_checks(self):
        """post_runner should append a cloned-policy record for a matching failed check."""
        platform = BcPlatformIntegration()
        platform.skip_policy_download = False
        platform.platform_integration_configured = True
        custom_policies_integration = CustomPoliciesIntegration(platform)

        # mock _get_policies_from_platform method
        custom_policies_integration._get_policies_from_platform = types.MethodType(
            _get_policies_from_platform, custom_policies_integration)
        custom_policies_integration.pre_scan()

        report = Report("terraform")
        failed_record = Record(
            check_id="CKV_AWS_5",
            bc_check_id="BC_AWS_ELASTICSEARCH_3",
            check_name="Ensure all data stored in the Elasticsearch is securely encrypted at rest",
            check_result={"result": CheckResult.FAILED},
            code_block=[],
            file_path="./main.tf",
            file_abs_path=",.",
            file_line_range=[7, 10],
            resource="aws_elasticsearch_domain.enabled",
            evaluations=None,
            check_class='',
            entity_tags={"tag1": "value1"},
        )
        report.failed_checks.append(failed_record)

        custom_policies_integration.post_runner(report)
        # one original failure plus one cloned-policy failure
        self.assertEqual(2, len(report.failed_checks))
        self.assertEqual('mikepolicies_cloned_AWS_1625063607541',
                         report.failed_checks[1].check_id)
Ejemplo n.º 19
0
    def print_reports(self, scan_reports, args, url=None):
        """Render every scan report in the format selected by ``args.output``
        and terminate the process with exit code 1 if any report failed.
        """
        if args.output == 'cli':
            print(f"{self.banner}\n")
        exit_codes = []
        json_payloads = []
        junit_queue = []
        for current in scan_reports:
            if not current.is_empty():
                if args.output == "json":
                    json_payloads.append(current.get_dict(is_quiet=args.quiet))
                elif args.output == "junitxml":
                    junit_queue.append(current)
                elif args.output == 'github_failed_only':
                    current.print_failed_github_md()
                else:
                    current.print_console(is_quiet=args.quiet,
                                          is_compact=args.compact)
                    if url:
                        print("More details: {}".format(url))
            # exit code is collected even for empty reports
            exit_codes.append(current.get_exit_code(args.soft_fail))

        if args.output == "junitxml":
            if len(junit_queue) == 1:
                junit_queue[0].print_junit_xml()
            else:
                # combine all reports into one JUnit document
                combined = Report(None)
                for current in junit_queue:
                    combined.skipped_checks += current.skipped_checks
                    combined.passed_checks += current.passed_checks
                    combined.failed_checks += current.failed_checks
                combined.print_junit_xml()
        if args.output == "json":
            payload = json_payloads[0] if len(json_payloads) == 1 else json_payloads
            print(json.dumps(payload, indent=4))
        if args.output == "cli":
            self.bc_platform.get_report_to_platform(args, scan_reports)

        exit(1 if 1 in exit_codes else 0)
Ejemplo n.º 20
0
 def get_graph_checks_report(self, root_folder: str,
                             runner_filter: RunnerFilter, helmChart,
                             reportMutatorData) -> Report:
     """Run the graph checks and hand their results to the report mutator,
     which rewrites file references for child frameworks (Kustomize/Helm).
     """
     fresh_report = Report(self.check_type)
     results = self.run_graph_checks_results(runner_filter)
     return self.mutateKubernetesGraphResults(
         root_folder,
         runner_filter,
         fresh_report,
         results,
         reportMutatorData=reportMutatorData)
Ejemplo n.º 21
0
    def test_valid_passing_valid_testcases(self):
        """A report with two failed records serializes to JUnit XML with zero errors."""
        s3_record = Record(
            check_id='CKV_AWS_21',
            check_name="Some Check",
            check_result={"result": CheckResult.FAILED},
            code_block=None,
            file_path="./s3.tf",
            file_line_range='1:3',
            resource='aws_s3_bucket.operations',
            evaluations=None,
            check_class=None,
            file_abs_path=',.',
            entity_tags={'tag1': 'value1'},
        )
        ebs_record = Record(
            check_id='CKV_AWS_3',
            check_name="Ensure all data stored in the EBS is securely encrypted",
            check_result={"result": CheckResult.FAILED},
            code_block=None,
            file_path="./ec2.tf",
            file_line_range='1:3',
            resource='aws_ebs_volume.web_host_storage',
            evaluations=None,
            check_class=None,
            file_abs_path=',.',
            entity_tags={'tag1': 'value1'},
        )

        junit_report = Report("terraform")
        junit_report.add_record(record=s3_record)
        junit_report.add_record(record=ebs_record)
        suites = junit_report.get_test_suites()
        parsed_root = ET.fromstring(junit_report.get_junit_xml_string(suites))
        self.assertEqual(parsed_root.attrib['errors'], '0')
Ejemplo n.º 22
0
    def run(self,
            root_folder,
            external_checks_dir=None,
            files=None,
            runner_filter=RunnerFilter(),
            collect_skip_comments=True):
        """Scan Terraform sources and return a merged report of plain and graph checks.

        Either ``root_folder`` or ``files`` must be given. When definitions,
        context and breadcrumbs were already injected (set_external_data-style
        reuse), the parse/graph-build phase is skipped entirely.

        NOTE(review): the mutable default ``runner_filter=RunnerFilter()`` is
        shared across calls — safe only while it is never mutated here.
        """
        report = Report(self.check_type)
        parsing_errors = {}
        self.load_external_checks(external_checks_dir)
        scan_hcl = should_scan_hcl_files()

        # Build fresh definitions and a graph only when no pre-computed state exists.
        if self.context is None or self.definitions is None or self.breadcrumbs is None:
            self.definitions = {}
            logging.info(
                "Scanning root folder and producing fresh tf_definitions and context"
            )
            if root_folder:
                root_folder = os.path.abspath(root_folder)

                local_graph, tf_definitions = self.graph_manager.build_graph_from_source_directory(
                    source_dir=root_folder,
                    local_graph_class=self.graph_class,
                    download_external_modules=runner_filter.
                    download_external_modules,
                    external_modules_download_path=runner_filter.
                    external_modules_download_path,
                    parsing_errors=parsing_errors,
                    excluded_paths=runner_filter.excluded_paths,
                    vars_files=runner_filter.var_files)
            elif files:
                files = [os.path.abspath(file) for file in files]
                # common prefix of the given files acts as the pseudo root folder
                root_folder = os.path.split(os.path.commonprefix(files))[0]
                self.parser.evaluate_variables = False
                self._parse_files(files, scan_hcl, parsing_errors)
                local_graph = self.graph_manager.build_graph_from_definitions(
                    self.definitions)
            else:
                raise Exception(
                    "Root directory was not specified, files were not specified"
                )

            # Register every resource vertex so the report knows the full resource set.
            for vertex in local_graph.vertices:
                if vertex.block_type == BlockType.RESOURCE:
                    report.add_resource(f'{vertex.path}:{vertex.id}')
            self.graph_manager.save_graph(local_graph)
            # Rebuild definitions from the (rendered) graph and keep breadcrumbs
            # so records can later be enriched with variable-rendering info.
            self.definitions, self.breadcrumbs = convert_graph_vertices_to_tf_definitions(
                local_graph.vertices, root_folder)
        else:
            logging.info("Scanning root folder using existing tf_definitions")

        self.check_tf_definition(report, root_folder, runner_filter,
                                 collect_skip_comments)

        report.add_parsing_errors(list(parsing_errors.keys()))

        # Graph checks run after plain checks; duplicates between the two are dropped.
        graph_report = self.get_graph_checks_report(root_folder, runner_filter)
        merge_reports(report, graph_report)
        report = remove_duplicate_results(report)

        return report
Ejemplo n.º 23
0
    def get_graph_checks_report(self, root_folder: str,
                                runner_filter: RunnerFilter) -> Report:
        """Run graph checks and turn every result into a Record on a fresh Report.

        Args:
            root_folder: base directory used to compute repo-relative file paths.
            runner_filter: controls which graph checks are executed.

        Returns:
            Report with one Record per graph-check result; records whose
            resource has breadcrumbs are wrapped in a GraphRecord.
        """
        report = Report(self.check_type)
        checks_results = self.run_graph_checks_results(runner_filter)

        for check, check_results in checks_results.items():
            for check_result in check_results:
                entity = check_result["entity"]
                entity_file_abs_path = entity.get(CustomAttributes.FILE_PATH)
                # Repo-relative path with a leading slash (dropped the unused
                # ``scanned_file`` alias that was previously bound here too).
                entity_file_path = f"/{os.path.relpath(entity_file_abs_path, root_folder)}"
                # BLOCK_NAME is "<Section>.<LogicalName>"; keep the logical name.
                entity_name = entity.get(
                    CustomAttributes.BLOCK_NAME).split(".")[1]
                entity_context = self.context[entity_file_abs_path][
                    TemplateSections.RESOURCES][entity_name]

                record = Record(
                    check_id=check.id,
                    check_name=check.name,
                    check_result=check_result,
                    code_block=entity_context.get("code_lines"),
                    file_path=entity_file_path,
                    file_line_range=[
                        entity_context.get("start_line"),
                        entity_context.get("end_line")
                    ],
                    resource=entity.get(CustomAttributes.ID),
                    evaluations={},
                    check_class=check.__class__.__module__,
                    file_abs_path=entity_file_abs_path,
                    entity_tags={} if not entity.get("Tags") else
                    cfn_utils.parse_entity_tags(entity.get("Tags")))
                record.set_guideline(check.guideline)
                if self.breadcrumbs:
                    # Attach variable-rendering breadcrumbs when available.
                    breadcrumb = self.breadcrumbs.get(record.file_path,
                                                      {}).get(record.resource)
                    if breadcrumb:
                        record = GraphRecord(record, breadcrumb)

                report.add_record(record=record)
        return report
Ejemplo n.º 24
0
    def mutateKubernetesGraphResults(self, root_folder: str, runner_filter: RunnerFilter, report: Report, checks_results, reportMutatorData=None) -> Report:
        # Moves report generation logic out of run() method in Runner class.
        # Allows function overriding of a much smaller function than run() for other "child" frameworks such as Kustomize, Helm
        # Where Kubernetes CHECKS are needed, but the specific file references are to another framework for the user output (or a mix of both).
        #
        # Fixes: the original bound ``reportMutatorData['kustomizeMetadata'],``
        # (trailing comma), wrapping the metadata dict in a 1-tuple and forcing
        # ``[0]`` indexing at every use; it also referenced
        # ``realKustomizeEnvMetadata['filePath']`` in the Record even when the
        # entity was missing from kustomizeFileMappings, where that name is
        # unbound (NameError) or stale from a previous loop iteration.
        kustomizeMetadata = reportMutatorData['kustomizeMetadata']
        kustomizeFileMappings = reportMutatorData['kustomizeFileMappings']

        for check, check_results in checks_results.items():
            for check_result in check_results:
                entity = check_result["entity"]
                entity_file_path = entity.get(CustomAttributes.FILE_PATH)
                entity_file_abs_path = _get_entity_abs_path(root_folder, entity_file_path)
                entity_id = entity.get(CustomAttributes.ID)
                entity_context = self.context[entity_file_path][entity_id]

                if entity_file_abs_path in kustomizeFileMappings:
                    realKustomizeEnvMetadata = kustomizeMetadata[kustomizeFileMappings[entity_file_abs_path]]
                    if 'overlay' in realKustomizeEnvMetadata["type"]:
                        kustomizeResourceID = f'{realKustomizeEnvMetadata["type"]}:{str(realKustomizeEnvMetadata["overlay_name"])}:{entity_id}'
                    else:
                        kustomizeResourceID = f'{realKustomizeEnvMetadata["type"]}:{entity_id}'
                    record_file_path = realKustomizeEnvMetadata['filePath']
                else:
                    kustomizeResourceID = "Unknown error. This is a bug."
                    # Fall back to the raw entity path so we never read an
                    # unbound/stale realKustomizeEnvMetadata.
                    record_file_path = entity_file_path

                record = Record(
                    check_id=check.id,
                    check_name=check.name,
                    check_result=check_result,
                    code_block=entity_context.get("code_lines"),
                    file_path=record_file_path,
                    file_line_range=[0, 0],
                    resource=kustomizeResourceID,  # entity.get(CustomAttributes.ID),
                    evaluations={},
                    check_class=check.__class__.__module__,
                    file_abs_path=entity_file_abs_path
                )
                record.set_guideline(check.guideline)
                report.add_record(record=record)

        return report
Ejemplo n.º 25
0
    def run(
        self,
        root_folder: str,
        external_checks_dir: Optional[List[str]] = None,
        files: Optional[List[str]] = None,
        runner_filter: RunnerFilter = RunnerFilter(),
        collect_skip_comments: bool = True,
    ) -> Report:
        """Scan CloudFormation templates and return a merged report of plain
        and graph check results.

        When definitions/context/breadcrumbs were already injected externally,
        the parse + graph-build phase is skipped.

        NOTE(review): the mutable default ``runner_filter=RunnerFilter()`` is
        shared across calls — safe only while it is never mutated here.
        """
        report = Report(self.check_type)
        parsing_errors = {}

        if self.context is None or self.definitions is None or self.breadcrumbs is None:
            self.definitions, self.definitions_raw = create_definitions(
                root_folder, files, runner_filter, parsing_errors)
            if external_checks_dir:
                for directory in external_checks_dir:
                    # register custom checks with both the plain and graph registries
                    cfn_registry.load_external_checks(directory)
                    self.graph_registry.load_external_checks(directory)
            self.context = build_definitions_context(self.definitions,
                                                     self.definitions_raw,
                                                     root_folder)

            logging.info("creating cloudformation graph")
            local_graph = self.graph_manager.build_graph_from_definitions(
                self.definitions)
            # Register every resource vertex so the report knows the full resource set.
            for vertex in local_graph.vertices:
                if vertex.block_type == BlockType.RESOURCE:
                    report.add_resource(f'{vertex.path}:{vertex.id}')
            self.graph_manager.save_graph(local_graph)
            self.definitions, self.breadcrumbs = convert_graph_vertices_to_definitions(
                local_graph.vertices, root_folder)

        # TODO: replace with real graph rendering
        for cf_file in self.definitions.keys():
            file_definition = self.definitions.get(cf_file, None)
            file_definition_raw = self.definitions_raw.get(cf_file, None)
            if file_definition is not None and file_definition_raw is not None:
                cf_context_parser = ContextParser(cf_file, file_definition,
                                                  file_definition_raw)
                logging.debug("Template Dump for {}: {}".format(
                    cf_file, json.dumps(file_definition, indent=2,
                                        default=str)))
                # resolve Ref to parameters' default values in place
                cf_context_parser.evaluate_default_refs()

        report.add_parsing_errors(list(parsing_errors.keys()))
        # run checks
        self.check_definitions(root_folder, runner_filter, report)

        # run graph checks
        graph_report = self.get_graph_checks_report(root_folder, runner_filter)
        merge_reports(report, graph_report)

        return report
Ejemplo n.º 26
0
    def run(
        self,
        root_folder: Union[str, Path],
        external_checks_dir: Optional[List[str]] = None,
        files: Optional[List[str]] = None,
        runner_filter: RunnerFilter = RunnerFilter(),
        collect_skip_comments: bool = True,
    ) -> Report:
        """Scan package files for vulnerabilities and collect them into a Report."""
        report = Report(self.check_type)

        scan_results = self.prepare_and_scan(root_folder, files, runner_filter)
        if scan_results is None:
            return report

        for scan_result in scan_results:
            repo_path = scan_result["repository"]
            package_file_path = Path(repo_path)
            try:
                package_file_path = package_file_path.relative_to(self._code_repo_path)
            except ValueError:
                # Path.is_relative_to() was implemented in Python 3.9
                pass

            # strip the drive/root anchor to get a rootless path
            rootless_file_path = str(package_file_path).replace(
                package_file_path.anchor, "", 1)

            for vulnerability in scan_result.get("vulnerabilities") or []:
                record = create_report_record(
                    rootless_file_path=rootless_file_path,
                    file_abs_path=repo_path,
                    check_class=self._check_class,
                    vulnerability_details=vulnerability,
                    runner_filter=runner_filter)
                if not runner_filter.should_run_check(record.check_id,
                                                      record.bc_check_id):
                    if runner_filter.checks:
                        # explicit check selection: drop the record entirely
                        continue
                    record.check_result = {
                        "result": CheckResult.SKIPPED,
                        "suppress_comment":
                        f"{vulnerability['id']} is skipped"
                    }

                report.add_resource(record.resource)
                report.add_record(record)

        return report
Ejemplo n.º 27
0
    def test_external_data(self):
        """Injected definitions/context (no parsing) should yield 2 passed and 3 failed checks."""
        dir_abs_path = os.path.dirname(os.path.realpath(__file__))

        # Pre-parsed CloudFormation definitions, as create_definitions() would
        # produce them (including __startline__/__endline__ markers).
        definitions = {
            f'{dir_abs_path}/s3.yaml': {
                'Resources': {
                    'MySourceQueue': {
                        'Type': 'AWS::SQS::Queue',
                        'Properties': {
                            'KmsMasterKeyId': 'kms_id',
                            '__startline__': 17,
                            '__endline__': 22,
                            'resource_type': 'AWS::SQS::Queue'
                        }
                    },
                    'MyDB': {
                        'Type': 'AWS::RDS::DBInstance',
                        'Properties': {
                            'DBName': 'db',
                            'DBInstanceClass': 'db.t3.micro',
                            'Engine': 'mysql',
                            'MasterUsername': '******',
                            'MasterUserPassword': '******',
                            '__startline__': 23,
                            '__endline__': 32,
                            'resource_type': 'AWS::RDS::DBInstance'
                        }
                    }
                }
            }
        }
        # Matching context: per-section line ranges and raw code lines keyed by template path.
        context = {f'{dir_abs_path}/s3.yaml': {'Parameters': {'KmsMasterKeyId': {'start_line': 5, 'end_line': 9, 'code_lines': [(5, '    "KmsMasterKeyId": {\n'), (6, '      "Description": "Company Name",\n'), (7, '      "Type": "String",\n'), (8, '      "Default": "kms_id"\n'), (9, '    },\n')]}, 'DBName': {'start_line': 10, 'end_line': 14, 'code_lines': [(10, '    "DBName": {\n'), (11, '      "Description": "Name of the Database",\n'), (12, '      "Type": "String",\n'), (13, '      "Default": "db"\n'), (14, '    }\n')]}}, 'Resources': {'MySourceQueue': {'start_line': 17, 'end_line': 22, 'code_lines': [(17, '    "MySourceQueue": {\n'), (18, '      "Type": "AWS::SQS::Queue",\n'), (19, '      "Properties": {\n'), (20, '        "KmsMasterKeyId": { "Ref": "KmsMasterKeyId" }\n'), (21, '      }\n'), (22, '    },\n')], 'skipped_checks': []}, 'MyDB': {'start_line': 23, 'end_line': 32, 'code_lines': [(23, '    "MyDB": {\n'), (24, '      "Type": "AWS::RDS::DBInstance",\n'), (25, '      "Properties": {\n'), (26, '        "DBName": { "Ref": "DBName" },\n'), (27, '        "DBInstanceClass": "db.t3.micro",\n'), (28, '        "Engine": "mysql",\n'), (29, '        "MasterUsername": "******",\n'), (30, '        "MasterUserPassword": "******"\n'), (31, '      }\n'), (32, '    }\n')], 'skipped_checks': []}}, 'Outputs': {'DBAppPublicDNS': {'start_line': 35, 'end_line': 38, 'code_lines': [(35, '    "DBAppPublicDNS": {\n'), (36, '      "Description": "DB App Public DNS Name",\n'), (37, '      "Value": { "Fn::GetAtt" : [ "MyDB", "PublicDnsName" ] }\n'), (38, '    }\n')]}}}}
        breadcrumbs = {}
        runner = Runner()
        runner.set_external_data(definitions, context, breadcrumbs)
        report = Report('cloudformation')
        # check_definitions consumes the injected state directly — no files are read
        runner.check_definitions(root_folder=dir_abs_path, runner_filter=RunnerFilter(framework='cloudformation', download_external_modules=False), report=report)
        self.assertEqual(len(report.passed_checks), 2)
        self.assertEqual(len(report.failed_checks), 3)
        pass
Ejemplo n.º 28
0
    def test_valid_passing_valid_testcases(self):
        """Two failed records should render to SARIF that validates against the schema."""
        s3_record = Record(check_id="CKV_AWS_21", check_name="Some Check",
                           check_result={"result": CheckResult.FAILED},
                           code_block=None, file_path="./s3.tf",
                           file_line_range=[1, 3],
                           resource="aws_s3_bucket.operations",
                           evaluations=None, check_class=None,
                           file_abs_path=",.",
                           entity_tags={"tag1": "value1"})

        ebs_record = Record(check_id="CKV_AWS_3",
                            check_name="Ensure all data stored in the EBS is securely encrypted",
                            check_result={"result": CheckResult.FAILED},
                            code_block=None, file_path="./ec2.tf",
                            file_line_range=[1, 3],
                            resource="aws_ebs_volume.web_host_storage",
                            evaluations=None, check_class=None,
                            file_abs_path=",.",
                            entity_tags={"tag1": "value1"})

        sarif_report = Report("terraform")
        sarif_report.add_record(record=s3_record)
        sarif_report.add_record(record=ebs_record)
        sarif_report.get_test_suites()
        sarif_json = sarif_report.get_sarif_json("")
        print(json.dumps(sarif_json))
        # jsonschema.validate returns None on success and raises otherwise
        self.assertEqual(
            None,
            jsonschema.validate(instance=sarif_json,
                                schema=get_sarif_schema()),
        )
Ejemplo n.º 29
0
    def run(self,
            root_folder,
            external_checks_dir=None,
            files=None,
            runner_filter=RunnerFilter(),
            collect_skip_comments=True,
            helmChart=None):
        """Parse Kubernetes manifests from ``files`` and/or ``root_folder``,
        expand List kinds and container definitions in place, then run every
        registered check against each resulting entity.

        NOTE(review): ``parsing_errors`` is created but never populated or
        reported below — parse failures are silently dropped. The mutable
        default ``runner_filter=RunnerFilter()`` is shared across calls.
        """
        report = Report(self.check_type)
        definitions = {}
        definitions_raw = {}
        parsing_errors = {}
        files_list = []
        if external_checks_dir:
            for directory in external_checks_dir:
                registry.load_external_checks(directory, runner_filter)

        # Explicit files are keyed by their given path, as-is.
        if files:
            for file in files:
                parse_result = parse(file)
                if parse_result:
                    (definitions[file], definitions_raw[file]) = parse_result

        # Directory scan: collect candidate manifests, key them by a
        # root-relative path with a leading slash.
        if root_folder:
            for root, d_names, f_names in os.walk(root_folder):
                filter_ignored_directories(d_names)

                for file in f_names:
                    file_ending = os.path.splitext(file)[1]
                    if file_ending in K8_POSSIBLE_ENDINGS:
                        full_path = os.path.join(root, file)
                        if "/." not in full_path and file not in [
                                'package.json', 'package-lock.json'
                        ]:
                            # skip temp directories
                            files_list.append(full_path)

            for file in files_list:
                relative_file_path = f'/{os.path.relpath(file, os.path.commonprefix((root_folder, file)))}'
                parse_result = parse(file)
                if parse_result:
                    (definitions[relative_file_path],
                     definitions_raw[relative_file_path]) = parse_result

        for k8_file in definitions.keys():

            # There are a few cases here. If -f was used, there could be a leading / because it's an absolute path,
            # or there will be no leading slash; root_folder will always be none.
            # If -d is used, root_folder will be the value given, and -f will start with a / (hardcoded above).
            # The goal here is simply to get a valid path to the file (which sls_file does not always give).
            if k8_file[0] == '/':
                path_to_convert = (root_folder +
                                   k8_file) if root_folder else k8_file
            else:
                path_to_convert = (os.path.join(
                    root_folder, k8_file)) if root_folder else k8_file

            file_abs_path = os.path.abspath(path_to_convert)

            if definitions[k8_file]:
                # Pass 1: flatten "List" kinds by appending their items.
                # The range() below is evaluated once, so items appended here
                # are not re-visited within this same pass.
                for i in range(len(definitions[k8_file])):
                    if (not 'apiVersion' in definitions[k8_file][i].keys()
                        ) and (not 'kind' in definitions[k8_file][i].keys()):
                        continue
                    # NOTE(review): ``indent=2`` is an unused str.format kwarg —
                    # it has no effect on the output.
                    logging.debug("Template Dump for {}: {}".format(
                        k8_file, definitions[k8_file][i], indent=2))

                    entity_conf = definitions[k8_file][i]

                    # Split out resources if entity kind is List
                    if entity_conf["kind"] == "List":
                        for item in entity_conf["items"]:
                            definitions[k8_file].append(item)

                # Pass 2: append synthesized container/initContainer pseudo-entities.
                for i in range(len(definitions[k8_file])):
                    if (not 'apiVersion' in definitions[k8_file][i].keys()
                        ) and (not 'kind' in definitions[k8_file][i].keys()):
                        continue
                    logging.debug("Template Dump for {}: {}".format(
                        k8_file, definitions[k8_file][i], indent=2))

                    entity_conf = definitions[k8_file][i]

                    if entity_conf["kind"] == "List":
                        continue

                    # Skip entity without metadata["name"]
                    if "metadata" in entity_conf:
                        if isinstance(
                                entity_conf["metadata"],
                                int) or not "name" in entity_conf["metadata"]:
                            continue
                    else:
                        continue

                    # Skip entity with parent (metadata["ownerReferences"]) in runtime
                    # We will alert in runtime only
                    if "ownerReferences" in entity_conf["metadata"] and \
                            entity_conf["metadata"]["ownerReferences"] is not None:
                        continue

                    # Append containers and initContainers to definitions list
                    # NOTE(review): the loop variable shadows the ``type`` builtin.
                    for type in ["containers", "initContainers"]:
                        containers = []
                        if entity_conf["kind"] == "CustomResourceDefinition":
                            continue
                        containers = self._search_deep_keys(
                            type, entity_conf, [])
                        if not containers:
                            continue
                        containers = containers.pop()
                        #containers.insert(0,entity_conf['kind'])
                        containerDef = {}
                        namespace = ""
                        if "namespace" in entity_conf["metadata"]:
                            namespace = entity_conf["metadata"]["namespace"]
                        else:
                            namespace = "default"
                        containerDef["containers"] = containers.pop()
                        if containerDef["containers"] is not None:
                            # Tag each container with its parent entity so checks
                            # can report a meaningful resource id.
                            # NOTE(review): ``i`` (the outer loop index) is
                            # reassigned here — intentional? It is re-derived
                            # by range() on the next outer iteration.
                            for cd in containerDef["containers"]:
                                i = containerDef["containers"].index(cd)
                                containerDef["containers"][i][
                                    "apiVersion"] = entity_conf["apiVersion"]
                                containerDef["containers"][i]["kind"] = type
                                containerDef["containers"][i][
                                    "parent"] = "{}.{}.{} (container {})".format(
                                        entity_conf["kind"],
                                        entity_conf["metadata"]["name"],
                                        namespace, str(i))
                                containerDef["containers"][i][
                                    "parent_metadata"] = entity_conf[
                                        "metadata"]
                            definitions[k8_file].extend(
                                containerDef["containers"])

                # Run for each definition included added container definitions
                for i in range(len(definitions[k8_file])):
                    if (not 'apiVersion' in definitions[k8_file][i].keys()
                        ) and (not 'kind' in definitions[k8_file][i].keys()):
                        continue
                    logging.debug("Template Dump for {}: {}".format(
                        k8_file, definitions[k8_file][i], indent=2))

                    entity_conf = definitions[k8_file][i]

                    if entity_conf["kind"] == "List":
                        continue

                    if isinstance(entity_conf["kind"], int):
                        continue
                    # Skip entity without metadata["name"] or parent_metadata["name"]
                    if not any(x in entity_conf["kind"]
                               for x in ["containers", "initContainers"]):
                        if "metadata" in entity_conf:
                            if isinstance(
                                    entity_conf["metadata"], int
                            ) or not "name" in entity_conf["metadata"]:
                                continue
                        else:
                            continue

                    # Skip entity with parent (metadata["ownerReferences"]) in runtime
                    # We will alert in runtime only
                    if "metadata" in entity_conf:
                        if "ownerReferences" in entity_conf["metadata"] and \
                                entity_conf["metadata"]["ownerReferences"] is not None:
                            continue

                    # Skip Kustomization Templates (for now)
                    if entity_conf["kind"] == "Kustomization":
                        continue

                    skipped_checks = get_skipped_checks(entity_conf)

                    results = registry.scan(k8_file, entity_conf,
                                            skipped_checks, runner_filter)

                    # TODO refactor into context parsing
                    # NOTE(review): find_lines_result_list is computed but unused.
                    find_lines_result_list = list(
                        find_lines(entity_conf, '__startline__'))
                    start_line = entity_conf["__startline__"]
                    end_line = entity_conf["__endline__"]

                    if start_line == end_line:
                        entity_lines_range = [start_line, end_line]
                        entity_code_lines = definitions_raw[k8_file][
                            start_line - 1:end_line]
                    else:
                        entity_lines_range = [start_line, end_line - 1]
                        entity_code_lines = definitions_raw[k8_file][
                            start_line - 1:end_line - 1]

                    # TODO? - Variable Eval Message!
                    variable_evaluations = {}

                    for check, check_result in results.items():
                        record = Record(
                            check_id=check.id,
                            check_name=check.name,
                            check_result=check_result,
                            code_block=entity_code_lines,
                            file_path=k8_file,
                            file_line_range=entity_lines_range,
                            resource=check.get_resource_id(entity_conf),
                            evaluations=variable_evaluations,
                            check_class=check.__class__.__module__,
                            file_abs_path=file_abs_path)
                        report.add_record(record=record)

        return report
Ejemplo n.º 30
0
    def run(self,
            root_folder,
            external_checks_dir=None,
            files=None,
            runner_filter=None,
            collect_skip_comments=True):
        """Scan CloudFormation templates and return a report of check results.

        :param root_folder: directory walked recursively for files whose
            extension is in ``CF_POSSIBLE_ENDINGS``; may be None when
            explicit ``files`` are given.
        :param external_checks_dir: optional iterable of directories with
            additional checks to register in ``cfn_registry``.
        :param files: optional explicit list of template files to scan.
        :param runner_filter: RunnerFilter controlling which checks run;
            a fresh instance is created when omitted.
        :param collect_skip_comments: kept for signature compatibility with
            the other runners; skips here are collected from the resource's
            code lines via ``ContextParser.collect_skip_comments``.
        :return: Report containing one Record per (resource, check) result.
        """
        # Avoid the shared-mutable-default pitfall: the old
        # ``runner_filter=RunnerFilter()`` default created ONE filter object
        # reused across every call to run().
        if runner_filter is None:
            runner_filter = RunnerFilter()

        report = Report(self.check_type)
        definitions = {}
        definitions_raw = {}
        parsing_errors = {}
        files_list = []
        if external_checks_dir:
            for directory in external_checks_dir:
                cfn_registry.load_external_checks(directory, runner_filter)

        if files:
            for file in files:
                (definitions[file], definitions_raw[file]) = parse(file)

        if root_folder:
            for root, d_names, f_names in os.walk(root_folder):
                filter_ignored_directories(d_names)
                for file in f_names:
                    if os.path.splitext(file)[1] in CF_POSSIBLE_ENDINGS:
                        files_list.append(os.path.join(root, file))

            for file in files_list:
                # The forced leading '/' lets the path-reconstruction logic
                # below distinguish -d (relative key) from -f (absolute key).
                relative_file_path = f'/{os.path.relpath(file, os.path.commonprefix((root_folder, file)))}'
                try:
                    (definitions[relative_file_path],
                     definitions_raw[relative_file_path]) = parse(file)
                except TypeError:
                    # parse() returns a non-tuple for files that are not CF
                    # templates; skip them best-effort rather than failing.
                    logging.info(
                        f'CloudFormation skipping {file} as it is not a valid CF template'
                    )

        # Filter out empty files that have not been parsed successfully, and
        # filter out non-CF template files (must carry a "Resources" mapping).
        definitions = {
            k: v
            for k, v in definitions.items()
            if v and isinstance(v, dict_node)
            and isinstance(v.get("Resources"), dict_node)
        }
        definitions_raw = {
            k: v
            for k, v in definitions_raw.items() if k in definitions
        }

        for cf_file in definitions.keys():

            # There are a few cases here. If -f was used, there could be a leading / because it's an absolute path,
            # or there will be no leading slash; root_folder will always be none.
            # If -d is used, root_folder will be the value given, and -f will start with a / (hardcoded above).
            # The goal here is simply to get a valid path to the file (which cf_file does not always give).
            if cf_file.startswith('/'):
                path_to_convert = (root_folder +
                                   cf_file) if root_folder else cf_file
            else:
                path_to_convert = (os.path.join(
                    root_folder, cf_file)) if root_folder else cf_file

            file_abs_path = os.path.abspath(path_to_convert)
            if isinstance(
                    definitions[cf_file],
                    dict_node) and 'Resources' in definitions[cf_file]:
                cf_context_parser = ContextParser(cf_file,
                                                  definitions[cf_file],
                                                  definitions_raw[cf_file])
                # Lazy %s args: the template dump is only rendered when DEBUG
                # logging is enabled. (The old ``str.format(..., indent=2)``
                # silently ignored the ``indent`` kwarg and formatted eagerly.)
                logging.debug("Template Dump for %s: %s",
                              cf_file, definitions[cf_file])
                cf_context_parser.evaluate_default_refs()
                for resource_name, resource in definitions[cf_file][
                        'Resources'].items():
                    resource_id = cf_context_parser.extract_cf_resource_id(
                        resource, resource_name)
                    # check that the resource can be parsed as a CF resource
                    if resource_id:
                        entity_lines_range, entity_code_lines = cf_context_parser.extract_cf_resource_code_lines(
                            resource)
                        if entity_lines_range and entity_code_lines:
                            # TODO - Variable Eval Message!
                            variable_evaluations = {}

                            skipped_checks = ContextParser.collect_skip_comments(
                                entity_code_lines)

                            results = cfn_registry.scan(
                                cf_file, {resource_name: resource},
                                skipped_checks, runner_filter)
                            for check, check_result in results.items():
                                record = Record(
                                    check_id=check.id,
                                    check_name=check.name,
                                    check_result=check_result,
                                    code_block=entity_code_lines,
                                    file_path=cf_file,
                                    file_line_range=entity_lines_range,
                                    resource=resource_id,
                                    evaluations=variable_evaluations,
                                    check_class=check.__class__.__module__,
                                    file_abs_path=file_abs_path)
                                report.add_record(record=record)
        return report