def build_graph_from_source_directory(
    self,
    source_dir: str,
    render_variables: bool = True,
    local_graph_class: Type[CloudformationLocalGraph] = CloudformationLocalGraph,
    parsing_errors: Optional[Dict[str, Exception]] = None,
    download_external_modules: bool = False,
    excluded_paths: Optional[List[str]] = None,
) -> Tuple[CloudformationLocalGraph, Dict[str, DictNode]]:
    logging.info(f"[CloudformationGraphManager] Parsing files in source dir {source_dir}")
    # keep a caller-supplied dict so parsing errors can be collected by the caller
    parsing_errors = parsing_errors if parsing_errors is not None else {}
    definitions, definitions_raw = get_folder_definitions(source_dir, excluded_paths, parsing_errors)
    local_graph = self.build_graph_from_definitions(definitions, render_variables)
    rendered_definitions, _ = convert_graph_vertices_to_definitions(local_graph.vertices, source_dir)

    # TODO: replace with real graph rendering
    for cf_file in rendered_definitions.keys():
        file_definition = rendered_definitions.get(cf_file, None)
        file_definition_raw = definitions_raw.get(cf_file, None)
        if file_definition is not None and file_definition_raw is not None:
            cf_context_parser = ContextParser(cf_file, file_definition, file_definition_raw)
            logging.debug(
                "Template Dump for {}: {}".format(cf_file, json.dumps(file_definition, indent=2, default=str))
            )
            cf_context_parser.evaluate_default_refs()

    return local_graph, rendered_definitions
def scan_resource_conf(self, conf: Dict[str, Any], entity_type: str) -> CheckResult:
    excluded_key = self.get_excluded_key()
    if excluded_key is not None:
        path_elements = excluded_key.split("/")
        matches = ContextParser.search_deep_keys(path_elements[-1], conf, [])
        if len(matches) > 0:
            for match in matches:
                if match[:-1] == path_elements:
                    if isinstance(match, list) and len(match) == 1:
                        match = match[0]
                    if self.check_excluded_condition(match):
                        return CheckResult.PASSED

    inspected_key = self.get_inspected_key()
    bad_values = self.get_forbidden_values()
    path_elements = inspected_key.split("/")
    matches = ContextParser.search_deep_keys(path_elements[-1], conf, [])
    if len(matches) > 0:
        for match in matches:
            if match[:-1] == path_elements:
                if match[-1] in bad_values or ANY_VALUE in bad_values:
                    return CheckResult.FAILED

    return CheckResult.PASSED
def test_code_line_extraction(self):
    current_dir = os.path.dirname(os.path.realpath(__file__))

    # the test data that we'll evaluate against
    # line ranges are 1-based
    # mapping is file name, to resource index, to resource details
    # checking the resource index helps make sure that we are testing what we think we are testing
    files = [
        f'{current_dir}/cfn_newline_at_end.yaml',
        f'{current_dir}/cfn_nonewline_at_end.yaml'
    ]
    resource_properties_mapping = {
        files[0]: {
            0: {'name': 'MyDB', 'line_range': [2, 9]},
            1: {'name': 'MyBucket', 'line_range': [10, 13]}
        },
        files[1]: {
            0: {'name': 'MyDB', 'line_range': [2, 9]},
            1: {'name': 'MyBucket', 'line_range': [11, 14]}
        }
    }

    for file in files:
        cfn_dict, cfn_str = parse(file)
        cf_context_parser = ContextParser(file, cfn_dict, cfn_str)
        for index, (resource_name, resource) in enumerate(cfn_dict['Resources'].items()):
            # this filters out __startline__ and __endline__ markers
            resource_id = cf_context_parser.extract_cf_resource_id(resource, resource_name)
            if resource_id:
                # make sure we are checking the right resource
                self.assertEqual(resource_name, resource_properties_mapping[file][index]['name'])

                entity_lines_range, entity_code_lines = cf_context_parser.extract_cf_resource_code_lines(resource)
                self.assertEqual(entity_lines_range[0], entity_code_lines[0][0])
                self.assertEqual(entity_lines_range[1], entity_code_lines[-1][0])
                self.assertEqual(entity_lines_range, resource_properties_mapping[file][index]['line_range'])
def extract_code_lines(self, content):
    find_lines_result_list = list(CfnContextParser.find_lines(content, '__startline__'))
    if len(find_lines_result_list) >= 1:
        start_line = min(find_lines_result_list) - 1
        end_line = max(list(CfnContextParser.find_lines(content, '__endline__')))

        entity_lines_range = [start_line, end_line - 1]
        entity_code_lines = self.sls_template_lines[start_line - 1: end_line - 1]
        return entity_lines_range, entity_code_lines
    return None, None
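# Illustrative sketch (not checkov source): extract_code_lines() above relies on the parser
# annotating every mapping with __startline__/__endline__ markers and on
# CfnContextParser.find_lines collecting those marker values recursively. The hypothetical
# helper below only demonstrates that traversal so the min/max arithmetic is easier to follow.
def _find_marker_values(node, marker):
    """Yield every value stored under `marker` anywhere in a nested dict/list structure."""
    if isinstance(node, dict):
        for key, value in node.items():
            if key == marker:
                yield value
            else:
                yield from _find_marker_values(value, marker)
    elif isinstance(node, list):
        for item in node:
            yield from _find_marker_values(item, marker)


_annotated_resource = {
    "Type": "AWS::S3::Bucket",
    "__startline__": 12,
    "__endline__": 18,
    "Properties": {"BucketName": "example", "__startline__": 14, "__endline__": 18},
}
assert min(_find_marker_values(_annotated_resource, "__startline__")) == 12
assert max(_find_marker_values(_annotated_resource, "__endline__")) == 18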
def test_parameter_import_lines(self):
    # check that when a parameter is imported into a resource, the line numbers of the resource are preserved
    current_dir = os.path.dirname(os.path.realpath(__file__))
    file = f'{current_dir}/cfn_with_ref.yaml'
    definitions, definitions_raw = parse(file)
    cf_context_parser = ContextParser(file, definitions, definitions_raw)
    resource = definitions['Resources']['ElasticsearchDomain']
    entity_lines_range, entity_code_lines = cf_context_parser.extract_cf_resource_code_lines(resource)
    self.assertEqual(entity_lines_range[0], 10)
    self.assertEqual(entity_lines_range[1], 20)
def run(
    self,
    root_folder: str,
    external_checks_dir: Optional[List[str]] = None,
    files: Optional[List[str]] = None,
    runner_filter: RunnerFilter = RunnerFilter(),
    collect_skip_comments: bool = True,
) -> Report:
    report = Report(self.check_type)
    parsing_errors = {}
    if self.context is None or self.definitions is None or self.breadcrumbs is None:
        self.definitions, self.definitions_raw = create_definitions(root_folder, files, runner_filter, parsing_errors)
        if external_checks_dir:
            for directory in external_checks_dir:
                cfn_registry.load_external_checks(directory)
                self.graph_registry.load_external_checks(directory)
        self.context = build_definitions_context(self.definitions, self.definitions_raw, root_folder)

        logging.info("creating cloudformation graph")
        local_graph = self.graph_manager.build_graph_from_definitions(self.definitions)
        for vertex in local_graph.vertices:
            if vertex.block_type == BlockType.RESOURCE:
                report.add_resource(f'{vertex.path}:{vertex.id}')
        self.graph_manager.save_graph(local_graph)
        self.definitions, self.breadcrumbs = convert_graph_vertices_to_definitions(local_graph.vertices, root_folder)

    # TODO: replace with real graph rendering
    for cf_file in self.definitions.keys():
        file_definition = self.definitions.get(cf_file, None)
        file_definition_raw = self.definitions_raw.get(cf_file, None)
        if file_definition is not None and file_definition_raw is not None:
            cf_context_parser = ContextParser(cf_file, file_definition, file_definition_raw)
            logging.debug(
                "Template Dump for {}: {}".format(cf_file, json.dumps(file_definition, indent=2, default=str))
            )
            cf_context_parser.evaluate_default_refs()

    report.add_parsing_errors(list(parsing_errors.keys()))

    # run checks
    self.check_definitions(root_folder, runner_filter, report)

    # run graph checks
    graph_report = self.get_graph_checks_report(root_folder, runner_filter)
    merge_reports(report, graph_report)

    return report
def check_definitions(self, root_folder, runner_filter, report):
    for file_abs_path, definition in self.definitions.items():
        cf_file = f"/{os.path.relpath(file_abs_path, root_folder)}"
        if isinstance(definition, dict) and TemplateSections.RESOURCES in definition.keys():
            for resource_name, resource in definition[TemplateSections.RESOURCES].items():
                resource_id = ContextParser.extract_cf_resource_id(resource, resource_name)
                # check that the resource can be parsed as a CF resource
                if resource_id:
                    resource_context = self.context[file_abs_path][TemplateSections.RESOURCES][resource_name]
                    entity_lines_range = [resource_context['start_line'], resource_context['end_line']]
                    entity_code_lines = resource_context['code_lines']
                    if entity_lines_range and entity_code_lines:
                        # TODO - Variable Eval Message!
                        variable_evaluations = {}
                        skipped_checks = ContextParser.collect_skip_comments(entity_code_lines)
                        entity = {resource_name: resource}
                        results = cfn_registry.scan(cf_file, entity, skipped_checks, runner_filter)
                        tags = cfn_utils.get_resource_tags(entity)
                        for check, check_result in results.items():
                            record = Record(
                                check_id=check.id,
                                bc_check_id=check.bc_id,
                                check_name=check.name,
                                check_result=check_result,
                                code_block=entity_code_lines,
                                file_path=cf_file,
                                file_line_range=entity_lines_range,
                                resource=resource_id,
                                evaluations=variable_evaluations,
                                check_class=check.__class__.__module__,
                                file_abs_path=file_abs_path,
                                entity_tags=tags,
                            )
                            breadcrumb = self.breadcrumbs.get(record.file_path, {}).get(record.resource)
                            if breadcrumb:
                                record = GraphRecord(record, breadcrumb)
                            record.set_guideline(check.guideline)
                            report.add_record(record=record)
def test_trim_lines(self):
    # trim from front
    test1 = [
        (0, '\n'),
        (1, ''),
        (2, ' here is text'),
        (3, 'more text')
    ]
    self.assertEqual(ContextParser.trim_lines(test1), test1[2:4])

    # trim from back
    test2 = [
        (0, ' here is text'),
        (1, 'more text'),
        (2, '\n'),
        (3, ''),
    ]
    self.assertEqual(ContextParser.trim_lines(test2), test2[0:2])

    # trim from both
    test3 = [
        (0, '\n'),
        (1, ''),
        (2, ' here is text'),
        (3, 'more text'),
        (4, '\n'),
        (5, ''),
    ]
    self.assertEqual(ContextParser.trim_lines(test3), test3[2:4])

    # trim nothing
    test4 = [
        (2, ' here is text'),
        (3, 'more text'),
    ]
    self.assertEqual(ContextParser.trim_lines(test4), test4)

    # trim everything
    test5 = [
        (2, ''),
        (3, '\n'),
    ]
    self.assertEqual(ContextParser.trim_lines(test5), [])
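# A minimal sketch of the behavior test_trim_lines() exercises, assuming trim_lines()
# receives a list of (line_number, text) tuples; this is an illustrative re-implementation,
# not the checkov source.
def _trim_lines_sketch(lines):
    start, end = 0, len(lines)
    # drop leading blank lines
    while start < end and not lines[start][1].strip():
        start += 1
    # drop trailing blank lines
    while end > start and not lines[end - 1][1].strip():
        end -= 1
    return lines[start:end]


assert _trim_lines_sketch([(0, '\n'), (1, ''), (2, ' here is text'), (3, 'more text')]) == [(2, ' here is text'), (3, 'more text')]
assert _trim_lines_sketch([(2, ''), (3, '\n')]) == []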
def scan_resource_conf(self, conf):
    inspected_key = self.get_inspected_key()
    expected_values = self.get_expected_values()
    path_elements = inspected_key.split('/')
    matches = ContextParser.search_deep_keys(path_elements[-1], conf, [])
    if len(matches) > 0:
        for match in matches:
            # CFN files are parsed differently from terraform, which causes the path search above to behave
            # differently. The result is path parts with integer indexes, instead of strings like '[0]'.
            # This logic replaces those, allowing inspected_keys in checks to use the same syntax.
            for i in range(0, len(match)):
                if type(match[i]) == int:
                    match[i] = f'[{match[i]}]'

            if match[:-1] == path_elements:
                # Inspected key exists
                if ANY_VALUE in expected_values:
                    # Key is found on the configuration - if it accepts any value, the check is PASSED
                    return CheckResult.PASSED
                value = match[-1]
                if isinstance(value, list) and len(value) == 1:
                    value = value[0]
                if self._is_variable_dependant(value):
                    # If the tested attribute is variable-dependant, then result is PASSED
                    return CheckResult.PASSED
                if value in expected_values:
                    return CheckResult.PASSED
        return CheckResult.FAILED
    return self.missing_block_result
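# Illustrative only: how the index normalisation above lines up a deep-search match with an
# inspected_key such as "Properties/PolicyDocument/Statement/[0]/Effect". search_deep_keys
# returns the key path plus the found value; rewriting integer list indexes to the "[n]" form
# lets the comparison with path_elements use the same syntax as terraform-style checks.
_match = ["Properties", "PolicyDocument", "Statement", 0, "Effect", "Allow"]
_path_elements = "Properties/PolicyDocument/Statement/[0]/Effect".split("/")
for _i in range(len(_match)):
    if isinstance(_match[_i], int):
        _match[_i] = f"[{_match[_i]}]"
assert _match[:-1] == _path_elements and _match[-1] == "Allow"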
def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter()):
    report = Report(self.check_type)
    definitions = {}
    definitions_raw = {}
    parsing_errors = {}
    files_list = []
    if external_checks_dir:
        for directory in external_checks_dir:
            cfn_registry.load_external_checks(directory)

    if files:
        for file in files:
            (definitions[file], definitions_raw[file]) = parse(file)

    if root_folder:
        for root, d_names, f_names in os.walk(root_folder):
            filter_ignored_directories(d_names)
            for file in f_names:
                file_ending = os.path.splitext(file)[1]
                if file_ending in CF_POSSIBLE_ENDINGS:
                    files_list.append(os.path.join(root, file))

        for file in files_list:
            relative_file_path = f'/{os.path.relpath(file, os.path.commonprefix((root_folder, file)))}'
            (definitions[relative_file_path], definitions_raw[relative_file_path]) = parse(file)

    # Filter out empty files that have not been parsed successfully, and filter out non-CF template files
    definitions = {k: v for k, v in definitions.items() if v and v.__contains__("Resources")}
    definitions_raw = {k: v for k, v in definitions_raw.items() if k in definitions.keys()}

    for cf_file in definitions.keys():
        if isinstance(definitions[cf_file], dict_node) and 'Resources' in definitions[cf_file].keys():
            cf_context_parser = ContextParser(cf_file, definitions[cf_file], definitions_raw[cf_file])
            logging.debug("Template Dump for {}: {}".format(cf_file, definitions[cf_file], indent=2))
            cf_context_parser.evaluate_default_refs()
            for resource_name, resource in definitions[cf_file]['Resources'].items():
                resource_id = cf_context_parser.extract_cf_resource_id(resource, resource_name)
                # check that the resource can be parsed as a CF resource
                if resource_id:
                    entity_lines_range, entity_code_lines = cf_context_parser.extract_cf_resource_code_lines(resource)
                    if entity_lines_range and entity_code_lines:
                        # TODO - Variable Eval Message!
                        variable_evaluations = {}
                        skipped_checks = ContextParser.collect_skip_comments(entity_code_lines)
                        results = cfn_registry.scan(cf_file, {resource_name: resource}, skipped_checks, runner_filter)
                        for check, check_result in results.items():
                            record = Record(check_id=check.id, check_name=check.name, check_result=check_result,
                                            code_block=entity_code_lines, file_path=cf_file,
                                            file_line_range=entity_lines_range,
                                            resource=resource_id, evaluations=variable_evaluations,
                                            check_class=check.__class__.__module__)
                            report.add_record(record=record)
    return report
def build_definitions_context(
    definitions: Dict[str, DictNode], definitions_raw: Dict[str, List[Tuple[int, str]]], root_folder: str
) -> Dict[str, Dict[str, Any]]:
    definitions_context: Dict[str, Dict[str, Any]] = {}
    # iterate on the files
    for file_path, file_path_definitions in definitions.items():
        # iterate on the definitions (Parameters, Resources, Outputs...)
        for file_path_definition, definition in file_path_definitions.items():
            if (
                isinstance(file_path_definition, StrNode)
                and file_path_definition.upper() in TemplateSections.__members__
                and isinstance(definition, DictNode)
            ):
                # iterate on the actual objects of each definition
                for attribute, attr_value in definition.items():
                    if isinstance(attr_value, DictNode):
                        start_line = attr_value.start_mark.line
                        end_line = attr_value.end_mark.line

                        # fix lines number for yaml and json files
                        first_line_index = 0
                        while not str.strip(definitions_raw[file_path][first_line_index][1]):
                            first_line_index += 1

                        # check if the file is a json file
                        if str.strip(definitions_raw[file_path][first_line_index][1])[0] == "{":
                            start_line += 1
                            end_line += 1
                        else:
                            current_line = str.strip(definitions_raw[file_path][start_line - 1][1])
                            while not current_line or current_line[0] == YAML_COMMENT_MARK:
                                start_line -= 1
                                current_line = str.strip(definitions_raw[file_path][start_line - 1][1])

                            current_line = str.strip(definitions_raw[file_path][end_line - 1][1])
                            while not current_line or current_line[0] == YAML_COMMENT_MARK:
                                end_line -= 1
                                current_line = str.strip(definitions_raw[file_path][end_line - 1][1])

                        code_lines = definitions_raw[file_path][start_line - 1 : end_line]
                        dpath.new(
                            definitions_context,
                            [file_path, str(file_path_definition), str(attribute)],
                            {"start_line": start_line, "end_line": end_line, "code_lines": code_lines},
                        )

                        if file_path_definition.upper() == TemplateSections.RESOURCES.value.upper():
                            skipped_checks = ContextParser.collect_skip_comments(code_lines)
                            dpath.new(
                                definitions_context,
                                [file_path, str(file_path_definition), str(attribute), "skipped_checks"],
                                skipped_checks,
                            )
    return definitions_context
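# A hedged sketch of the context shape build_definitions_context() produces, inferred from the
# dpath.new calls above (file name and values below are made up): per file, per template
# section, per object name it stores the 1-based line range, the raw code lines, and, for
# Resources only, the collected skip comments.
_example_context = {
    "/templates/s3.yaml": {
        "Resources": {
            "MyBucket": {
                "start_line": 10,
                "end_line": 13,
                "code_lines": [(10, "  MyBucket:\n"), (11, "    Type: AWS::S3::Bucket\n")],
                "skipped_checks": [],
            }
        }
    }
}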
def scan_resource_conf(self, conf):
    inspected_key = self.get_inspected_key()
    expected_values = self.get_expected_values()
    path_elements = inspected_key.split('/')
    matches = ContextParser.search_deep_keys(path_elements[-1], conf, [])
    if len(matches) > 0:
        for match in matches:
            if match[:-1] == path_elements:
                # Inspected key exists
                if ANY_VALUE in expected_values:
                    # Key is found on the configuration - if it accepts any value, the check is PASSED
                    return CheckResult.PASSED
                value = match[-1]
                if isinstance(value, list) and len(value) == 1:
                    value = value[0]
                if self._is_variable_dependant(value):
                    # If the tested attribute is variable-dependant, then result is PASSED
                    return CheckResult.PASSED
                if value in expected_values:
                    return CheckResult.PASSED
        return CheckResult.FAILED
    return self.missing_block_result
def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),
        collect_skip_comments=True):
    report = Report(self.check_type)
    definitions = {}
    definitions_raw = {}
    parsing_errors = {}
    files_list = []
    if external_checks_dir:
        for directory in external_checks_dir:
            cfn_registry.load_external_checks(directory, runner_filter)

    if files:
        for file in files:
            (definitions[file], definitions_raw[file]) = parse(file)

    if root_folder:
        for root, d_names, f_names in os.walk(root_folder):
            filter_ignored_directories(d_names)
            for file in f_names:
                file_ending = os.path.splitext(file)[1]
                if file_ending in CF_POSSIBLE_ENDINGS:
                    files_list.append(os.path.join(root, file))

        for file in files_list:
            relative_file_path = f'/{os.path.relpath(file, os.path.commonprefix((root_folder, file)))}'
            try:
                (definitions[relative_file_path], definitions_raw[relative_file_path]) = parse(file)
            except TypeError:
                logging.info(f'CloudFormation skipping {file} as it is not a valid CF template')

    # Filter out empty files that have not been parsed successfully, and filter out non-CF template files
    definitions = {
        k: v
        for k, v in definitions.items()
        if v and isinstance(v, dict_node) and v.__contains__("Resources") and isinstance(v["Resources"], dict_node)
    }
    definitions_raw = {k: v for k, v in definitions_raw.items() if k in definitions.keys()}

    for cf_file in definitions.keys():

        # There are a few cases here. If -f was used, there could be a leading / because it's an absolute path,
        # or there will be no leading slash; root_folder will always be none.
        # If -d is used, root_folder will be the value given, and -f will start with a / (hardcoded above).
        # The goal here is simply to get a valid path to the file (which cf_file does not always give).
        if cf_file[0] == '/':
            path_to_convert = (root_folder + cf_file) if root_folder else cf_file
        else:
            path_to_convert = (os.path.join(root_folder, cf_file)) if root_folder else cf_file

        file_abs_path = os.path.abspath(path_to_convert)

        if isinstance(definitions[cf_file], dict_node) and 'Resources' in definitions[cf_file].keys():
            cf_context_parser = ContextParser(cf_file, definitions[cf_file], definitions_raw[cf_file])
            logging.debug("Template Dump for {}: {}".format(cf_file, definitions[cf_file], indent=2))
            cf_context_parser.evaluate_default_refs()
            for resource_name, resource in definitions[cf_file]['Resources'].items():
                resource_id = cf_context_parser.extract_cf_resource_id(resource, resource_name)
                # check that the resource can be parsed as a CF resource
                if resource_id:
                    entity_lines_range, entity_code_lines = cf_context_parser.extract_cf_resource_code_lines(resource)
                    if entity_lines_range and entity_code_lines:
                        # TODO - Variable Eval Message!
                        variable_evaluations = {}
                        skipped_checks = ContextParser.collect_skip_comments(entity_code_lines)
                        results = cfn_registry.scan(cf_file, {resource_name: resource}, skipped_checks, runner_filter)
                        for check, check_result in results.items():
                            record = Record(check_id=check.id, check_name=check.name, check_result=check_result,
                                            code_block=entity_code_lines, file_path=cf_file,
                                            file_line_range=entity_lines_range,
                                            resource=resource_id, evaluations=variable_evaluations,
                                            check_class=check.__class__.__module__,
                                            file_abs_path=file_abs_path)
                            report.add_record(record=record)
    return report
def template_contains_key(template, key):
    if ContextParser.search_deep_keys(key, template, []):
        return True
    return False
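# Hypothetical usage of template_contains_key(): because search_deep_keys matches at any
# nesting depth, the key is found even when it is buried inside a resource property.
_template = {
    "Resources": {
        "MyQueue": {
            "Type": "AWS::SQS::Queue",
            "Properties": {"KmsMasterKeyId": {"Fn::ImportValue": "shared-key"}},
        }
    }
}
# template_contains_key(_template, "Fn::ImportValue")  -> True
# template_contains_key(_template, "Conditions")       -> False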
def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter()):
    report = Report(self.check_type)
    definitions = {}
    definitions_raw = {}
    parsing_errors = {}
    files_list = []
    if external_checks_dir:
        for directory in external_checks_dir:
            sls_registry.load_external_checks(directory, runner_filter)

    if files:
        for file in files:
            if os.path.basename(file) in SLS_FILE_MASK:
                parse_result = parse(file)
                if parse_result:
                    (definitions[file], definitions_raw[file]) = parse_result

    if root_folder:
        for root, d_names, f_names in os.walk(root_folder):
            # Don't walk in to "node_modules" directories under the root folder. If –for some reason–
            # scanning one of these is desired, it can be directly specified.
            if "node_modules" in d_names:
                d_names.remove("node_modules")

            filter_ignored_directories(d_names)
            for file in f_names:
                if file in SLS_FILE_MASK:
                    full_path = os.path.join(root, file)
                    if "/." not in full_path:
                        # skip temp directories
                        files_list.append(full_path)

        for file in files_list:
            relative_file_path = f'/{os.path.relpath(file, os.path.commonprefix((root_folder, file)))}'
            parse_result = parse(file)
            if parse_result:
                (definitions[relative_file_path], definitions_raw[relative_file_path]) = parse_result

    # Filter out empty files that have not been parsed successfully
    definitions = {k: v for k, v in definitions.items() if v}
    definitions_raw = {k: v for k, v in definitions_raw.items() if k in definitions.keys()}

    for sls_file in definitions.keys():
        if not isinstance(definitions[sls_file], dict_node):
            continue

        if CFN_RESOURCES_TOKEN in definitions[sls_file] and isinstance(definitions[sls_file][CFN_RESOURCES_TOKEN], dict_node):
            cf_sub_template = definitions[sls_file][CFN_RESOURCES_TOKEN]
            cf_context_parser = CfnContextParser(sls_file, cf_sub_template, definitions_raw[sls_file])
            logging.debug("Template Dump for {}: {}".format(sls_file, definitions[sls_file], indent=2))
            cf_context_parser.evaluate_default_refs()
            for resource_name, resource in cf_sub_template['Resources'].items():
                if not isinstance(resource, dict_node):
                    continue
                cf_resource_id = cf_context_parser.extract_cf_resource_id(resource, resource_name)
                entity_lines_range, entity_code_lines = cf_context_parser.extract_cf_resource_code_lines(resource)
                if entity_lines_range and entity_code_lines:
                    skipped_checks = CfnContextParser.collect_skip_comments(entity_code_lines)
                    # TODO - Variable Eval Message!
                    variable_evaluations = {}
                    results = cfn_registry.scan(sls_file, {resource_name: resource}, skipped_checks, runner_filter)
                    for check, check_result in results.items():
                        record = Record(check_id=check.id, check_name=check.name, check_result=check_result,
                                        code_block=entity_code_lines, file_path=sls_file,
                                        file_line_range=entity_lines_range,
                                        resource=cf_resource_id, evaluations=variable_evaluations,
                                        check_class=check.__class__.__module__)
                        report.add_record(record=record)

        if FUNCTIONS_TOKEN in definitions[sls_file]:
            template_functions = definitions[sls_file][FUNCTIONS_TOKEN]
            sls_context_parser = SlsContextParser(sls_file, definitions[sls_file], definitions_raw[sls_file])
            for sls_function_name, sls_function in template_functions.items():
                if not isinstance(sls_function, dict_node):
                    continue
                entity_lines_range, entity_code_lines = sls_context_parser.extract_function_code_lines(sls_function)
                if entity_lines_range and entity_code_lines:
                    skipped_checks = CfnContextParser.collect_skip_comments(entity_code_lines)
                    variable_evaluations = {}
                    sls_context_parser.enrich_function_with_provider(sls_function_name)
                    results = sls_registry.scan(sls_file,
                                                {'function': sls_function,
                                                 'provider_type': sls_context_parser.provider_type},
                                                skipped_checks, runner_filter)
                    for check, check_result in results.items():
                        record = Record(check_id=check.id, check_name=check.name, check_result=check_result,
                                        code_block=entity_code_lines, file_path=sls_file,
                                        file_line_range=entity_lines_range,
                                        resource=sls_function_name, evaluations=variable_evaluations,
                                        check_class=check.__class__.__module__)
                        report.add_record(record=record)
    return report
def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),
        collect_skip_comments=True):
    report = Report(self.check_type)
    definitions = {}
    definitions_raw = {}
    parsing_errors = {}
    files_list = []
    if external_checks_dir:
        for directory in external_checks_dir:
            function_registry.load_external_checks(directory)

    if files:
        for file in files:
            if os.path.basename(file) in SLS_FILE_MASK:
                parse_result = parse(file)
                if parse_result:
                    (definitions[file], definitions_raw[file]) = parse_result

    if root_folder:
        for root, d_names, f_names in os.walk(root_folder):
            # Don't walk in to "node_modules" directories under the root folder. If –for some reason–
            # scanning one of these is desired, it can be directly specified.
            if "node_modules" in d_names:
                d_names.remove("node_modules")

            filter_ignored_directories(d_names)
            for file in f_names:
                if file in SLS_FILE_MASK:
                    full_path = os.path.join(root, file)
                    if "/." not in full_path:
                        # skip temp directories
                        files_list.append(full_path)

        for file in files_list:
            relative_file_path = f'/{os.path.relpath(file, os.path.commonprefix((root_folder, file)))}'
            parse_result = parse(file)
            if parse_result:
                (definitions[relative_file_path], definitions_raw[relative_file_path]) = parse_result

    # Filter out empty files that have not been parsed successfully
    definitions = {k: v for k, v in definitions.items() if v}
    definitions_raw = {k: v for k, v in definitions_raw.items() if k in definitions.keys()}

    for sls_file, sls_file_data in definitions.items():

        # There are a few cases here. If -f was used, there could be a leading / because it's an absolute path,
        # or there will be no leading slash; root_folder will always be none.
        # If -d is used, root_folder will be the value given, and -f will start with a / (hardcoded above).
        # The goal here is simply to get a valid path to the file (which sls_file does not always give).
        if sls_file[0] == '/':
            path_to_convert = (root_folder + sls_file) if root_folder else sls_file
        else:
            path_to_convert = (os.path.join(root_folder, sls_file)) if root_folder else sls_file

        file_abs_path = os.path.abspath(path_to_convert)

        if not isinstance(sls_file_data, dict_node):
            continue

        if CFN_RESOURCES_TOKEN in sls_file_data and isinstance(sls_file_data[CFN_RESOURCES_TOKEN], dict_node):
            cf_sub_template = sls_file_data[CFN_RESOURCES_TOKEN]
            cf_context_parser = CfnContextParser(sls_file, cf_sub_template, definitions_raw[sls_file])
            logging.debug("Template Dump for {}: {}".format(sls_file, sls_file_data, indent=2))
            cf_context_parser.evaluate_default_refs()
            for resource_name, resource in cf_sub_template['Resources'].items():
                if not isinstance(resource, dict_node):
                    continue
                cf_resource_id = cf_context_parser.extract_cf_resource_id(resource, resource_name)
                if not cf_resource_id:
                    # Not Type attribute for resource
                    continue
                entity_lines_range, entity_code_lines = cf_context_parser.extract_cf_resource_code_lines(resource)
                if entity_lines_range and entity_code_lines:
                    skipped_checks = CfnContextParser.collect_skip_comments(entity_code_lines)
                    # TODO - Variable Eval Message!
                    variable_evaluations = {}
                    entity = {resource_name: resource}
                    results = cfn_registry.scan(sls_file, entity, skipped_checks, runner_filter)
                    tags = cfn_utils.get_resource_tags(entity, cfn_registry)
                    for check, check_result in results.items():
                        record = Record(check_id=check.id, check_name=check.name, check_result=check_result,
                                        code_block=entity_code_lines, file_path=sls_file,
                                        file_line_range=entity_lines_range,
                                        resource=cf_resource_id, evaluations=variable_evaluations,
                                        check_class=check.__class__.__module__,
                                        file_abs_path=file_abs_path, entity_tags=tags)
                        report.add_record(record=record)

        sls_context_parser = SlsContextParser(sls_file, sls_file_data, definitions_raw[sls_file])

        # Sub-sections that have multiple items under them
        for token, registry in MULTI_ITEM_SECTIONS:
            template_items = sls_file_data.get(token)
            if not template_items or not isinstance(template_items, dict):
                continue
            for item_name, item_content in template_items.items():
                if not isinstance(item_content, dict_node):
                    continue
                entity_lines_range, entity_code_lines = sls_context_parser.extract_code_lines(item_content)
                if entity_lines_range and entity_code_lines:
                    skipped_checks = CfnContextParser.collect_skip_comments(entity_code_lines)
                    variable_evaluations = {}
                    if token == "functions":  #nosec
                        # "Enriching" copies things like "environment" and "stackTags" down into the
                        # function data from the provider block since logically that's what serverless
                        # does. This allows checks to see what the complete data would be.
                        sls_context_parser.enrich_function_with_provider(item_name)
                    entity = EntityDetails(sls_context_parser.provider_type, item_content)
                    results = registry.scan(sls_file, entity, skipped_checks, runner_filter)
                    tags = cfn_utils.get_resource_tags(entity, registry)
                    for check, check_result in results.items():
                        record = Record(check_id=check.id, check_name=check.name, check_result=check_result,
                                        code_block=entity_code_lines, file_path=sls_file,
                                        file_line_range=entity_lines_range,
                                        resource=item_name, evaluations=variable_evaluations,
                                        check_class=check.__class__.__module__,
                                        file_abs_path=file_abs_path, entity_tags=tags)
                        report.add_record(record=record)

        # Sub-sections that are a single item
        for token, registry in SINGLE_ITEM_SECTIONS:
            item_content = sls_file_data.get(token)
            if not item_content:
                continue
            entity_lines_range, entity_code_lines = sls_context_parser.extract_code_lines(item_content)
            if not entity_lines_range:
                entity_lines_range, entity_code_lines = sls_context_parser.extract_code_lines(sls_file_data)

            skipped_checks = CfnContextParser.collect_skip_comments(entity_code_lines)
            variable_evaluations = {}
            entity = EntityDetails(sls_context_parser.provider_type, item_content)
            results = registry.scan(sls_file, entity, skipped_checks, runner_filter)
            tags = cfn_utils.get_resource_tags(entity, registry)
            for check, check_result in results.items():
                record = Record(check_id=check.id, check_name=check.name, check_result=check_result,
                                code_block=entity_code_lines, file_path=sls_file,
                                file_line_range=entity_lines_range,
                                resource=token, evaluations=variable_evaluations,
                                check_class=check.__class__.__module__,
                                file_abs_path=file_abs_path, entity_tags=tags)
                report.add_record(record=record)

        # "Complete" checks
        # NOTE: Ignore code content, no point in showing (could be long)
        entity_lines_range, entity_code_lines = sls_context_parser.extract_code_lines(sls_file_data)
        if entity_lines_range:
            skipped_checks = CfnContextParser.collect_skip_comments(entity_code_lines)
            variable_evaluations = {}
            entity = EntityDetails(sls_context_parser.provider_type, sls_file_data)
            results = complete_registry.scan(sls_file, entity, skipped_checks, runner_filter)
            tags = cfn_utils.get_resource_tags(entity, complete_registry)
            for check, check_result in results.items():
                record = Record(check_id=check.id, check_name=check.name, check_result=check_result,
                                code_block=[],  # Don't show, could be large
                                file_path=sls_file,
                                file_line_range=entity_lines_range,
                                resource="complete",  # Weird, not sure what to put where
                                evaluations=variable_evaluations,
                                check_class=check.__class__.__module__,
                                file_abs_path=file_abs_path, entity_tags=tags)
                report.add_record(record=record)

    return report