def test_record_relative_path_with_abs_dir(self):
    """Every record's repo_file_path must equal its file_path for an absolute scan dir.

    The plan runner already reports file_path relative to the CWD (with a
    leading '/'), so the two attributes should match exactly.
    """
    base_dir = os.path.dirname(os.path.realpath(__file__))
    plan_dir = os.path.abspath(os.path.join(base_dir, "resources", "plan"))
    report = Runner().run(
        root_folder=plan_dir,
        external_checks_dir=None,
        runner_filter=RunnerFilter(framework="terraform", checks=["CKV_AWS_20"]),
    )
    for record in report.failed_checks + report.passed_checks:
        self.assertEqual(record.repo_file_path, record.file_path)
def test_runner_two_checks_only(self):
    """Run the plan runner on a single tfplan.json restricted to one check ID."""
    current_dir = os.path.dirname(os.path.realpath(__file__))
    # os.path.join instead of string '+' concatenation for portability
    valid_plan_path = os.path.join(current_dir, "resources", "plan", "tfplan.json")
    runner = Runner()
    checks_allowlist = ["CKV_AWS_21"]
    report = runner.run(
        root_folder=None,
        files=[valid_plan_path],
        external_checks_dir=None,
        runner_filter=RunnerFilter(framework="all", checks=checks_allowlist),
    )
    report_json = report.get_json()
    # assertIsInstance gives a better failure message than assertTrue(isinstance(...))
    self.assertIsInstance(report_json, str)
    self.assertIsNotNone(report_json)
    self.assertIsNotNone(report.get_test_suites())
    # soft_fail flips the exit code to 0 even though failed checks exist
    self.assertEqual(report.get_exit_code(soft_fail=False), 1)
    self.assertEqual(report.get_exit_code(soft_fail=True), 0)
    for record in report.failed_checks:
        self.assertIn(record.check_id, checks_allowlist)
    self.assertEqual(report.get_summary()["failed"], 3)
    self.assertEqual(report.get_summary()["passed"], 3)
def test_summary(self):
    """Verify counts and file-name conventions for the KubletRotateCertificates examples."""
    runner = Runner()
    current_dir = os.path.dirname(os.path.realpath(__file__))
    # os.path.join instead of string '+' concatenation for portability
    test_files_dir = os.path.join(current_dir, "example_KubletRotateCertificates")
    report = runner.run(root_folder=test_files_dir, runner_filter=RunnerFilter(checks=[check.id]))
    summary = report.get_summary()
    self.assertEqual(summary['passed'], 1)
    self.assertEqual(summary['failed'], 1)
    self.assertEqual(summary['skipped'], 0)
    self.assertEqual(summary['parsing_errors'], 0)
    # Example files encode their expected outcome in the file name.
    for record in report.failed_checks:
        with self.subTest(record=record):
            self.assertIn("FAILED", record.file_path)
            self.assertIn(record.check_id, [check.id])
    for record in report.passed_checks:
        with self.subTest(record=record):
            self.assertIn("PASSED", record.file_path)
            self.assertIn(record.check_id, [check.id])
def test(self):
    """Verify pass/fail resource sets for the DBInstanceBackupRetentionPeriod examples."""
    runner = Runner()
    current_dir = os.path.dirname(os.path.realpath(__file__))
    # os.path.join instead of string '+' concatenation for portability
    test_files_dir = os.path.join(current_dir, "example_DBInstanceBackupRetentionPeriod")
    report = runner.run(root_folder=test_files_dir, runner_filter=RunnerFilter(checks=[check.id]))
    summary = report.get_summary()
    passing_resources = {
        "aws_rds_cluster.pass",
        "aws_db_instance.pass",
        "aws_rds_cluster.pass2",
        "aws_db_instance.pass2",
    }
    failing_resources = {
        "aws_rds_cluster.fail",
        "aws_rds_cluster.fail2",
        "aws_db_instance.fail",
        "aws_db_instance.fail2",
    }
    unknown_resources = {"aws_db_instance.unknown"}
    # set comprehensions instead of set([...]) (flake8-comprehensions C403)
    passed_check_resources = {c.resource for c in report.passed_checks}
    failed_check_resources = {c.resource for c in report.failed_checks}
    self.assertEqual(summary["passed"], 4)
    self.assertEqual(summary["failed"], 4)
    self.assertEqual(summary["skipped"], 0)
    self.assertEqual(summary["parsing_errors"], 0)
    self.assertEqual(passing_resources, passed_check_resources)
    self.assertEqual(failing_resources, failed_check_resources)
    # The 'unknown' resource must not appear among the scanned resources at all.
    self.assertTrue(unknown_resources.isdisjoint(report.resources))
def test(self):
    """Verify pass/fail resource sets for the AppLoadBalancerTLS12 examples."""
    runner = Runner()
    current_dir = os.path.dirname(os.path.realpath(__file__))
    # os.path.join instead of string '+' concatenation for portability
    test_files_dir = os.path.join(current_dir, "example_AppLoadBalancerTLS12")
    report = runner.run(
        root_folder=test_files_dir, runner_filter=RunnerFilter(checks=[check.id])
    )
    summary = report.get_summary()
    passing_resources = {
        "aws_lb_listener.http_redirect",
        "aws_lb_listener.tcp",
        "aws_lb_listener.udp",
        "aws_lb_listener.tcp_udp",
        "aws_lb_listener.tls_fs_1_2",
        "aws_lb_listener.https_fs_1_2",
        "aws_alb_listener.https_fs_1_2",
    }
    failing_resources = {
        "aws_lb_listener.http",
        "aws_lb_listener.https_2016",
        "aws_lb_listener.tls_fs_1_1",
        "aws_alb_listener.tls_fs_1_1",
        "aws_lb_listener.cognito",
    }
    # set comprehensions instead of set([...]) (flake8-comprehensions C403)
    passed_check_resources = {c.resource for c in report.passed_checks}
    failed_check_resources = {c.resource for c in report.failed_checks}
    self.assertEqual(summary["passed"], 7)
    self.assertEqual(summary["failed"], 5)
    self.assertEqual(summary["skipped"], 0)
    self.assertEqual(summary["parsing_errors"], 0)
    self.assertEqual(passing_resources, passed_check_resources)
    self.assertEqual(failing_resources, failed_check_resources)
def test_summary(self):
    """Verify pass/fail resource sets for the APIGatewayCacheEnable examples."""
    runner = Runner()
    current_dir = os.path.dirname(os.path.realpath(__file__))
    # os.path.join instead of string '+' concatenation for portability
    test_files_dir = os.path.join(current_dir, "example_APIGatewayCacheEnable")
    report = runner.run(root_folder=test_files_dir, runner_filter=RunnerFilter(checks=[check.id]))
    summary = report.get_summary()
    # Only records produced by the check under test are expected.
    for record in report.failed_checks:
        self.assertEqual(record.check_id, check.id)
    for record in report.passed_checks:
        self.assertEqual(record.check_id, check.id)
    passing_resources = {
        "AWS::ApiGateway::Stage.CacheTrue",
        "AWS::Serverless::Api.Enabled",
    }
    failing_resources = {
        "AWS::ApiGateway::Stage.CacheDefault",
        "AWS::ApiGateway::Stage.CacheFalse",
        "AWS::Serverless::Api.Default",
        "AWS::Serverless::Api.Disabled",
    }
    # set comprehensions instead of set([...]) (flake8-comprehensions C403)
    passed_check_resources = {c.resource for c in report.passed_checks}
    failed_check_resources = {c.resource for c in report.failed_checks}
    self.assertEqual(summary['passed'], 2)
    self.assertEqual(summary['failed'], 4)
    self.assertEqual(summary['skipped'], 0)
    self.assertEqual(summary['parsing_errors'], 0)
    self.assertEqual(passing_resources, passed_check_resources)
    self.assertEqual(failing_resources, failed_check_resources)
def test_summary(self):
    """Verify pass/fail resource sets for the IAMRoleAllowsPublicAssume examples."""
    runner = Runner()
    current_dir = os.path.dirname(os.path.realpath(__file__))
    # os.path.join instead of string '+' concatenation for portability
    test_files_dir = os.path.join(current_dir, "example_IAMRoleAllowsPublicAssume")
    report = runner.run(root_folder=test_files_dir, runner_filter=RunnerFilter(checks=[check.id]))
    summary = report.get_summary()
    # Only records produced by the check under test are expected.
    for record in report.failed_checks:
        self.assertEqual(record.check_id, check.id)
    for record in report.passed_checks:
        self.assertEqual(record.check_id, check.id)
    passing_resources = {
        "AWS::IAM::Role.ServiceRole",
        "AWS::IAM::Role.DenyIgnore",
        "AWS::IAM::Role.ServiceRole2",
        "AWS::IAM::Role.DenyIgnore2",
    }
    failing_resources = {
        "AWS::IAM::Role.AWSStarPrincipal",
        "AWS::IAM::Role.AWSStarPrincipalInList",
        "AWS::IAM::Role.AWSStarPrincipal2",
        "AWS::IAM::Role.AWSStarPrincipalInList2",
    }
    # set comprehensions instead of set([...]) (flake8-comprehensions C403)
    passed_check_resources = {c.resource for c in report.passed_checks}
    failed_check_resources = {c.resource for c in report.failed_checks}
    self.assertEqual(summary['passed'], 4)
    self.assertEqual(summary['failed'], 4)
    self.assertEqual(summary['skipped'], 0)
    self.assertEqual(summary['parsing_errors'], 0)
    self.assertEqual(passing_resources, passed_check_resources)
    self.assertEqual(failing_resources, failed_check_resources)
def test(self):
    """Verify pass/fail resource sets for the SSMSessionManagerDocumentLogging examples."""
    runner = Runner()
    current_dir = os.path.dirname(os.path.realpath(__file__))
    # os.path.join instead of string '+' concatenation for portability
    test_files_dir = os.path.join(current_dir, "example_SSMSessionManagerDocumentLogging")
    report = runner.run(root_folder=test_files_dir, runner_filter=RunnerFilter(checks=[check.id]))
    summary = report.get_summary()
    passing_resources = {
        "aws_ssm_document.s3_enabled_encrypted",
        "aws_ssm_document.s3_enabled_encrypted_yaml",
        "aws_ssm_document.cw_enabled_encrypted",
        "aws_ssm_document.cw_enabled_encrypted_yaml",
    }
    failing_resources = {
        "aws_ssm_document.disabled",
        "aws_ssm_document.disabled_yaml",
        "aws_ssm_document.s3_enabled_not_encrypted",
        "aws_ssm_document.s3_enabled_not_encrypted_yaml",
        "aws_ssm_document.cw_enabled_not_encrypted",
        "aws_ssm_document.cw_enabled_not_encrypted_yaml",
    }
    # set comprehensions instead of set([...]) (flake8-comprehensions C403)
    passed_check_resources = {c.resource for c in report.passed_checks}
    failed_check_resources = {c.resource for c in report.failed_checks}
    self.assertEqual(summary["passed"], 4)
    self.assertEqual(summary["failed"], 6)
    self.assertEqual(summary["skipped"], 0)
    self.assertEqual(summary["parsing_errors"], 0)
    self.assertEqual(passing_resources, passed_check_resources)
    self.assertEqual(failing_resources, failed_check_resources)
def test(self):
    """Verify pass/fail resource sets for the WafHasAnyRules examples."""
    runner = Runner()
    current_dir = os.path.dirname(os.path.realpath(__file__))
    # os.path.join instead of string '+' concatenation for portability
    test_files_dir = os.path.join(current_dir, "example_WafHasAnyRules")
    report = runner.run(root_folder=test_files_dir, runner_filter=RunnerFilter(checks=[check.id]))
    summary = report.get_summary()
    # quote style unified to double quotes (originals were mixed)
    passing_resources = {
        "aws_waf_web_acl.pass",
        "aws_wafv2_web_acl.pass",
        "aws_wafregional_web_acl.pass",
    }
    failing_resources = {
        "aws_waf_web_acl.fail",
        "aws_waf_web_acl.fail2",
        "aws_wafv2_web_acl.fail",
        "aws_wafv2_web_acl.fail2",
        "aws_wafregional_web_acl.fail",
        "aws_wafregional_web_acl.fail2",
    }
    # set comprehensions instead of set([...]) (flake8-comprehensions C403)
    passed_check_resources = {c.resource for c in report.passed_checks}
    failed_check_resources = {c.resource for c in report.failed_checks}
    self.assertEqual(summary["passed"], 3)
    self.assertEqual(summary["failed"], 6)
    self.assertEqual(summary["skipped"], 0)
    self.assertEqual(summary["parsing_errors"], 0)
    self.assertEqual(passing_resources, passed_check_resources)
    self.assertEqual(failing_resources, failed_check_resources)
def test_summary(self):
    """Verify pass/fail resource sets for the LambdaEnvironmentEncryptionSettings examples."""
    runner = Runner()
    current_dir = os.path.dirname(os.path.realpath(__file__))
    # os.path.join instead of string '+' concatenation for portability
    test_files_dir = os.path.join(current_dir, "example_LambdaEnvironmentEncryptionSettings")
    report = runner.run(root_folder=test_files_dir, runner_filter=RunnerFilter(checks=[check.id]))
    summary = report.get_summary()
    # Only records produced by the check under test are expected.
    for record in report.failed_checks:
        self.assertEqual(record.check_id, check.id)
    for record in report.passed_checks:
        self.assertEqual(record.check_id, check.id)
    passing_resources = {
        "AWS::Lambda::Function.EnvAndKey",
        "AWS::Lambda::Function.NoEnvAndNoKey",
        "AWS::Serverless::Function.EnvAndKey",
        "AWS::Serverless::Function.NoEnvAndNoKey",
    }
    failing_resources = {
        "AWS::Lambda::Function.EnvAndNoKey",
        "AWS::Serverless::Function.EnvAndNoKey",
    }
    passed_check_resources = {c.resource for c in report.passed_checks}
    failed_check_resources = {c.resource for c in report.failed_checks}
    self.assertEqual(summary['passed'], 4)
    self.assertEqual(summary['failed'], 2)
    self.assertEqual(summary['skipped'], 0)
    self.assertEqual(summary['parsing_errors'], 0)
    self.assertEqual(passing_resources, passed_check_resources)
    self.assertEqual(failing_resources, failed_check_resources)
def test_terraform_module_checks_are_performed_even_if_supported_resources_is_omitted(
        self):
    """A module check registered without supported_resources must still be executed."""
    check_name = "TF_M_2"
    # Imported locally so the ad-hoc check class stays scoped to this test.
    from checkov.common.models.enums import CheckResult
    from checkov.terraform.checks.module.base_module_check import BaseModuleCheck
    from checkov.terraform.checks.module.registry import module_registry

    class ModuleCheck(BaseModuleCheck):
        def __init__(self):
            name = "Test check"
            id = check_name
            categories = []
            super().__init__(name=name, id=id, categories=categories)

        def scan_module_conf(self, conf):
            return CheckResult.PASSED

    check = ModuleCheck()
    current_dir = os.path.dirname(os.path.realpath(__file__))
    valid_dir_path = os.path.join(current_dir, "resources/valid_tf_only_module_usage")
    runner = Runner()
    try:
        result = runner.run(root_folder=valid_dir_path,
                            external_checks_dir=None,
                            runner_filter=RunnerFilter(checks=check_name))
    finally:
        # Always unregister — even if run() raises — so the ad-hoc check
        # cannot leak into the shared registry and pollute other tests.
        for resource in check.supported_resources:
            module_registry.checks[resource].remove(check)
    self.assertEqual(len(result.passed_checks), 1)
    self.assertIn(
        'module.some-module',
        map(lambda record: record.resource, result.passed_checks))
def test(self):
    """Verify pass/fail resource sets for the DataFactoryUsesGitRepository examples."""
    examples_dir = Path(__file__).parent / "example_DataFactoryUsesGitRepository"
    report = Runner().run(root_folder=examples_dir, runner_filter=RunnerFilter(checks=[check.id]))
    expected_passing = {
        "azurerm_data_factory.github",
        "azurerm_data_factory.vsts",
    }
    expected_failing = {
        "azurerm_data_factory.fail",
    }
    summary = report.get_summary()
    self.assertEqual(summary["passed"], 2)
    self.assertEqual(summary["failed"], 1)
    self.assertEqual(summary["skipped"], 0)
    self.assertEqual(summary["parsing_errors"], 0)
    self.assertEqual(expected_passing, {c.resource for c in report.passed_checks})
    self.assertEqual(expected_failing, {c.resource for c in report.failed_checks})
def test(self):
    """Verify pass/fail resource sets for the RDSClusterIAMAuthentication examples."""
    test_files_dir = Path(__file__).parent / "example_RDSClusterIAMAuthentication"
    report = Runner().run(root_folder=test_files_dir, runner_filter=RunnerFilter(checks=[check.id]))
    summary = report.get_summary()
    passing_resources = {
        "aws_rds_cluster.enabled",
    }
    failing_resources = {
        "aws_rds_cluster.default",
        "aws_rds_cluster.disabled",
    }
    # set comprehensions instead of set([...]) (flake8-comprehensions C403)
    passed_check_resources = {c.resource for c in report.passed_checks}
    failed_check_resources = {c.resource for c in report.failed_checks}
    self.assertEqual(summary["passed"], 1)
    self.assertEqual(summary["failed"], 2)
    self.assertEqual(summary["skipped"], 0)
    self.assertEqual(summary["parsing_errors"], 0)
    self.assertEqual(passing_resources, passed_check_resources)
    self.assertEqual(failing_resources, failed_check_resources)
def validate_conditioned_vertices_from_local_graph(self, root_dir, file_name):
    """Build a CFN local graph from root_dir and assert that only resources whose
    conditions evaluate to true survive into the converted definitions."""
    true_condition_resources = {
        'BucketFnEqualsTrue',
        'BucketFnNotTrue',
        'BucketFnNotTrueThroughCondition',
        'BucketFnAndTrue',
        'BucketFnAndTrueWithCondition',
        'BucketFnOrTrue',
        'BucketFnOrTrueWithCondition',
    }
    definitions, _ = create_definitions(root_folder=root_dir, files=None, runner_filter=RunnerFilter())
    local_graph = CloudformationLocalGraph(definitions)
    local_graph.build_graph(render_variables=True)
    definitions, breadcrumbs = convert_graph_vertices_to_definitions(
        local_graph.vertices, root_dir)
    self.assertIsNotNone(definitions)
    # len(dict) directly instead of len(dict.items())
    self.assertEqual(len(definitions), 1)
    test_yaml_definitions = definitions[os.path.join(root_dir, file_name)][TemplateSections.RESOURCES]
    # Iterating a dict yields its keys; no need for .keys()
    definitions_set = set(test_yaml_definitions)
    self.assertEqual(len(definitions_set), 7)
    self.assertSetEqual(true_condition_resources, definitions_set)
def test(self):
    """Verify pass/fail resource sets for the PolicyNoDSRI examples."""
    runner = Runner()
    current_dir = os.path.dirname(os.path.realpath(__file__))
    # os.path.join instead of string '+' concatenation for portability
    test_files_dir = os.path.join(current_dir, "example_PolicyNoDSRI")
    report = runner.run(root_folder=test_files_dir, runner_filter=RunnerFilter(checks=[check.id]))
    summary = report.get_summary()
    passing_resources = {
        'panos_security_policy.pass1',
        'panos_security_rule_group.pass2',
        'panos_security_policy.pass3',
        'panos_security_rule_group.pass4',
        'panos_security_policy.pass5',
        'panos_security_rule_group.pass6',
    }
    failing_resources = {
        'panos_security_policy.fail1',
        'panos_security_rule_group.fail2',
        'panos_security_policy.fail3',
        'panos_security_rule_group.fail4',
    }
    # set comprehensions instead of set([...]) (flake8-comprehensions C403)
    passed_check_resources = {c.resource for c in report.passed_checks}
    failed_check_resources = {c.resource for c in report.failed_checks}
    self.assertEqual(summary['passed'], 6)
    self.assertEqual(summary['failed'], 4)
    self.assertEqual(summary['skipped'], 0)
    self.assertEqual(summary['parsing_errors'], 0)
    self.assertEqual(passing_resources, passed_check_resources)
    self.assertEqual(failing_resources, failed_check_resources)
def test(self):
    """Verify pass/fail resource sets for the FunctionAppMinTLSVersion examples."""
    examples_dir = Path(__file__).parent / "example_FunctionAppMinTLSVersion"
    report = Runner().run(root_folder=examples_dir, runner_filter=RunnerFilter(checks=[check.id]))
    expected_passing = {
        "azurerm_function_app.pass",
        "azurerm_function_app.pass2",
    }
    expected_failing = {
        "azurerm_function_app.fail",
    }
    summary = report.get_summary()
    self.assertEqual(summary["passed"], 2)
    self.assertEqual(summary["failed"], 1)
    self.assertEqual(summary["skipped"], 0)
    self.assertEqual(summary["parsing_errors"], 0)
    self.assertEqual(expected_passing, {c.resource for c in report.passed_checks})
    self.assertEqual(expected_failing, {c.resource for c in report.failed_checks})
def test(self):
    """Verify pass/fail resource sets for the DataprocPrivateCluster examples."""
    runner = Runner()
    current_dir = os.path.dirname(os.path.realpath(__file__))
    # os.path.join instead of string '+' concatenation for portability
    test_files_dir = os.path.join(current_dir, "example_DataprocPrivateCluster")
    report = runner.run(root_folder=test_files_dir, runner_filter=RunnerFilter(checks=[check.id]))
    summary = report.get_summary()
    passing_resources = {
        'google_dataproc_cluster_iam_binding.pass1',
        'google_dataproc_cluster_iam_binding.pass2',
        'google_dataproc_cluster_iam_member.pass1',
        'google_dataproc_cluster_iam_member.pass2',
    }
    failing_resources = {
        'google_dataproc_cluster_iam_binding.fail1',
        'google_dataproc_cluster_iam_binding.fail2',
        'google_dataproc_cluster_iam_binding.fail3',
        'google_dataproc_cluster_iam_binding.fail4',
        'google_dataproc_cluster_iam_member.fail1',
        'google_dataproc_cluster_iam_member.fail2',
    }
    # set comprehensions instead of set([...]) (flake8-comprehensions C403)
    passed_check_resources = {c.resource for c in report.passed_checks}
    failed_check_resources = {c.resource for c in report.failed_checks}
    self.assertEqual(summary['passed'], 4)
    self.assertEqual(summary['failed'], 6)
    self.assertEqual(summary['skipped'], 0)
    self.assertEqual(summary['parsing_errors'], 0)
    self.assertEqual(passing_resources, passed_check_resources)
    self.assertEqual(failing_resources, failed_check_resources)
def test(self):
    """Verify pass/fail resource sets for the HealthcheckExists Dockerfile examples."""
    runner = Runner()
    current_dir = os.path.dirname(os.path.realpath(__file__))
    # os.path.join instead of string '+' concatenation for portability
    test_files_dir = os.path.join(current_dir, "example_HealthcheckExists")
    report = runner.run(root_folder=test_files_dir, runner_filter=RunnerFilter(checks=[check.id]))
    summary = report.get_summary()
    # Resource IDs are "<file_path>.<instruction>"; the failing one has no
    # HEALTHCHECK instruction, hence the trailing dot.
    passing_resources = {"/success/Dockerfile.HEALTHCHECK"}
    failing_resources = {"/failure/Dockerfile."}
    # set comprehensions instead of set([...]) (flake8-comprehensions C403)
    passed_check_resources = {c.resource for c in report.passed_checks}
    failed_check_resources = {c.resource for c in report.failed_checks}
    self.assertEqual(summary["passed"], 1)
    self.assertEqual(summary["failed"], 1)
    self.assertEqual(summary["skipped"], 0)
    self.assertEqual(summary["parsing_errors"], 0)
    self.assertEqual(passing_resources, passed_check_resources)
    self.assertEqual(failing_resources, failed_check_resources)
def test(self):
    """Verify pass/fail resource sets for the SQSQueuePolicyAnyPrincipal examples."""
    runner = Runner()
    current_dir = os.path.dirname(os.path.realpath(__file__))
    # os.path.join instead of string '+' concatenation for portability
    test_files_dir = os.path.join(current_dir, "example_SQSQueuePolicyAnyPrincipal")
    report = runner.run(root_folder=test_files_dir, runner_filter=RunnerFilter(checks=[check.id]))
    summary = report.get_summary()
    passing_resources = {
        "aws_sqs_queue_policy.q1",
        "aws_sqs_queue_policy.q6",
        "aws_sqs_queue.aq1",
        "aws_sqs_queue.aq6",
    }
    failing_resources = {
        "aws_sqs_queue_policy.q2",
        "aws_sqs_queue_policy.q3",
        "aws_sqs_queue_policy.q4",
        "aws_sqs_queue_policy.q5",
        "aws_sqs_queue.aq2",
        "aws_sqs_queue.aq3",
        "aws_sqs_queue.aq4",
        "aws_sqs_queue.aq5",
    }
    # set comprehensions instead of set([...]) (flake8-comprehensions C403)
    passed_check_resources = {c.resource for c in report.passed_checks}
    failed_check_resources = {c.resource for c in report.failed_checks}
    self.assertEqual(summary["passed"], 4)
    self.assertEqual(summary["failed"], 8)
    self.assertEqual(summary["skipped"], 0)
    self.assertEqual(summary["parsing_errors"], 0)
    self.assertEqual(passing_resources, passed_check_resources)
    self.assertEqual(failing_resources, failed_check_resources)
def test_summary(self):
    """Verify pass/fail resource sets for the AppSyncLogging examples."""
    examples_dir = Path(__file__).parent / "example_AppSyncLogging"
    report = Runner().run(root_folder=str(examples_dir), runner_filter=RunnerFilter(checks=[check.id]))
    expected_passing = {
        "AWS::AppSync::GraphQLApi.Enabled",
    }
    expected_failing = {
        "AWS::AppSync::GraphQLApi.Default",
    }
    summary = report.get_summary()
    self.assertEqual(summary["passed"], 1)
    self.assertEqual(summary["failed"], 1)
    self.assertEqual(summary["skipped"], 0)
    self.assertEqual(summary["parsing_errors"], 0)
    self.assertEqual(expected_passing, {c.resource for c in report.passed_checks})
    self.assertEqual(expected_failing, {c.resource for c in report.failed_checks})
def test_summary(self):
    """Verify pass/fail resource sets for the LambdaEnvironmentCredentials examples."""
    test_files_dir = Path(__file__).parent / "example_LambdaEnvironmentCredentials"
    report = Runner().run(root_folder=str(test_files_dir), runner_filter=RunnerFilter(checks=[check.id]))
    summary = report.get_summary()
    passing_resources = {
        "AWS::Lambda::Function.NoEnv",
        "AWS::Lambda::Function.NoSecret",
    }
    failing_resources = {
        "AWS::Lambda::Function.Secret",
    }
    # set comprehensions instead of set([...]) (flake8-comprehensions C403)
    passed_check_resources = {c.resource for c in report.passed_checks}
    failed_check_resources = {c.resource for c in report.failed_checks}
    self.assertEqual(summary["passed"], 2)
    self.assertEqual(summary["failed"], 1)
    self.assertEqual(summary["skipped"], 0)
    self.assertEqual(summary["parsing_errors"], 0)
    self.assertEqual(passing_resources, passed_check_resources)
    self.assertEqual(failing_resources, failed_check_resources)
def test(self):
    """Verify pass/fail resource sets for the CloudsplainingIAMWrite examples."""
    test_files_dir = Path(__file__).parent / "example_CloudsplainingIAMWrite"
    report = Runner().run(root_folder=test_files_dir, runner_filter=RunnerFilter(checks=[check.id]))
    summary = report.get_summary()
    passing_resources = {
        "aws_iam_policy_document.restrictable",
        "aws_iam_policy_document.unrestrictable",
    }
    failing_resources = {
        "aws_iam_policy_document.fail",
    }
    # set comprehensions instead of set([...]) (flake8-comprehensions C403)
    passed_check_resources = {c.resource for c in report.passed_checks}
    failed_check_resources = {c.resource for c in report.failed_checks}
    self.assertEqual(summary["passed"], 2)
    self.assertEqual(summary["failed"], 1)
    self.assertEqual(summary["skipped"], 0)
    self.assertEqual(summary["parsing_errors"], 0)
    self.assertEqual(passing_resources, passed_check_resources)
    self.assertEqual(failing_resources, failed_check_resources)
def test(self):
    """Verify pass/fail resource sets for GoogleComputeFirewallUnrestrictedIngress21."""
    # given
    examples_dir = Path(__file__).parent / "example_GoogleComputeFirewallUnrestrictedIngress21"
    # when
    report = Runner().run(root_folder=str(examples_dir), runner_filter=RunnerFilter(checks=[check.id]))
    # then
    expected_passing = {
        "google_compute_firewall.restricted",
        "google_compute_firewall.allow_null",
        "google_compute_firewall.allow_different_int",
    }
    expected_failing = {
        "google_compute_firewall.allow_multiple",
        "google_compute_firewall.allow_ftp_int",
        "google_compute_firewall.allow_all",
    }
    summary = report.get_summary()
    self.assertEqual(summary["passed"], 3)
    self.assertEqual(summary["failed"], 3)
    self.assertEqual(summary["skipped"], 0)
    self.assertEqual(summary["parsing_errors"], 0)
    self.assertEqual(summary["resource_count"], 7)  # 1 unknown
    self.assertEqual(expected_passing, {c.resource for c in report.passed_checks})
    self.assertEqual(expected_failing, {c.resource for c in report.failed_checks})
def test(self):
    """Verify pass/fail resource sets for the GKEReleaseChannel examples."""
    runner = Runner()
    current_dir = os.path.dirname(os.path.realpath(__file__))
    # os.path.join instead of string '+' concatenation for portability
    test_files_dir = os.path.join(current_dir, "test_GKEReleaseChannel")
    report = runner.run(root_folder=test_files_dir, runner_filter=RunnerFilter(checks=[check.id]))
    summary = report.get_summary()
    passing_resources = {'google_container_cluster.success'}
    failing_resources = {'google_container_cluster.fail'}
    # set comprehensions instead of set([...]) (flake8-comprehensions C403)
    passed_check_resources = {c.resource for c in report.passed_checks}
    failed_check_resources = {c.resource for c in report.failed_checks}
    self.assertEqual(summary['passed'], 1)
    self.assertEqual(summary['failed'], 1)
    self.assertEqual(summary['skipped'], 0)
    self.assertEqual(summary['parsing_errors'], 0)
    self.assertEqual(passing_resources, passed_check_resources)
    self.assertEqual(failing_resources, failed_check_resources)
def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(), collect_skip_comments=True, helmChart=None):
    """Parse Kubernetes manifests from root_folder and/or files and scan them.

    Returns a Report with one Record per (entity, check) result.

    NOTE(review): `runner_filter=RunnerFilter()` is a mutable default argument,
    evaluated once at definition time and shared across calls — confirm
    RunnerFilter is effectively stateless before relying on this.
    """
    report = Report(self.check_type)
    definitions = {}        # file path -> list of parsed YAML documents
    definitions_raw = {}    # file path -> raw source lines (used to slice code blocks)
    parsing_errors = {}     # NOTE(review): never written to in this method — appears unused
    files_list = []
    if external_checks_dir:
        # Register user-supplied checks before scanning.
        for directory in external_checks_dir:
            registry.load_external_checks(directory, runner_filter)
    if files:
        # Explicit file list (-f): keys are the paths exactly as given.
        for file in files:
            parse_result = parse(file)
            if parse_result:
                (definitions[file], definitions_raw[file]) = parse_result
    if root_folder:
        # Directory scan (-d): collect candidate files, then parse them with
        # keys rewritten to be relative to root_folder (with a leading '/').
        for root, d_names, f_names in os.walk(root_folder):
            filter_ignored_directories(d_names)
            for file in f_names:
                file_ending = os.path.splitext(file)[1]
                if file_ending in K8_POSSIBLE_ENDINGS:
                    full_path = os.path.join(root, file)
                    if "/." not in full_path and file not in [
                            'package.json', 'package-lock.json'
                    ]:
                        # skip temp directories
                        files_list.append(full_path)
        for file in files_list:
            relative_file_path = f'/{os.path.relpath(file, os.path.commonprefix((root_folder, file)))}'
            parse_result = parse(file)
            if parse_result:
                (definitions[relative_file_path],
                 definitions_raw[relative_file_path]) = parse_result
    for k8_file in definitions.keys():

        # There are a few cases here. If -f was used, there could be a leading / because it's an absolute path,
        # or there will be no leading slash; root_folder will always be none.
        # If -d is used, root_folder will be the value given, and -f will start with a / (hardcoded above).
        # The goal here is simply to get a valid path to the file (which sls_file does not always give).
        if k8_file[0] == '/':
            path_to_convert = (root_folder + k8_file) if root_folder else k8_file
        else:
            path_to_convert = (os.path.join(
                root_folder, k8_file)) if root_folder else k8_file

        file_abs_path = os.path.abspath(path_to_convert)
        if definitions[k8_file]:
            # Pass 1: flatten "List" kinds by appending their items to the same
            # document list (the list grows while indexed — order matters).
            for i in range(len(definitions[k8_file])):
                # Skip fragments that have neither apiVersion nor kind.
                if (not 'apiVersion' in definitions[k8_file][i].keys()
                        ) and (not 'kind' in definitions[k8_file][i].keys()):
                    continue
                logging.debug("Template Dump for {}: {}".format(
                    k8_file, definitions[k8_file][i], indent=2))
                entity_conf = definitions[k8_file][i]

                # Split out resources if entity kind is List
                if entity_conf["kind"] == "List":
                    for item in entity_conf["items"]:
                        definitions[k8_file].append(item)

            # Pass 2: for each real entity, pull out containers/initContainers
            # as standalone pseudo-entities so container-level checks can run.
            for i in range(len(definitions[k8_file])):
                if (not 'apiVersion' in definitions[k8_file][i].keys()
                        ) and (not 'kind' in definitions[k8_file][i].keys()):
                    continue
                logging.debug("Template Dump for {}: {}".format(
                    k8_file, definitions[k8_file][i], indent=2))
                entity_conf = definitions[k8_file][i]

                if entity_conf["kind"] == "List":
                    continue

                # Skip entity without metadata["name"]
                if "metadata" in entity_conf:
                    if isinstance(
                            entity_conf["metadata"],
                            int) or not "name" in entity_conf["metadata"]:
                        continue
                else:
                    continue

                # Skip entity with parent (metadata["ownerReferences"]) in runtime
                # We will alert in runtime only
                if "ownerReferences" in entity_conf["metadata"] and \
                        entity_conf["metadata"]["ownerReferences"] is not None:
                    continue

                # Append containers and initContainers to definitions list
                for type in ["containers", "initContainers"]:
                    # NOTE(review): `type` shadows the builtin — rename when next refactoring.
                    containers = []
                    if entity_conf["kind"] == "CustomResourceDefinition":
                        continue
                    containers = self._search_deep_keys(
                        type, entity_conf, [])
                    if not containers:
                        continue
                    containers = containers.pop()
                    #containers.insert(0,entity_conf['kind'])
                    containerDef = {}
                    namespace = ""
                    if "namespace" in entity_conf["metadata"]:
                        namespace = entity_conf["metadata"]["namespace"]
                    else:
                        namespace = "default"
                    containerDef["containers"] = containers.pop()
                    if containerDef["containers"] is not None:
                        # Tag each container with its parent entity so records
                        # can be traced back to the owning resource.
                        for cd in containerDef["containers"]:
                            i = containerDef["containers"].index(cd)
                            containerDef["containers"][i][
                                "apiVersion"] = entity_conf["apiVersion"]
                            containerDef["containers"][i]["kind"] = type
                            containerDef["containers"][i][
                                "parent"] = "{}.{}.{} (container {})".format(
                                    entity_conf["kind"], entity_conf["metadata"]["name"],
                                    namespace, str(i))
                            containerDef["containers"][i][
                                "parent_metadata"] = entity_conf[
                                    "metadata"]
                        definitions[k8_file].extend(
                            containerDef["containers"])

            # Run for each definition included added container definitions
            for i in range(len(definitions[k8_file])):
                if (not 'apiVersion' in definitions[k8_file][i].keys()
                        ) and (not 'kind' in definitions[k8_file][i].keys()):
                    continue
                logging.debug("Template Dump for {}: {}".format(
                    k8_file, definitions[k8_file][i], indent=2))
                entity_conf = definitions[k8_file][i]

                if entity_conf["kind"] == "List":
                    continue

                if isinstance(entity_conf["kind"], int):
                    continue
                # Skip entity without metadata["name"] or parent_metadata["name"]
                if not any(x in entity_conf["kind"]
                           for x in ["containers", "initContainers"]):
                    if "metadata" in entity_conf:
                        if isinstance(
                                entity_conf["metadata"], int
                        ) or not "name" in entity_conf["metadata"]:
                            continue
                    else:
                        continue

                # Skip entity with parent (metadata["ownerReferences"]) in runtime
                # We will alert in runtime only
                if "metadata" in entity_conf:
                    if "ownerReferences" in entity_conf["metadata"] and \
                            entity_conf["metadata"]["ownerReferences"] is not None:
                        continue

                # Skip Kustomization Templates (for now)
                if entity_conf["kind"] == "Kustomization":
                    continue

                skipped_checks = get_skipped_checks(entity_conf)

                results = registry.scan(k8_file, entity_conf,
                                        skipped_checks, runner_filter)

                # TODO refactor into context parsing
                find_lines_result_list = list(
                    find_lines(entity_conf, '__startline__'))
                start_line = entity_conf["__startline__"]
                end_line = entity_conf["__endline__"]

                # Slice the raw source for the record's code block; a one-line
                # entity keeps its end line, multi-line entities drop the
                # trailing document separator line.
                if start_line == end_line:
                    entity_lines_range = [start_line, end_line]
                    entity_code_lines = definitions_raw[k8_file][
                        start_line - 1:end_line]
                else:
                    entity_lines_range = [start_line, end_line - 1]
                    entity_code_lines = definitions_raw[k8_file][
                        start_line - 1:end_line - 1]

                # TODO? - Variable Eval Message!
                variable_evaluations = {}

                for check, check_result in results.items():
                    record = Record(
                        check_id=check.id,
                        check_name=check.name,
                        check_result=check_result,
                        code_block=entity_code_lines,
                        file_path=k8_file,
                        file_line_range=entity_lines_range,
                        resource=check.get_resource_id(entity_conf),
                        evaluations=variable_evaluations,
                        check_class=check.__class__.__module__,
                        file_abs_path=file_abs_path)
                    report.add_record(record=record)

    return report
def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(), collect_skip_comments=True):
    """Parse CloudFormation templates from root_folder and/or files and scan them.

    Returns a Report with one Record per (resource, check) result.

    NOTE(review): `runner_filter=RunnerFilter()` is a mutable default argument,
    evaluated once at definition time and shared across calls — confirm
    RunnerFilter is effectively stateless before relying on this.
    """
    report = Report(self.check_type)
    definitions = {}        # file path -> parsed template
    definitions_raw = {}    # file path -> raw source lines
    parsing_errors = {}     # NOTE(review): never written to in this method — appears unused
    files_list = []
    if external_checks_dir:
        # Register user-supplied checks before scanning.
        for directory in external_checks_dir:
            cfn_registry.load_external_checks(directory, runner_filter)

    if files:
        # Explicit file list (-f): keys are the paths exactly as given.
        for file in files:
            (definitions[file], definitions_raw[file]) = parse(file)

    if root_folder:
        # Directory scan (-d): collect candidate files, then parse them with
        # keys rewritten to be relative to root_folder (with a leading '/').
        for root, d_names, f_names in os.walk(root_folder):
            filter_ignored_directories(d_names)
            for file in f_names:
                file_ending = os.path.splitext(file)[1]
                if file_ending in CF_POSSIBLE_ENDINGS:
                    files_list.append(os.path.join(root, file))

        for file in files_list:
            relative_file_path = f'/{os.path.relpath(file, os.path.commonprefix((root_folder, file)))}'
            try:
                (definitions[relative_file_path],
                 definitions_raw[relative_file_path]) = parse(file)
            except TypeError:
                logging.info(
                    f'CloudFormation skipping {file} as it is not a valid CF template'
                )

    # Filter out empty files that have not been parsed successfully, and filter out non-CF template files
    definitions = {
        k: v
        for k, v in definitions.items() if v and isinstance(v, dict_node)
        and v.__contains__("Resources") and isinstance(v["Resources"], dict_node)
    }
    definitions_raw = {
        k: v
        for k, v in definitions_raw.items() if k in definitions.keys()
    }

    for cf_file in definitions.keys():

        # There are a few cases here. If -f was used, there could be a leading / because it's an absolute path,
        # or there will be no leading slash; root_folder will always be none.
        # If -d is used, root_folder will be the value given, and -f will start with a / (hardcoded above).
        # The goal here is simply to get a valid path to the file (which cf_file does not always give).
        if cf_file[0] == '/':
            path_to_convert = (root_folder + cf_file) if root_folder else cf_file
        else:
            path_to_convert = (os.path.join(
                root_folder, cf_file)) if root_folder else cf_file

        file_abs_path = os.path.abspath(path_to_convert)
        if isinstance(
                definitions[cf_file],
                dict_node) and 'Resources' in definitions[cf_file].keys():
            cf_context_parser = ContextParser(cf_file, definitions[cf_file],
                                              definitions_raw[cf_file])
            logging.debug("Template Dump for {}: {}".format(
                cf_file, definitions[cf_file], indent=2))
            # Resolve default Ref values before scanning individual resources.
            cf_context_parser.evaluate_default_refs()
            for resource_name, resource in definitions[cf_file][
                    'Resources'].items():
                resource_id = cf_context_parser.extract_cf_resource_id(
                    resource, resource_name)
                # check that the resource can be parsed as a CF resource
                if resource_id:
                    entity_lines_range, entity_code_lines = cf_context_parser.extract_cf_resource_code_lines(
                        resource)
                    if entity_lines_range and entity_code_lines:
                        # TODO - Variable Eval Message!
                        variable_evaluations = {}
                        skipped_checks = ContextParser.collect_skip_comments(
                            entity_code_lines)
                        results = cfn_registry.scan(
                            cf_file, {resource_name: resource},
                            skipped_checks, runner_filter)
                        for check, check_result in results.items():
                            record = Record(
                                check_id=check.id,
                                check_name=check.name,
                                check_result=check_result,
                                code_block=entity_code_lines,
                                file_path=cf_file,
                                file_line_range=entity_lines_range,
                                resource=resource_id,
                                evaluations=variable_evaluations,
                                check_class=check.__class__.__module__,
                                file_abs_path=file_abs_path)
                            report.add_record(record=record)
    return report
def run(self, root_folder=None, external_checks_dir=None, files=None, runner_filter=None,
        collect_skip_comments=True):
    """Scan Dockerfiles and return a Report of check results.

    :param root_folder: directory to walk recursively for Dockerfiles; may be None
    :param external_checks_dir: optional list of directories with custom checks to load
    :param files: optional explicit file list; non-Dockerfile names are filtered out
    :param runner_filter: RunnerFilter controlling which checks run; a fresh default
                          is created per call (avoids the shared mutable-default pitfall)
    :param collect_skip_comments: accepted for interface compatibility; not used here
    :return: Report populated with one Record per (instruction, check) result
    """
    # NOTE: the original signature used `runner_filter=RunnerFilter()`, a mutable
    # default shared across every call; create a fresh instance instead.
    if runner_filter is None:
        runner_filter = RunnerFilter()
    report = Report(self.check_type)
    files_list = []
    filepath_fn = None
    if external_checks_dir:
        for directory in external_checks_dir:
            registry.load_external_checks(directory)

    if files:
        files_list = [file for file in files if Runner._is_docker_file(os.path.basename(file))]

    if root_folder:
        # Keys into the definitions dict are root-relative paths with a leading '/'
        # (relied on below to rebuild an absolute path).
        filepath_fn = lambda f: f'/{os.path.relpath(f, os.path.commonprefix((root_folder, f)))}'
        for root, d_names, f_names in os.walk(root_folder):
            filter_ignored_paths(root, d_names, runner_filter.excluded_paths)
            filter_ignored_paths(root, f_names, runner_filter.excluded_paths)
            for file in f_names:
                if Runner._is_docker_file(file):
                    file_path = os.path.join(root, file)
                    files_list.append(file_path)

    definitions, definitions_raw = get_files_definitions(files_list, filepath_fn)

    for docker_file_path in definitions.keys():
        # There are a few cases here. If -f was used, there could be a leading / because it's an absolute path,
        # or there will be no leading slash; root_folder will always be none.
        # If -d is used, root_folder will be the value given, and -f will start with a / (hardcoded above).
        # The goal here is simply to get a valid path to the file (which docker_file_path does not always give).
        if docker_file_path[0] == '/':
            path_to_convert = (root_folder + docker_file_path) if root_folder else docker_file_path
        else:
            path_to_convert = (os.path.join(root_folder, docker_file_path)) if root_folder else docker_file_path
        file_abs_path = os.path.abspath(path_to_convert)
        report.add_resource(file_abs_path)

        skipped_checks = collect_skipped_checks(definitions[docker_file_path])
        instructions = definitions[docker_file_path]

        results = registry.scan(docker_file_path, instructions, skipped_checks, runner_filter)
        for check, check_result in results.items():
            result_configuration = check_result['results_configuration']
            # Default to the whole file when the check did not point at a specific instruction.
            startline = 0
            endline = len(definitions_raw[docker_file_path]) - 1
            result_instruction = ""
            if result_configuration:
                startline = result_configuration['startline']
                endline = result_configuration['endline']
                result_instruction = result_configuration["instruction"]
            codeblock = []
            self.calc_record_codeblock(codeblock, definitions_raw, docker_file_path, endline, startline)
            # NOTE: the original passed a third argument (startline) to a two-placeholder
            # format string; str.format silently ignored it, so dropping it preserves behavior.
            record = Record(check_id=check.id, bc_check_id=check.bc_id, check_name=check.name,
                            check_result=check_result,
                            code_block=codeblock, file_path=docker_file_path,
                            file_line_range=[startline + 1, endline + 1],
                            resource=f"{docker_file_path}.{result_instruction}",
                            evaluations=None, check_class=check.__class__.__module__,
                            file_abs_path=file_abs_path, entity_tags=None)
            record.set_guideline(check.guideline)
            report.add_record(record=record)
    return report
def run(banner=checkov_banner, argv=sys.argv[1:]):
    """CLI entry point: parse arguments, set up the optional Bridgecrew platform
    integration, dispatch the selected runners and print/upload the reports.

    :param banner: banner text printed when no scan target is given
    :param argv: argument vector; default is captured from sys.argv at import time
    :return: aggregate exit code for -d scans, the result of print_reports() for
             -f scans, otherwise None (parser.error() exits the process directly)
    """
    default_config_paths = get_default_config_paths(sys.argv[1:])
    parser = ExtArgumentParser(
        description='Infrastructure as code static analysis',
        default_config_files=default_config_paths,
        config_file_parser_class=configargparse.YAMLConfigFileParser,
        add_env_var_help=True)
    add_parser_args(parser)
    config = parser.parse_args(argv)
    # bridgecrew uses both the urllib3 and requests libraries, while checkov uses the requests library.
    # Allow the user to specify a CA bundle to be used by both libraries.
    bc_integration.setup_http_manager(config.ca_certificate)

    # if a repo is passed in it'll save it. Otherwise a default will be created based on the file or dir
    config.repo_id = bc_integration.persist_repo_id(config)
    # if a bc_api_key is passed it'll save it. Otherwise it will check ~/.bridgecrew/credentials
    config.bc_api_key = bc_integration.persist_bc_api_key(config)

    excluded_paths = config.skip_path or []
    # Translate CLI flags into the filter every runner consults.
    runner_filter = RunnerFilter(
        framework=config.framework,
        skip_framework=config.skip_framework,
        checks=config.check,
        skip_checks=config.skip_check,
        download_external_modules=convert_str_to_bool(config.download_external_modules),
        external_modules_download_path=config.external_modules_download_path,
        evaluate_variables=convert_str_to_bool(config.evaluate_variables),
        runners=checkov_runners,
        excluded_paths=excluded_paths,
        all_external=config.run_all_external_checks)
    if outer_registry:
        # An embedding application supplied its own registry; reuse it with the fresh filter.
        runner_registry = outer_registry
        runner_registry.runner_filter = runner_filter
    else:
        runner_registry = RunnerRegistry(banner, runner_filter, *DEFAULT_RUNNERS)

    runnerDependencyHandler = RunnerDependencyHandler(runner_registry)
    runnerDependencyHandler.validate_runner_deps()

    if config.show_config:
        print(parser.format_values())
        return

    # An explicitly empty key is distinguished from an absent one: it usually
    # means a broken secret mapping rather than "no platform integration".
    if config.bc_api_key == '':
        parser.error(
            'The --bc-api-key flag was specified but the value was blank. If this value was passed as a secret, '
            'you may need to double check the mapping.')
    elif config.bc_api_key:
        logger.debug(f'Using API key ending with {config.bc_api_key[-8:]}')
        if config.repo_id is None:
            parser.error("--repo-id argument is required when using --bc-api-key")
        if len(config.repo_id.split('/')) != 2:
            parser.error(
                "--repo-id argument format should be 'organization/repository_name' E.g "
                "bridgecrewio/checkov")

        source = os.getenv('BC_SOURCE', 'cli')
        source_version = os.getenv('BC_SOURCE_VERSION', version)
        logger.debug(f'BC_SOURCE = {source}, version = {source_version}')
        try:
            bc_integration.setup_bridgecrew_credentials(
                bc_api_key=config.bc_api_key, repo_id=config.repo_id,
                skip_fixes=config.skip_fixes,
                skip_suppressions=config.skip_suppressions,
                skip_policy_download=config.skip_policy_download,
                source=source, source_version=source_version, repo_branch=config.branch)
            # Platform-side exclusions are merged into the locally configured ones.
            platform_excluded_paths = bc_integration.get_excluded_paths() or []
            runner_filter.excluded_paths = runner_filter.excluded_paths + platform_excluded_paths
        except Exception as e:
            # Best effort: a broken platform integration aborts the run rather
            # than silently scanning without it.
            logger.error(
                'An error occurred setting up the Bridgecrew platform integration. Please check your API token'
                ' and try again.', exc_info=True)
            return
    else:
        logger.debug('No API key found. Scanning locally only.')

    guidelines = {}
    if not config.no_guide:
        guidelines = bc_integration.get_guidelines()

    if config.check and config.skip_check:
        # --check and --skip-check may be combined only when their id sets do not overlap.
        if any(item in runner_filter.checks for item in runner_filter.skip_checks):
            parser.error(
                "The check ids specified for '--check' and '--skip-check' must be mutually exclusive.")
            return

    integration_feature_registry.run_pre_scan()

    if config.list:
        print_checks(framework=config.framework)
        return

    baseline = None
    if config.baseline:
        baseline = Baseline()
        baseline.from_json(config.baseline)

    external_checks_dir = get_external_checks_dir(config)
    url = None
    created_baseline_path = None

    if config.directory:
        # One scan per -d directory; the overall exit code fails if any single scan failed.
        exit_codes = []
        for root_folder in config.directory:
            file = config.file
            scan_reports = runner_registry.run(
                root_folder=root_folder,
                external_checks_dir=external_checks_dir,
                files=file,
                guidelines=guidelines)
            if baseline:
                baseline.compare_and_reduce_reports(scan_reports)
            if bc_integration.is_integration_configured():
                bc_integration.persist_repository(root_folder, excluded_paths=runner_filter.excluded_paths)
                bc_integration.persist_scan_results(scan_reports)
                url = bc_integration.commit_repository(config.branch)

            if config.create_baseline:
                # Write a baseline file of all current findings next to the scanned directory.
                overall_baseline = Baseline()
                for report in scan_reports:
                    overall_baseline.add_findings_from_report(report)
                created_baseline_path = os.path.join(os.path.abspath(root_folder), '.checkov.baseline')
                with open(created_baseline_path, 'w') as f:
                    json.dump(overall_baseline.to_dict(), f, indent=4)
            exit_codes.append(runner_registry.print_reports(
                scan_reports, config, url=url,
                created_baseline_path=created_baseline_path, baseline=baseline))

        exit_code = 1 if 1 in exit_codes else 0
        return exit_code
    elif config.file:
        scan_reports = runner_registry.run(
            external_checks_dir=external_checks_dir,
            files=config.file,
            guidelines=guidelines,
            repo_root_for_plan_enrichment=config.repo_root_for_plan_enrichment)
        if baseline:
            baseline.compare_and_reduce_reports(scan_reports)
        if config.create_baseline:
            overall_baseline = Baseline()
            for report in scan_reports:
                overall_baseline.add_findings_from_report(report)
            # Anchor the baseline at the common prefix of all scanned files.
            created_baseline_path = os.path.join(
                os.path.abspath(os.path.commonprefix(config.file)), '.checkov.baseline')
            with open(created_baseline_path, 'w') as f:
                json.dump(overall_baseline.to_dict(), f, indent=4)
        if bc_integration.is_integration_configured():
            files = [os.path.abspath(file) for file in config.file]
            root_folder = os.path.split(os.path.commonprefix(files))[0]
            bc_integration.persist_repository(root_folder, files, excluded_paths=runner_filter.excluded_paths)
            bc_integration.persist_scan_results(scan_reports)
            url = bc_integration.commit_repository(config.branch)
        return runner_registry.print_reports(
            scan_reports, config, url=url,
            created_baseline_path=created_baseline_path, baseline=baseline)
    elif config.docker_image:
        # Image scanning requires the platform integration plus an explicit
        # Dockerfile path and branch; parser.error() exits, the returns are defensive.
        if config.bc_api_key is None:
            parser.error("--bc-api-key argument is required when using --docker-image")
            return
        if config.dockerfile_path is None:
            parser.error("--dockerfile-path argument is required when using --docker-image")
            return
        if config.branch is None:
            parser.error("--branch argument is required when using --docker-image")
            return
        bc_integration.commit_repository(config.branch)
        image_scanner.scan(config.docker_image, config.dockerfile_path)
    else:
        # No scan target given: show the banner and offer onboarding.
        print(f"{banner}")
        bc_integration.onboarding()
def run(banner=checkov_banner, argv=sys.argv[1:]):
    """Legacy CLI entry point: parse arguments with argparse, dispatch the
    fixed set of runners and print the reports.

    :param banner: banner text printed when no scan target is given
    :param argv: argument vector; default is captured from sys.argv at import time
    :return: None (parser.error() exits the process directly)
    """
    parser = argparse.ArgumentParser(description='Infrastructure as code static analysis')
    add_parser_args(parser)
    args = parser.parse_args(argv)
    # Translate CLI flags into the filter every runner consults.
    runner_filter = RunnerFilter(
        framework=args.framework,
        checks=args.check,
        skip_checks=args.skip_check,
        download_external_modules=convert_str_to_bool(args.download_external_modules),
        external_modules_download_path=args.external_modules_download_path,
        evaluate_variables=convert_str_to_bool(args.evaluate_variables))
    if outer_registry:
        # An embedding application supplied its own registry; reuse it with the fresh filter.
        runner_registry = outer_registry
        runner_registry.runner_filter = runner_filter
    else:
        # Fixed runner set: terraform, cloudformation, kubernetes, serverless,
        # ARM and terraform-plan.
        runner_registry = RunnerRegistry(banner, runner_filter, tf_runner(), cfn_runner(), k8_runner(),
                                         sls_runner(), arm_runner(), tf_plan_runner())
    if args.version:
        print(version)
        return
    if args.bc_api_key:
        if args.repo_id is None:
            parser.error("--repo-id argument is required when using --bc-api-key")
        if len(args.repo_id.split('/')) != 2:
            parser.error(
                "--repo-id argument format should be 'organization/repository_name' E.g "
                "bridgecrewio/checkov")
        bc_integration.setup_bridgecrew_credentials(bc_api_key=args.bc_api_key, repo_id=args.repo_id)
    guidelines = {}
    if not args.no_guide:
        guidelines = bc_integration.get_guidelines()
    # Unlike the newer entry point, any combination of the two flags is rejected.
    if args.check and args.skip_check:
        parser.error("--check and --skip-check can not be applied together. please use only one of them")
        return
    if args.list:
        print_checks(framework=args.framework)
        return
    external_checks_dir = get_external_checks_dir(args)
    if args.directory:
        for root_folder in args.directory:
            file = args.file
            scan_reports = runner_registry.run(
                root_folder=root_folder,
                external_checks_dir=external_checks_dir,
                files=file,
                guidelines=guidelines)
            if bc_integration.is_integration_configured():
                bc_integration.persist_repository(root_folder)
                bc_integration.persist_scan_results(scan_reports)
                bc_integration.commit_repository(args.branch)
            runner_registry.print_reports(scan_reports, args)
        return
    elif args.file:
        scan_reports = runner_registry.run(
            external_checks_dir=external_checks_dir,
            files=args.file,
            guidelines=guidelines)
        if bc_integration.is_integration_configured():
            # Upload is anchored at the common prefix directory of all scanned files.
            files = [os.path.abspath(file) for file in args.file]
            root_folder = os.path.split(os.path.commonprefix(files))[0]
            bc_integration.persist_repository(root_folder)
            bc_integration.persist_scan_results(scan_reports)
            bc_integration.commit_repository(args.branch)
        runner_registry.print_reports(scan_reports, args)
    else:
        # No scan target given: show the banner and offer onboarding.
        print(f"{banner}")
        bc_integration.onboarding()
def run(
    self,
    root_folder: str,
    external_checks_dir: Optional[List[str]] = None,
    files: Optional[List[str]] = None,
    runner_filter: RunnerFilter = RunnerFilter(),
    collect_skip_comments: bool = True,
) -> Report:
    """Scan Azure Resource Manager (ARM) templates and return a Report.

    Checks both the ``resources`` and ``parameters`` sections of each template.
    Nested resources are flattened into the top-level resource list before scanning.

    :param root_folder: directory to walk recursively for ARM templates; may be None
    :param external_checks_dir: optional list of directories with custom checks to load
    :param files: optional list of explicit template paths to scan
    :param runner_filter: RunnerFilter controlling which checks run
                          (NOTE(review): mutable default shared across calls)
    :param collect_skip_comments: accepted for interface compatibility; not used here
    :return: Report populated with one Record per (resource/parameter, check) result
    """
    report = Report(self.check_type)
    files_list = []
    filepath_fn = None
    if external_checks_dir:
        for directory in external_checks_dir:
            arm_resource_registry.load_external_checks(directory)

    if files:
        files_list = files.copy()

    if root_folder:
        # Keys into the definitions dict are root-relative paths with a leading '/'
        # (relied on below to rebuild an absolute path).
        filepath_fn = lambda f: f'/{os.path.relpath(f, os.path.commonprefix((root_folder, f)))}'
        for root, d_names, f_names in os.walk(root_folder):
            filter_ignored_paths(root, d_names, runner_filter.excluded_paths)
            filter_ignored_paths(root, f_names, runner_filter.excluded_paths)
            for file in f_names:
                file_ending = os.path.splitext(file)[1]
                if file_ending in ARM_POSSIBLE_ENDINGS:
                    files_list.append(os.path.join(root, file))

    definitions, definitions_raw = get_files_definitions(files_list, filepath_fn)

    # Filter out empty files that have not been parsed successfully, and filter out non-CF template files
    definitions = {k: v for k, v in definitions.items() if v and v.__contains__("resources")}
    definitions_raw = {k: v for k, v in definitions_raw.items() if k in definitions.keys()}

    for arm_file in definitions.keys():
        # There are a few cases here. If -f was used, there could be a leading / because it's an absolute path,
        # or there will be no leading slash; root_folder will always be none.
        # If -d is used, root_folder will be the value given, and -f will start with a / (hardcoded above).
        # The goal here is simply to get a valid path to the file (which arm_file does not always give).
        if arm_file[0] == '/':
            path_to_convert = (root_folder + arm_file) if root_folder else arm_file
        else:
            path_to_convert = (os.path.join(root_folder, arm_file)) if root_folder else arm_file
        file_abs_path = os.path.abspath(path_to_convert)

        if isinstance(definitions[arm_file], DictNode):
            arm_context_parser = ContextParser(arm_file, definitions[arm_file], definitions_raw[arm_file])
            logging.debug(f"Template Dump for {arm_file}: {definitions[arm_file]}")

            if 'resources' in definitions[arm_file].keys():
                arm_context_parser.evaluate_default_parameters()

                # Split out nested resources from base resource
                for resource in definitions[arm_file]['resources']:
                    # Already-flattened children carry "parent_name"; skip them to
                    # avoid re-expanding resources appended by an earlier iteration.
                    if isinstance(resource, dict) and "parent_name" in resource.keys():
                        continue
                    nested_resources = []
                    nested_resources = arm_context_parser.search_deep_keys("resources", resource, [])
                    if nested_resources:
                        for nr in nested_resources:
                            # search_deep_keys returns the key path with the value last.
                            nr_element = nr.pop()
                            if nr_element:
                                for element in nr_element:
                                    new_resource = {}
                                    new_resource = element
                                    if isinstance(new_resource, dict):
                                        # Tag the child with its parent so it is scanned
                                        # once and not re-flattened (see "parent_name" check above).
                                        new_resource["parent_name"] = resource["name"]
                                        new_resource["parent_type"] = resource["type"]
                                        # Appending while iterating is deliberate: new items are
                                        # visited later in this same loop but skipped via "parent_name".
                                        definitions[arm_file]['resources'].append(new_resource)

                for resource in definitions[arm_file]['resources']:
                    resource_id = arm_context_parser.extract_arm_resource_id(resource)
                    report.add_resource(f'{arm_file}:{resource_id}')
                    resource_name = arm_context_parser.extract_arm_resource_name(resource)
                    entity_lines_range, entity_code_lines = arm_context_parser.extract_arm_resource_code_lines(resource)
                    if entity_lines_range and entity_code_lines:
                        # TODO - Variable Eval Message!
                        variable_evaluations = {}
                        skipped_checks = ContextParser.collect_skip_comments(resource)
                        results = arm_resource_registry.scan(arm_file, {resource_name: resource}, skipped_checks,
                                                             runner_filter)
                        for check, check_result in results.items():
                            record = Record(check_id=check.id, bc_check_id=check.bc_id, check_name=check.name,
                                            check_result=check_result,
                                            code_block=entity_code_lines, file_path=arm_file,
                                            file_line_range=entity_lines_range,
                                            resource=resource_id, evaluations=variable_evaluations,
                                            check_class=check.__class__.__module__,
                                            file_abs_path=file_abs_path)
                            record.set_guideline(check.guideline)
                            report.add_record(record=record)

            if 'parameters' in definitions[arm_file].keys():
                parameters = definitions[arm_file]['parameters']
                for parameter_name, parameter_details in parameters.items():
                    # TODO - Variable Eval Message!
                    variable_evaluations = {}
                    resource_id = f'parameter.{parameter_name}'
                    resource_name = parameter_name
                    entity_lines_range, entity_code_lines = arm_context_parser.extract_arm_resource_code_lines(parameter_details)
                    if entity_lines_range and entity_code_lines:
                        skipped_checks = ContextParser.collect_skip_comments(parameter_details)
                        results = arm_parameter_registry.scan(arm_file, {resource_name: parameter_details},
                                                              skipped_checks, runner_filter)
                        for check, check_result in results.items():
                            record = Record(check_id=check.id, bc_check_id=check.bc_id, check_name=check.name,
                                            check_result=check_result,
                                            code_block=entity_code_lines, file_path=arm_file,
                                            file_line_range=entity_lines_range,
                                            resource=resource_id, evaluations=variable_evaluations,
                                            check_class=check.__class__.__module__,
                                            file_abs_path=file_abs_path)
                            record.set_guideline(check.guideline)
                            report.add_record(record=record)

    return report