def scan(
    self,
    scanned_file: str,
    entity: Dict[str, Any],
    skipped_checks: List[_SkippedCheck],
    runner_filter: RunnerFilter,
) -> Dict[BaseCheck, Dict[str, Any]]:
    """Run every registered check that applies to *entity* and collect results.

    :param scanned_file: path of the file the entity came from
    :param entity: raw entity dict; details are pulled out via extract_entity_details
    :param skipped_checks: suppression entries, matched against each check id
    :param runner_filter: decides which check ids actually run
    :return: mapping of each executed check to its result dict
    """
    entity_type, entity_name, entity_configuration = self.extract_entity_details(entity)

    results: Dict[BaseCheck, Dict[str, Any]] = {}
    # Only dict-shaped configurations are scannable; anything else yields no results.
    if not isinstance(entity_configuration, dict):
        return results

    for check in self.get_checks(entity_type):
        # Pick the suppression entry matching this check id, if one exists.
        skip_info: _SkippedCheck = next(
            (skip for skip in skipped_checks or [] if skip["id"] == check.id),
            {},
        )
        if runner_filter.should_run_check(check.id, check.bc_id):
            results[check] = self.run_check(
                check, entity_configuration, entity_name, entity_type, scanned_file, skip_info
            )
    return results
def search_for_suppression(
        check_id: str,
        bc_check_id: str,
        secret: PotentialSecret,
        runner_filter: RunnerFilter) -> Optional[_CheckResult]:
    """Return a SKIPPED result for *secret* if it is suppressed, else None.

    A secret is suppressed either because the runner filter excludes the
    check id (for known secret checks), or because a `checkov:skip` style
    comment matching the check id sits on, just before, or just after the
    secret's line.

    :param check_id: checkov check id for the secret type
    :param bc_check_id: the corresponding bridgecrew id (may be None)
    :param secret: detected secret, carrying filename and line_number
    :param runner_filter: decides whether the check should run at all
    """
    # Membership test directly on the dict — `.keys()` was redundant.
    if not runner_filter.should_run_check(
            check_id, bc_check_id) and check_id in CHECK_ID_TO_SECRET_TYPE:
        return {
            "result": CheckResult.SKIPPED,
            "suppress_comment": f"Secret scan {check_id} is skipped"
        }

    # Check for suppression comment in the line before, the line of, and the line after the secret
    for line_number in (secret.line_number, secret.line_number - 1, secret.line_number + 1):
        line_text = linecache.getline(secret.filename, line_number)
        skip_search = re.search(COMMENT_REGEX, line_text)
        if skip_search and skip_search.group(2) in (check_id, bc_check_id):
            return {
                "result": CheckResult.SKIPPED,
                # Group 3 holds ":<comment>"; strip the leading separator.
                "suppress_comment": skip_search.group(3)[1:]
                if skip_search.group(3) else "No comment provided"
            }
    return None
def run_checks(self, graph_connector, runner_filter: RunnerFilter):
    """Run each registered graph check against *graph_connector*.

    :param graph_connector: graph object passed straight to each check's run()
    :param runner_filter: explicit check/skip lists that gate execution
    :return: mapping of each executed check to its processed result list
    """
    check_results = {}
    for check in self.checks:
        # Parenthesized explicitly (matching the sibling implementation) so the
        # intended `(enabled-list excludes) or (skip-list filters out)` grouping
        # is obvious — Python's `and`-over-`or` precedence already parsed it so.
        if (runner_filter.checks and check.id not in runner_filter.checks) or (
                runner_filter.skip_checks and not runner_filter.should_run_check(check.id)):
            continue
        passed, failed = check.run(graph_connector)
        check_result = self._process_check_result(passed, [], CheckResult.PASSED)
        check_result = self._process_check_result(failed, check_result, CheckResult.FAILED)
        check_results[check] = check_result
    return check_results
def run_checks(
        self, graph_connector: DiGraph, runner_filter: RunnerFilter
) -> Dict[BaseGraphCheck, List[Dict[str, Any]]]:
    """Sequentially run every graph check allowed by *runner_filter*.

    :param graph_connector: graph handed to each check's run()
    :param runner_filter: gates execution via should_run_check(id, bc_id)
    :return: mapping of each executed check to its processed result list
    """
    check_results: Dict[BaseGraphCheck, List[Dict[str, Any]]] = {}
    for check in self.checks:
        if not runner_filter.should_run_check(check.id, check.bc_id):
            continue
        logging.debug(f'Running graph check: {check.id}')
        passed, failed = check.run(graph_connector)
        evaluated_keys = check.get_evaluated_keys()
        # Fold passed then failed entities into a single result list.
        partial = self._process_check_result(passed, [], CheckResult.PASSED, evaluated_keys)
        check_results[check] = self._process_check_result(
            failed, partial, CheckResult.FAILED, evaluated_keys)
    return check_results
def run_checks(
        self, graph_connector: DiGraph, runner_filter: RunnerFilter
) -> Dict[BaseGraphCheck, List[Dict[str, Any]]]:
    """Run all filter-approved graph checks concurrently.

    Each worker writes its outcome into the shared *check_results* dict via
    run_check_parallel; wait() blocks until every submitted check finishes.

    :param graph_connector: graph handed to each check
    :param runner_filter: gates execution via should_run_check(id, bc_id)
    :return: mapping of each executed check to its result list
    """
    check_results: Dict[BaseGraphCheck, List[Dict[str, Any]]] = {}
    with concurrent.futures.ThreadPoolExecutor() as executor:
        futures = [
            executor.submit(self.run_check_parallel, check, check_results, graph_connector)
            for check in self.checks
            if runner_filter.should_run_check(check.id, check.bc_id)
        ]
        concurrent.futures.wait(futures)
    return check_results
def run(
        self,
        root_folder: Union[str, Path],
        external_checks_dir: Optional[List[str]] = None,
        files: Optional[List[str]] = None,
        runner_filter: Optional[RunnerFilter] = None,
        collect_skip_comments: bool = True,
) -> Report:
    """Scan for package vulnerabilities and build a Report.

    :param root_folder: root of the repository/directory to scan
    :param external_checks_dir: unused here, kept for runner-interface parity
    :param files: explicit file list to scan instead of walking root_folder
    :param runner_filter: filter for check ids; a fresh one is created if None
    :param collect_skip_comments: unused here, kept for runner-interface parity
    :return: report containing one record per reported vulnerability
    """
    # Fix for a shared mutable default: RunnerFilter is stateful, so the old
    # `= RunnerFilter()` default was one instance shared by every call.
    runner_filter = runner_filter or RunnerFilter()
    report = Report(self.check_type)
    scan_results = self.prepare_and_scan(root_folder, files, runner_filter)
    if scan_results is None:
        return report
    for result in scan_results:
        package_file_path = Path(result["repository"])
        try:
            package_file_path = package_file_path.relative_to(self._code_repo_path)
        except ValueError:
            # Path.is_relative_to() was implemented in Python 3.9
            pass
        vulnerabilities = result.get("vulnerabilities") or []
        # Strip the drive/root anchor to get a repo-relative path string.
        rootless_file_path = str(package_file_path).replace(package_file_path.anchor, "", 1)
        for vulnerability in vulnerabilities:
            record = create_report_record(
                rootless_file_path=rootless_file_path,
                file_abs_path=result["repository"],
                check_class=self._check_class,
                vulnerability_details=vulnerability,
                runner_filter=runner_filter)
            if not runner_filter.should_run_check(record.check_id, record.bc_check_id):
                if runner_filter.checks:
                    # An explicit allow-list excludes this check: drop the record.
                    continue
                else:
                    # Skip-list match: keep the record but mark it skipped.
                    record.check_result = {
                        "result": CheckResult.SKIPPED,
                        "suppress_comment": f"{vulnerability['id']} is skipped"
                    }
            report.add_resource(record.resource)
            report.add_record(record)
    return report
def run_checks(
        self, graph_connector: DiGraph, runner_filter: RunnerFilter
) -> Dict[BaseGraphCheck, List[Dict[str, Any]]]:
    """Run each registered graph check unless the runner filter excludes it.

    :param graph_connector: graph handed to each check's run()
    :param runner_filter: explicit check/skip lists that gate execution
    :return: mapping of each executed check to its processed result list
    """
    check_results: Dict[BaseGraphCheck, List[Dict[str, Any]]] = {}
    for check in self.checks:
        # An explicit allow-list that omits this id excludes the check outright.
        if runner_filter.checks and check.id not in runner_filter.checks:
            continue
        # Otherwise honor the skip-list via should_run_check.
        if runner_filter.skip_checks and not runner_filter.should_run_check(check.id):
            continue
        passed, failed = check.run(graph_connector)
        partial = self._process_check_result(passed, [], CheckResult.PASSED)
        check_results[check] = self._process_check_result(failed, partial, CheckResult.FAILED)
    return check_results
def test_should_run_specific_disable_AND_enable(self):
    """When a check is both explicitly enabled and skipped, enable wins."""
    runner_filter = RunnerFilter(checks=["CHECK_1"], skip_checks=["CHECK_1"])
    self.assertTrue(runner_filter.should_run_check("CHECK_1"))
def run(self,
        root_folder: str,
        external_checks_dir: Optional[List[str]] = None,
        files: Optional[List[str]] = None,
        runner_filter: Optional[RunnerFilter] = None,
        collect_skip_comments: bool = True) -> Report:
    """Scan files for hard-coded secrets via detect-secrets and build a Report.

    :param root_folder: directory to walk for scannable files
    :param external_checks_dir: unused here, kept for runner-interface parity
    :param files: explicit files to scan in addition to the walked ones
    :param runner_filter: filter for check ids; a fresh one is created if None
    :param collect_skip_comments: unused here, kept for runner-interface parity
    :return: report with one record per detected (non-suppressed) secret
    """
    # Fix for a shared mutable default: RunnerFilter is stateful, so the old
    # `= RunnerFilter()` default was one instance shared by every call.
    runner_filter = runner_filter or RunnerFilter()
    current_dir = os.path.dirname(os.path.realpath(__file__))
    secrets = SecretsCollection()
    # Only run scans with only these plugins.
    plugins_used = [
        {'name': 'AWSKeyDetector'},
        {'name': 'ArtifactoryDetector'},
        {'name': 'AzureStorageKeyDetector'},
        {'name': 'BasicAuthDetector'},
        {'name': 'CloudantDetector'},
        {'name': 'IbmCloudIamDetector'},
        {'name': 'MailchimpDetector'},
        {'name': 'PrivateKeyDetector'},
        {'name': 'SlackDetector'},
        {'name': 'SoftlayerDetector'},
        {'name': 'SquareOAuthDetector'},
        {'name': 'StripeDetector'},
        {'name': 'TwilioKeyDetector'},
        {
            'name': 'EntropyKeywordCombinator',
            'path': f'file://{current_dir}/plugins/entropy_keyword_combinator.py',
            'limit': ENTROPY_KEYWORD_LIMIT,
        },
    ]
    with transient_settings({'plugins_used': plugins_used}) as settings:
        report = Report(self.check_type)
        # TODO: Implement non IaC files (including .terraform dir)
        files_to_scan = files or []
        excluded_paths = (runner_filter.excluded_paths or []) + ignored_directories + [DEFAULT_EXTERNAL_MODULES_DIR]
        if root_folder:
            for root, d_names, f_names in os.walk(root_folder):
                filter_ignored_paths(root, d_names, excluded_paths)
                filter_ignored_paths(root, f_names, excluded_paths)
                for file in f_names:
                    if file not in PROHIBITED_FILES and f".{file.split('.')[-1]}" in SUPPORTED_FILE_EXTENSIONS:
                        files_to_scan.append(os.path.join(root, file))
        logging.info(f'Secrets scanning will scan {len(files_to_scan)} files')
        # Pass the filter name directly instead of unpacking a one-element list.
        settings.disable_filters('detect_secrets.filters.heuristic.is_indirect_reference')
        Runner._scan_files(files_to_scan, secrets)
        for _, secret in iter(secrets):
            check_id = SECRET_TYPE_TO_ID.get(secret.type)
            if not check_id:
                # Unknown secret type — guard moved before the bc-id lookup,
                # which was dead work for unmapped types.
                continue
            bc_check_id = bc_integration.ckv_to_bc_id_mapping.get(
                check_id) if bc_integration.ckv_to_bc_id_mapping else None
            if runner_filter.checks and not runner_filter.should_run_check(check_id, bc_check_id):
                continue
            result: _CheckResult = {'result': CheckResult.FAILED}
            line_text = linecache.getline(secret.filename, secret.line_number)
            # Split once (the original split the same line three times);
            # skip lines that record a git commit hash rather than a secret.
            tokens = line_text.split()
            if tokens and tokens[0] == 'git_commit':
                continue
            result = self.search_for_suppression(
                check_id=check_id,
                bc_check_id=bc_check_id,
                secret=secret,
                runner_filter=runner_filter,
            ) or result
            report.add_resource(f'{secret.filename}:{secret.secret_hash}')
            report.add_record(
                Record(check_id=check_id,
                       bc_check_id=bc_check_id,
                       check_name=secret.type,
                       check_result=result,
                       code_block=[(secret.line_number, line_text)],
                       file_path=f'/{os.path.relpath(secret.filename, root_folder)}',
                       file_line_range=[secret.line_number, secret.line_number + 1],
                       resource=secret.secret_hash,
                       check_class=None,
                       evaluations=None,
                       file_abs_path=os.path.abspath(secret.filename)))
        return report
def test_should_run_external3(self):
    """A registered external check runs when it is explicitly enabled."""
    runner_filter = RunnerFilter(checks=["EXT_CHECK_999"])
    runner_filter.notify_external_check("EXT_CHECK_999")
    self.assertTrue(runner_filter.should_run_check("EXT_CHECK_999"))
def test_should_run_external_disabled(self):
    """A registered external check is filtered out when it is skip-listed."""
    runner_filter = RunnerFilter(skip_checks=["CHECK_1", "EXT_CHECK_999"])
    runner_filter.notify_external_check("EXT_CHECK_999")
    self.assertFalse(runner_filter.should_run_check("EXT_CHECK_999"))
def test_should_run_specific_disable(self):
    """A skip-listed check id must not run."""
    runner_filter = RunnerFilter(skip_checks=["CHECK_1"])
    self.assertFalse(runner_filter.should_run_check("CHECK_1"))
def test_should_run_omitted_specific_disable(self):
    """A check absent from the skip list still runs."""
    runner_filter = RunnerFilter(skip_checks=["CHECK_1"])
    self.assertTrue(runner_filter.should_run_check("CHECK_999"))
def test_should_run_default(self):
    """With no filters configured, every check runs."""
    runner_filter = RunnerFilter()
    self.assertTrue(runner_filter.should_run_check("CHECK_1"))
def test_should_run_omitted_specific_enable(self):
    """When an explicit enable list exists, checks not on it must not run."""
    runner_filter = RunnerFilter(checks=["CHECK_1"])
    self.assertFalse(runner_filter.should_run_check("CHECK_999"))
def test_should_run_omitted_wildcard2_bc_id(self):
    """A wildcard skip pattern matches against the bridgecrew id too."""
    runner_filter = RunnerFilter(skip_checks=["BC_CHECK_AWS*"])
    self.assertFalse(runner_filter.should_run_check("CHECK_AWS_909", "BC_CHECK_AWS_909"))
def test_should_run_omitted_wildcard(self):
    """A check id not matching the wildcard skip pattern still runs."""
    runner_filter = RunnerFilter(skip_checks=["CHECK_AWS*"])
    self.assertTrue(runner_filter.should_run_check("CHECK_999"))
def test_should_run_external4(self):
    """all_external lets a registered external check run despite an enable list."""
    runner_filter = RunnerFilter(checks=["CHECK_1"], skip_checks=["CHECK_2"], all_external=True)
    runner_filter.notify_external_check("EXT_CHECK_999")
    self.assertTrue(runner_filter.should_run_check("EXT_CHECK_999"))
def test_should_run_wildcard_enable_bc(self):
    """A wildcard enable pattern matches against the bridgecrew id."""
    runner_filter = RunnerFilter(checks=["BC_CHECK_*"])
    self.assertTrue(runner_filter.should_run_check("CHECK_1", "BC_CHECK_1"))
def test_should_run_omitted_wildcard4(self):
    """An exact entry in a mixed wildcard/exact skip list is honored."""
    runner_filter = RunnerFilter(skip_checks=["CHECK_AWS*", "CHECK_AZURE_01"])
    self.assertFalse(runner_filter.should_run_check("CHECK_AZURE_01"))