def test_collect_widgets_dependencies(self, id_set):
    """
    Given
        - A widget entry in the id_set.
    When
        - Building dependency graph for pack.
    Then
        - Extracting the packs that the widget depends on.
    """
    expected_result = {('CommonScripts', True)}
    widget_entries = [
        {
            "Dummy_widget": {
                "name": "Dummy Widget",
                "fromversion": "5.0.0",
                "pack": "dummy_pack",
                "scripts": ["AssignAnalystToIncident"],
            }
        }
    ]

    found_result = PackDependencies._collect_widget_dependencies(
        pack_widgets=widget_entries,
        id_set=id_set,
        verbose_file=VerboseFile(),
    )

    assert IsEqualFunctions.is_sets_equal(found_result, expected_result)
def test_collect_detection_of_optional_dependencies_in_playbooks(self, integration_command, id_set):
    """
    Given
        - A playbook entry whose command maps to more than one possible integration.
    When
        - Collecting the playbook's dependencies.
    Then
        - Dependencies are detected and every one of them is marked as optional
          (mandatory flag is False).
    """
    playbook_entries = [{
        "Dummy Playbook": {
            "name": "Dummy Playbook",
            "file_path": "dummy_path",
            "fromversion": "dummy_version",
            "implementing_scripts": [],
            "implementing_playbooks": [],
            "command_to_integration": {integration_command: ""},
            "tests": ["dummy_playbook"],
            "pack": "dummy_pack",
        }
    }]

    dependencies = PackDependencies._collect_playbooks_dependencies(
        pack_playbooks=playbook_entries,
        id_set=id_set,
        verbose_file=VerboseFile(),
    )

    assert len(dependencies) > 0
    for dependency in dependencies:
        # validate that mandatory is set to False
        assert not dependency[1]
def test_build_dependency_graph_include_ignored_content(self, id_set):
    """
    Given
        - A pack name which depends on unsupported content.
    When
        - Building dependency graph for pack without excluding ignored dependencies.
    Then
        - Extracting the pack dependencies including the unsupported content.
    """
    pack_name = "ImpossibleTraveler"

    graph = PackDependencies.build_dependency_graph(
        pack_id=pack_name,
        id_set=id_set,
        verbose_file=VerboseFile(),
        exclude_ignored_dependencies=False,
    )
    roots = [node for node in graph.nodes if graph.in_degree(node) == 0]
    dependencies = [node for node in graph.nodes if graph.in_degree(node) > 0]

    assert roots[0] == pack_name
    assert len(dependencies) > 0
    assert 'NonSupported' in dependencies
def test_collect_playbooks_dependencies_on_incident_fields(self, id_set):
    """
    Given
        - A playbook entry that references incident fields.
    When
        - Collecting the playbook's dependencies.
    Then
        - The packs owning those incident fields are extracted as mandatory dependencies.
    """
    expected_result = {("DigitalGuardian", True), ("EmployeeOffboarding", True)}
    playbook_entries = [{
        "Dummy Playbook": {
            "name": "Dummy Playbook",
            "file_path": "dummy_path",
            "fromversion": "dummy_version",
            "implementing_scripts": [],
            "implementing_playbooks": [],
            "command_to_integration": {},
            "tests": ["dummy_playbook"],
            "pack": "dummy_pack",
            "incident_fields": ["digitalguardianusername", "Google Display Name"],
        }
    }]

    found_result = PackDependencies._collect_playbooks_dependencies(
        pack_playbooks=playbook_entries,
        id_set=id_set,
        verbose_file=VerboseFile(),
    )

    assert IsEqualFunctions.is_sets_equal(found_result, expected_result)
def test_collect_scripts_depends_on_with_two_inputs(self, id_set):
    """
    Given
        - Two script entries, each depending on a different integration command.
    When
        - Collecting the scripts' dependencies.
    Then
        - Both integration packs are extracted as mandatory dependencies.
    """
    expected_result = {('Active_Directory_Query', True), ('Feedsslabusech', True)}
    script_entries = [
        {
            "DummyScript1": {
                "name": "DummyScript1",
                "file_path": "dummy_path1",
                "depends_on": ["sslbl-get-indicators"],
                "pack": "dummy_pack",
            }
        },
        {
            "DummyScript2": {
                "name": "DummyScript2",
                "file_path": "dummy_path1",
                "depends_on": ["ad-get-user"],
                "pack": "dummy_pack",
            }
        },
    ]

    found_result = PackDependencies._collect_scripts_dependencies(
        pack_scripts=script_entries,
        id_set=id_set,
        verbose_file=VerboseFile(),
    )

    assert IsEqualFunctions.is_sets_equal(found_result, expected_result)
def test_collect_integration_dependencies(self, id_set):
    """
    Given
        - An integration entry in the id_set.
    When
        - Building dependency graph for pack.
    Then
        - Extracting the packs that the integration depends on.
    """
    expected_result = {
        ("HelloWorld", True),
        ("Claroty", True),
        ("EWS", True),
        ("CrisisManagement", True),
        ("CommonTypes", True),
    }
    integration_entries = [{
        "Dummy Integration": {
            "name": "Dummy Integration",
            "fromversion": "5.0.0",
            "pack": "dummy_pack",
            "classifiers": "HelloWorld",
            "mappers": ["Claroty-mapper", "EWS v2-mapper"],
            "incident_types": "HR Ticket",
            "indicator_fields": "CommonTypes",
        }
    }]

    found_result = PackDependencies._collect_integrations_dependencies(
        pack_integrations=integration_entries,
        id_set=id_set,
        verbose_file=VerboseFile(),
    )

    assert IsEqualFunctions.is_sets_equal(found_result, expected_result)
def test_collect_classifier_dependencies(self, id_set):
    """
    Given
        - A classifier entry in the id_set.
    When
        - Building dependency graph for pack.
    Then
        - Extracting the packs that the classifier depends on.
    """
    expected_result = {("Claroty", True), ("PAN-OS", True), ("Logzio", True)}
    classifier_entries = [{
        "Dummy Classifier": {
            "name": "Dummy Classifier",
            "fromversion": "5.0.0",
            "pack": "dummy_pack",
            "incident_types": [
                "Claroty Integrity Incident",
                "FirewallUpgrade",
                "Logz.io Alert",
            ],
        }
    }]

    found_result = PackDependencies._collect_classifiers_dependencies(
        pack_classifiers=classifier_entries,
        id_set=id_set,
        verbose_file=VerboseFile(),
    )

    assert IsEqualFunctions.is_sets_equal(found_result, expected_result)
def test_collect_incident_type_dependencies(self, id_set):
    """
    Given
        - An incident type entry in the id_set.
    When
        - Building dependency graph for pack.
    Then
        - Extracting the packs that the incident type depends on.
    """
    expected_result = {("AutoFocus", True), ("Volatility", True)}
    incident_type_entries = [{
        "Dummy Incident Type": {
            "name": "Dummy Incident Type",
            "fromversion": "5.0.0",
            "pack": "dummy_pack",
            "playbooks": "Autofocus Query Samples, Sessions and Tags",
            "scripts": "AnalyzeMemImage",
        }
    }]

    found_result = PackDependencies._collect_incidents_types_dependencies(
        pack_incidents_types=incident_type_entries,
        id_set=id_set,
        verbose_file=VerboseFile(),
    )

    assert IsEqualFunctions.is_sets_equal(found_result, expected_result)
def test_collect_scripts_command_to_integration(self, id_set):
    """
    Given
        - A script entry in the id_set containing command_to_integration.
    When
        - Building dependency graph for pack.
    Then
        - Extracting the pack that the script depends on.
        - Should recognize the pack.
    """
    expected_result = {('Active_Directory_Query', True)}
    script_entries = [{
        "DummyScript": {
            "name": "ADGetUser",
            "file_path": "Packs/Active_Directory_Query/Scripts/script-ADGetUser.yml",
            "depends_on": [],
            "command_to_integration": {"ad-search": "activedir"},
            "pack": "Active_Directory_Query",
        }
    }]

    found_result = PackDependencies._collect_scripts_dependencies(
        pack_scripts=script_entries,
        id_set=id_set,
        verbose_file=VerboseFile(),
    )

    assert IsEqualFunctions.is_sets_equal(found_result, expected_result)
def test_collect_playbooks_dependencies_on_integrations(self, integration_command, expected_result, id_set):
    """
    Given
        - A playbook entry using the given integration command.
    When
        - Collecting the playbook's dependencies.
    Then
        - The resulting dependency set matches the expected result.
    """
    playbook_entries = [{
        "Dummy Playbook": {
            "name": "Dummy Playbook",
            "file_path": "dummy_path",
            "fromversion": "dummy_version",
            "implementing_scripts": [],
            "implementing_playbooks": [],
            "command_to_integration": {integration_command: ""},
            "tests": ["dummy_playbook"],
            "pack": "dummy_pack",
        }
    }]

    found_result = PackDependencies._collect_playbooks_dependencies(
        pack_playbooks=playbook_entries,
        id_set=id_set,
        verbose_file=VerboseFile(),
    )

    assert IsEqualFunctions.is_sets_equal(found_result, expected_result)
def test_collect_playbooks_dependencies_on_integrations_with_brand(self, id_set):
    """
    Given
        - A playbook entry whose command is pinned to a specific integration brand.
    When
        - Collecting the playbook's dependencies.
    Then
        - Exactly one dependency is found: the branded pack, marked mandatory.
    """
    command = "ip"
    pack_name = "ipinfo"
    playbook_entries = [{
        "Dummy Playbook": {
            "name": "Dummy Playbook",
            "file_path": "dummy_path",
            "fromversion": "dummy_version",
            "implementing_scripts": [],
            "implementing_playbooks": [],
            "command_to_integration": {command: pack_name},
            "tests": ["dummy_playbook"],
            "pack": "dummy_pack",
        }
    }]

    dependencies = PackDependencies._collect_playbooks_dependencies(
        pack_playbooks=playbook_entries,
        id_set=id_set,
        verbose_file=VerboseFile(),
    )

    assert len(dependencies) == 1
    dependency = dependencies.pop()
    assert dependency[0] == pack_name
    assert dependency[1]
def test_collect_scripts_depends_on_two_integrations(self, id_set):
    """
    Given
        - A script entry in the id_set depending on 2 integrations.
    When
        - Building dependency graph for pack.
    Then
        - Extracting the packs that the script depends on.
        - Should recognize both packs.
    """
    expected_result = {('Active_Directory_Query', True), ('Feedsslabusech', True)}
    script_entries = [{
        "DummyScript": {
            "name": "DummyScript",
            "file_path": "dummy_path",
            "depends_on": ["sslbl-get-indicators", "ad-get-user"],
            "pack": "dummy_pack",
        }
    }]

    found_result = PackDependencies._collect_scripts_dependencies(
        pack_scripts=script_entries,
        id_set=id_set,
        verbose_file=VerboseFile(),
    )

    assert IsEqualFunctions.is_sets_equal(found_result, expected_result)
def test_collect_scripts__filter_toversion(self, id_set):
    """
    Given
        - A script entry in the id_set depending on QRadar command.
    When
        - Building dependency graph for pack.
    Then
        - Extracting the packs that the script depends on.
        - Should ignore the Deprecated pack due to toversion settings of old QRadar integration.
    """
    expected_result = {('QRadar', True)}
    script_entries = [{
        "DummyScript": {
            "name": "DummyScript",
            "file_path": "dummy_path",
            "depends_on": ["qradar-searches"],
            "pack": "dummy_pack",
        }
    }]

    found_result = PackDependencies._collect_scripts_dependencies(
        pack_scripts=script_entries,
        id_set=id_set,
        verbose_file=VerboseFile(),
        exclude_ignored_dependencies=False,
    )

    assert IsEqualFunctions.is_sets_equal(found_result, expected_result)
def test_collect_scripts_depends_on_two_scripts(self, id_set):
    """
    Given
        - A script entry in the id_set depending on 2 scripts.
    When
        - Building dependency graph for pack.
    Then
        - Extracting the packs that the script depends on.
        - Should recognize both packs.
    """
    expected_result = {('HelloWorld', True), ('PrismaCloudCompute', True)}
    script_entries = [{
        "DummyScript": {
            "name": "DummyScript",
            "file_path": "dummy_path",
            "depends_on": ["PrismaCloudComputeParseAuditAlert", "HelloWorldScript"],
            "pack": "dummy_pack",
        }
    }]

    found_result = PackDependencies._collect_scripts_dependencies(
        pack_scripts=script_entries,
        id_set=id_set,
        verbose_file=VerboseFile(),
    )

    assert IsEqualFunctions.is_sets_equal(found_result, expected_result)
def test_collect_scripts_depends_on_integration(self, dependency_integration_command, expected_result, id_set):
    """
    Given
        - A script entry in the id_set depending on integration commands.
    When
        - Building dependency graph for pack.
    Then
        - Extracting the packs that the script depends on.
        - Should recognize the pack.
    """
    script_entries = [{
        "DummyScript": {
            "name": "DummyScript",
            "file_path": "dummy_path",
            "depends_on": [dependency_integration_command],
            "pack": "dummy_pack",
        }
    }]

    found_result = PackDependencies._collect_scripts_dependencies(
        pack_scripts=script_entries,
        id_set=id_set,
        verbose_file=VerboseFile(),
    )

    assert IsEqualFunctions.is_sets_equal(found_result, expected_result)
def main():
    """
    Main function for iterating over existing packs folder in content repo and
    creating a JSON file of all pack dependencies.

    The logic of pack dependency is identical to the sdk find-dependencies command.
    """
    option = option_handler()
    output_path = option.output_path
    id_set_path = option.id_set_path
    IGNORED_FILES.append(GCPConfig.BASE_PACK)  # skip dependency calculation of Base pack

    # loading id set json
    with open(id_set_path, 'r') as id_set_file:
        id_set = json.load(id_set_file)

    pack_dependencies_result = {}

    print("Starting dependencies calculation")
    # starting iteration over pack folders
    for pack in os.scandir(PACKS_FULL_PATH):
        if not pack.is_dir() or pack.name in IGNORED_FILES:
            # skipping ignored packs
            print_warning(f"Skipping dependency calculation of {pack.name} pack.")
            continue

        print(f"Calculating {pack.name} pack dependencies.")

        try:
            dependency_graph = PackDependencies.build_dependency_graph(
                pack_id=pack.name,
                id_set=id_set,
                verbose_file=VerboseFile(''),
            )
            first_level_dependencies, all_level_dependencies = parse_for_pack_metadata(
                dependency_graph, pack.name)
        except Exception as e:
            # best-effort: a single failing pack must not abort the whole run
            print_error(
                f"Failed calculating {pack.name} pack dependencies. Additional info:\n{e}"
            )
            continue

        pack_dependencies_result[pack.name] = {
            "dependencies": first_level_dependencies,
            "displayedImages": list(first_level_dependencies.keys()),
            "allLevelDependencies": all_level_dependencies,
            "path": os.path.join(PACKS_FOLDER, pack.name),
            "fullPath": pack.path
        }

    # len(dict) is equivalent to len(dict.keys()) without the extra view object
    print(
        f"Number of created pack dependencies entries: {len(pack_dependencies_result)}"
    )
    # finished iteration over pack folders
    print_color("Finished dependencies calculation", LOG_COLORS.GREEN)

    with open(output_path, 'w') as pack_dependencies_file:
        json.dump(pack_dependencies_result, pack_dependencies_file, indent=4)

    print_color(f"Created packs dependencies file at: {output_path}", LOG_COLORS.GREEN)
def test_build_dependency_graph(self, id_set):
    """
    Given
        - A pack name.
    When
        - Building the dependency graph for the pack.
    Then
        - The pack is the root of the graph and has at least one dependency.
    """
    pack_name = "ImpossibleTraveler"

    graph = PackDependencies.build_dependency_graph(
        pack_id=pack_name,
        id_set=id_set,
        verbose_file=VerboseFile(),
    )
    roots = [node for node in graph.nodes if graph.in_degree(node) == 0]
    dependencies = [node for node in graph.nodes if graph.in_degree(node) > 0]

    assert roots[0] == pack_name
    assert len(dependencies) > 0
def test_collect_scripts_command_to_integrations_and_script_executions(self, id_set):
    """
    Given
        - A script entry in the id_set containing command_to_integrations with a
          reputation command and script_executions.
    When
        - Building dependency graph for pack.
    Then
        - Extracting the packs that the script depends on.
        - Should recognize the mandatory pack and the non mandatory packs.
    """
    # NOTE: the original literal listed ('Active_Directory_Query', True) twice;
    # set literals deduplicate, so the redundant entry was removed.
    expected_result = {
        ('Active_Directory_Query', True), ('Recorded_Future', False),
        ('illuminate', False), ('ThreatQ', False), ('Anomali_ThreatStream', False),
        ('URLHaus', False), ('Symantec_Deepsight', False), ('XForceExchange', False),
        ('XFE', False), ('MISP', False), ('AlienVault_OTX', False),
        ('ThreatMiner', False), ('isight', False), ('CrowdStrikeIntel', False),
        ('ReversingLabs_A1000', False), ('PolySwarm', False), ('TruSTAR', False),
        ('ReversingLabs_Titanium_Cloud', False), ('ThreatExchange', False),
        ('EclecticIQ', False), ('AutoFocus', False), ('McAfee-TIE', False),
        ('Maltiverse', False), ('Palo_Alto_Networks_WildFire', False),
        ('Polygon', False), ('Cofense-Intelligence', False), ('Lastline', False),
        ('ThreatConnect', False), ('VirusTotal', False), ('Flashpoint', False)
    }

    test_input = [
        {
            "DummyScript": {
                "name": "double_dependency",
                "file_path": "Packs/DeprecatedContent/Scripts/script-ADIsUserMember.yml",
                "deprecated": False,
                "depends_on": [],
                "command_to_integration": {
                    "file": "many integrations"
                },
                "script_executions": [
                    "ADGetUser",
                ],
                "pack": "Active_Directory_Query"
            }
        }
    ]

    found_result = PackDependencies._collect_scripts_dependencies(
        pack_scripts=test_input,
        id_set=id_set,
        verbose_file=VerboseFile(),
    )

    assert IsEqualFunctions.is_sets_equal(found_result, expected_result)
def test_collect_scripts_depends_on_integration(self, dependency_integration_command, expected_result, id_set):
    """
    Given
        - A script entry depending on the given integration command.
    When
        - Collecting the script's dependencies.
    Then
        - The resulting dependency set matches the expected result.
    """
    script_entries = [{
        "DummyScript": {
            "name": "DummyScript",
            "file_path": "dummy_path",
            "depends_on": [dependency_integration_command],
            "pack": "dummy_pack",
        }
    }]

    found_result = PackDependencies._collect_scripts_dependencies(
        pack_scripts=script_entries,
        id_set=id_set,
        verbose_file=VerboseFile(),
    )

    assert IsEqualFunctions.is_sets_equal(found_result, expected_result)
def get_all_packs_dependency_graph(id_set: dict, packs: list) -> Iterable:
    """
    Gets a graph with dependencies for all packs

    Args:
        id_set: The content of id_set file
        packs: The packs that should be part of the dependencies calculation

    Returns:
        A graph with all packs dependencies
    """
    logging.info("Calculating pack dependencies.")
    try:
        dependency_graph = PackDependencies.build_all_dependencies_graph(
            packs, id_set=id_set, verbose_file=VerboseFile(''))
        return dependency_graph
    except Exception:
        logging.exception("Failed calculating dependencies graph")
        # raise SystemExit rather than calling exit(): the exit() helper is
        # injected by the site module and is not guaranteed to exist (e.g.
        # under `python -S`); SystemExit(2) has identical runtime behavior.
        raise SystemExit(2)
def test_collect_indicator_type_dependencies(self, id_set):
    """
    Given
        - An indicator type entry in the id_set.
    When
        - Building dependency graph for pack.
    Then
        - Extracting the packs that the indicator type depends on.
    """
    expected_result = {
        # integration dependencies
        ("Feedsslabusech", False),
        ("AbuseDB", False),
        ("ActiveMQ", False),
        # script dependencies
        ("CommonScripts", True),
        ("Carbon_Black_Enterprise_Response", True),
    }
    indicator_type_entries = [{
        "Dummy Indicator Type": {
            "name": "Dummy Indicator Type",
            "fromversion": "5.0.0",
            "pack": "dummy_pack",
            "integrations": [
                "abuse.ch SSL Blacklist Feed",
                "AbuseIPDB",
                "ActiveMQ",
            ],
            "scripts": ["AssignAnalystToIncident", "CBAlerts"],
        }
    }]

    found_result = PackDependencies._collect_indicators_types_dependencies(
        pack_indicators_types=indicator_type_entries,
        id_set=id_set,
        verbose_file=VerboseFile(),
    )

    assert IsEqualFunctions.is_sets_equal(found_result, expected_result)
def test_collect_incident_field_dependencies(self, id_set):
    """
    Given
        - An incident field entry in the id_set.
    When
        - Building dependency graph for pack.
    Then
        - Extracting the packs that the incident field depends on.
    """
    expected_result = {
        # incident types
        # ("Expanse", True),
        ("IllusiveNetworks", True),
        # scripts
        ("Carbon_Black_Enterprise_Response", True),
        ("Phishing", True),
    }
    incident_field_entries = [{
        "Dummy Incident Field": {
            "name": "Dummy Incident Field",
            "fromversion": "5.0.0",
            "pack": "dummy_pack",
            "incident_types": [
                "Expanse Appearance",
                "Illusive Networks Incident",
            ],
            "scripts": ["CBLiveFetchFiles", "CheckEmailAuthenticity"],
        }
    }]

    found_result = PackDependencies._collect_incidents_fields_dependencies(
        pack_incidents_fields=incident_field_entries,
        id_set=id_set,
        verbose_file=VerboseFile(),
    )

    assert IsEqualFunctions.is_sets_equal(found_result, expected_result)
def test_collect_layouts_dependencies(self, id_set):
    """
    Given
        - A layout entry in the id_set.
    When
        - Building dependency graph for pack.
    Then
        - Extracting the packs that the layout depends on.
    """
    expected_result = {
        ("FeedMitreAttack", True),
        ("PrismaCloudCompute", True),
        ("CommonTypes", True),
        ("CrisisManagement", True),
    }
    layout_entries = [{
        "Dummy Layout": {
            "typeID": "dummy_layout",
            "name": "Dummy Layout",
            "pack": "dummy_pack",
            "kind": "edit",
            "path": "dummy_path",
            "incident_and_indicator_types": [
                "MITRE ATT&CK",
                "Prisma Cloud Compute Cloud Discovery",
            ],
            "incident_and_indicator_fields": [
                "indicator_adminname",
                "indicator_jobtitle",
            ],
        }
    }]

    found_result = PackDependencies._collect_layouts_dependencies(
        pack_layouts=layout_entries,
        id_set=id_set,
        verbose_file=VerboseFile(),
    )

    assert IsEqualFunctions.is_sets_equal(found_result, expected_result)
def test_collect_layouts_dependencies_filter_toversion(self, id_set):
    """
    Given
        - A layout entry in the id_set.
    When
        - Building dependency graph for pack.
    Then
        - Extracting the packs that the layout depends on.
        - Should ignore the NonSupported pack due to toversion settings of both
          indicator type and field.
    """
    expected_result = {("CommonTypes", True)}
    layout_entries = [{
        "Dummy Layout": {
            "typeID": "dummy_layout",
            "name": "Dummy Layout",
            "pack": "dummy_pack",
            "kind": "edit",
            "path": "dummy_path",
            "incident_and_indicator_types": ["accountRep"],
            "incident_and_indicator_fields": ["indicator_tags"],
        }
    }]

    found_result = PackDependencies._collect_layouts_dependencies(
        pack_layouts=layout_entries,
        id_set=id_set,
        verbose_file=VerboseFile(),
        exclude_ignored_dependencies=False,
    )

    assert IsEqualFunctions.is_sets_equal(found_result, expected_result)
def test_collect_detection_of_optional_dependencies(self, generic_command, id_set):
    """
    Given
        - A script entry depending on a generic (multi-integration) command.
    When
        - Collecting the script's dependencies.
    Then
        - Dependencies are detected and each is marked optional (mandatory False).
    """
    script_entries = [{
        "DummyScript": {
            "name": "DummyScript",
            "file_path": "dummy_path",
            "depends_on": [generic_command],
            "pack": "dummy_pack",
        }
    }]

    dependencies = PackDependencies._collect_scripts_dependencies(
        pack_scripts=script_entries,
        id_set=id_set,
        verbose_file=VerboseFile(),
    )

    assert len(dependencies) > 0
    for dependency in dependencies:
        # validate that mandatory is set to False
        assert not dependency[1]
def test_collect_mapper_dependencies(self, id_set):
    """
    Given
        - A mapper entry in the id_set.
    When
        - Building dependency graph for pack.
    Then
        - Extracting the packs that the mapper depends on.
    """
    expected_result = {
        ("AccessInvestigation", True),
        ("CommonTypes", True),
        ("PrismaCloud", True),
        ("BruteForce", True),
    }
    mapper_entries = [{
        "Dummy Mapper": {
            "name": "Dummy Mapper",
            "fromversion": "5.0.0",
            "pack": "dummy_pack",
            "incident_types": [
                "Access",
                "Authentication",
                "AWS CloudTrail Misconfiguration",
            ],
            "incident_fields": [
                "incident_accountgroups",
                "incident_accountid",
            ],
        }
    }]

    found_result = PackDependencies._collect_mappers_dependencies(
        pack_mappers=mapper_entries,
        id_set=id_set,
        verbose_file=VerboseFile(),
    )

    assert IsEqualFunctions.is_sets_equal(found_result, expected_result)
def test_collect_scripts_depends_on_two_scripts(self, id_set):
    """
    Given
        - A script entry depending on two other scripts.
    When
        - Collecting the script's dependencies.
    Then
        - Both packs owning the scripts are extracted as mandatory dependencies.
    """
    expected_result = {('HelloWorld', True), ('PrismaCloudCompute', True)}
    script_entries = [{
        "DummyScript": {
            "name": "DummyScript",
            "file_path": "dummy_path",
            "depends_on": ["PrismaCloudComputeParseAuditAlert", "HelloWorldScript"],
            "pack": "dummy_pack",
        }
    }]

    found_result = PackDependencies._collect_scripts_dependencies(
        pack_scripts=script_entries,
        id_set=id_set,
        verbose_file=VerboseFile(),
    )

    assert IsEqualFunctions.is_sets_equal(found_result, expected_result)
def test_collect_scripts_script_executions(self, id_set):
    """
    Given
        - A script entry in the id_set containing a script_executions, e.g:
          demisto.executeCommand(<command>).
    When
        - Building dependency graph for pack.
    Then
        - Extracting the pack that the script depends on.
        - Should recognize the pack.
    """
    expected_result = {('Active_Directory_Query', True)}
    script_entries = [{
        "DummyScript": {
            "name": "ADIsUserMember",
            "file_path": "Packs/DeprecatedContent/Scripts/script-ADIsUserMember.yml",
            "deprecated": False,
            "depends_on": [],
            "script_executions": ["ADGetUser"],
            "pack": "Active_Directory_Query",
        }
    }]

    found_result = PackDependencies._collect_scripts_dependencies(
        pack_scripts=script_entries,
        id_set=id_set,
        verbose_file=VerboseFile(),
    )

    assert IsEqualFunctions.is_sets_equal(found_result, expected_result)
def test_collect_playbooks_dependencies_skip_unavailable(self, id_set):
    """
    Given
        - A playbook entry in the id_set with skippable tasks.
    When
        - Building dependency graph for pack.
    Then
        - Extracting the packs that the playbook depends on; dependencies that
          appear only in skippable tasks are marked as optional.
    """
    expected_result = {
        # playbooks:
        ('Slack', False),
        ('Indeni', True),
        # integrations:
        ('FeedAlienVault', False),
        ('ipinfo', True),
        ('FeedAutofocus', True),
        # scripts:
        ('GetServerURL', False),
        ('HelloWorld', True),
    }
    playbook_entries = [{
        'Dummy Playbook': {
            'name': 'Dummy Playbook',
            'file_path': 'dummy_path',
            'fromversion': 'dummy_version',
            'implementing_scripts': [
                'GetServerURL',
                'HelloWorldScript',
            ],
            'implementing_playbooks': [
                'Failed Login Playbook - Slack v2',
                'Indeni Demo',
            ],
            'command_to_integration': {
                'alienvault-get-indicators': '',
                'ip': 'ipinfo',
                'autofocus-get-indicators': '',
            },
            'tests': ['dummy_playbook'],
            'pack': 'dummy_pack',
            'incident_fields': [],
            'skippable_tasks': [
                'Print',
                'Failed Login Playbook - Slack v2',
                'alienvault-get-indicators',
                'GetServerURL',
            ],
        }
    }]

    found_result = PackDependencies._collect_playbooks_dependencies(
        pack_playbooks=playbook_entries,
        id_set=id_set,
        verbose_file=VerboseFile(),
    )

    assert IsEqualFunctions.is_sets_equal(found_result, expected_result)