Example #1
0
    def handle_dependencies(self, pack_name: str, id_set_path: str,
                            logger: logging.Logger) -> None:
        """Recalculates the pack's dependencies and merges them into self.dependencies.

        Args:
            pack_name (str): The pack's name.
            id_set_path (str): the id_set file path.
            logger (logging.Logger): System logger already initialized.
        """
        calculated = PackDependencies.find_dependencies(
            pack_name,
            id_set_path=id_set_path,
            update_pack_metadata=False,
            silent_mode=True,
            complete_data=True)

        # Core packs must not gain new mandatory dependencies on non-core packs.
        # A dependency may be overridden as non-mandatory in the user metadata,
        # so anything already present in self.dependencies is allowed.
        if pack_name in CORE_PACKS_LIST:
            mandatory_dependencies = []
            for dep_name, dep_data in calculated.items():
                if dep_data.get('mandatory', False) is not True:
                    continue
                if dep_name in CORE_PACKS_LIST or dep_name in self.dependencies:
                    continue
                mandatory_dependencies.append(dep_name)

            if mandatory_dependencies:
                logger.error(
                    f'New mandatory dependencies {mandatory_dependencies} were '
                    f'found in the core pack {pack_name}')

        self.dependencies.update(calculated)
    def test_collect_scripts__filter_toversion(self, id_set):
        """
        Given
            - A script entry in the id_set depending on QRadar command.

        When
            - Building dependency graph for pack.

        Then
            - Extracting the packs that the script depends on.
            - Should ignore the Deprecated pack due to toversion settings of old QRadar integration.
        """
        pack_scripts = [{
            "DummyScript": {
                "name": "DummyScript",
                "file_path": "dummy_path",
                "depends_on": ["qradar-searches"],
                "pack": "dummy_pack",
            }
        }]

        dependencies = PackDependencies._collect_scripts_dependencies(
            pack_scripts=pack_scripts,
            id_set=id_set,
            verbose_file=VerboseFile(),
            exclude_ignored_dependencies=False,
        )

        assert IsEqualFunctions.is_sets_equal(dependencies, {('QRadar', True)})
    def validate_pack_dependencies(self, id_set_path=None):
        """Validate the pack's first-level dependencies.

        Fails when a mandatory dependency on the NonSupported or
        DeprecatedContent pack is found; core packs are excluded from
        the check. Returns True when the pack is valid.
        """
        try:
            click.secho(f'\nRunning pack dependencies validation on {self.pack}\n',
                        fg="bright_cyan")
            core_packs = tools.get_remote_file('Tests/Marketplace/core_packs_list.json') or []

            dependencies = PackDependencies.find_dependencies(
                self.pack, id_set_path=id_set_path, silent_mode=True, exclude_ignored_dependencies=False,
                update_pack_metadata=False)

            # Dependencies on core packs are always legitimate.
            for core_pack in core_packs:
                dependencies.pop(core_pack, None)
            if not dependencies:
                return True

            click.echo(click.style(f"Found dependencies result for {self.pack} pack:", bold=True))
            click.echo(click.style(json.dumps(dependencies, indent=4), bold=True))

            non_supported = dependencies.get('NonSupported', {})
            deprecated = dependencies.get('DeprecatedContent', {})
            if non_supported.get('mandatory') or deprecated.get('mandatory'):
                error_message, error_code = Errors.invalid_package_dependencies(self.pack)
                if self._add_error((error_message, error_code), file_path=self.pack_path):
                    return False
            return True
        except ValueError as e:
            # An id_set that knows nothing about this pack surfaces as a ValueError.
            if "Couldn't find any items for pack" not in str(e):
                raise
            error_message, error_code = Errors.invalid_id_set()
            if self._add_error((error_message, error_code), file_path=self.pack_path):
                return False
            return True
    def test_collect_incident_type_dependencies(self, id_set):
        """
        Given
            - An incident type entry in the id_set.
        When
            - Building dependency graph for pack.
        Then
            - Extracting the packs that the incident type depends on.
        """
        incident_types = [{
            "Dummy Incident Type": {
                "name": "Dummy Incident Type",
                "fromversion": "5.0.0",
                "pack": "dummy_pack",
                "playbooks": "Autofocus Query Samples, Sessions and Tags",
                "scripts": "AnalyzeMemImage",
            }
        }]

        dependencies = PackDependencies._collect_incidents_types_dependencies(
            pack_incidents_types=incident_types,
            id_set=id_set,
            verbose_file=VerboseFile(),
        )

        assert IsEqualFunctions.is_sets_equal(
            dependencies, {("AutoFocus", True), ("Volatility", True)})
    def test_collect_classifier_dependencies(self, id_set):
        """
        Given
            - A classifier entry in the id_set.
        When
            - Building dependency graph for pack.
        Then
            - Extracting the packs that the classifier depends on.
        """
        classifiers = [{
            "Dummy Classifier": {
                "name": "Dummy Classifier",
                "fromversion": "5.0.0",
                "pack": "dummy_pack",
                "incident_types": [
                    "Claroty Integrity Incident",
                    "FirewallUpgrade",
                    "Logz.io Alert",
                ],
            }
        }]

        dependencies = PackDependencies._collect_classifiers_dependencies(
            pack_classifiers=classifiers,
            id_set=id_set,
            verbose_file=VerboseFile(),
        )

        expected = {("Claroty", True), ("PAN-OS", True), ("Logzio", True)}
        assert IsEqualFunctions.is_sets_equal(dependencies, expected)
    def test_collect_playbooks_dependencies_on_incident_fields(self, id_set):
        """
        Given
            - A playbook entry in the id_set referencing incident fields.
        When
            - Collecting the playbook's dependencies.
        Then
            - The packs owning the incident fields are extracted as mandatory.
        """
        playbooks = [{
            "Dummy Playbook": {
                "name": "Dummy Playbook",
                "file_path": "dummy_path",
                "fromversion": "dummy_version",
                "implementing_scripts": [],
                "implementing_playbooks": [],
                "command_to_integration": {},
                "tests": ["dummy_playbook"],
                "pack": "dummy_pack",
                "incident_fields": [
                    "digitalguardianusername",
                    "Google Display Name",
                ],
            }
        }]

        dependencies = PackDependencies._collect_playbooks_dependencies(
            pack_playbooks=playbooks,
            id_set=id_set,
            verbose_file=VerboseFile(),
        )

        expected = {("DigitalGuardian", True), ("EmployeeOffboarding", True)}
        assert IsEqualFunctions.is_sets_equal(dependencies, expected)
    def test_collect_integration_dependencies(self, id_set):
        """
        Given
            - An integration entry in the id_set.
        When
            - Building dependency graph for pack.
        Then
            - Extracting the packs that the integration depends on.
        """
        integrations = [{
            "Dummy Integration": {
                "name": "Dummy Integration",
                "fromversion": "5.0.0",
                "pack": "dummy_pack",
                "classifiers": "HelloWorld",
                "mappers": ["Claroty-mapper", "EWS v2-mapper"],
                "incident_types": "HR Ticket",
                "indicator_fields": "CommonTypes",
            }
        }]

        dependencies = PackDependencies._collect_integrations_dependencies(
            pack_integrations=integrations,
            id_set=id_set,
            verbose_file=VerboseFile(),
        )

        expected = {
            ("HelloWorld", True), ("Claroty", True), ("EWS", True),
            ("CrisisManagement", True), ("CommonTypes", True),
        }
        assert IsEqualFunctions.is_sets_equal(dependencies, expected)
    def test_collect_playbooks_dependencies_on_integrations_with_brand(
            self, id_set):
        """A playbook command mapped to an explicit brand yields exactly one
        mandatory dependency on that brand's pack."""
        command = "ip"
        pack_name = "ipinfo"
        playbooks = [{
            "Dummy Playbook": {
                "name": "Dummy Playbook",
                "file_path": "dummy_path",
                "fromversion": "dummy_version",
                "implementing_scripts": [],
                "implementing_playbooks": [],
                "command_to_integration": {command: pack_name},
                "tests": ["dummy_playbook"],
                "pack": "dummy_pack",
            }
        }]

        dependencies = PackDependencies._collect_playbooks_dependencies(
            pack_playbooks=playbooks,
            id_set=id_set,
            verbose_file=VerboseFile(),
        )

        assert len(dependencies) == 1
        dependency_name, is_mandatory = dependencies.pop()
        assert dependency_name == pack_name
        assert is_mandatory
    def test_collect_detection_of_optional_dependencies_in_playbooks(
            self, integration_command, id_set):
        """A playbook command with no explicit brand produces only optional
        (non-mandatory) dependencies."""
        playbooks = [{
            "Dummy Playbook": {
                "name": "Dummy Playbook",
                "file_path": "dummy_path",
                "fromversion": "dummy_version",
                "implementing_scripts": [],
                "implementing_playbooks": [],
                "command_to_integration": {integration_command: ""},
                "tests": ["dummy_playbook"],
                "pack": "dummy_pack",
            }
        }]

        dependencies = PackDependencies._collect_playbooks_dependencies(
            pack_playbooks=playbooks,
            id_set=id_set,
            verbose_file=VerboseFile(),
        )

        assert len(dependencies) > 0
        # Every dependency must be marked non-mandatory.
        assert all(not is_mandatory for _, is_mandatory in dependencies)
Example #10
0
def calculate_single_pack_dependencies(pack: str, dependency_graph: object) -> Tuple[dict, list, str]:
    """
    Calculates pack dependencies given a pack and a dependencies graph.
    First is extract the dependencies subgraph of the given graph only using DFS algorithm with the pack as source.

    Then, for all the dependencies of that pack it Replaces the 'mandatory_for_packs' key with a boolean key 'mandatory'
    which indicates whether this dependency is mandatory for this pack or not.

    Then using that subgraph we get the first-level dependencies and all-levels dependencies.

    Args:
        pack: The pack for which we need to calculate the dependencies
        dependency_graph: The full dependencies graph

    Returns:
        first_level_dependencies: A dict of the form {'dependency_name': {'mandatory': < >, 'display_name': < >}}
        all_level_dependencies: A list with all dependencies names
        pack: The pack name
    """
    install_logging('Calculate Packs Dependencies.log', include_process_name=True)
    first_level_dependencies = {}
    all_level_dependencies = []
    try:
        logging.info(f"Calculating {pack} pack dependencies.")
        subgraph = PackDependencies.get_dependencies_subgraph_by_dfs(dependency_graph, pack)
        # Normalize every node first: replace 'mandatory_for_packs' with a
        # per-pack boolean 'mandatory' flag.
        for dependency_pack, additional_data in subgraph.nodes(data=True):
            logging.debug(f'Iterating dependency {dependency_pack} for pack {pack}')
            additional_data['mandatory'] = pack in additional_data['mandatory_for_packs']
            del additional_data['mandatory_for_packs']
        # Bug fix: parse once AFTER the loop. Previously this call sat inside
        # the loop, re-parsing the whole subgraph on every iteration while
        # earlier iterations saw a partially-normalized graph (some nodes still
        # carrying 'mandatory_for_packs' and missing 'mandatory').
        first_level_dependencies, all_level_dependencies = parse_for_pack_metadata(subgraph, pack)
    except Exception:
        logging.exception(f"Failed calculating {pack} pack dependencies")
    return first_level_dependencies, all_level_dependencies, pack
    def test_search_for_specific_pack_script_item(self, id_set):
        """Searching the id_set scripts section for a pack returns exactly
        that pack's script entries."""
        pack_id = "PrismaCloudCompute"
        script_names = [
            "PrismaCloudComputeParseAuditAlert",
            "PrismaCloudComputeParseCloudDiscoveryAlert",
            "PrismaCloudComputeParseComplianceAlert",
            "PrismaCloudComputeParseVulnerabilityAlert",
        ]
        expected_result = [
            {
                name: {
                    "name": name,
                    "file_path": f"Packs/{pack_id}/Scripts/{name}/{name}.yml",
                    "pack": pack_id,
                }
            }
            for name in script_names
        ]

        found_filtered_result = PackDependencies._search_for_pack_items(pack_id, id_set['scripts'])

        assert found_filtered_result == expected_result
    def test_collect_scripts_depends_on_with_two_inputs(self, id_set):
        """Two scripts depending on commands from different packs yield both
        packs as mandatory dependencies."""
        pack_scripts = [
            {
                "DummyScript1": {
                    "name": "DummyScript1",
                    "file_path": "dummy_path1",
                    "depends_on": ["sslbl-get-indicators"],
                    "pack": "dummy_pack",
                }
            },
            {
                "DummyScript2": {
                    "name": "DummyScript2",
                    "file_path": "dummy_path1",
                    "depends_on": ["ad-get-user"],
                    "pack": "dummy_pack",
                }
            },
        ]

        dependencies = PackDependencies._collect_scripts_dependencies(
            pack_scripts=pack_scripts, id_set=id_set)

        assert dependencies == {('Active_Directory_Query', True), ('Feedsslabusech', True)}
    def test_build_dependency_graph_include_ignored_content(self, id_set):
        """
        Given
            - A pack name which depends on unsupported content.
        When
            - Building dependency graph for pack.
        Then
            - Extracting the pack dependencies with unsupported content.
        """
        pack_name = "ImpossibleTraveler"
        graph = PackDependencies.build_dependency_graph(
            pack_id=pack_name,
            id_set=id_set,
            verbose_file=VerboseFile(),
            exclude_ignored_dependencies=False)

        roots = [node for node in graph.nodes if graph.in_degree(node) == 0]
        dependencies = [node for node in graph.nodes if graph.in_degree(node) > 0]

        assert roots[0] == pack_name
        assert len(dependencies) > 0
        assert 'NonSupported' in dependencies
    def test_collect_scripts_depends_on_with_two_inputs(self, id_set):
        """Two scripts depending on commands from different packs yield both
        packs as mandatory dependencies."""
        pack_scripts = [
            {
                "DummyScript1": {
                    "name": "DummyScript1",
                    "file_path": "dummy_path1",
                    "depends_on": ["sslbl-get-indicators"],
                    "pack": "dummy_pack",
                }
            },
            {
                "DummyScript2": {
                    "name": "DummyScript2",
                    "file_path": "dummy_path1",
                    "depends_on": ["ad-get-user"],
                    "pack": "dummy_pack",
                }
            },
        ]

        dependencies = PackDependencies._collect_scripts_dependencies(
            pack_scripts=pack_scripts,
            id_set=id_set,
            verbose_file=VerboseFile(),
        )

        expected = {('Active_Directory_Query', True), ('Feedsslabusech', True)}
        assert IsEqualFunctions.is_sets_equal(dependencies, expected)
Example #15
0
def main():
    """ Main function for iterating over existing packs folder in content repo and creating json of all
    packs dependencies. The logic of pack dependency is identical to sdk find-dependencies command.

    """
    options = option_handler()
    output_path = options.output_path
    id_set_path = options.id_set_path
    # The Base pack never participates in dependency calculation.
    IGNORED_FILES.append(GCPConfig.BASE_PACK)

    with open(id_set_path, 'r') as id_set_file:
        id_set = json.load(id_set_file)

    pack_dependencies_result = {}

    print("Starting dependencies calculation")
    for pack_dir in os.scandir(PACKS_FULL_PATH):
        # Skip non-directories and explicitly ignored packs.
        if not pack_dir.is_dir() or pack_dir.name in IGNORED_FILES:
            print_warning(
                f"Skipping dependency calculation of {pack_dir.name} pack.")
            continue
        print(f"Calculating {pack_dir.name} pack dependencies.")

        try:
            dependency_graph = PackDependencies.build_dependency_graph(
                pack_id=pack_dir.name,
                id_set=id_set,
                verbose_file=VerboseFile(''),
            )
            first_level_dependencies, all_level_dependencies = parse_for_pack_metadata(
                dependency_graph, pack_dir.name)
        except Exception as e:
            # A single failing pack should not abort the whole run.
            print_error(
                f"Failed calculating {pack_dir.name} pack dependencies. Additional info:\n{e}"
            )
            continue

        pack_dependencies_result[pack_dir.name] = {
            "dependencies": first_level_dependencies,
            "displayedImages": list(first_level_dependencies),
            "allLevelDependencies": all_level_dependencies,
            "path": os.path.join(PACKS_FOLDER, pack_dir.name),
            "fullPath": pack_dir.path,
        }

    print(
        f"Number of created pack dependencies entries: {len(pack_dependencies_result)}"
    )
    print_color("Finished dependencies calculation", LOG_COLORS.GREEN)

    with open(output_path, 'w') as pack_dependencies_file:
        json.dump(pack_dependencies_result, pack_dependencies_file, indent=4)

    print_color(f"Created packs dependencies file at: {output_path}",
                LOG_COLORS.GREEN)
Example #16
0
    def test_build_dependency_graph(self, id_set):
        """The graph built for a pack has that pack as its single root and at
        least one dependency node."""
        pack_name = "ImpossibleTraveler"
        graph = PackDependencies.build_dependency_graph(pack_id=pack_name, id_set=id_set)

        roots = [node for node in graph.nodes if graph.in_degree(node) == 0]
        dependencies = [node for node in graph.nodes if graph.in_degree(node) > 0]

        assert roots[0] == pack_name
        assert len(dependencies) > 0
    def test_collect_scripts_command_to_integrations_and_script_executions(self, id_set):
        """
        Given
            - A script entry in the id_set containing command_to_integrations with a reputation command
             and script_executions.

        When
            - Building dependency graph for pack.

        Then
            - Extracting the packs that the script depends on.
            - Should recognize the mandatory pack and the non mandatory packs.
        """
        optional_packs = [
            'Recorded_Future', 'illuminate', 'ThreatQ', 'Anomali_ThreatStream',
            'URLHaus', 'Symantec_Deepsight', 'XForceExchange', 'XFE', 'MISP',
            'AlienVault_OTX', 'ThreatMiner', 'isight', 'CrowdStrikeIntel',
            'ReversingLabs_A1000', 'PolySwarm', 'TruSTAR',
            'ReversingLabs_Titanium_Cloud', 'ThreatExchange', 'EclecticIQ',
            'AutoFocus', 'McAfee-TIE', 'Maltiverse', 'Palo_Alto_Networks_WildFire',
            'Polygon', 'Cofense-Intelligence', 'Lastline', 'ThreatConnect',
            'VirusTotal', 'Flashpoint',
        ]
        expected_result = {(pack, False) for pack in optional_packs}
        expected_result.add(('Active_Directory_Query', True))

        pack_scripts = [{
            "DummyScript": {
                "name": "double_dependency",
                "file_path": "Packs/DeprecatedContent/Scripts/script-ADIsUserMember.yml",
                "deprecated": False,
                "depends_on": [],
                "command_to_integration": {"file": "many integrations"},
                "script_executions": ["ADGetUser"],
                "pack": "Active_Directory_Query",
            }
        }]

        dependencies = PackDependencies._collect_scripts_dependencies(
            pack_scripts=pack_scripts,
            id_set=id_set,
            verbose_file=VerboseFile(),
        )

        assert IsEqualFunctions.is_sets_equal(dependencies, expected_result)
    def test_collect_scripts_depends_on_integration(self, dependency_integration_command, expected_result, id_set):
        """A script depending on an integration command resolves to the
        parametrized expected pack set."""
        pack_scripts = [{
            "DummyScript": {
                "name": "DummyScript",
                "file_path": "dummy_path",
                "depends_on": [dependency_integration_command],
                "pack": "dummy_pack",
            }
        }]

        dependencies = PackDependencies._collect_scripts_dependencies(
            pack_scripts=pack_scripts, id_set=id_set)

        assert dependencies == expected_result
Example #19
0
    def validate_pack_dependencies(self):
        """Validate the pack's first-level dependencies.

        Checks core-pack constraints and rejects mandatory dependencies on the
        NonSupported / DeprecatedContent packs. Returns True when valid.
        """
        try:
            click.secho(f'\nRunning pack dependencies validation on {self.pack}\n',
                        fg="bright_cyan")
            core_pack_list = get_core_pack_list()

            dependencies = PackDependencies.find_dependencies(
                self.pack, id_set_path=self.id_set_path, silent_mode=True, exclude_ignored_dependencies=False,
                update_pack_metadata=False, skip_id_set_creation=self.skip_id_set_creation, use_pack_metadata=True
            )

            if not dependencies:
                if not self.suppress_print:
                    click.secho("No first level dependencies found", fg="yellow")
                return True

            # Dependencies on core packs are always legitimate.
            for core_pack in core_pack_list:
                dependencies.pop(core_pack, None)
            if not dependencies:
                if not self.suppress_print:
                    click.secho("Found first level dependencies only on core packs", fg="yellow")
                return True

            click.echo(click.style(f"Found dependencies result for {self.pack} pack:", bold=True))
            click.echo(click.style(json.dumps(dependencies, indent=4), bold=True))

            # Core packs carry extra restrictions on what they may depend on.
            if self.pack in core_pack_list:
                if not self.validate_core_pack_dependencies(dependencies):
                    return False

            non_supported = dependencies.get('NonSupported', {})
            deprecated = dependencies.get('DeprecatedContent', {})
            if not self.is_invalid_package_dependencies(non_supported, deprecated):
                return False

            return True

        except ValueError as e:
            # An id_set that knows nothing about this pack surfaces as a ValueError.
            if "Couldn't find any items for pack" not in str(e):
                raise
            error_message, error_code = Errors.invalid_id_set()
            if self._add_error((error_message, error_code), file_path=self.pack_path):
                return False
            return True
Example #20
0
    def test_collect_scripts_depends_on_script(self, dependency_script, expected_result, id_set):
        """A script depending on another script resolves to the parametrized
        expected pack set."""
        pack_scripts = [{
            "DummyScript": {
                "name": "DummyScript",
                "file_path": "dummy_path",
                "depends_on": [dependency_script],
                "pack": "dummy_pack",
            }
        }]

        dependencies = PackDependencies._collect_scripts_dependencies(
            pack_scripts=pack_scripts, id_set=id_set)

        assert IsEqualFunctions.is_sets_equal(dependencies, expected_result)
Example #21
0
def get_all_packs_dependency_graph(id_set: dict, packs: list) -> Iterable:
    """
    Build a dependency graph covering every given pack.

    Args:
        id_set: The content of id_set file
        packs: The packs that should be part of the dependencies calculation

    Returns:
        A graph with all packs dependencies
    """
    logging.info("Calculating pack dependencies.")
    try:
        # Delegate the heavy lifting to PackDependencies; verbose output is disabled here.
        return PackDependencies.build_all_dependencies_graph(packs, id_set=id_set, verbose=False)
    except Exception:
        # A failed graph build is unrecoverable for callers — log the traceback and abort.
        logging.exception("Failed calculating dependencies graph")
        sys.exit(2)
    def test_collect_scripts_depends_on_two_scripts(self, id_set):
        """
        Given
            - A script entry in the id_set depending on two scripts from different packs.

        When
            - Collecting the script dependencies for the pack.

        Then
            - Both packs that the script depends on are extracted.
        """
        # Consistency fix: use IsEqualFunctions.is_sets_equal like the sibling
        # tests in this class, instead of a raw `==` on the result set.
        expected_result = {('HelloWorld', True), ('PrismaCloudCompute', True)}

        test_input = [
            {
                "DummyScript": {
                    "name": "DummyScript",
                    "file_path": "dummy_path",
                    "depends_on": [
                        "PrismaCloudComputeParseAuditAlert",
                        "HelloWorldScript"
                    ],
                    "pack": "dummy_pack"
                }
            }
        ]

        found_result = PackDependencies._collect_scripts_dependencies(pack_scripts=test_input, id_set=id_set)

        assert IsEqualFunctions.is_sets_equal(found_result, expected_result)
    def test_collect_scripts_depends_on_integration(
            self, dependency_integration_command, expected_result, id_set):
        """
        Given
            - A script entry in the id_set depending on an integration command.

        When
            - Collecting the script dependencies for the pack.

        Then
            - The pack providing the integration command is extracted.
        """
        pack_scripts_input = [
            {
                "DummyScript": {
                    "name": "DummyScript",
                    "file_path": "dummy_path",
                    "depends_on": [dependency_integration_command],
                    "pack": "dummy_pack",
                }
            }
        ]

        actual_result = PackDependencies._collect_scripts_dependencies(
            pack_scripts=pack_scripts_input,
            id_set=id_set,
            verbose_file=VerboseFile(),
        )

        assert IsEqualFunctions.is_sets_equal(actual_result, expected_result)
    def test_search_for_specific_pack_playbook_item(self, id_set):
        """
        Given
            - A pack id whose playbooks exist in the id_set.

        When
            - Searching the id_set playbooks section for that pack's items.

        Then
            - Only the playbook entries belonging to the pack are returned.
        """
        expected_result = [
            {
                "ExpanseParseRawIncident": {
                    "name": "Expanse Incident Playbook",
                    "file_path": "Packs/Expanse/Playbooks/Expanse_Incident_Playbook.yml",
                    "fromversion": "5.0.0",
                    "implementing_scripts": [
                        "ExpanseParseRawIncident"
                    ],
                    "pack": "Expanse"
                }
            }
        ]

        actual_result = PackDependencies._search_for_pack_items("Expanse", id_set['playbooks'])

        assert actual_result == expected_result
    def test_collect_indicator_type_dependencies(self, id_set):
        """
        Given
            - An indicator type entry in the id_set.

        When
            - Building dependency graph for pack.

        Then
            - Extracting the packs that the indicator type depends on.
        """
        # Integration-driven dependencies are optional; script-driven ones are mandatory.
        expected_result = {
            ("Feedsslabusech", False), ("AbuseDB", False), ("ActiveMQ", False),
            ("CommonScripts", True), ("Carbon_Black_Enterprise_Response", True),
        }

        pack_indicator_types_input = [
            {
                "Dummy Indicator Type": {
                    "name": "Dummy Indicator Type",
                    "fromversion": "5.0.0",
                    "pack": "dummy_pack",
                    "integrations": [
                        "abuse.ch SSL Blacklist Feed",
                        "AbuseIPDB",
                        "ActiveMQ",
                    ],
                    "scripts": [
                        "AssignAnalystToIncident",
                        "CBAlerts",
                    ],
                }
            }
        ]

        actual_result = PackDependencies._collect_indicators_types_dependencies(
            pack_indicators_types=pack_indicator_types_input,
            id_set=id_set,
            verbose_file=VerboseFile(),
        )

        assert IsEqualFunctions.is_sets_equal(actual_result, expected_result)
    def test_collect_incident_field_dependencies(self, id_set):
        """
        Given
            - An incident field entry in the id_set.

        When
            - Building dependency graph for pack.

        Then
            - Extracting the packs that the incident field depends on.
        """
        # Only script-driven dependencies are expected; the incident-type packs
        # ("Expanse", True) and ("IllusiveNetworks", True) are deliberately excluded.
        expected_result = {
            ("Carbon_Black_Enterprise_Response", True),
            ("Phishing", True),
        }

        pack_incident_fields_input = [
            {
                "Dummy Incident Field": {
                    "name": "Dummy Incident Field",
                    "fromversion": "5.0.0",
                    "pack": "dummy_pack",
                    "incident_types": [
                        "Expanse Appearance",
                        "Illusive Networks Incident",
                    ],
                    "scripts": [
                        "CBLiveFetchFiles",
                        "CheckEmailAuthenticity",
                    ],
                }
            }
        ]

        actual_result = PackDependencies._collect_incidents_fields_dependencies(
            pack_incidents_fields=pack_incident_fields_input,
            id_set=id_set,
            verbose_file=VerboseFile(),
        )

        assert IsEqualFunctions.is_sets_equal(actual_result, expected_result)
    def test_collect_scripts_depends_on_with_two_inputs(self, id_set):
        """
        Given
            - 2 scripts entries in the id_set depending on different integrations.

        When
            - Building dependency graph for the packs.

        Then
            - Extracting the packs that the scripts depends on.
            - Should recognize both packs.
        """
        expected_result = {('Active_Directory_Query', True), ('Feedsslabusech', True)}

        pack_scripts_input = [
            {
                "DummyScript1": {
                    "name": "DummyScript1",
                    "file_path": "dummy_path1",
                    "depends_on": ["sslbl-get-indicators"],
                    "pack": "dummy_pack",
                }
            },
            {
                "DummyScript2": {
                    "name": "DummyScript2",
                    "file_path": "dummy_path1",
                    "depends_on": ["ad-get-user"],
                    "pack": "dummy_pack",
                }
            },
        ]

        actual_result = PackDependencies._collect_scripts_dependencies(
            pack_scripts=pack_scripts_input,
            id_set=id_set,
            verbose_file=VerboseFile(),
        )

        assert IsEqualFunctions.is_sets_equal(actual_result, expected_result)
Example #28
0
    def test_collect_detection_of_optional_dependencies(self, generic_command, id_set):
        """
        Given
            - A script entry in the id_set depending on a generic (multi-implementer) command.

        When
            - Collecting the script dependencies for the pack.

        Then
            - Every detected dependency is marked as optional (mandatory flag is False).
        """
        pack_scripts_input = [
            {
                "DummyScript": {
                    "name": "DummyScript",
                    "file_path": "dummy_path",
                    "depends_on": [generic_command],
                    "pack": "dummy_pack",
                }
            }
        ]

        dependencies_set = PackDependencies._collect_scripts_dependencies(pack_scripts=pack_scripts_input, id_set=id_set)

        assert len(dependencies_set) > 0

        # Each entry is a (pack_name, mandatory) tuple; a generic command must never be mandatory.
        assert all(not dependency_data[1] for dependency_data in dependencies_set)
    def test_search_for_specific_pack_playbook_item(self, id_set):
        """
        Given
            - A pack id whose playbooks exist in the id_set.

        When
            - Searching the id_set playbooks section for that pack's items.

        Then
            - Only the playbook entries belonging to the pack are returned.
        """
        expected_result = [
            {
                "ExpanseParseRawIncident": {
                    "name": "Expanse Incident Playbook",
                    "file_path": "Packs/Expanse/Playbooks/Expanse_Incident_Playbook.yml",
                    "fromversion": "5.0.0",
                    "implementing_scripts": ["ExpanseParseRawIncident"],
                    "tests": ["No tests (auto formatted)"],
                    "pack": "Expanse",
                }
            }
        ]

        actual_result = PackDependencies._search_for_pack_items("Expanse", id_set['playbooks'])

        assert IsEqualFunctions.is_lists_equal(actual_result, expected_result)
    def test_collect_layouts_dependencies(self, id_set):
        """
        Given
            - A layout entry in the id_set.

        When
            - Building dependency graph for pack.

        Then
            - Extracting the packs that the layout depends on.
        """
        expected_result = {
            ("FeedMitreAttack", True),
            ("PrismaCloudCompute", True),
            ("CommonTypes", True),
            ("CrisisManagement", True),
        }

        pack_layouts_input = [
            {
                "Dummy Layout": {
                    "typeID": "dummy_layout",
                    "name": "Dummy Layout",
                    "pack": "dummy_pack",
                    "kind": "edit",
                    "path": "dummy_path",
                    "incident_and_indicator_types": [
                        "MITRE ATT&CK",
                        "Prisma Cloud Compute Cloud Discovery",
                    ],
                    "incident_and_indicator_fields": [
                        "indicator_adminname",
                        "indicator_jobtitle",
                    ],
                }
            }
        ]

        actual_result = PackDependencies._collect_layouts_dependencies(
            pack_layouts=pack_layouts_input,
            id_set=id_set,
            verbose_file=VerboseFile(),
        )

        assert IsEqualFunctions.is_sets_equal(actual_result, expected_result)