def find_badhostnameusage(profilepackage):
    """Report Address Groups and Security rules that reference address
    objects flagged as unresolvable by the 'BadHostname' validator.

    Returns a list of BadEntry records, one per offending Address Group or
    (enabled) Security Pre/Post rule.
    """
    device_groups = profilepackage.device_groups
    devicegroup_objects = profilepackage.devicegroup_objects
    devicegroup_exclusive_objects = profilepackage.devicegroup_exclusive_objects

    # Delegate hostname resolution to the existing BadHostname validator and
    # collect the names of the objects it flagged:
    _, _, validator_function = get_policy_validators()['BadHostname']
    bad_address_objects = {
        entry.data.get('name') for entry in validator_function(profilepackage)
    }

    badentries = []
    total = len(device_groups)

    for index, device_group in enumerate(device_groups, start=1):
        logger.info(
            f"({index}/{total}) Checking {device_group}'s Address Groups"
        )
        for entry in devicegroup_objects[device_group]['AddressGroups']:
            group_members = {
                member.text for member in entry.findall('./static/member')
            }
            bad_members = bad_address_objects & group_members
            if bad_members:
                text = f"Device Group {device_group}'s Address Group '{entry.get('name')}' uses the following address objects which don't resolve: {sorted(bad_members)}"
                badentries.append(
                    BadEntry(data=entry,
                             text=text,
                             device_group=device_group,
                             entry_type='AddressGroups'))

    for index, device_group in enumerate(device_groups, start=1):
        for ruletype in ('SecurityPreRules', 'SecurityPostRules'):
            rules = devicegroup_exclusive_objects[device_group][ruletype]
            logger.info(
                f"({index}/{total}) Checking {device_group}'s {ruletype}"
            )

            for entry in rules:
                # Disabled rules can be ignored
                disabled_node = entry.find("./disabled")
                if disabled_node is not None and disabled_node.text == "yes":
                    continue

                rule_name = entry.get('name')
                source_members = {sm.text for sm in entry.findall('./source/member')}
                dest_members = {dm.text for dm in entry.findall('./destination/member')}

                for members, direction in ((source_members, 'Source'),
                                           (dest_members, 'Dest')):
                    bad_members = bad_address_objects & members
                    if bad_members:
                        text = f"Device Group {device_group}'s {ruletype} '{rule_name}' {direction} contain the following address objects which don't resolve: {sorted(bad_members)}"
                        badentries.append(
                            BadEntry(data=entry,
                                     text=text,
                                     device_group=device_group,
                                     entry_type=ruletype))
    return badentries
# Ejemplo n.º 2
# 0
def find_misleading_services(profilepackage):
    """Detect Service objects whose names mention a protocol (tcp/udp) or a
    port number that does not match what the object actually defines.

    Returns a list of BadEntry records, one per misleading Service object.
    """
    device_groups = profilepackage.device_groups
    devicegroup_objects = profilepackage.devicegroup_objects

    PROTOCOL_TYPES = ('tcp', 'udp')

    badentries = []

    logger.info("*" * 80)
    logger.info("Checking for misleading Service objects")

    for i, device_group in enumerate(device_groups):
        logger.info(
            f"({i+1}/{len(device_groups)}) Checking {device_group}'s Service objects"
        )
        for service_entry in devicegroup_objects[device_group]['Services']:
            # For simplicity, convert the XML object to a dict:
            service_dict = xml_object_to_dict(service_entry)
            entry_name = service_dict['entry']['@name']
            # Pick whichever protocol key the service defines:
            entry_protocol = next(
                (proto for proto in PROTOCOL_TYPES
                 if proto in service_dict['entry']['protocol']),
                None)
            if entry_protocol is None:
                # This should not be possible!
                continue
            entry_port = service_dict['entry']['protocol'][entry_protocol]['port']

            lowered_name = entry_name.lower()
            contains_protocol = 'tcp' in lowered_name or 'udp' in lowered_name
            # Three or more consecutive digits in the name look like a port:
            contains_port = re.search(r'\d{3,}', entry_name) is not None
            protocol_correct = entry_protocol in lowered_name
            # Only the start of a port range needs to appear in the name:
            port_correct = entry_port.split('-', 1)[0] in entry_name

            protocol_misleading = contains_protocol and not protocol_correct
            port_misleading = contains_port and not port_correct

            if protocol_misleading and port_misleading:
                problem = f"protocol {entry_protocol} and port {entry_port}"
            elif protocol_misleading:
                problem = f"protocol {entry_protocol}"
            elif port_misleading:
                problem = f"port {entry_port}"
            else:
                continue

            text = f"Device Group {device_group}'s Service {entry_name} uses {problem}"
            badentries.append(
                BadEntry(data=service_entry,
                         text=text,
                         device_group=device_group,
                         entry_type='Services'))
    return badentries
def find_unqualified_fqdn(profilepackage):
    """Flag Address objects whose FQDN field holds a bare hostname (no '.')
    rather than a fully-qualified domain name.

    Hostnames starting with any of the configured 'Ignored DNS Prefixes' are
    skipped. Returns a list of BadEntry records whose data is the tuple
    (xml entry, resolved fqdn).

    Bug fix: when the 'Ignored DNS Prefixes' setting was absent or empty,
    ``''.split(',')`` produced ``['']`` and every hostname matched
    ``startswith('')``, silently disabling this check. Empty prefixes are
    now filtered out.
    """
    device_groups = profilepackage.device_groups
    devicegroup_objects = profilepackage.devicegroup_objects
    # Drop empty strings so an unset setting doesn't match every hostname:
    ignored_dns_prefixes = tuple(
        prefix.lower()
        for prefix in profilepackage.settings.get('Ignored DNS Prefixes', '').split(',')
        if prefix)

    badentries = []

    logger.info("*" * 80)
    logger.info("Checking for FQDN entries that are hostnames and not FQDNs")

    bad_address_objects = set()
    for i, device_group in enumerate(device_groups):
        logger.info(f"({i + 1}/{len(device_groups)}) Checking {device_group}'s Addresses")
        for entry in devicegroup_objects[device_group]['Addresses']:
            entry_name = entry.get('name')
            for fqdn_node in entry.findall('fqdn'):
                fqdn_text = fqdn_node.text.lower()
                # str.startswith accepts a tuple of prefixes:
                if ignored_dns_prefixes and fqdn_text.startswith(ignored_dns_prefixes):
                    continue
                # FQDN lookups are slow, so only lookup entries that don't have anything resembling a TLD
                if '.' in fqdn_text:
                    continue
                fqdn = cached_fqdn_lookup(fqdn_text)
                # fqdn_text is already lowercased above:
                if fqdn.lower() != fqdn_text:
                    bad_address_objects.add(entry_name)
                    text = f"Device Group {device_group}'s address '{entry_name}' uses a hostname of '{fqdn_text}' instead of an FQDN of: '{fqdn}'"
                    badentries.append(
                        BadEntry(data=(entry, fqdn), text=text, device_group=device_group, entry_type='Addresses'))
    return badentries
def find_superseding_rules(profilepackage):
    """Report Security rules that are superseded by later rules.

    Rules inherited from parent device groups are included in the analysis.
    Returns a list of BadEntry records whose data is the pair
    (prior_tuple, superseding_tuple).
    """
    device_groups = profilepackage.device_groups
    devicegroup_objects = profilepackage.devicegroup_objects
    device_group_hierarchy_parent = profilepackage.device_group_hierarchy_parent

    badentries = []

    logger.info("*" * 80)
    logger.info("Checking for Superseding rules")

    for device_group in device_groups:
        logger.info(f"Checking Device group {device_group}")
        # As security rules are inherited from parent device groups, we'll need to check those too
        all_rules = get_all_rules_for_dg(device_group,
                                         device_group_hierarchy_parent,
                                         devicegroup_objects)
        superseding_rules = find_superseding(device_group,
                                             transform_rules(all_rules))

        # Report overlapping rules
        for prior_tuple, superseding_tuple in superseding_rules:
            prior_dg, prior_ruletype, prior_rule_name, _prior_entry = prior_tuple
            dg, ruletype, rule_name, _rule_entry = superseding_tuple

            text = (f"{prior_dg}'s {prior_ruletype} '{prior_rule_name}' is "
                    f"superseded by {dg}'s {ruletype} '{rule_name}'")
            logger.debug(text)
            badentries.append(
                BadEntry(data=(prior_tuple, superseding_tuple),
                         text=text,
                         device_group=device_group,
                         entry_type=None))

    return badentries
def fqdn_contains_ip(profilepackage):
    """Detect Address objects whose FQDN field is actually an IPv4 address.

    Returns a list of BadEntry records, one per offending Address object.
    """
    device_groups = profilepackage.device_groups
    pan_config = profilepackage.pan_config

    # Dotted-quad IPv4, each octet restricted to 0-255:
    ip_pattern = re.compile(
        r"^((25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])\.){3}(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])$"
    )
    badentries = []

    logger.info("*" * 80)

    total = len(device_groups)
    for i, device_group in enumerate(device_groups):
        logger.info(
            f"({i + 1}/{total}) Checking {device_group}'s Addresses"
        )
        for entry in pan_config.get_devicegroup_object('Addresses',
                                                       device_group):
            entry_name = entry.get('name')
            for fqdn_node in entry.findall('fqdn'):
                if ip_pattern.search(fqdn_node.text) is not None:
                    text = f"Device Group {device_group}'s address '{entry_name}' uses the following FQDN which appears to be an IP: '{fqdn_node.text}'"
                    badentries.append(
                        BadEntry(data=entry,
                                 text=text,
                                 device_group=device_group,
                                 entry_type='Addresses'))
    return badentries
def replace_addressgroup_contents(addressgroups_needing_replacement, address_to_replacement):
    """Build BadEntry records describing the rewritten static member list for
    each AddressGroup whose members must be substituted.

    Args:
        addressgroups_needing_replacement: iterable of
            (device_group, object_type, xml_entry) tuples for AddressGroups
            containing members that should be replaced.
        address_to_replacement: dict mapping a current member name to the
            member name that should replace it.

    Returns:
        list of BadEntry, one per group; each entry's data is
        [original xml entry, updated policy dict].
    """
    badentries = []
    for object_dg, object_type, object_entry in addressgroups_needing_replacement:
        object_policy_dict = xml_object_to_dict(object_entry)['entry']
        new_addresses = []
        # Maps old member name -> replacement, for the summary text below.
        replacements_made = {}

        # If it's an addressgroup with only one member, it'll be parsed as a string, not a list
        if isinstance(object_policy_dict['static']['member'], str):
            # NOTE(review): assumes the lone member appears in
            # address_to_replacement (KeyError otherwise) — callers appear to
            # pre-filter to groups that need replacement; confirm.
            member_to_replace = object_policy_dict['static']['member']
            replacements_made[member_to_replace] = address_to_replacement[member_to_replace]
            new_addresses.append(address_to_replacement[member_to_replace])
        else:
            for member in object_policy_dict['static']['member']:
                if member in new_addresses:
                    # Member is already present, nothing to do
                    continue
                elif member not in address_to_replacement:
                    # Member is not present and doesn't need to be replaced, so keep it as is:
                    new_addresses.append(member)
                elif member in address_to_replacement and address_to_replacement[member] not in new_addresses:
                    # Member needs to be replaced, and replacement is not already present, so add it:
                    new_addresses.append(address_to_replacement[member])
                    replacements_made[member] = address_to_replacement[member]
                else:
                    # Member needs to be replaced, but replacement is already present, so nothing to do:
                    continue
        # Sanity check: the rewrite must actually change the member list.
        # (Always true in the single-member branch, since a str never equals
        # a list.)
        assert object_policy_dict['static']['member'] != new_addresses

        object_policy_dict['static']['member'] = new_addresses
        text = f"Replace the following Address members in {object_dg}'s {object_type} {object_entry.get('name')}: {sorted([k + ' with ' + v for k, v in replacements_made.items()])}"
        badentries.append(BadEntry(data=[object_entry, object_policy_dict], text=text, device_group=object_dg, entry_type=object_type))
    return badentries
# Ejemplo n.º 7
# 0
def find_local_similar_names(devicegroup_objects, device_group, object_type1, object_type2):
    """Find objects within one device group whose names collide when compared
    case-insensitively, across the two given object types (which share a
    namespace, e.g. Addresses and AddressGroups).

    Returns a list of BadEntry records, one per colliding lowercase name.
    """
    names_to_objects = collections.defaultdict(list)
    for obj_type in (object_type1, object_type2):
        for local_obj in devicegroup_objects[device_group][obj_type]:
            lowered = local_obj.get('name').lower()
            names_to_objects[lowered].append((device_group, obj_type, local_obj))

    badentries = []
    for dupes in names_to_objects.values():
        if len(dupes) < 2:
            continue
        type1_names = [obj.get('name') for _, obj_type, obj in dupes
                       if obj_type == object_type1]
        type2_names = [obj.get('name') for _, obj_type, obj in dupes
                       if obj_type == object_type2]

        parts = []
        if type1_names:
            parts.append(f"{object_type1}: {type1_names}")
        if type2_names:
            parts.append(f"{object_type2}: {type2_names}")
        suffix_text = " and ".join(parts)

        text = f"Device Group {device_group} contains objects with similar names: {suffix_text}"
        badentries.append(BadEntry(data=dupes, text=text, device_group=device_group, entry_type=object_type1))
    return badentries
def find_bad_log_setting(profilepackage):
    """Verify that every enabled Security rule uses the mandated log
    forwarding profile. Returns [] when no profile is mandated.
    """
    mandated_log_profile = profilepackage.settings['Mandated Logging Profile']
    device_groups = profilepackage.device_groups
    pan_config = profilepackage.pan_config

    if not mandated_log_profile:
        return []

    badentries = []

    logger.info("*" * 80)
    logger.info("Checking for incorrect log settings")

    for i, device_group in enumerate(device_groups):
        for ruletype in ('SecurityPreRules', 'SecurityPostRules'):
            rules = pan_config.get_devicegroup_policy(ruletype, device_group)
            logger.info(f"({i+1}/{len(device_groups)}) Checking {device_group}'s {ruletype}")

            for entry in rules:
                rule_name = entry.get('name')
                # Disabled rules can be ignored
                disabled_node = entry.find("./disabled")
                if disabled_node is not None and disabled_node.text == "yes":
                    continue

                log_setting_node = entry.find("./log-setting")
                log_setting = log_setting_node.text if log_setting_node is not None else None

                if log_setting is None:
                    # 'default' has special treatment, in that if the 'default'
                    # profile exists, entries without a value will automatically
                    # use the 'default' log profile.
                    if mandated_log_profile == 'default':
                        continue
                    text = f"Device Group {device_group}'s {ruletype} '{rule_name}' doesn't use any log profile!"
                elif log_setting != mandated_log_profile:
                    text = f"Device Group {device_group}'s {ruletype} '{rule_name}' doesn't use log profile '{mandated_log_profile}', instead it uses '{log_setting}'"
                else:
                    # Rule already uses the mandated profile.
                    continue

                logger.debug(text)
                badentries.append(BadEntry(data=[entry, mandated_log_profile], text=text, device_group=device_group, entry_type=ruletype))

    return badentries
def find_redundant_addresses(profilepackage):
    """Find Security rules whose full source/destination member list exactly
    matches an existing AddressGroup, so the members can be replaced with
    that single group.
    """
    device_groups = profilepackage.device_groups
    pan_config = profilepackage.pan_config

    badentries = []

    logger.info("*" * 80)
    logger.info("Checking for redundant rule address members")

    for device_group in device_groups:
        logger.info(f"Checking Device group {device_group}")
        # Build the list of all AddressGroups:
        addressgroups_to_underlying_addresses = build_group_member_mapping(
            pan_config, device_group, 'AddressGroups', './static/member')
        # Map normalized (sorted) member tuples to group names.
        # Equivalent groups are not an issue -> They can be deduped with a separate validator
        # However, this won't be able to detect nested groups replacements well. That can be a future enhancement.
        members_to_groupnames = {
            tuple(sorted(members)): group_name
            for group_name, members in addressgroups_to_underlying_addresses.items()
        }

        for ruletype in ('SecurityPreRules', 'SecurityPostRules'):
            for rule_entry in pan_config.get_devicegroup_policy(ruletype, device_group):
                # Skip disabled rules:
                disabled_node = rule_entry.find("./disabled")
                if disabled_node is not None and disabled_node.text == "yes":
                    continue

                members_to_replace = {}
                for direction in ('source', 'destination'):
                    # Determine which entries are equivalent to Address Groups
                    address_like_members = tuple(sorted(
                        elem.text
                        for elem in rule_entry.findall(f'./{direction}/member')))
                    if address_like_members in members_to_groupnames:
                        members_to_replace[direction] = members_to_groupnames[address_like_members]

                if not members_to_replace:
                    continue

                rule_name = rule_entry.get('name')
                direction_strings = [
                    f"{direction} addresses can be replaced with '{groupname}'"
                    for direction, groupname in members_to_replace.items()
                ]
                text = (f"Device Group {device_group}'s {ruletype} '{rule_name}' "
                        + " and ".join(direction_strings))
                badentries.append(
                    BadEntry(data=(ruletype, rule_entry, members_to_replace),
                             text=text,
                             device_group=device_group,
                             entry_type='Address'))
    return badentries
def find_redundant_services(profilepackage):
    """Find Security rules listing individual services that are already
    covered by a ServiceGroup also present in the same rule.
    """
    device_groups = profilepackage.device_groups
    pan_config = profilepackage.pan_config

    badentries = []

    logger.info("*" * 80)
    logger.info("Checking for redundant rule members")

    for device_group in device_groups:
        logger.info(f"Checking Device group {device_group}")
        # Build the list of all ServiceGroups:
        servicegroups_to_underlying_services = build_group_member_mapping(
            pan_config, device_group, 'ServiceGroups', './members/member')

        for ruletype in ('SecurityPreRules', 'SecurityPostRules'):
            for rule_entry in pan_config.get_devicegroup_policy(ruletype, device_group):
                # Skip disabled rules:
                disabled_node = rule_entry.find("./disabled")
                if disabled_node is not None and disabled_node.text == "yes":
                    continue

                # Determine which of the rule's service members are Service Groups:
                service_members = [
                    elem.text for elem in rule_entry.findall('./service/member')
                ]
                servicegroups_in_use = [
                    member for member in service_members
                    if member in servicegroups_to_underlying_services
                ]
                # Find members already contained within one of those groups:
                members_to_remove = [
                    (member, sg)
                    for member in service_members
                    for sg in servicegroups_in_use
                    if member in servicegroups_to_underlying_services[sg]
                ]

                if not members_to_remove:
                    continue

                rule_name = rule_entry.get('name')
                entries_string = ", ".join(
                    f"'{redundant_entry}' is in '{containing_entry}'"
                    for redundant_entry, containing_entry in members_to_remove)
                text = f"Device Group {device_group}'s {ruletype} '{rule_name}''s services list contains redundant members: {entries_string}"
                badentries.append(
                    BadEntry(data=(ruletype, rule_entry, members_to_remove),
                             text=text,
                             device_group=device_group,
                             entry_type='Address'))
    return badentries
def find_unused_service_like_object(profilepackage, object_type,
                                    object_friendly_type):
    """Report Service-like objects not referenced by any child device group's
    Service Groups or policies.

    Returns [] immediately when rule_limit_enabled is set, since only a
    subset of rules was loaded and usage cannot be determined.
    """
    device_groups = profilepackage.device_groups
    devicegroup_objects = profilepackage.devicegroup_objects
    pan_config = profilepackage.pan_config

    if profilepackage.rule_limit_enabled:
        return []

    badentries = []

    logger.info("*" * 80)
    logger.info(f"Checking for unused {object_friendly_type} objects")

    total = len(device_groups)
    for i, device_group in enumerate(device_groups):
        logger.info(
            f"({i + 1}/{total}) Checking {device_group}'s {object_friendly_type} objects"
        )
        services = {}
        for entry in devicegroup_objects[device_group][object_type]:
            services[entry.get('name')] = entry

        # A Services object can be used by any child device group's Services Group or Policy. Need to check all of them.
        services_in_use = set()
        for child_dg in devicegroup_objects[device_group]['all_child_device_groups']:
            # First check all child Services Groups
            for servicegroup in devicegroup_objects[child_dg]['ServiceGroups']:
                for member_element in servicegroup.findall('./members/member'):
                    services_in_use.add(member_element.text)
            # Then check all of the policies
            for policytype in pan_config.SUPPORTED_POLICY_TYPES:
                # NAT rules hold the service directly under ./service, while
                # other rule types nest members one level deeper:
                if policytype in ("NATPreRules", "NATPostRules"):
                    service_xpath = './service'
                else:
                    service_xpath = './service/'
                for policy_entry in devicegroup_objects[child_dg][policytype]:
                    for service_element in policy_entry.findall(service_xpath):
                        services_in_use.add(service_element.text)

        for unused_service in sorted(set(services) - services_in_use):
            text = f"Device Group {device_group}'s {object_friendly_type} {unused_service} is not in use for any Policies or Service Groups"
            badentries.append(
                BadEntry(data=[services[unused_service]],
                         text=text,
                         device_group=device_group,
                         entry_type=object_type))

    return badentries
def find_redundant_members(profilepackage):
    """Find Security rules whose full service member list exactly matches an
    existing ServiceGroup, so the list can be replaced by that group.
    """
    device_groups = profilepackage.device_groups
    pan_config = profilepackage.pan_config

    badentries = []

    logger.info("*" * 80)
    logger.info("Checking for redundant rule members")

    for device_group in device_groups:
        logger.info(f"Checking Device group {device_group}")
        # Build the list of all ServiceGroups:
        servicegroups_to_underlying_services = build_group_member_mapping(
            pan_config, device_group, 'ServiceGroups', './members/member')
        # Map normalized (sorted) member tuples to group names:
        # Equivalent groups are not an issue -> They can be deduped with a separate validator
        # However, this won't be able to detect nested groups replacements well. That can be a future enhancement.
        members_to_groupnames = {
            tuple(sorted(members)): group_name
            for group_name, members in servicegroups_to_underlying_services.items()
        }

        for ruletype in ('SecurityPreRules', 'SecurityPostRules'):
            for rule_entry in pan_config.get_devicegroup_policy(ruletype, device_group):
                # Skip disabled rules:
                disabled_node = rule_entry.find("./disabled")
                if disabled_node is not None and disabled_node.text == "yes":
                    continue

                # Obtain the list of members, then normalize them so we can check for inclusion:
                service_members = tuple(sorted(
                    elem.text
                    for elem in rule_entry.findall('./service/member')))
                # Check if the normalized members are already present as a ServiceGroup
                if service_members not in members_to_groupnames:
                    continue

                groupname = members_to_groupnames[service_members]
                rule_name = rule_entry.get('name')
                text = f"Device Group {device_group}'s {ruletype} '{rule_name}' Services can be replaced with ServiceGroup: {groupname}"
                badentries.append(
                    BadEntry(data=(ruletype, rule_entry, groupname),
                             text=text,
                             device_group=device_group,
                             entry_type='Address'))
    return badentries
# Ejemplo n.º 13
# 0
def find_unused_security_profile_groups(profilepackage, object_type,
                                        object_friendly_type):
    """Report Security Profile Group objects not referenced by any child
    device group's Security policies.

    Returns [] immediately when rule_limit_enabled is set, since only a
    subset of rules was loaded and usage cannot be determined.
    """
    device_groups = profilepackage.device_groups
    pan_config = profilepackage.pan_config
    devicegroup_objects = profilepackage.devicegroup_objects

    if profilepackage.rule_limit_enabled:
        return []

    badentries = []

    logger.info("*" * 80)
    logger.info(f"Checking for unused {object_friendly_type} objects")

    total = len(device_groups)
    for i, device_group in enumerate(device_groups):
        logger.info(
            f"({i + 1}/{total}) Checking {device_group}'s {object_friendly_type} objects"
        )
        groups = {
            entry.get('name'): entry
            for entry in pan_config.get_devicegroup_object(object_type, device_group)
        }
        if not groups:
            continue

        # A Security Profile Group object can be used by any child device group's Security Policy. Need to check all of them.
        groups_in_use = set()
        for child_dg in devicegroup_objects[device_group]['all_child_device_groups']:
            for policytype in ("SecurityPreRules", "SecurityPostRules"):
                for policy_entry in pan_config.get_devicegroup_policy(policytype, child_dg):
                    for member_element in policy_entry.findall('profile-setting/group/member'):
                        groups_in_use.add(member_element.text)

        for unused_group in sorted(set(groups) - groups_in_use):
            text = f"Device Group {device_group}'s {object_friendly_type} {unused_group} is not used by any Security Policies"
            badentries.append(
                BadEntry(data=[groups[unused_group]],
                         text=text,
                         device_group=device_group,
                         entry_type=object_type))
    return badentries
def find_unconventional_services(profilepackage):
    """Check every Service object's name against the configured
    'service name format' template and flag names that don't conform.

    Returns [] immediately when no template is configured.
    """
    device_groups = profilepackage.device_groups
    pan_config = profilepackage.pan_config

    service_name_format = profilepackage.settings.get('service name format')
    if not service_name_format:
        return []

    badentries = []

    logger.info("*"*80)
    logger.info("Checking for misleading Service objects")

    PROTOCOL_TYPES = ('tcp', 'udp')
    for i, device_group in enumerate(device_groups):
        logger.info(f"({i+1}/{len(device_groups)}) Checking {device_group}'s Service objects")
        for service_entry in pan_config.get_devicegroup_object('Services', device_group):
            # For simplicity, convert the XML object to a dict:
            service_dict = xml_object_to_dict(service_entry)
            service_name = service_dict['entry']['@name']
            # Pick whichever protocol key the service defines:
            entry_protocol = next(
                (proto for proto in PROTOCOL_TYPES
                 if proto in service_dict['entry']['protocol']),
                None)
            if entry_protocol is None:
                # This should not be possible!
                continue
            protocol_attrs = service_dict['entry']['protocol'][entry_protocol]
            override = protocol_attrs.get('override')
            # Values retrieved are <transport>, <source-port>, <port>, <override>
            service_fields = {
                'transport': entry_protocol,
                'source_port': protocol_attrs.get('source-port', ''),
                'port': protocol_attrs.get('port', ''),
                'override': tuple(override.keys())[0] if override else '',
            }

            calculated_name = service_name_format.format(**service_fields)

            if service_name != calculated_name:
                text = f"Device Group {device_group}'s Service {service_name} should instead be named {calculated_name}"
                badentries.append(BadEntry(data=[service_entry, calculated_name], text=text, device_group=device_group, entry_type='Services'))
    return badentries
# Ejemplo n.º 15
# 0
def find_disabled_policies(profilepackage):
    """Collect every disabled policy (excluding names in the
    'Ignored Disabled Policies' setting) as BadEntry records for deletion.
    """
    devicegroup_objects = profilepackage.devicegroup_objects
    pan_config = profilepackage.pan_config
    ignored_disabled_rules = set(profilepackage.settings.get('Ignored Disabled Policies', "").split(','))

    policies_to_delete = []
    for device_group in devicegroup_objects:
        for policy_type in pan_config.SUPPORTED_POLICY_TYPES:
            for policy_entry in devicegroup_objects[device_group][policy_type]:
                disabled_node = policy_entry.find('disabled')
                if disabled_node is None or disabled_node.text != 'yes':
                    continue
                policy_name = policy_entry.get('name')
                if policy_name in ignored_disabled_rules:
                    continue
                text = f"Device Group {device_group}'s {policy_type} \"{policy_name}\" is disabled"
                policies_to_delete.append(
                    BadEntry(data=[policy_entry], text=text, device_group=device_group, entry_type=policy_type))

    return policies_to_delete
# Ejemplo n.º 16
# 0
def find_IPandFQDN(profilepackage):
    """Report single-IP Address objects that could be replaced by an FQDN object.

    For each device group, index all single-host IPv4 ip-netmask Addresses
    (no mask or /32) by their bare IP, and resolve every fqdn Address via DNS.
    When exactly one firewall FQDN resolves to an IP that also exists as a
    plain IP object, suggest replacing the IP object with that FQDN.

    Returns a list of BadEntry objects describing the suggested replacements.
    """
    device_groups = profilepackage.device_groups
    pan_config = profilepackage.pan_config

    badentries = []

    logger.info("*" * 80)

    total = len(device_groups)
    for index, device_group in enumerate(device_groups, start=1):
        fqdn_records = []
        ip_to_addresses = collections.defaultdict(list)
        fqdn_resolution_counts = collections.Counter()
        logger.info(f"({index}/{total}) Checking {device_group}'s Addresses")
        for entry in pan_config.get_devicegroup_object('Addresses', device_group):
            entry_name = entry.get('name')
            inner = xml_object_to_dict(entry)['entry']
            # Single IPs go in the IP index. Only IPv4 is handled for now:
            if 'ip-netmask' in inner and '.' in inner['ip-netmask']:
                ipnetmask_value = inner['ip-netmask']
                if '/' not in ipnetmask_value or '/32' in ipnetmask_value:
                    bare_ip = ipnetmask_value.split('/', 1)[0]
                    ip_to_addresses[bare_ip].append((entry_name, ipnetmask_value, entry))
            # FQDNs get resolved, remembering every IP each one maps to:
            elif 'fqdn' in inner:
                fqdn = inner['fqdn']
                _, _, ipaddrlist = cached_dns_ex_lookup(fqdn)
                for resolved_ip in ipaddrlist:
                    fqdn_records.append((entry_name, fqdn, resolved_ip))
                    fqdn_resolution_counts[resolved_ip] += 1

        # Cross-reference the FQDN resolutions against the plain-IP objects:
        for _fqdn_name, fqdn, resolved_ip in fqdn_records:
            # Skip IPs that multiple firewall FQDNs resolve to, because it is
            # ambiguous which fqdn should be used as the replacement.
            if resolved_ip not in ip_to_addresses or fqdn_resolution_counts[resolved_ip] != 1:
                continue
            for address_name, ipnetmask_value, address_entry in ip_to_addresses[resolved_ip]:
                text = f"Device Group {device_group}'s address {address_name} with IP {ipnetmask_value} can be replaced with an fqdn of {fqdn}"
                badentries.append(BadEntry(data=(address_entry, fqdn), text=text, device_group=device_group, entry_type='Addresses'))
    return badentries
def replace_policy_contents(policies_needing_replacement, address_to_replacement):
    """Build updated policy dicts with Address members swapped for replacements.

    For each (device_group, policy_type, xml_entry) tuple, convert the policy
    to a dict, replace members in the source/destination lists and — for NAT
    rules — in the translated-address locations, then emit a BadEntry pairing
    the original XML entry with the rewritten dict.
    """
    badentries = []
    for policy_dg, policy_type, policy_entry in policies_needing_replacement:
        policy_dict = xml_object_to_dict(policy_entry)['entry']
        replacements_made = {}

        # Source and destination member lists exist on every policy type:
        for direction in ('source', 'destination'):
            policy_dict[direction]['member'], replacements_made = replace_member_contents(
                policy_dict[direction]['member'], address_to_replacement, replacements_made)

        # NAT rules keep addresses in several additional translation fields:
        if policy_type in ("NATPreRules", "NATPostRules"):
            for translation in ('source-translation', 'destination-translation'):
                if translation not in policy_dict:
                    continue
                trans_dict = policy_dict[translation]
                if trans_dict.get('translated-address'):
                    trans_dict['translated-address'], replacements_made = replace_member_contents(
                        trans_dict['translated-address'], address_to_replacement, replacements_made)
                if trans_dict.get('dynamic-ip-and-port', {}).get('translated-address', {}).get('member'):
                    trans_dict['dynamic-ip-and-port']['translated-address']['member'], replacements_made = replace_member_contents(
                        trans_dict['dynamic-ip-and-port']['translated-address']['member'], address_to_replacement, replacements_made)
                if trans_dict.get('static-ip', {}).get('translated-address', {}).get('member'):
                    trans_dict['static-ip']['translated-address']['member'], replacements_made = replace_member_contents(
                        trans_dict['static-ip']['translated-address']['member'], address_to_replacement, replacements_made)

        text = f"Replace the following Address members in {policy_dg}'s {policy_type} {policy_entry.get('name')}: {sorted([k + ' with ' + v for k, v in replacements_made.items()])}"
        badentries.append(BadEntry(data=[policy_entry, policy_dict], text=text, device_group=policy_dg, entry_type=policy_type))
    return badentries
def find_badhostname(profilepackage):
    """Find Address objects whose FQDN does not resolve in DNS.

    FQDNs beginning with any of the comma-separated, case-insensitive
    'Ignored DNS Prefixes' settings are skipped.

    Returns a list of BadEntry objects, one per Address whose FQDN
    failed to resolve.
    """
    device_groups = profilepackage.device_groups
    devicegroup_objects = profilepackage.devicegroup_objects
    # Bugfix: filter out empty strings. ''.split(',') == [''], and every
    # hostname startswith('') — so with the setting unset, every FQDN was
    # silently skipped and nothing was ever checked.
    ignored_dns_prefixes = tuple(
        prefix.lower()
        for prefix in profilepackage.settings.get('Ignored DNS Prefixes', '').split(',')
        if prefix
    )

    badentries = []

    logger.info("*" * 80)
    logger.info("Checking for non-resolving hostnames")

    for i, device_group in enumerate(device_groups):
        logger.info(
            f"({i + 1}/{len(device_groups)}) Checking {device_group}'s Addresses"
        )
        for entry in devicegroup_objects[device_group]['Addresses']:
            entry_name = entry.get('name')
            for fqdn_node in entry.findall('fqdn'):
                fqdn_text = fqdn_node.text.lower()
                # str.startswith accepts a tuple of prefixes; an empty tuple
                # matches nothing.
                if fqdn_text.startswith(ignored_dns_prefixes):
                    continue
                ip = cached_dns_lookup(fqdn_text)
                if ip is None:
                    text = f"Device Group {device_group}'s address '{entry_name}' uses the following FQDN which doesn't resolve: '{fqdn_text}'"
                    badentries.append(
                        BadEntry(data=entry,
                                 text=text,
                                 device_group=device_group,
                                 entry_type='Addresses'))
    return badentries
def find_unconventional_addresses(profilepackage):
    """Report Address objects whose names don't match the configured formats.

    The expected name for each address is computed from its value using the
    per-type name-format settings ('fqdn name format', 'range name format',
    'wildcard name format', 'host name format', 'net name format'). Returns
    [] immediately if any required format setting is missing; otherwise
    returns a list of BadEntry objects pairing each nonconforming entry with
    its calculated name.
    """
    device_groups = profilepackage.device_groups
    pan_config = profilepackage.pan_config

    settings = profilepackage.settings
    fqdn_name_format = settings.get('fqdn name format')
    range_name_format = settings.get('range name format')
    wildcard_name_format = settings.get('wildcard name format')
    host_name_format = settings.get('host name format')
    net_name_format = settings.get('net name format')
    colon_replacement = settings.get('ipv6 colon replacement char')
    if not all((fqdn_name_format, host_name_format, net_name_format, range_name_format, wildcard_name_format)):
        return []

    badentries = []

    logger.info("*"*80)
    logger.info("Checking for misleading Address objects")

    recognized_types = ('fqdn', 'ip-netmask', 'ip-range', 'ip-wildcard')
    for i, device_group in enumerate(device_groups):
        logger.info(f"({i+1}/{len(device_groups)}) Checking {device_group}'s Address objects")
        for address_entry in pan_config.get_devicegroup_object('Addresses', device_group):
            # Work with a dict view of the XML entry for easier key access:
            entry_values = xml_object_to_dict(address_entry)['entry']
            address_name = entry_values['@name']

            for candidate in recognized_types:
                if candidate in entry_values:
                    address_type = candidate
                    break
            else:
                # This should not be possible!
                continue

            if address_type == 'fqdn':
                calculated_name = fqdn_name_format.format(fqdn=entry_values['fqdn'])
            elif address_type == 'ip-range':
                calculated_name = range_name_format.format(range=entry_values['ip-range'])
            elif address_type == 'ip-wildcard':
                calculated_name = wildcard_name_format.format(mask=entry_values['ip-wildcard'])
            else:  # 'ip-netmask'
                netmask_value = entry_values['ip-netmask']
                host_part, slash, network_part = netmask_value.partition('/')
                if colon_replacement and ':' in host_part:
                    host_part = host_part.replace(':', colon_replacement)

                # Host-style names apply to mask-less entries, IPv4 /32's,
                # and IPv6 /128's:
                is_host = (not slash
                           or ('.' in netmask_value and '/32' in netmask_value)
                           or (':' in netmask_value and '/128' in netmask_value))
                if is_host:
                    calculated_name = host_name_format.format(host=host_part, network=network_part)
                else:
                    calculated_name = net_name_format.format(host=host_part, network=network_part)

            # PA supports a max char length of 63:
            calculated_name = calculated_name[:63]
            if address_name != calculated_name:
                text = f"Device Group {device_group}'s Address {address_name} should instead be named {calculated_name}"
                badentries.append(BadEntry(data=[address_entry, calculated_name], text=text, device_group=device_group, entry_type='Addresses'))
    return badentries
def find_missing_zones(profilepackage):
    """Find Security rules whose zone lists are missing required zones.

    For every non-disabled SecurityPreRule/SecurityPostRule, each source and
    destination member's zone is calculated per active child firewall via
    get_zone_for_source_member. Any calculated zone absent from the rule's
    from/to zone list is reported. Rules using the 'any' zone are skipped.
    Requires API access: returns [] immediately when no_api is set.

    Returns a list of BadEntry objects describing rules with missing zones.
    """
    device_groups = profilepackage.device_groups
    devicegroup_objects = profilepackage.devicegroup_objects
    devicegroup_exclusive_objects = profilepackage.devicegroup_exclusive_objects
    device_group_hierarchy_parent = profilepackage.device_group_hierarchy_parent
    api_key = profilepackage.api_key
    no_api = profilepackage.no_api

    # Zone lookups require querying the firewalls, so bail out in no-API mode.
    if no_api:
        return []

    badentries = []
    logger.info ("*"*80)
    logger.info ("Checking for Missing Zones")
    for i, device_group in enumerate(device_groups):
        firewalls = devicegroup_objects[device_group]['all_active_child_firewalls']

        # Index the device group's own objects by name:
        addresses = {entry.get('name'):entry for entry in devicegroup_objects[device_group]['Addresses']}
        address_groups = {entry.get('name'):entry for entry in devicegroup_objects[device_group]['AddressGroups']}

        # Address and Address Group objects can be inherited from parent device groups, so we need data from them too
        parent_dgs = []
        current_dg = device_group_hierarchy_parent.get(device_group)
        while current_dg:
            parent_dgs.append(current_dg)
            current_dg = device_group_hierarchy_parent.get(current_dg)

        for parent_dg in parent_dgs:
            for address_entry in devicegroup_objects[parent_dg]['Addresses']:
                addresses[address_entry.get('name')] = address_entry
            for address_group_entry in devicegroup_objects[parent_dg]['AddressGroups']:
                # NOTE(review): parent address groups are stored as lists of
                # member names, while local ones are XML entries — presumably
                # get_zone_for_source_member accepts both forms; confirm.
                address_group_members = [elem.text for elem in address_group_entry.findall('./static/member')]
                address_groups[address_group_entry.get('name')] = address_group_members

        for ruletype in ('SecurityPreRules', 'SecurityPostRules'):
            rules = devicegroup_exclusive_objects[device_group][ruletype]
            logger.info (f"({i+1}/{len(device_groups)}) Checking {device_group}'s {ruletype}")

            total_entries = len(rules)
            for j, entry in enumerate(rules):
                logger.info (f'({j+1}/{total_entries}) {entry.get("name")}')
                # Disabled rules can be ignored
                if entry.find("./disabled") is not None and entry.find("./disabled").text == "yes":
                    continue

                rule_name = entry.get('name')
                # Zones and members as declared on the rule itself:
                src_zones = sorted([elem.text for elem in entry.findall('./from/member')])
                src_members = sorted([elem.text for elem in entry.findall('./source/member')])
                dest_zones = sorted([elem.text for elem in entry.findall('./to/member')])
                dest_members = sorted([elem.text for elem in entry.findall('./destination/member')])

                # Analyze each rule for missing zones
                for members, zones, zonetype in [(src_members, src_zones, 'Source'), (dest_members, dest_zones, 'Dest')]:
                    # If the rules allow 'any' zone, it'll work (although it's quite ugly)
                    if 'any' in zones:
                        continue
                    calculated_zones_to_members = collections.defaultdict(list)
                    for member in members:
                        for firewall in firewalls:
                            member_zones, _ = get_zone_for_source_member(firewall, api_key, member, address_groups, addresses, None)
                            # If our calculated zones are missing a zone, that's fine - because that will
                            # be a false negative, not a false positive
                            for member_zone in member_zones:
                                calculated_zones_to_members[member_zone].append(member)
                    # Determine which zones were calculated to be needed, but aren't present:
                    missing_zones = sorted(set(calculated_zones_to_members) - set(zones))

                    if missing_zones:
                        missing_template = "Members {members} require {zonetype} zone '{zone}'."
                        missing_text = " ".join([missing_template.format(zone=zone, members=sorted(set(calculated_zones_to_members[zone])), zonetype=zonetype) for zone in missing_zones])
                        text = f"Device Group '{device_group}'s {ruletype} '{rule_name}' uses {zonetype} zones {zones}. " + missing_text
                        logger.info (text)
                        badentries.append(BadEntry(data=entry, text=text, device_group=device_group, entry_type=ruletype))
    return badentries
def find_extra_rules(profilepackage):
    """Find Security rules whose source and dest resolve to one shared zone.

    For every non-disabled SecurityPreRule/SecurityPostRule, resolve each
    source/destination member to an IP and look up its firewall zone. If
    both sides were calculated to need only the same single zone, the rule
    is reported as potentially unneeded.

    To avoid false positives from Address objects spanning multiple zones,
    a rule is only analyzed when every member is resolvable to a single IP;
    rules with 'any' members, unknown members, or multi-address values are
    skipped. Requires API access: returns [] immediately when no_api is set.

    Returns a list of BadEntry objects.
    """
    device_groups = profilepackage.device_groups
    devicegroup_objects = profilepackage.devicegroup_objects
    devicegroup_exclusive_objects = profilepackage.devicegroup_exclusive_objects
    api_key = profilepackage.api_key
    device_group_hierarchy_parent = profilepackage.device_group_hierarchy_parent
    no_api = profilepackage.no_api

    if no_api:
        return []

    badentries = []
    logger.info("*" * 80)
    logger.info("Checking for Extra rules")
    for i, device_group in enumerate(device_groups):
        firewalls = devicegroup_objects[device_group]['all_active_child_firewalls']

        addresses = {entry.get('name'): entry for entry in devicegroup_objects[device_group]['Addresses']}
        address_groups = {entry.get('name'): entry for entry in devicegroup_objects[device_group]['AddressGroups']}

        # Address and Address Group objects can be inherited from parent device
        # groups, so merge those in too (same-named parent entries overwrite
        # local ones here).
        parent_dgs = []
        current_dg = device_group_hierarchy_parent.get(device_group)
        while current_dg:
            parent_dgs.append(current_dg)
            current_dg = device_group_hierarchy_parent.get(current_dg)

        for parent_dg in parent_dgs:
            for address_entry in devicegroup_objects[parent_dg]['Addresses']:
                addresses[address_entry.get('name')] = address_entry
            for address_group_entry in devicegroup_objects[parent_dg]['AddressGroups']:
                address_group_members = [elem.text for elem in address_group_entry.findall('./static/member')]
                address_groups[address_group_entry.get('name')] = address_group_members

        for ruletype in ('SecurityPreRules', 'SecurityPostRules'):
            rules = devicegroup_exclusive_objects[device_group][ruletype]
            logger.info(f"({i+1}/{len(device_groups)}) Checking {device_group}'s {ruletype}")

            total_entries = len(rules)
            for j, entry in enumerate(rules):
                logger.info(f'({j+1}/{total_entries}) {entry.get("name")}')
                # Disabled rules can be ignored
                if entry.find("./disabled") is not None and entry.find("./disabled").text == "yes":
                    continue

                rule_name = entry.get('name')
                src_members = sorted([elem.text for elem in entry.findall('./source/member')])
                dest_members = sorted([elem.text for elem in entry.findall('./destination/member')])

                # Note: Members can be an Address or Address Group. Looking up
                # the zones for every single IP does not scale - there could be
                # many /16's. However, grabbing only a single IP address from
                # each member can result in false positives if a single Address
                # object contains a large enough subnet that it spans multiple
                # zones. To avoid false positives, we'll only report an issue
                # if all of a policy's members are resolvable to a single IP.
                missing_ips = False
                ips = {'Source': [], 'Dest': []}
                for members, zonetype in [(src_members, 'Source'), (dest_members, 'Dest')]:
                    for member_name in members:
                        if member_name == 'any':
                            missing_ips = True
                            break

                        if member_name in address_groups:
                            all_contained_address_names = get_underlying_address_objects(member_name, address_groups, addresses)
                        elif member_name in addresses:
                            all_contained_address_names = [member_name]
                        else:
                            # Bugfix: an unknown member previously left
                            # all_contained_address_names unbound (NameError) or
                            # stale from a prior iteration. Treat it as
                            # unresolvable and skip the rule instead.
                            missing_ips = True
                            break

                        for address_name in all_contained_address_names:
                            # Only look up IPs if there is a single entry in the Address Object.
                            address_entry = addresses[address_name]
                            address_dict = xml_object_to_dict(address_entry)['entry']
                            if "fqdn" in address_dict or ("ip-netmask" in address_dict and ("/" not in address_dict['ip-netmask'] or "/32" in address_dict['ip-netmask'])):
                                ip = get_single_ip_from_address(address_entry)
                                if ip:
                                    ips[zonetype] += [ip]
                            else:
                                missing_ips = True
                                break
                        if missing_ips:
                            break
                    if missing_ips:
                        break
                if missing_ips:
                    continue

                # Look up the firewall zone for every resolved IP on each side:
                calculated_zones = {'Source': set(), 'Dest': set()}
                for zonetype, zone_set in calculated_zones.items():
                    for firewall in firewalls:
                        for ip in ips[zonetype]:
                            try:
                                zone_set.add(get_firewall_zone(firewall, api_key, ip))
                            except Exception:
                                # Zone lookup is best-effort; a failed lookup just
                                # contributes no zone. (Was a bare 'except:', which
                                # also swallowed KeyboardInterrupt/SystemExit.)
                                pass

                if len(calculated_zones['Source']) == 1 and calculated_zones['Source'] == calculated_zones['Dest']:
                    text = f"Device Group '{device_group}'s {ruletype} '{rule_name}' was calculated to only need the same source and dest zone of '{list(calculated_zones['Dest'])[0]}'."
                    logger.info(text)
                    badentries.append(BadEntry(data=entry, text=text, device_group=device_group, entry_type=ruletype))
    return badentries
def find_extra_zones(profilepackage):
    """Find Security rules that declare zones none of their members need.

    For every non-disabled SecurityPreRule/SecurityPostRule, the zone of each
    source/destination member is calculated per active child firewall via
    get_zone_for_source_member. When the complete set of expected zones can
    be calculated, any declared from/to zone not in that set is reported as
    extra. Rules with 'any' members are skipped (those are zone-based rules).
    Requires API access: returns [] immediately when no_api is set.

    Returns a list of BadEntry objects describing rules with extra zones.
    """
    device_groups = profilepackage.device_groups
    devicegroup_objects = profilepackage.devicegroup_objects
    devicegroup_exclusive_objects = profilepackage.devicegroup_exclusive_objects
    api_key = profilepackage.api_key
    device_group_hierarchy_parent = profilepackage.device_group_hierarchy_parent
    no_api = profilepackage.no_api

    # Zone lookups require querying the firewalls, so bail out in no-API mode.
    if no_api:
        return []

    badentries = []
    logger.info ("*"*80)
    logger.info ("Checking for Extra Zones")
    for i, device_group in enumerate(device_groups):
        firewalls = devicegroup_objects[device_group]['all_active_child_firewalls']

        # Index the device group's own objects by name:
        addresses = {entry.get('name'):entry for entry in devicegroup_objects[device_group]['Addresses']}
        address_groups = {entry.get('name'):entry for entry in devicegroup_objects[device_group]['AddressGroups']}

        # Address and Address Group objects can be inherited from parent device groups, so we need data from them too
        parent_dgs = []
        current_dg = device_group_hierarchy_parent.get(device_group)
        while current_dg:
            parent_dgs.append(current_dg)
            current_dg = device_group_hierarchy_parent.get(current_dg)

        for parent_dg in parent_dgs:
            for address_entry in devicegroup_objects[parent_dg]['Addresses']:
                addresses[address_entry.get('name')] = address_entry
            for address_group_entry in devicegroup_objects[parent_dg]['AddressGroups']:
                # NOTE(review): parent address groups are stored as lists of
                # member names, while local ones are XML entries — presumably
                # get_zone_for_source_member accepts both forms; confirm.
                address_group_members = [elem.text for elem in address_group_entry.findall('./static/member')]
                address_groups[address_group_entry.get('name')] = address_group_members

        for ruletype in ('SecurityPreRules', 'SecurityPostRules'):
            rules = devicegroup_exclusive_objects[device_group][ruletype]
            logger.info (f"({i+1}/{len(device_groups)}) Checking {device_group}'s {ruletype}")

            total_entries = len(rules)
            for j, entry in enumerate(rules):
                logger.info (f'({j+1}/{total_entries}) {entry.get("name")}')
                # Disabled rules can be ignored
                if entry.find("./disabled") is not None and entry.find("./disabled").text == "yes":
                    continue

                rule_name = entry.get('name')
                # Zones and members as declared on the rule itself:
                src_zones = sorted([elem.text for elem in entry.findall('./from/member')])
                src_members = sorted([elem.text for elem in entry.findall('./source/member')])
                dest_zones = sorted([elem.text for elem in entry.findall('./to/member')])
                dest_members = sorted([elem.text for elem in entry.findall('./destination/member')])

                # Analyze each rule for extra zones
                for members, zones, zonetype in [(src_members, src_zones, 'Source'), (dest_members, dest_zones, 'Dest')]:
                    # If the rule allows 'any' source address, it's a zone-based rule, not an address-based one
                    if 'any' in members:
                        continue
                    calculated_zones_to_members = collections.defaultdict(list)
                    missing_any = False
                    for member in members:
                        for firewall in firewalls:
                            member_zones, is_complete = get_zone_for_source_member(firewall, api_key, member, address_groups, addresses, None)
                            # If we can't calculate the complete set of expected zones, we can't determine if a zone is extra
                            if not is_complete:
                                missing_any = True
                                break
                            for member_zone in member_zones:
                                calculated_zones_to_members[member_zone].append(member)
                        if missing_any:
                            break
                    # We need a complete list of calculated zones to determine if
                    # any zones present are extra
                    if missing_any:
                        continue

                    extra_zones = sorted(set(zones) - set(calculated_zones_to_members))
                    if extra_zones:
                        text = f"Device Group '{device_group}'s {ruletype} '{rule_name}' uses {zonetype} zones {zones}. The {zonetype} zones should be {sorted(calculated_zones_to_members)}. The following {zonetype} zones are not needed: {extra_zones}"
                        logger.info (text)
                        badentries.append( BadEntry(data=entry, text=text, device_group=device_group, entry_type=ruletype) )
    return badentries
# Ejemplo n.º 23
# 0
def find_misleading_addresses(profilepackage):
    """Find Address objects whose names are misleading relative to their values.

    Two checks:
    - fqdn entries: the FQDN's leftmost label should appear in the object name.
    - ip-netmask / ip-range entries: if the name looks like it contains an
      IPv4 address (per regex), the entry's first IP should appear in the name.

    IP wildcards are not supported yet and are skipped.
    Returns a list of BadEntry objects.
    """
    device_groups = profilepackage.device_groups
    devicegroup_objects = profilepackage.devicegroup_objects

    # NOTE: IP Wildcards not supported yet
    ADDRESS_TYPES = ('ip-netmask', 'ip-range', 'fqdn')
    # Character separating the (first) IP from the rest of the value, per type:
    IP_SEPARATORS = {'ip-netmask': '/', 'ip-range': '-'}
    # Compile once; the pattern is reused for every address entry:
    IP_REGEX = re.compile(r"((25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])\.){3}(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])")
    badentries = []

    logger.info("*" * 80)
    logger.info("Checking for misleading Address objects")

    for i, device_group in enumerate(device_groups):
        logger.info(
            f"({i+1}/{len(device_groups)}) Checking {device_group}'s Address objects"
        )
        for address_entry in devicegroup_objects[device_group]['Addresses']:
            # For simplicity, convert the XML object to a dict:
            address_dict = xml_object_to_dict(address_entry)
            entry_name = address_dict['entry']['@name']
            for address_type in ADDRESS_TYPES:
                if address_type in address_dict['entry']:
                    entry_type = address_type
                    break
            else:
                # Wildcards are unsupported, and so skipped
                continue

            entry_value = address_dict['entry'][entry_type]

            # The exact strategy depends on the content type.
            if entry_type == 'fqdn':
                # For FQDNs, the leftmost label should be present in the name:
                if entry_value.lower().split('.', 1)[0] not in entry_name.lower():
                    text = f"Device Group {device_group}'s Address {entry_name} has a misleading value of {entry_value}, because the FQDN's domain is not present in the name"
                    badentries.append(
                        BadEntry(data=address_entry,
                                 text=text,
                                 device_group=device_group,
                                 entry_type='Addresses'))
            else:
                # For IPs, the IP should be present in the name, if the name
                # 'looks' like it contains an IP (based on regex). The value may
                # optionally include a '/' netmask or '-' range suffix.
                ip_address = entry_value.split(IP_SEPARATORS[entry_type], 1)[0]
                if ip_address not in entry_name and IP_REGEX.search(entry_name) is not None:
                    text = f"Device Group {device_group}'s Address {entry_name} appears to contain an IP address in the name, but has a different value of {entry_value}"
                    badentries.append(
                        BadEntry(data=address_entry,
                                 text=text,
                                 device_group=device_group,
                                 entry_type='Addresses'))
    return badentries
# Ejemplo n.º 24
# 0
def consolidate_service_like_objects(profilepackage, object_type,
                                     object_friendly_type, validator_function):
    """Propose consolidating equivalent Service-like objects per device group.

    Runs validator_function to find equivalent objects, picks the most-used
    object of each equivalence set as the canonical replacement, then emits
    BadEntry records describing the member replacements needed in service
    groups and in policies (including NAT rules' single-service field).

    Args:
        profilepackage: package exposing pan_config and devicegroup_objects.
        object_type: the object type being consolidated (e.g. 'Services').
        object_friendly_type: human-readable type name for log messages.
        validator_function: validator returning equivalent-object groupings.

    Returns:
        An empty dict when there is nothing to consolidate; otherwise a list
        of BadEntry objects (note the inconsistent empty-case type).
    """
    pan_config = profilepackage.pan_config
    devicegroup_objects = profilepackage.devicegroup_objects

    logger.info("*" * 80)
    logger.info(
        f"Checking for unused {object_friendly_type} objects to consolidate")

    # Objects will only be consolidated at the same device group level, to avoid potential scope issues
    equivalent_objects = validator_function(profilepackage)
    dg_to_objects_to_consolidate = find_objects_needing_consolidation(
        equivalent_objects)

    if not dg_to_objects_to_consolidate:
        logger.info(f"There were no {object_friendly_type} to consolidate")
        return dg_to_objects_to_consolidate

    badentries = []
    for device_group, objects_to_consolidate in dg_to_objects_to_consolidate.items(
    ):
        # Determine which object is most commonly-used to minimize the amount of changes that will be needed
        service_to_replacement = find_replacement_objects(
            pan_config, devicegroup_objects, device_group,
            objects_to_consolidate)
        # Get the list of objects that will need to be updated:
        servicegroups_needing_replacement, policies_needing_replacement = find_objects_policies_needing_replacement(
            pan_config, devicegroup_objects, device_group,
            service_to_replacement)

        # Now that we know which objects need replacements, we can iterate through
        # and make those replacements!
        # First replace the contents of servicegroups
        # NOTE(review): this loop variable shadows the 'object_type' parameter
        # for the remainder of the function.
        for object_dg, object_type, object_entry in servicegroups_needing_replacement:
            object_policy_dict = xml_object_to_dict(object_entry)['entry']
            new_services = []
            replacements_made = {}
            for member in object_policy_dict['members']['member']:
                if member in new_services:
                    # Member is already present, nothing to do
                    continue
                elif member not in service_to_replacement:
                    # Member is not present and doesn't need to be replaced, so keep it as is:
                    new_services.append(member)
                elif member in service_to_replacement and service_to_replacement[
                        member] not in new_services:
                    # Member needs to be replaced, and replacement is not already present, so add it:
                    new_services.append(service_to_replacement[member])
                    replacements_made[member] = service_to_replacement[member]
                else:
                    # Member needs to be replaced, but replacement is already present, so nothing to do:
                    continue
            # Every service group reported by find_objects_policies_needing_replacement
            # is expected to actually change:
            assert object_policy_dict['members']['member'] != new_services
            object_policy_dict['members']['member'] = new_services
            text = f"Replace the following Service members in {object_dg}'s {object_type} {object_entry.get('name')}: {sorted([k + ' with ' + v for k, v in replacements_made.items()])}"
            badentries.append(
                BadEntry(data=[object_entry, object_policy_dict],
                         text=text,
                         device_group=object_dg,
                         entry_type=object_type))

        # Then replace the contents of policies
        for policy_dg, policy_type, policy_entry in policies_needing_replacement:
            object_policy_dict = xml_object_to_dict(policy_entry)['entry']
            replacements_made = {}
            if policy_type in ("NATPreRules", "NATPostRules"):
                # NAT rules are limited to a single service
                member_to_replace = object_policy_dict['service']
                replacements_made[member_to_replace] = service_to_replacement[
                    member_to_replace]
                object_policy_dict['service'] = service_to_replacement[
                    member_to_replace]
            # If it's a policy with only one member, it'll be parsed as a string, not a list
            elif isinstance(object_policy_dict['service']['member'], str):
                member_to_replace = object_policy_dict['service']['member']
                replacements_made[member_to_replace] = service_to_replacement[
                    member_to_replace]
                object_policy_dict['service'][
                    'member'] = service_to_replacement[member_to_replace]
            else:
                # Iterate through the policy's members to see which need to be replaced, and
                # with what. Then store what changed in replacements_made
                new_services = []
                for member in object_policy_dict['service']['member']:
                    if member in new_services:
                        # Member is already present, nothing to do
                        continue
                    elif member not in service_to_replacement:
                        # Member is not present and doesn't need to be replaced, so keep it as is:
                        new_services.append(member)
                    elif member in service_to_replacement and service_to_replacement[
                            member] not in new_services:
                        # Member needs to be replaced, and replacement is not already present, so add it:
                        replacements_made[member] = service_to_replacement[
                            member]
                        new_services.append(service_to_replacement[member])
                    else:
                        # Member needs to be replaced, but replacement is already present, so nothing to do:
                        continue
                # Every policy reported is expected to actually change:
                assert object_policy_dict['service']['member'] != new_services
                object_policy_dict['service']['member'] = new_services
            text = f"Replace the following Service members in {policy_dg}'s {policy_type} {policy_entry.get('name')}: {sorted([k + ' with ' + v for k, v in replacements_made.items()])}"
            badentries.append(
                BadEntry(data=[policy_entry, object_policy_dict],
                         text=text,
                         device_group=policy_dg,
                         entry_type=policy_type))
    return badentries
def find_shadowing_objects(profilepackage, object_type):
    """Report objects whose name shadows a same-typed object in a parent device group.

    For every device group, collects the names of its `object_type` objects and
    compares them against the objects of the same type defined in every ancestor
    device group. Each name collision is reported, noting whether the normalized
    contents of all colliding objects are equivalent (harmless duplication) or
    not (a genuinely conflicting override).

    Returns a list of BadEntry records; each entry's data is the list of
    (device_group, object) pairs involved in the collision.
    """
    device_groups = profilepackage.device_groups
    devicegroup_objects = profilepackage.devicegroup_objects
    device_group_hierarchy_parent = profilepackage.device_group_hierarchy_parent

    badentries = []

    logger.info("*" * 80)
    logger.info(f"Checking for shadowing {object_type} objects")

    for i, device_group in enumerate(device_groups):
        # Bug fix: the per-group log line previously hard-coded "address
        # objects" even though this function is generic over object_type.
        logger.info(
            f"({i + 1}/{len(device_groups)}) Checking {device_group}'s {object_type} objects"
        )
        names_to_obj = {
            entry.get('name'): entry
            for entry in devicegroup_objects[device_group][object_type]
        }

        # An object can be inherited from any parent device group. Need to check all of them.
        names_to_dg_obj_from_parent_dgs = collections.defaultdict(list)

        # Walk up the hierarchy to collect every ancestor device group.
        parent_dgs = []
        current_dg = device_group_hierarchy_parent.get(device_group)
        while current_dg:
            parent_dgs.append(current_dg)
            current_dg = device_group_hierarchy_parent.get(current_dg)

        for parent_dg in parent_dgs:
            for obj in devicegroup_objects[parent_dg][object_type]:
                names_to_dg_obj_from_parent_dgs[obj.get('name')].append(
                    (parent_dg, obj))

        overlapping_names = sorted(
            set(names_to_obj.keys())
            & set(names_to_dg_obj_from_parent_dgs.keys()))

        for overlapping_name in overlapping_names:
            local_obj = names_to_obj[overlapping_name]
            normalized_obj = normalize_object(local_obj, object_type)

            unique_device_groups = set()
            shadowed_objects = [local_obj]
            # If every normalized form collapses to a single value, the
            # shadowing objects are equivalent in content.
            normalized_objects = set([normalized_obj])
            for dg, obj in names_to_dg_obj_from_parent_dgs[overlapping_name]:
                unique_device_groups.add(dg)
                shadowed_objects.append(obj)
                normalized_objects.add(normalize_object(obj, object_type))

            all_consistent = len(normalized_objects) == 1
            sorted_dgs = sorted(unique_device_groups)

            if all_consistent:
                same_text = "and the contents are equivalent"
            else:
                same_text = "and the contents are NOT equivalent"
            data = names_to_dg_obj_from_parent_dgs[overlapping_name] + [[
                device_group, local_obj
            ]]
            text = f"Device Group {device_group}'s {object_type} '{overlapping_name}' is already present in Device Group {sorted_dgs} {same_text}"
            badentries.append(
                BadEntry(data=data,
                         text=text,
                         device_group=device_group,
                         entry_type=object_type))

    return badentries
def find_equivalent_objects(profilepackage, object_type):
    """
    Generic function for finding all objects in the hierarchy with effectively the same values.

    Objects are normalized with normalize_object() (optionally ignoring
    description and/or tags per the profile's settings); any normalized form
    that appears in the current device group AND maps to two or more objects
    anywhere in the hierarchy is reported as a set of equivalent objects.
    Returns a list of BadEntry records.
    """
    device_groups = profilepackage.device_groups
    pan_config = profilepackage.pan_config
    device_group_hierarchy_parent = profilepackage.device_group_hierarchy_parent
    ignore_description = profilepackage.settings.getboolean(
        "Equivalent objects ignore description", False)
    ignore_tags = profilepackage.settings.getboolean(
        "Equivalent objects ignore tags", False)

    badentries = []

    logger.info("*" * 80)
    logger.info(f"Checking for equivalent {object_type} objects")

    for i, device_group in enumerate(device_groups):
        # Bug fix: the per-group log line previously hard-coded "address
        # objects" even though this function is generic over object_type.
        logger.info(
            f"({i + 1}/{len(device_groups)}) Checking {device_group}'s {object_type} objects"
        )
        # An object can be inherited from any parent device group. Need to check all of them.
        # Basic strategy: Normalize all objects, then report on the subset present in this device group
        parent_dgs = []
        current_dg = device_group_hierarchy_parent.get(device_group)
        while current_dg:
            parent_dgs.append(current_dg)
            current_dg = device_group_hierarchy_parent.get(current_dg)

        all_equivalent_objects = collections.defaultdict(list)
        for dg in parent_dgs:
            for obj in pan_config.get_devicegroup_object(object_type, dg):
                object_data = normalize_object(obj, object_type,
                                               ignore_description, ignore_tags)
                all_equivalent_objects[object_data].append((dg, obj))

        local_equivalencies = set()
        for obj in pan_config.get_devicegroup_object(object_type,
                                                     device_group):
            object_data = normalize_object(obj, object_type,
                                           ignore_description, ignore_tags)
            local_equivalencies.add(object_data)
            all_equivalent_objects[object_data].append((device_group, obj))

        # Every local normalized form was just added as a key of
        # all_equivalent_objects, so intersecting with its keys is a no-op:
        # only the local equivalencies need to be examined.
        equivalencies_to_examine = sorted(local_equivalencies)

        for equivalencies in equivalencies_to_examine:
            entries = all_equivalent_objects[equivalencies]
            # Two or more objects sharing one normalized form are equivalent.
            if len(entries) >= 2:
                equivalency_texts = []
                for dg, obj in entries:
                    equivalency_text = f'Device Group: {dg}, Name: {obj.get("name")}'
                    equivalency_texts.append(equivalency_text)
                text = f"Device Group {device_group} has the following equivalent {object_type}: {equivalency_texts}"
                badentries.append(
                    BadEntry(data=entries,
                             text=text,
                             device_group=device_group,
                             entry_type=object_type))
    return badentries
# Ejemplo n.º 27 / 0 — stray scraped-page artifact (not Python); kept as a comment so the file parses.
def find_shadowing_addresses_and_groups(profilepackage):
    """Report Address / Address Group name shadowing within the device group hierarchy.

    Three kinds of shadowing are detected, per device group:
    1. An Address object whose name exists in a parent device group.
    2. An Address Group object whose name exists in a parent device group.
    3. An Address and an Address Group with the same name in the SAME device group.

    Skipped entirely when rule_limit_enabled is set on the profile package.
    Returns a list of BadEntry records.
    """
    device_groups = profilepackage.device_groups
    devicegroup_objects = profilepackage.devicegroup_objects
    device_group_hierarchy_parent = profilepackage.device_group_hierarchy_parent
    rule_limit_enabled = profilepackage.rule_limit_enabled

    if rule_limit_enabled:
        return []

    badentries = []

    logger.info("*" * 80)
    logger.info("Checking for shadowing Address and Address Group objects")

    for i, device_group in enumerate(device_groups):
        logger.info(
            f"({i + 1}/{len(device_groups)}) Checking {device_group}'s address objects"
        )
        object_entries = {}
        object_entries['Addresses'] = {
            entry.get('name'): (device_group, 'Addresses', entry)
            for entry in devicegroup_objects[device_group]['Addresses']
        }
        object_entries['AddressGroups'] = {
            entry.get('name'): (device_group, 'AddressGroups', entry)
            for entry in devicegroup_objects[device_group]['AddressGroups']
        }

        # An address or group can be inherited from any parent device group's Address group or policy.
        # Need to check all parent device groups.
        parent_dgs = []
        current_dg = device_group_hierarchy_parent.get(device_group)
        while current_dg:
            parent_dgs.append(current_dg)
            current_dg = device_group_hierarchy_parent.get(current_dg)

        addresses_in_use = collections.defaultdict(list)
        addressgroups_in_use = collections.defaultdict(list)
        for parent_dg in parent_dgs:
            # First check all parent Addresses
            for address in devicegroup_objects[parent_dg]['Addresses']:
                addresses_in_use[address.get('name')].append(
                    (parent_dg, 'Addresses', address))
            # Then check all parent Address Groups
            for address_group in devicegroup_objects[parent_dg][
                    'AddressGroups']:
                addressgroups_in_use[address_group.get('name')].append(
                    (parent_dg, 'AddressGroups', address_group))

        # Three separate types of 'shadowing' are possible:
        # Address objects with names present in a parent device group
        # Address Group objects with names present in a parent device group
        # Address objects with the same names as the Address Group objects in the current device group
        # These are each processed separately
        names_in_use = set(addresses_in_use.keys()) | set(
            addressgroups_in_use.keys())
        shadowed_objects_mappings = {}
        shadowed_objects_mappings['Addresses'] = sorted(
            set(object_entries['Addresses'].keys()) & names_in_use)
        shadowed_objects_mappings['AddressGroups'] = sorted(
            set(object_entries['AddressGroups'].keys()) & names_in_use)
        local_shadowing_names = sorted(
            set(object_entries['Addresses'].keys())
            & set(object_entries['AddressGroups'].keys()))

        # Address objects with names present in a parent device group
        obj_types = ['Addresses', 'AddressGroups']
        for obj_type in obj_types:
            shadowed_objects = shadowed_objects_mappings[obj_type]
            for shadowing_address_name in shadowed_objects:
                shadowing_addresses = addresses_in_use[shadowing_address_name]
                shadowing_addressgroups = addressgroups_in_use[
                    shadowing_address_name]

                address_dgs = [entry[0] for entry in shadowing_addresses]
                addressgroup_dgs = [
                    entry[0] for entry in shadowing_addressgroups
                ]
                address_dg_text = f"as an Address in Device Groups: {address_dgs}"
                addressgroup_dg_text = f"as an Address Group in Device Groups: {addressgroup_dgs}"
                # The name must be shadowed by a parent Address, a parent
                # Address Group, or both (it came from names_in_use).
                if shadowing_addresses and shadowing_addressgroups:
                    suffix_text = address_dg_text + " and " + addressgroup_dg_text
                elif shadowing_addresses:
                    suffix_text = address_dg_text
                elif shadowing_addressgroups:
                    suffix_text = addressgroup_dg_text
                else:
                    raise Exception(
                        "Shouldn't be possible to not have any Device Groups!")

                data = [object_entries[obj_type][shadowing_address_name]
                        ] + shadowing_addresses + shadowing_addressgroups
                text = f"Device Group {device_group}'s {obj_type} {shadowing_address_name} is already present {suffix_text}"
                badentries.append(
                    BadEntry(data=data,
                             text=text,
                             device_group=device_group,
                             entry_type=obj_type))

        for local_overlap in local_shadowing_names:
            # Bug fix: message previously read "Device Group X's contains ..."
            # (stray possessive); corrected to "Device Group X contains ...".
            text = f"Device Group {device_group} contains both an Address and Address Group with the same name of '{local_overlap}' Address: {object_entries['Addresses'][local_overlap]}, AddressGroups: {object_entries['AddressGroups'][local_overlap]}"
            data = [
                object_entries['AddressGroups'][local_overlap],
                object_entries['Addresses'][local_overlap]
            ]
            badentries.append(
                BadEntry(data=data,
                         text=text,
                         device_group=device_group,
                         entry_type='Addresses'))

    return badentries
def find_redundant_addresses(profilepackage):
    """Report security rule members that are redundant with an Address Group in the same rule.

    For each enabled SecurityPreRule/SecurityPostRule, if a source or
    destination member is already contained (per the resolved Address Group
    membership mapping) in another Address Group used by the same rule and
    direction, the member is redundant and can be removed.

    Returns a list of BadEntry records whose data is
    (ruletype, rule_entry, members_to_remove).
    """
    device_groups = profilepackage.device_groups
    pan_config = profilepackage.pan_config

    badentries = []

    logger.info("*" * 80)
    logger.info("Checking for redundant rule members")

    for i, device_group in enumerate(device_groups):
        logger.info(f"Checking Device group {device_group}")
        # Build the list of all AddressGroups:
        object_type = 'AddressGroups'
        addressgroup_member_xpath = './static/member'
        addressgroups_to_underlying_addresses = build_group_member_mapping(
            pan_config, device_group, object_type, addressgroup_member_xpath)

        for ruletype in ('SecurityPreRules', 'SecurityPostRules'):
            for rule_entry in pan_config.get_devicegroup_policy(
                    ruletype, device_group):
                # Skip disabled rules:
                if rule_entry.find(
                        "./disabled") is not None and rule_entry.find(
                            "./disabled").text == "yes":
                    continue
                members_to_remove = collections.defaultdict(list)
                for direction in ('source', 'destination'):
                    # Determine which entries are Address Groups
                    address_like_members = [
                        elem.text
                        for elem in rule_entry.findall(f'./{direction}/member')
                    ]
                    addressgroups_in_use = []
                    for address_like_member in address_like_members:
                        if address_like_member in addressgroups_to_underlying_addresses:
                            addressgroups_in_use += [address_like_member]
                    # See which address objects are contained within the rule's other addressgroup objects:
                    for address_like_member in address_like_members:
                        for ag in addressgroups_in_use:
                            if address_like_member in addressgroups_to_underlying_addresses[
                                    ag]:
                                members_to_remove[direction] += [
                                    (address_like_member, ag)
                                ]
                if members_to_remove:
                    rule_name = rule_entry.get('name')
                    text = f"Device Group {device_group}'s {ruletype} '{rule_name}' contains redundant members. "
                    direction_strings = []
                    for direction, entries in members_to_remove.items():
                        entries_string = ", ".join([
                            f"'{entry[0]}' is in '{entry[1]}'"
                            for entry in entries
                        ])
                        direction_strings.append(
                            f"For {direction}: {entries_string}")
                    # Bug fix: join with a space so that source and destination
                    # findings no longer run together with no separator
                    # (single-direction output is unchanged).
                    text += " ".join(direction_strings)
                    # NOTE(review): entry_type here is 'Address' (singular)
                    # while sibling checks use 'Addresses' — confirm consumers
                    # before normalizing.
                    badentries.append(
                        BadEntry(data=(ruletype, rule_entry,
                                       members_to_remove),
                                 text=text,
                                 device_group=device_group,
                                 entry_type='Address'))
    return badentries