Example no. 1
def remove_redundant_rule_members(profilepackage):
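    # Uses the 'RedundantRuleAddresses' validator to find rule address members that are redundant
    # (e.g., already covered by one of the rule's address groups), removes them from each rule, and
    # pushes the updated policies to Panorama.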
    panorama = profilepackage.settings.get("Panorama")
    api_key = profilepackage.api_key
    pan_config = profilepackage.pan_config
    version = pan_config.get_major_version()

    _, _, validator_function = get_policy_validators()['RedundantRuleAddresses']
    logger.info("*" * 80)
    logger.info("Checking for redundant rule addresses")

    rules_to_update = validator_function(profilepackage)

    logger.info(f"Replacing the contents of {len(rules_to_update)} Policies")
    for badentry in rules_to_update:
        object_policy_dg = badentry.device_group
        rule_type, rule_entry, members_to_remove = badentry.data
        rule_dict = xml_object_to_dict(rule_entry)['entry']
        for direction, member_and_containing_pairs in members_to_remove.items():
            for member, _ in member_and_containing_pairs:
                # It's possible a member is contained in two of a rule's address groups
                if member in rule_dict[direction]['member']:
                    rule_dict[direction]['member'].remove(member)
        pan_api.update_devicegroup_policy(panorama, version, api_key,
                                          rule_dict, rule_type,
                                          object_policy_dg)
    pan_api.validate_commit(panorama, api_key)
    logger.info("Replacement complete. Please commit in the firewall.")
    return rules_to_update
def disable_shadowed_rules(profilepackage):
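    # Uses the 'ShadowingRules' validator to find shadowed rules and disables them in Panorama.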
    panorama = profilepackage.settings.get("Panorama")
    api_key = profilepackage.api_key
    pan_config = profilepackage.pan_config
    version = pan_config.get_major_version()

    _, _, validator_function = get_policy_validators()['ShadowingRules']
    logger.info("*" * 80)
    logger.info("Checking for shadowed rules to disable")

    rules_to_update = validator_function(profilepackage)

    logger.info(f"Disabling {len(rules_to_update)} Policies")
    for badentry in rules_to_update:
        shadowed_tuple = badentry.data[0]
        device_group, ruletype, rule_name, rule_entry = shadowed_tuple
        disabled = (rule_entry.find('disabled') is not None
                    and rule_entry.find('disabled').text == 'yes')
        if not disabled:
            policy_dict = xml_object_to_dict(rule_entry)['entry']
            policy_dict['disabled'] = 'yes'
            logger.info(f"Disabling {device_group}'s {ruletype} {rule_name}")
            pan_api.update_devicegroup_policy(panorama, version, api_key,
                                              policy_dict, ruletype,
                                              device_group)
    pan_api.validate_commit(panorama, api_key)
    logger.info("Disabling complete. Please commit in the firewall.")
    return rules_to_update
def replace_addressgroup_contents(addressgroups_needing_replacement, address_to_replacement):
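    # Rewrites the static members of the given address groups, swapping each member for its
    # replacement from address_to_replacement, and returns the proposed changes as BadEntry objects.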
    badentries = []
    for object_dg, object_type, object_entry in addressgroups_needing_replacement:
        object_policy_dict = xml_object_to_dict(object_entry)['entry']
        new_addresses = []
        replacements_made = {}

        # If it's an addressgroup with only one member, it'll be parsed as a string, not a list
        if isinstance(object_policy_dict['static']['member'], str):
            member_to_replace = object_policy_dict['static']['member']
            replacements_made[member_to_replace] = address_to_replacement[member_to_replace]
            new_addresses.append(address_to_replacement[member_to_replace])
        else:
            for member in object_policy_dict['static']['member']:
                if member in new_addresses:
                    # Member is already present, nothing to do
                    continue
                elif member not in address_to_replacement:
                    # Member is not present and doesn't need to be replaced, so keep it as is:
                    new_addresses.append(member)
                elif member in address_to_replacement and address_to_replacement[member] not in new_addresses:
                    # Member needs to be replaced, and replacement is not already present, so add it:
                    new_addresses.append(address_to_replacement[member])
                    replacements_made[member] = address_to_replacement[member]
                else:
                    # Member needs to be replaced, but replacement is already present, so nothing to do:
                    continue
        assert object_policy_dict['static']['member'] != new_addresses

        object_policy_dict['static']['member'] = new_addresses
        text = f"Replace the following Address members in {object_dg}'s {object_type} {object_entry.get('name')}: {sorted([k + ' with ' + v for k, v in replacements_made.items()])}"
        badentries.append(BadEntry(data=[object_entry, object_policy_dict], text=text, device_group=object_dg, entry_type=object_type))
    return badentries
Example no. 4
def find_misleading_services(profilepackage):
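    # Flags Service objects whose names mention a protocol or port that differs from the object's
    # actual protocol or port value.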
    device_groups = profilepackage.device_groups
    devicegroup_objects = profilepackage.devicegroup_objects

    PROTOCOL_TYPES = ('tcp', 'udp')

    badentries = []

    logger.info("*" * 80)
    logger.info("Checking for misleading Service objects")

    for i, device_group in enumerate(device_groups):
        logger.info(f"({i+1}/{len(device_groups)}) Checking {device_group}'s Service objects")
        for service_entry in devicegroup_objects[device_group]['Services']:
            # For simplicity, convert the XML object to a dict:
            service_dict = xml_object_to_dict(service_entry)
            entry_name = service_dict['entry']['@name']
            for protocol_type in PROTOCOL_TYPES:
                if protocol_type in service_dict['entry']['protocol'].keys():
                    entry_protocol = protocol_type
                    break
            else:
                # This should not be possible!
                continue
            entry_port = service_dict['entry']['protocol'][entry_protocol]['port']
            contains_protocol = 'tcp' in entry_name.lower() or 'udp' in entry_name.lower()
            contains_port = re.search(r'\d{3,}', entry_name) is not None
            protocol_correct = entry_protocol in entry_name.lower()
            port_correct = entry_port.split('-', 1)[0] in entry_name

            if contains_protocol or contains_port:
                if contains_protocol and not protocol_correct and contains_port and not port_correct:
                    text = f"Device Group {device_group}'s Service {entry_name} uses protocol {entry_protocol} and port {entry_port}"
                    badentries.append(
                        BadEntry(data=service_entry,
                                 text=text,
                                 device_group=device_group,
                                 entry_type='Services'))
                elif contains_protocol and not protocol_correct:
                    text = f"Device Group {device_group}'s Service {entry_name} uses protocol {entry_protocol}"
                    badentries.append(
                        BadEntry(data=service_entry,
                                 text=text,
                                 device_group=device_group,
                                 entry_type='Services'))
                elif contains_port and not port_correct:
                    text = f"Device Group {device_group}'s Service {entry_name} uses port {entry_port}"
                    badentries.append(
                        BadEntry(data=service_entry,
                                 text=text,
                                 device_group=device_group,
                                 entry_type='Services'))
    return badentries
def fix_unqualified_fqdn(profilepackage):
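    # Uses the 'UnqualifiedFQDN' validator to find Address objects with unqualified FQDN values and
    # updates each object's fqdn in Panorama with the replacement value reported by the validator.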
    panorama = profilepackage.settings.get("Panorama")
    api_key = profilepackage.api_key
    pan_config = profilepackage.pan_config
    version = pan_config.get_major_version()

    _, _, validator = get_policy_validators()['UnqualifiedFQDN']
    problems = validator(profilepackage)

    for problem in problems:
        entry = xml_object_to_dict(problem.data[0])['entry']
        object_type = problem.entry_type
        device_group = problem.device_group
        entry['fqdn'] = problem.data[1]
        pan_api.update_devicegroup_object(panorama, version, api_key, entry,
                                          object_type, device_group)

    return problems
def fix_bad_log_setting(profilepackage):
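    # Uses the 'BadLogSetting' validator to find policies with an incorrect log-setting and updates
    # their log-setting in Panorama with the value reported by the validator.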
    panorama = profilepackage.settings.get("Panorama")
    api_key = profilepackage.api_key
    pan_config = profilepackage.pan_config
    version = pan_config.get_major_version()

    _, _, validator = get_policy_validators()['BadLogSetting']
    problems = validator(profilepackage)

    for problem in problems:
        entry = xml_object_to_dict(problem.data[0])['entry']
        ruletype = problem.entry_type
        device_group = problem.device_group
        entry["log-setting"] = problem.data[1]
        logger.debug(f"Updating {device_group}'s {ruletype} {problem.data[0].get('name')} log-setting to {entry['log-setting']}")
        pan_api.update_devicegroup_policy(panorama, version, api_key, entry, ruletype, device_group)

    return problems
def find_unconventional_services(profilepackage):
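    # Flags Service objects whose names don't match the 'service name format' naming convention
    # configured in the settings.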
    device_groups = profilepackage.device_groups
    pan_config = profilepackage.pan_config

    service_name_format = profilepackage.settings.get('service name format')
    if not service_name_format:
        return []

    badentries = []

    logger.info("*"*80)
    logger.info("Checking for misleading Service objects")

    PROTOCOL_TYPES = ('tcp', 'udp')
    for i, device_group in enumerate(device_groups):
        logger.info(f"({i+1}/{len(device_groups)}) Checking {device_group}'s Service objects")
        for service_entry in pan_config.get_devicegroup_object('Services', device_group):
            # For simplicity, convert the XML object to a dict:
            service_dict = xml_object_to_dict(service_entry)
            service_name = service_dict['entry']['@name']
            for protocol_type in PROTOCOL_TYPES:
                if protocol_type in service_dict['entry']['protocol'].keys():
                    entry_protocol = protocol_type
                    break
            else:
                # This should not be possible!
                continue
            # Values retrieved are <transport>, <source-port>, <port>, <override>
            service_fields = {}
            service_fields['transport'] = protocol_type
            service_fields['source_port'] = service_dict['entry']['protocol'][entry_protocol].get('source-port', '')
            service_fields['port'] = service_dict['entry']['protocol'][entry_protocol].get('port', '')
            override = service_dict['entry']['protocol'][entry_protocol].get('override')
            if override:
                service_fields['override'] = tuple(override.keys())[0]
            else:
                service_fields['override'] = ''

            calculated_name = service_name_format.format(**service_fields)

            if service_name != calculated_name:
                text = f"Device Group {device_group}'s Service {service_name} should instead be named {calculated_name}"
                badentries.append(BadEntry(data=[service_entry, calculated_name], text=text, device_group=device_group, entry_type='Services'))
    return badentries
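# A minimal sketch of how the 'service name format' setting above is applied: it is a plain
# str.format() template over the fields extracted from each Service object. The template below is
# hypothetical; the real value comes from profilepackage.settings and is site-specific.
example_service_name_format = "{transport}-{port}"
example_service_fields = {'transport': 'tcp', 'source_port': '', 'port': '8080', 'override': ''}
example_calculated_name = example_service_name_format.format(**example_service_fields)  # 'tcp-8080'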
Example no. 8
def fix_ip_with_resolving_fqdn(profilepackage):
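    # Uses the 'IPWithResolvingFQDN' validator to find Address objects whose IP matches an FQDN's
    # resolution and converts each from an ip-netmask entry to an fqdn entry in Panorama.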
    panorama = profilepackage.settings.get("Panorama")
    api_key = profilepackage.api_key
    pan_config = profilepackage.pan_config
    version = pan_config.get_major_version()

    _, _, validator = get_policy_validators()['IPWithResolvingFQDN']
    problems = validator(profilepackage)

    for problem in problems:
        object_type = problem.entry_type
        device_group = problem.device_group
        address_entry, fqdn = problem.data
        updated_object = xml_object_to_dict(address_entry)['entry']
        logger.debug(f"Updating {device_group} Address {address_entry.get('name')} from {updated_object['ip-netmask']} to {fqdn}")
        del updated_object['ip-netmask']
        updated_object['fqdn'] = fqdn
        pan_api.update_devicegroup_object(panorama, version, api_key, updated_object, object_type, device_group)

    return problems
def replace_policy_contents(policies_needing_replacement, address_to_replacement):
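    # Rewrites the source/destination members of the given policies (plus NAT translated addresses,
    # where applicable), swapping each address for its replacement from address_to_replacement, and
    # returns the proposed changes as BadEntry objects.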
    badentries = []
    for policy_dg, policy_type, policy_entry in policies_needing_replacement:
        object_policy_dict = xml_object_to_dict(policy_entry)['entry']
        replacements_made = {}
        for direction in ('source', 'destination'):
            object_policy_dict[direction]['member'], replacements_made = replace_member_contents(object_policy_dict[direction]['member'], address_to_replacement, replacements_made)

        # Extra places to check for NAT objects:
        if policy_type in ("NATPreRules", "NATPostRules"):
            for translation in ('source-translation', 'destination-translation'):
                if translation not in object_policy_dict:
                    continue
                if object_policy_dict[translation].get('translated-address'):
                    object_policy_dict[translation]['translated-address'], replacements_made = replace_member_contents(object_policy_dict[translation]['translated-address'], address_to_replacement, replacements_made)
                if object_policy_dict[translation].get('dynamic-ip-and-port', {}).get('translated-address', {}).get('member'):
                    object_policy_dict[translation]['dynamic-ip-and-port']['translated-address']['member'], replacements_made = replace_member_contents(object_policy_dict[translation]['dynamic-ip-and-port']['translated-address']['member'], address_to_replacement, replacements_made)
                if object_policy_dict[translation].get('static-ip', {}).get('translated-address', {}).get('member'):
                    object_policy_dict[translation]['static-ip']['translated-address']['member'], replacements_made = replace_member_contents(object_policy_dict[translation]['static-ip']['translated-address']['member'], address_to_replacement, replacements_made)
        text = f"Replace the following Address members in {policy_dg}'s {policy_type} {policy_entry.get('name')}: {sorted([k + ' with ' + v for k, v in replacements_made.items()])}"
        badentries.append(BadEntry(data=[policy_entry, object_policy_dict], text=text, device_group=policy_dg, entry_type=policy_type))
    return badentries
Example no. 10
def find_IPandFQDN(profilepackage):
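    # Flags single-IP Address objects whose IP is resolved to by exactly one FQDN Address object in
    # the same device group, so the IP-based object can be replaced with that FQDN.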
    device_groups = profilepackage.device_groups
    pan_config = profilepackage.pan_config

    badentries = []

    logger.info("*" * 80)

    for i, device_group in enumerate(device_groups):
        fqdns = []
        ips = collections.defaultdict(list)
        ips_fqdns_resolve_to = collections.Counter()
        logger.info(f"({i + 1}/{len(device_groups)}) Checking {device_group}'s Addresses")
        for entry in pan_config.get_devicegroup_object('Addresses', device_group):
            entry_name = entry.get('name')
            entry_dict = xml_object_to_dict(entry)
            # If it's a single IP, store it in the dictionary of IPs:
            # Only handle IPv4 for now:
            if 'ip-netmask' in entry_dict['entry'] and '.' in entry_dict['entry']['ip-netmask']:
                if '/' not in entry_dict['entry']['ip-netmask'] or '/32' in entry_dict['entry']['ip-netmask']:
                    ipnetmask_value = entry_dict['entry']['ip-netmask']
                    ip = ipnetmask_value.split('/', 1)[0]
                    ips[ip].append((entry_name, ipnetmask_value, entry))
            # Add FQDNs to the list of FQDNs:
            elif 'fqdn' in entry_dict['entry']:
                fqdn = entry_dict['entry']['fqdn']
                _, _, ipaddrlist = cached_dns_ex_lookup(fqdn)
                for ip in ipaddrlist:
                    fqdns.append((entry_name, fqdn, ip))
                    ips_fqdns_resolve_to[ip] += 1

        # Now that we have the data, we're ready to review the fqdns for what's present in the IPs:
        for fqdn_name, fqdn, ip in fqdns:
            # Skip IPs that have multiple FQDNs on the firewall resolve to them, because it's ambiguous which fqdn to use
            if ip in ips and ips_fqdns_resolve_to[ip] == 1:
                for address_name, ipnetmask_value, address_entry in ips[ip]:
                    text = f"Device Group {device_group}'s address {address_name} with IP {ipnetmask_value} can be replaced with an fqdn of {fqdn}"
                    badentries.append(BadEntry(data=(address_entry, fqdn), text=text, device_group=device_group, entry_type='Addresses'))
    return badentries
def find_unconventional_addresses(profilepackage):
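    # Flags Address objects whose names don't match the configured naming-convention format for
    # their address type (fqdn, ip-netmask, ip-range, or ip-wildcard).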
    device_groups = profilepackage.device_groups
    pan_config = profilepackage.pan_config

    fqdn_name_format = profilepackage.settings.get('fqdn name format')
    range_name_format = profilepackage.settings.get('range name format')
    wildcard_name_format = profilepackage.settings.get('wildcard name format')
    host_name_format = profilepackage.settings.get('host name format')
    net_name_format = profilepackage.settings.get('net name format')
    colon_replacement = profilepackage.settings.get('ipv6 colon replacement char')
    if not fqdn_name_format or not host_name_format or not net_name_format or not range_name_format or not wildcard_name_format:
        return []

    badentries = []

    logger.info("*"*80)
    logger.info("Checking for misleading Address objects")

    ADDRESS_TYPES = ('fqdn', 'ip-netmask', 'ip-range', 'ip-wildcard')
    for i, device_group in enumerate(device_groups):
        logger.info(f"({i+1}/{len(device_groups)}) Checking {device_group}'s Address objects")
        for address_entry in pan_config.get_devicegroup_object('Addresses', device_group):
            # For simplicity, convert the XML object to a dict:
            address_dict = xml_object_to_dict(address_entry)
            address_name = address_dict['entry']['@name']

            for address_t in ADDRESS_TYPES:
                if address_t in address_dict['entry'].keys():
                    address_type = address_t
                    break
            else:
                # This should not be possible!
                continue

            address_fields = {}
            if address_type == 'fqdn':
                address_fields['fqdn'] = address_dict['entry']['fqdn']
                calculated_name = fqdn_name_format.format(**address_fields)
            elif address_type == 'ip-range':
                address_fields['range'] = address_dict['entry']['ip-range']
                calculated_name = range_name_format.format(**address_fields)
            elif address_type == 'ip-wildcard':
                address_fields['mask'] = address_dict['entry']['ip-wildcard']
                calculated_name = wildcard_name_format.format(**address_fields)
            elif address_type == 'ip-netmask':
                address_fields['host'] = address_dict['entry']['ip-netmask'].split('/', 1)[0]
                if colon_replacement and ':' in address_fields['host']:
                    address_fields['host'] = address_fields['host'].replace(':', colon_replacement)
                if '/' in address_dict['entry']['ip-netmask']:
                    address_fields['network'] = address_dict['entry']['ip-netmask'].split('/', 1)[1]
                else:
                    address_fields['network'] = ''

                # We'll use the host name pattern for /32's or entries without a netmask:
                is_host = '/' not in address_dict['entry']['ip-netmask'] or ('.' in address_dict['entry']['ip-netmask'] and '/32' in address_dict['entry']['ip-netmask']) or (':' in address_dict['entry']['ip-netmask'] and '/128' in address_dict['entry']['ip-netmask'])
                if is_host:
                    calculated_name = host_name_format.format(**address_fields)
                else:
                    calculated_name = net_name_format.format(**address_fields)

            # PA supports a max char length of 63:
            calculated_name = calculated_name[:63]
            if address_name != calculated_name:
                text = f"Device Group {device_group}'s Address {address_name} should instead be named {calculated_name}"
                badentries.append(BadEntry(data=[address_entry, calculated_name], text=text, device_group=device_group, entry_type='Addresses'))
    return badentries
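# A minimal sketch of how the address name format settings above are applied. Each is a plain
# str.format() template over the fields built for the address type: {fqdn} for FQDNs, {range} for
# ranges, {mask} for wildcards, and {host}/{network} for ip-netmask entries. The templates below
# are hypothetical; the real values come from profilepackage.settings and are site-specific.
example_net_name_format = "N-{host}_{network}"
example_address_name = example_net_name_format.format(host='10.1.2.0', network='24')  # 'N-10.1.2.0_24'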
def get_zone_for_source_member(firewall, api_key, member_name, address_groups, addresses, regions):
    # Note: Members can be an IP, Subnet, IP Range, Address, Address Group, or Region (in that order of resolution priority).
    # Looking up the zone for every single IP in a subnet does not scale - there could be a /16's,
    # so instead we'll only get the zone for the first object in the address group/Region/Subnet/IP Range
    # and return a bool as to whether or not the result is complete
    # Returns: list of zones as List[str], True if it's complete

    if member_name == 'any':
        return [], False

    try:
        if member_name in address_groups:
            all_contained_address_names = get_underlying_address_objects(member_name, address_groups, addresses)
            if len(all_contained_address_names) == 0:
                return [], True
            else:
                zones = []
                is_complete = True
                for address_name in all_contained_address_names:
                    # Only look up IPs if there is a single entry in the Address Object.
                    address_entry = addresses[address_name]
                    address_dict = xml_object_to_dict(address_entry)['entry']
                    if "fqdn" in address_dict or ("ip-netmask" in address_dict and ("/" not in address_dict['ip-netmask'] or "/32" in address_dict['ip-netmask'])):
                        ip = get_single_ip_from_address(address_entry)
                        if ip:
                            zones += [get_firewall_zone(firewall, api_key, ip)]
                    else:
                        is_complete = False
                return zones, is_complete
        elif member_name in addresses:
            zones = []
            is_complete = True
            address_entry = addresses[member_name]
            address_dict = xml_object_to_dict(address_entry)['entry']
            if "fqdn" in address_dict or ("ip-netmask" in address_dict and ("/" not in address_dict['ip-netmask'] or "/32" in address_dict['ip-netmask'])):
                ip = get_single_ip_from_address(address_entry)
                if ip:
                    zones += [get_firewall_zone(firewall, api_key, ip)]
            else:
                is_complete = False
            return zones, is_complete
        else:
            # Entry is an IP, subnet, or range:
            # Attempt to extract an IP
            # Use ipaddress.ip_address and ipaddress.ip_network to validate
            if '-' in member_name:
                start_range, end_range = member_name.split('-')
                ip = start_range
                ipaddress.ip_address(ip)
                is_complete = (start_range == end_range)
            elif '/' in member_name:
                ip, mask = member_name.split('/')
                ipaddress.ip_address(ip)
                is_complete = (mask == '32')
            else:
                ip = member_name
                ipaddress.ip_address(ip)
                is_complete = True
            zones = [get_firewall_zone(firewall, api_key, ip)]
            return zones, is_complete
    except:
        return [], False
def find_extra_rules(profilepackage):
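    # Flags Security rules whose source and destination members all resolve to the same single
    # firewall zone, since such rules are likely unnecessary.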
    device_groups = profilepackage.device_groups
    devicegroup_objects = profilepackage.devicegroup_objects
    devicegroup_exclusive_objects = profilepackage.devicegroup_exclusive_objects
    api_key = profilepackage.api_key
    device_group_hierarchy_parent = profilepackage.device_group_hierarchy_parent
    no_api = profilepackage.no_api

    if no_api:
        return []

    badentries = []
    logger.info ("*"*80)
    logger.info ("Checking for Extra rules")
    for i, device_group in enumerate(device_groups):
        firewalls = devicegroup_objects[device_group]['all_active_child_firewalls']

        addresses = {entry.get('name'):entry for entry in devicegroup_objects[device_group]['Addresses']}
        address_groups = {entry.get('name'):entry for entry in devicegroup_objects[device_group]['AddressGroups']}

        # Address and Address Group objects can be inherited from parent device groups, so we need data from them too
        parent_dgs = []
        current_dg = device_group_hierarchy_parent.get(device_group)
        while current_dg:
            parent_dgs.append(current_dg)
            current_dg = device_group_hierarchy_parent.get(current_dg)

        for parent_dg in parent_dgs:
            for address_entry in devicegroup_objects[parent_dg]['Addresses']:
                addresses[address_entry.get('name')] = address_entry
            for address_group_entry in devicegroup_objects[parent_dg]['AddressGroups']:
                address_group_members = [elem.text for elem in address_group_entry.findall('./static/member')]
                address_groups[address_group_entry.get('name')] = address_group_members

        for ruletype in ('SecurityPreRules', 'SecurityPostRules'):
            rules = devicegroup_exclusive_objects[device_group][ruletype]
            logger.info (f"({i+1}/{len(device_groups)}) Checking {device_group}'s {ruletype}")

            total_entries = len(rules)
            for j, entry in enumerate(rules):
                logger.info (f'({j+1}/{total_entries}) {entry.get("name")}')
                # Disabled rules can be ignored
                if entry.find("./disabled") is not None and entry.find("./disabled").text == "yes":
                    continue

                rule_name = entry.get('name')
                src_members = sorted([elem.text for elem in entry.findall('./source/member')])
                dest_members = sorted([elem.text for elem in entry.findall('./destination/member')])

                # Analyze the rules for rules where if there is one source/dest zone and both are the same,
                # so the rule isn't needed.
                # Note: Members can be an Address or Address Group Looking up the zones for
                # every single IP does not scale - there could be many /16's However, grabbing only a single IP
                # address from each member can result in false positives if a single Address object contains a large
                # enough subnet that it spans multiple zones. To avoid false positives, we'll only report an issue if
                # all of a policy's members are resolvable to a single IP.

                missing_ips = False
                ips = {'Source': [], 'Dest': []}
                for members, zonetype in [(src_members, 'Source'), (dest_members, 'Dest')]:
                    for member_name in members:
                        if member_name == 'any':
                            missing_ips = True
                            break

                        if member_name in address_groups:
                            all_contained_address_names = get_underlying_address_objects(member_name, address_groups, addresses)
                        elif member_name in addresses:
                            all_contained_address_names = [member_name]
                        else:
                            # Member isn't a known Address or Address Group (e.g., a literal IP or region), so skip this rule
                            missing_ips = True
                            break

                        for address_name in all_contained_address_names:
                            # Only look up IPs if there is a single entry in the Address Object.
                            address_entry = addresses[address_name]
                            address_dict = xml_object_to_dict(address_entry)['entry']
                            if "fqdn" in address_dict or ("ip-netmask" in address_dict and ("/" not in address_dict['ip-netmask'] or "/32" in address_dict['ip-netmask'])):
                                ip = get_single_ip_from_address(address_entry)
                                if ip:
                                    ips[zonetype] += [ip]
                            else:
                                missing_ips = True
                                break
                        if missing_ips:
                            break
                    if missing_ips:
                        break
                if missing_ips:
                    continue

                calculated_src_zones = set()
                for firewall in firewalls:
                    for ip in ips['Source']:
                        try:
                            zone = get_firewall_zone(firewall, api_key, ip)
                            calculated_src_zones.add(zone)
                        except:
                            pass

                calculated_dest_zones = set()
                for firewall in firewalls:
                    for ip in ips['Dest']:
                        try:
                            zone = get_firewall_zone(firewall, api_key, ip)
                            calculated_dest_zones.add(zone)
                        except:
                            pass

                if len(calculated_src_zones) == 1 and calculated_src_zones == calculated_dest_zones:
                    text = f"Device Group '{device_group}'s {ruletype} '{rule_name}' was calculated to only need the same source and dest zone of '{list(calculated_dest_zones)[0]}'."
                    logger.info (text)
                    badentries.append( BadEntry(data=entry, text=text, device_group=device_group, entry_type=ruletype) )
    return badentries
Example no. 14
def consolidate_service_like_objects(profilepackage, object_type,
                                     object_friendly_type, validator_function):
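    # Finds equivalent service-like objects within each device group, determines the most commonly
    # used one to keep, and returns the Service Group and policy member replacements needed to
    # consolidate onto it as BadEntry objects.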
    pan_config = profilepackage.pan_config
    devicegroup_objects = profilepackage.devicegroup_objects

    logger.info("*" * 80)
    logger.info(f"Checking for equivalent {object_friendly_type} objects to consolidate")

    # Objects will only be consolidated at the same device group level, to avoid potential scope issues
    equivalent_objects = validator_function(profilepackage)
    dg_to_objects_to_consolidate = find_objects_needing_consolidation(equivalent_objects)

    if not dg_to_objects_to_consolidate:
        logger.info(f"There were no {object_friendly_type} to consolidate")
        return dg_to_objects_to_consolidate

    badentries = []
    for device_group, objects_to_consolidate in dg_to_objects_to_consolidate.items():
        # Determine which object is most commonly used, to minimize the number of changes needed
        service_to_replacement = find_replacement_objects(pan_config, devicegroup_objects, device_group, objects_to_consolidate)
        # Get the list of objects that will need to be updated:
        servicegroups_needing_replacement, policies_needing_replacement = find_objects_policies_needing_replacement(pan_config, devicegroup_objects, device_group, service_to_replacement)

        # Now that we know which objects need replacements, we can iterate through
        # and make those replacements!
        # First replace the contents of servicegroups
        for object_dg, object_type, object_entry in servicegroups_needing_replacement:
            object_policy_dict = xml_object_to_dict(object_entry)['entry']
            new_services = []
            replacements_made = {}
            for member in object_policy_dict['members']['member']:
                if member in new_services:
                    # Member is already present, nothing to do
                    continue
                elif member not in service_to_replacement:
                    # Member is not present and doesn't need to be replaced, so keep it as is:
                    new_services.append(member)
                elif member in service_to_replacement and service_to_replacement[member] not in new_services:
                    # Member needs to be replaced, and replacement is not already present, so add it:
                    new_services.append(service_to_replacement[member])
                    replacements_made[member] = service_to_replacement[member]
                else:
                    # Member needs to be replaced, but replacement is already present, so nothing to do:
                    continue
            assert object_policy_dict['members']['member'] != new_services
            object_policy_dict['members']['member'] = new_services
            text = f"Replace the following Service members in {object_dg}'s {object_type} {object_entry.get('name')}: {sorted([k + ' with ' + v for k, v in replacements_made.items()])}"
            badentries.append(
                BadEntry(data=[object_entry, object_policy_dict],
                         text=text,
                         device_group=object_dg,
                         entry_type=object_type))

        # Then replace the contents of policies
        for policy_dg, policy_type, policy_entry in policies_needing_replacement:
            object_policy_dict = xml_object_to_dict(policy_entry)['entry']
            replacements_made = {}
            if policy_type in ("NATPreRules", "NATPostRules"):
                # NAT rules are limited to a single service
                member_to_replace = object_policy_dict['service']
                replacements_made[member_to_replace] = service_to_replacement[member_to_replace]
                object_policy_dict['service'] = service_to_replacement[member_to_replace]
            # If it's a policy with only one member, it'll be parsed as a string, not a list
            elif isinstance(object_policy_dict['service']['member'], str):
                member_to_replace = object_policy_dict['service']['member']
                replacements_made[member_to_replace] = service_to_replacement[member_to_replace]
                object_policy_dict['service']['member'] = service_to_replacement[member_to_replace]
            else:
                # Iterate through the policy's members to see which need to be replaced, and
                # with what. Then store what changed in replacements_made
                new_services = []
                for member in object_policy_dict['service']['member']:
                    if member in new_services:
                        # Member is already present, nothing to do
                        continue
                    elif member not in service_to_replacement:
                        # Member is not present and doesn't need to be replaced, so keep it as is:
                        new_services.append(member)
                    elif member in service_to_replacement and service_to_replacement[member] not in new_services:
                        # Member needs to be replaced, and replacement is not already present, so add it:
                        replacements_made[member] = service_to_replacement[member]
                        new_services.append(service_to_replacement[member])
                    else:
                        # Member needs to be replaced, but replacement is already present, so nothing to do:
                        continue
                assert object_policy_dict['service']['member'] != new_services
                object_policy_dict['service']['member'] = new_services
            text = f"Replace the following Service members in {policy_dg}'s {policy_type} {policy_entry.get('name')}: {sorted([k + ' with ' + v for k, v in replacements_made.items()])}"
            badentries.append(
                BadEntry(data=[policy_entry, object_policy_dict],
                         text=text,
                         device_group=policy_dg,
                         entry_type=policy_type))
    return badentries
Example no. 15
def find_misleading_addresses(profilepackage):
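    # Flags Address objects whose names suggest an FQDN or IP address that differs from the
    # object's actual value.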
    device_groups = profilepackage.device_groups
    devicegroup_objects = profilepackage.devicegroup_objects

    # NOTE: IP Wildcards not supported yet
    ADDRESS_TYPES = ('ip-netmask', 'ip-range', 'fqdn')
    IP_REGEX = r"((25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])\.){3}(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])"
    badentries = []

    logger.info("*" * 80)
    logger.info("Checking for misleading Address objects")

    for i, device_group in enumerate(device_groups):
        logger.info(f"({i+1}/{len(device_groups)}) Checking {device_group}'s Address objects")
        for address_entry in devicegroup_objects[device_group]['Addresses']:
            # For simplicity, convert the XML object to a dict:
            address_dict = xml_object_to_dict(address_entry)
            entry_name = address_dict['entry']['@name']
            for address_type in ADDRESS_TYPES:
                if address_type in address_dict['entry'].keys():
                    entry_type = address_type
                    break
            else:
                # Wildcards are unsupported, and so skipped
                continue

            entry_value = address_dict['entry'][entry_type]

            # The exact strategy will depend on the content type
            # For FQDNs, the domain should be present in the name
            if entry_type == 'fqdn':
                if entry_value.lower().split('.', 1)[0] not in entry_name.lower():
                    text = f"Device Group {device_group}'s Address {entry_name} has a misleading value of {entry_value}, because the FQDN's domain is not present in the name"
                    badentries.append(
                        BadEntry(data=address_entry,
                                 text=text,
                                 device_group=device_group,
                                 entry_type='Addresses'))
            # For IPs, the IP should be present in the name, if the name 'looks' like it contains an IP (based on regex):
            elif entry_type == 'ip-netmask':
                # This can optionally include a '/'
                ip_address = entry_value.split('/', 1)[0]
                if ip_address not in entry_name and re.search(IP_REGEX, entry_name) is not None:
                    text = f"Device Group {device_group}'s Address {entry_name} appears to contain an IP address in the name, but has a different value of {entry_value}"
                    badentries.append(
                        BadEntry(data=address_entry,
                                 text=text,
                                 device_group=device_group,
                                 entry_type='Addresses'))
            elif entry_type == 'ip-range':
                # This can optionally include a '-'
                ip_address = entry_value.split('-', 1)[0]
                if ip_address not in entry_name and re.search(IP_REGEX, entry_name) is not None:
                    text = f"Device Group {device_group}'s Address {entry_name} appears to contain an IP address in the name, but has a different value of {entry_value}"
                    badentries.append(
                        BadEntry(data=address_entry,
                                 text=text,
                                 device_group=device_group,
                                 entry_type='Addresses'))
    return badentries