def process_rules(app_map, policies, zone_nets, policies_by_zone_pair,
                  src_per_policy, dst_per_policy):
    logger.info("processing rules")
    # turn policies into a list of Rules (permit only), limited by zone,
    # that do not overlap.  The tricky bit here is processing policies in
    # order and accounting for denies.  We do this once for each
    # (from_zone, to_zone, app) tuple.  The other tricky bit is handling
    # the application "any", which we treat as including all applications
    # used anywhere, and also record in a special "@@other" app.
    rules_by_app = {}

    all_apps = set(itertools.chain(*[p.applications for p in policies]))
    all_apps = all_apps | set(app_map.keys())
    all_apps.discard('any')

    global_policies = policies_by_zone_pair.get((None, None), [])
    for from_zone, to_zone in itertools.product(zone_nets, zone_nets):
        zpolicies = sorted(policies_by_zone_pair.get((from_zone, to_zone), []),
                           key=lambda p: p.sequence)
        zpolicies += global_policies
        logger.debug(" from-zone %s to-zone %s (%d policies)",
                     from_zone, to_zone, len(zpolicies))
        rule_count = 0
        apps = set(itertools.chain(*[p.applications for p in zpolicies]))
        if 'any' in apps:
            apps = all_apps
        for app in apps | set(['@@other']):
            mapped_app = app_map[app]
            # for each app, count down the IP pairs that have not matched a
            # rule yet, starting with the zones' IP spaces.  This simulates
            # sequential processing of the policies.
            remaining_pairs = IPPairs(
                (zone_nets[from_zone], zone_nets[to_zone]))
            rules = rules_by_app.setdefault(mapped_app, [])
            for pol in zpolicies + global_policies:
                if app not in pol.applications and 'any' not in pol.applications:
                    continue
                src = src_per_policy[pol]
                dst = dst_per_policy[pol]
                # if the policy is a "permit", add rules for each
                # src/destination pair
                if pol.action == 'permit':
                    for s, d in remaining_pairs:
                        s = s & src
                        d = d & dst
                        if len(s) and len(d):
                            rules.append(Rule(s, d, mapped_app, pol.name))
                            rule_count += 1
                # regardless, consider this src/dst pair matched
                remaining_pairs = remaining_pairs - IPPairs((src, dst))
                # if we've matched everything, we're done
                if not remaining_pairs:
                    break
        logger.debug(" from-zone %s to-zone %s => %d rules",
                     from_zone, to_zone, rule_count)

    # only include @@other if it's used
    if not rules_by_app['@@other']:
        del rules_by_app['@@other']

    # simplify and return the result
    return simplify_rules(rules_by_app)

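# A minimal, self-contained sketch of the "remaining pairs" countdown that the
# comments in process_rules describe.  This is illustrative only: real fwunit
# tracks IPPairs over IPSet objects, while here the source/destination spaces
# are modeled as plain sets of host labels, and the policy tuples are made up.
def _toy_process(policies, src_space, dst_space):
    """policies: list of (action, srcs, dsts) tuples, processed in order."""
    permitted = []
    remaining = set((s, d) for s in src_space for d in dst_space)
    for action, srcs, dsts in policies:
        matched = set((s, d) for (s, d) in remaining
                      if s in srcs and d in dsts)
        if action == 'permit':
            permitted.extend(sorted(matched))
        # whether permit or deny, matched pairs are consumed so that later,
        # lower-priority policies cannot re-match them
        remaining -= matched
        if not remaining:
            break
    return permitted

# an earlier deny shadows a later, broader permit -- only ('b', 'x') survives
assert _toy_process(
    [('deny', {'a'}, {'x'}), ('permit', {'a', 'b'}, {'x'})],
    src_space={'a', 'b'}, dst_space={'x'}) == [('b', 'x')]
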
def test_simplify_no_combine():
    """With no common sources or destinations, nothing changes"""
    rules = {'testapp': [
        r('10.0.0.0', '20.0.0.0'),
        r('20.0.0.0', '30.0.0.0'),
        r('30.0.0.0', '40.0.0.0'),
    ]}
    eq_(simplify_rules(rules), rules)

def test_simplify_combine():
    """Rules with the same source are combined"""
    rules = {'testapp': [
        r('10.0.0.0', '20.0.0.0'),
        r('20.0.0.0', '30.0.0.0'),
        r('10.0.0.0', '40.0.0.0'),
    ]}
    exp = {'testapp': [
        r('10.0.0.0', IPSet([IP('20.0.0.0'), IP('40.0.0.0')])),
        r('20.0.0.0', '30.0.0.0'),
    ]}
    eq_(simplify_rules(rules), exp)

def test_simplify_combine_iterations():
    """A set of rules that requires a few passes to simplify is fully simplified"""
    rules = {'testapp': [
        r('10.0.0.0', IPSet([IP('20.0.0.0')])),
        r('10.0.0.0', IPSet([IP('30.0.0.0')])),
        r('11.0.0.0', IPSet([IP('20.0.0.0'), IP('30.0.0.0')])),
        r('12.0.0.0', IPSet([IP('20.0.0.0'), IP('30.0.0.0')])),
        r(IPSet([IP('10.0.0.0'), IP('11.0.0.0'), IP('12.0.0.0')]),
          IPSet([IP('30.0.0.0'), IP('40.0.0.0')])),
    ]}
    exp = {'testapp': [
        r(IPSet([IP('10.0.0.0'), IP('11.0.0.0'), IP('12.0.0.0')]),
          IPSet([IP('20.0.0.0'), IP('30.0.0.0'), IP('40.0.0.0')])),
    ]}
    eq_(simplify_rules(rules), exp)

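# A simplified model of what simplify_rules appears to do, judging from the
# tests above: repeatedly merge rules that share a source (union their
# destinations) or share a destination (union their sources) until a fixed
# point is reached -- hence "a few passes".  This operates on frozensets of
# labels rather than fwunit Rule/IPSet objects and is not the real
# implementation.
def _toy_simplify(rules):
    """rules: set of (src frozenset, dst frozenset) pairs."""
    rules = set(rules)
    changed = True
    while changed:
        changed = False
        for a in list(rules):
            for b in list(rules):
                if a is b or a not in rules or b not in rules:
                    continue
                if a[0] == b[0]:    # same source: merge destinations
                    merged = (a[0], a[1] | b[1])
                elif a[1] == b[1]:  # same destination: merge sources
                    merged = (a[0] | b[0], a[1])
                else:
                    continue
                rules -= {a, b}
                rules.add(merged)
                changed = True
    return rules

f = frozenset
assert _toy_simplify({(f('a'), f('x')), (f('a'), f('y')), (f('b'), f('xy'))}) \
    == {(f('ab'), f('xy'))}
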
def combine(address_spaces, routes, sources):
    # get the set of all apps
    all_apps = set()
    for rules in sources.itervalues():
        all_apps = all_apps | set(rules)

    # for each address space, add any apps which aren't explicitly specified
    # in that address space, but *are* specified in the combined ruleset, as
    # copies of that address space's '@@other' app.  This ensures that each
    # space has the same set of apps.
    for rules in sources.itervalues():
        other = rules.get('@@other', [])
        missing_apps = all_apps - set(rules)
        for app in missing_apps:
            rules[app] = [Rule(src=r.src, dst=r.dst, app=app, name=r.name)
                          for r in other]

    combined_rules = {}
    for app in all_apps:
        logger.info("combining app %s", app)
        # The idea here is this: for each pair of address spaces, look at the
        # set of rules specified in the routes.  Only write combined rules
        # for flows which are allowed by all rulesets.
        for local_sp_name, local_sp in address_spaces.iteritems():
            for remote_sp_name, remote_sp in address_spaces.iteritems():
                source_names = routes[local_sp_name, remote_sp_name]
                if not source_names:
                    continue
                logger.debug(" from %s to %s using %s", local_sp_name,
                             remote_sp_name, ', '.join(source_names))
                rulesets = [sources[n][app] for n in source_names]
                # if we only have one source, this is pretty easy: just limit
                # each rule to the relevant IP spaces; otherwise we need to
                # do a recursive intersection
                if len(rulesets) == 1:
                    new_rules = rules_from_to(rulesets[0], local_sp, remote_sp)
                else:
                    new_rules = intersect_rules(rulesets, local_sp, remote_sp)
                if new_rules:
                    combined_rules.setdefault(app, []).extend(new_rules)

    rules = simplify_rules(combined_rules)
    return rules

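# For reference, the shapes that combine() expects, inferred from how the
# arguments are indexed above (an assumption -- the fwunit configuration docs
# are authoritative).  The space names, source names, and addresses below are
# made up for illustration, and the import paths follow the fwunit layout
# assumed throughout this file.
from fwunit.ip import IP, IPSet
from fwunit.types import Rule

example_address_spaces = {
    # address-space name -> the IP space it covers
    'nyc': IPSet([IP('10.1.0.0/16')]),
    'aws': IPSet([IP('10.2.0.0/16')]),
}
example_routes = {
    # (local space, remote space) -> list of rule sources governing that flow
    ('nyc', 'nyc'): ['nyc_fw'],
    ('nyc', 'aws'): ['nyc_fw', 'aws_sg'],
    ('aws', 'nyc'): ['aws_sg', 'nyc_fw'],
    ('aws', 'aws'): ['aws_sg'],
}
example_sources = {
    # rule-source name -> {app: [Rule, ...]}
    'nyc_fw': {'ssh': [Rule(src=IPSet([IP('10.1.1.0/24')]),
                            dst=IPSet([IP('10.2.0.0/16')]),
                            app='ssh', name='admin-ssh')],
               '@@other': []},
    'aws_sg': {'ssh': [Rule(src=IPSet([IP('10.1.0.0/16')]),
                            dst=IPSet([IP('10.2.1.0/24')]),
                            app='ssh', name='sg-ssh/in')],
               '@@other': []},
}
# which would then be combined with:
#   combine(example_address_spaces, example_routes, example_sources)
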
def test_simplify_empty():
    """Simplifying an empty set results in an empty set"""
    eq_(simplify_rules({}), {})

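# The tests above assume a helper `r` and nose's `eq_`; the real helper lives
# in the test module's setup and is not shown here.  A plausible stand-in,
# given how it is called -- bare dotted-quad strings or ready-made IPSets for
# both source and destination -- might look like this (the default app/name
# values are assumptions):
from nose.tools import eq_
from fwunit.ip import IP, IPSet
from fwunit.types import Rule

def r(src, dst, app='testapp', name='test'):
    # wrap bare address strings in single-host IPSets; pass IPSets through
    if isinstance(src, str):
        src = IPSet([IP(src)])
    if isinstance(dst, str):
        dst = IPSet([IP(dst)])
    return Rule(src=src, dst=dst, app=app, name=name)
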
def process_rules(app_map, policies, zone_nets, policies_by_zone_pair,
                  src_per_policy, dst_per_policy, apps_dir):
    logger.info("processing rules")
    # turn policies into a list of Rules (permit only), limited by zone,
    # that do not overlap.  The tricky bit here is processing policies in
    # order and accounting for denies.  We do this once for each
    # (from_zone, to_zone, app) tuple.  The other tricky bit is handling
    # the application "any", which we treat as including all applications
    # used anywhere, and also record in a special "@@other" app.
    permit_rules_by_app = {}
    rule_name_mapping = {}
    permit_deny_rules = {}

    all_apps = set(itertools.chain(*[p.applications for p in policies]))
    all_apps = all_apps | set(app_map.keys())
    all_apps.discard('any')
    if 'tcp_all' in all_apps:
        all_apps.discard('tcp_all')
        all_apps = all_apps | set(tcp_all)
    if 'udp_all' in all_apps:
        all_apps.discard('udp_all')
        all_apps = all_apps | set(udp_all)

    for app in all_apps:
        mapped_app = app_map[app]
        permit_deny_rules[mapped_app] = {"regular_policies": [],
                                         "global_policies": []}
        rule_name_mapping[mapped_app] = defaultdict(default_value)

    for zone_pairs, policies in policies_by_zone_pair.iteritems():
        for policy in policies:
            src = src_per_policy[policy]
            dst = dst_per_policy[policy]
            if 'any' in policy.applications:
                apps = all_apps
            else:
                apps = set(policy.applications)
                if 'tcp_all' in apps:
                    apps.discard('tcp_all')
                    apps = apps | set(tcp_all)
                if 'udp_all' in apps:
                    apps.discard('udp_all')
                    apps = apps | set(udp_all)
            if zone_pairs == (None, None):
                priority = "global_policies"
            else:
                priority = "regular_policies"
            for app in apps:
                permit_deny_rules[app_map[app]][priority].append(
                    FWRuleSequence(src, dst, policy.applications, policy.name,
                                   policy.action, policy.sequence))
                rule_name_mapping[app_map[app]][policy.name].append(
                    RuleNameMappingEntry(src, dst, policy.sequence, priority,
                                         policy.name))

    for app in all_apps:
        for priority in ["regular_policies", "global_policies"]:
            permit_deny_rules[app_map[app]][priority].sort(
                key=lambda r: r.sequence)

    global_policies = sorted(policies_by_zone_pair.get((None, None), []),
                             key=lambda p: p.sequence)
    for from_zone, to_zone in itertools.product(zone_nets, zone_nets):
        zpolicies = sorted(policies_by_zone_pair.get((from_zone, to_zone), []),
                           key=lambda p: p.sequence)
        zpolicies += global_policies
        logger.debug(" from-zone %s to-zone %s (%d policies)",
                     from_zone, to_zone, len(zpolicies))
        apps = set(itertools.chain(*[p.applications for p in zpolicies]))
        if 'any' in apps:
            apps = all_apps
        if 'tcp_all' in apps:
            apps.discard('tcp_all')
            apps = apps | set(tcp_all)
        if 'udp_all' in apps:
            apps.discard('udp_all')
            apps = apps | set(udp_all)

        logger.info("start parallel phase 1 for %s to %s", from_zone, to_zone)
        mapped_apps = [app_map[app] for app in apps]
        pairs = process_pool.map(
            process_permit_rules_by_app_star,
            content_generator(mapped_apps, from_zone, to_zone, zone_nets,
                              zpolicies, src_per_policy, dst_per_policy))
        for mapped_app, permit_rules in pairs:
            permit_rules_app = permit_rules_by_app.setdefault(mapped_app, [])
            permit_rules_app += permit_rules
        logger.info("end parallel phase 1")
        logger.debug(" from-zone %s to-zone %s finished", from_zone, to_zone)

    permit_rules_by_app = simplify_rules(permit_rules_by_app)

    logger.info("start parallel phase 2")
    process_pool.map(
        write_app_to_file_star,
        file_content_generator(apps_dir, all_apps, app_map,
                               permit_rules_by_app, rule_name_mapping,
                               permit_deny_rules))
    logger.info("end parallel phase 2")

    # rules have already been simplified and written out above; return the
    # set of apps that were processed
    return all_apps

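# process_pool.map above relies on "_star" wrapper functions because, on
# Python 2, multiprocessing.Pool.map passes each work item as a single
# argument (there is no Pool.starmap).  The usual idiom -- which the
# process_permit_rules_by_app_star / write_app_to_file_star helpers
# presumably follow -- is a module-level wrapper that unpacks an argument
# tuple into the real worker's parameters.  The worker below is a made-up
# example, not the real fwunit worker.
import multiprocessing

def _scale(name, value, factor):
    return name, value * factor

def _scale_star(args):
    # Pool.map hands us one tuple; unpack it for the multi-argument worker
    return _scale(*args)

if __name__ == '__main__':
    pool = multiprocessing.Pool(2)
    work = [('a', 1, 10), ('b', 2, 10)]
    print(pool.map(_scale_star, work))   # [('a', 10), ('b', 20)]
    pool.close()
    pool.join()
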
def get_rules(aws, app_map, regions, dynamic_subnets):
    if not regions:
        logger.info("Getting all regions")
        regions = aws.all_regions()

    logger.info("collecting subnets")
    subnets = []
    managed_ip_space = IPSet([])
    for id, subnet in aws.get_all_subnets(regions).iteritems():
        name = subnet.tags.get('Name', id)
        dynamic = name in dynamic_subnets or id in dynamic_subnets
        cidr_block = IP(subnet.cidr_block)
        subnet = Subnet(cidr_block=cidr_block, name=name, dynamic=dynamic)
        subnets.append(subnet)
        managed_ip_space = managed_ip_space + IPSet([cidr_block])
    unmanaged_ip_space = IPSet([IP('0.0.0.0/0')]) - managed_ip_space

    logger.info("collecting dynamic subnet IP ranges")
    dynamic_ipsets = {}
    per_host_subnet_ips = IPSet()
    for subnet in subnets:
        if subnet.dynamic:
            ipset = dynamic_ipsets.get(subnet.name, IPSet([]))
            ipset += IPSet([subnet.cidr_block])
            dynamic_ipsets[subnet.name] = ipset
        else:
            per_host_subnet_ips += IPSet([subnet.cidr_block])

    # sort by IP subnet, so we can use a binary search
    logger.info("sorting subnets by IP")
    subnets.sort(key=lambda s: s.cidr_block)
    _subnet_blocks = [s.cidr_block for s in subnets]

    def subnet_by_ip(ip):
        i = bisect.bisect_right(_subnet_blocks, ip)
        if i and ip in _subnet_blocks[i - 1]:
            return subnets[i - 1]

    logger.info("examining instances")
    sgids_by_dynamic_subnet = {}  # {subnet name: set of SecurityGroupIds}
    sgids_by_instance = {}  # {instance_name: [ip, set of SecurityGroupIds]}
    all_sgids = set()
    ips_by_sg = {}  # {group id: IPSet}
    for id, instance in aws.get_all_instances(regions).iteritems():
        if instance.state == 'terminated' or instance.state == 'shutting-down':
            continue  # meh, who cares
        if not instance.vpc_id:
            continue  # not in vpc; ignored
        if not instance.private_ip_address:
            logger.debug(
                "ignoring instance with no private_ip_address: %s, tags %r",
                instance.id, instance.tags)
            continue
        ip = IP(instance.private_ip_address)
        for g in instance.groups:
            ips_by_sg[g.id] = ips_by_sg.get(g.id, IPSet([])) + IPSet([IP(ip)])
        subnet = subnet_by_ip(ip)
        if not subnet:
            logger.debug(
                "ignoring instance with no matching subnet for %s: %s, tags %r",
                ip, instance.id, instance.tags)
            continue
        if subnet.dynamic:
            sgset = sgids_by_dynamic_subnet.setdefault(subnet.name, set())
        else:
            inst_name = instance.tags.get('Name', instance.id)
            if inst_name in sgids_by_instance:
                inst_name = inst_name + ' ({})'.format(instance.id)
            sgset = set()
            sgids_by_instance[inst_name] = [ip, sgset]
        new_sgids = set(SecurityGroupId(g.id, instance.region.name)
                        for g in instance.groups)
        sgset.update(new_sgids)
        all_sgids.update(new_sgids)

    logger.info("accumulating security groups")
    all_apps = set(app_map.values())
    security_groups = {}
    for sgid in all_sgids:
        sg = security_groups[sgid] = aws.get_security_group(sgid)
        assert sg, "no security group with id {}".format(sgid)
        # pre-process all of the rules' apps now
        for sgrule in itertools.chain(sg.rules, sg.rules_egress):
            proto = str(sgrule.ip_protocol)
            if proto == '-1':
                proto = 'any'
            if sgrule.from_port == sgrule.to_port:
                if str(sgrule.from_port) in ("None", "-1"):
                    app = "*/{}".format(proto)
                else:
                    app = '{}/{}'.format(sgrule.from_port, proto)
            else:
                app = '{}-{}/{}'.format(sgrule.from_port, sgrule.to_port, proto)
            app = app_map[app]
            sgrule.app = app
            all_apps.add(app)

    rules = {}
    to_intersect = {}

    def make_rules(sgid, local):
        sg = security_groups[sgid]
        for dir, sgrules in [('in', sg.rules), ('out', sg.rules_egress)]:
            for sgrule in sgrules:
                if sgrule.app == '*/any':
                    apps = all_apps | set(['@@other'])
                else:
                    apps = [sgrule.app]
                for app in apps:
                    for grant in sgrule.grants:
                        if grant.cidr_ip:
                            remote = IPSet([IP(grant.cidr_ip)])
                        else:
                            remote = ips_by_sg.get(grant.group_id, None)
                        if not remote:
                            continue
                        src, dst = (remote, local) if dir == 'in' else (local, remote)
                        name = "{}/{}".format(sg.name, dir)
                        # first make rules involving non-managed space,
                        # leaving only managed-to-managed
                        if dir == 'in':
                            unmanaged_src = src & unmanaged_ip_space
                            if unmanaged_src:
                                rules.setdefault(app, []).append(Rule(
                                    src=unmanaged_src, dst=dst, app=app, name=name))
                            src = src & managed_ip_space
                        else:
                            unmanaged_dst = dst & unmanaged_ip_space
                            if unmanaged_dst:
                                rules.setdefault(app, []).append(Rule(
                                    src=src, dst=unmanaged_dst, app=app, name=name))
                            dst = dst & managed_ip_space
                        if src and dst:
                            to_intersect.setdefault(app, {}).setdefault(
                                dir, []).append((src, dst, name))

    logger.info("writing rules for dynamic subnets")
    for subnet_name, sgids in sgids_by_dynamic_subnet.iteritems():
        subnet = dynamic_ipsets[subnet_name]
        logger.debug(" subnet %s, %s", subnet_name, subnet)
        for sgid in sgids:
            make_rules(sgid, subnet)

    logger.info("writing rules for instances in per-host subnets")
    per_host_host_ips = IPSet()
    for inst_name, info in sgids_by_instance.iteritems():
        ip, sgids = info
        logger.debug(" instance %s at %s", inst_name, ip)
        host_ip = IPSet([ip])
        per_host_host_ips += host_ip
        for sgid in sgids:
            make_rules(sgid, host_ip)

    logger.info("assuming unrestricted outbound access from unoccupied IPs "
                "in per-host subnets")
    unoccupied = per_host_subnet_ips - per_host_host_ips
    for app in all_apps:
        rules.setdefault(app, []).append(Rule(
            src=unoccupied, dst=unmanaged_ip_space, app=app,
            name='unoccupied/out'))
        to_intersect.setdefault(app, {}).setdefault('out', []).append(
            (unoccupied, managed_ip_space, 'unoccupied/out'))

    # traffic within the managed IP space is governed both by outbound rules
    # on the source and inbound rules on the destination.
    logger.info("intersecting inbound and outbound rules")
    for app, dirs in to_intersect.iteritems():
        in_rules = dirs.get('in', [])
        out_rules = dirs.get('out', [])
        logger.debug("..for %s", app)
        new_rules = []
        for inr in in_rules:
            for outr in out_rules:
                src = inr[0] & outr[0]
                if not src:
                    continue
                dst = inr[1] & outr[1]
                if not dst:
                    continue
                new_rules.append(Rule(src=src, dst=dst, app=app,
                                      name=combine_names(inr[2], outr[2])))
        # simplify now, within this app, to save space and time
        new_rules = simplify_rules({app: new_rules})[app]
        rules.setdefault(app, []).extend(new_rules)

    rules = simplify_rules(rules)
    return rules

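# The in/out intersection at the end of get_rules encodes the fact that,
# inside the managed IP space, a flow is only allowed if the source's security
# group permits it outbound *and* the destination's security group permits it
# inbound.  A tiny illustration with fwunit's IPSet (the addresses are made
# up): the allowed flow is the pairwise intersection of the two rules' source
# and destination sets.
from fwunit.ip import IP, IPSet

out_src, out_dst = IPSet([IP('10.0.1.0/24')]), IPSet([IP('10.0.0.0/8')])
in_src, in_dst = IPSet([IP('10.0.0.0/16')]), IPSet([IP('10.0.2.5')])

allowed_src = out_src & in_src   # 10.0.1.0/24
allowed_dst = out_dst & in_dst   # 10.0.2.5
if allowed_src and allowed_dst:
    print("flow permitted from %s to %s" % (allowed_src, allowed_dst))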