def test_security_group_rule_get_by_security_group(self):
    """Conductor call should delegate the rule lookup to the DB layer."""
    secgroup = {"id": "fake-secgroup"}
    self.mox.StubOutWithMock(db, "security_group_rule_get_by_security_group")
    # Expect the conductor to unpack the group dict into a bare id.
    db.security_group_rule_get_by_security_group(
        self.context, secgroup["id"]).AndReturn("it worked")
    self.mox.ReplayAll()
    rules = self.conductor.security_group_rule_get_by_security_group(
        self.context, secgroup)
    self.assertEqual(rules, "it worked")
def test_security_group_rule_get_by_security_group(self):
    """The conductor should pass only the group's id down to the DB API."""
    group = {'id': 'fake-secgroup'}
    self.mox.StubOutWithMock(db, 'security_group_rule_get_by_security_group')
    db.security_group_rule_get_by_security_group(
        self.context, group['id']).AndReturn('it worked')
    self.mox.ReplayAll()
    # Whatever the DB layer hands back must be returned unchanged.
    returned = self.conductor.security_group_rule_get_by_security_group(
        self.context, group)
    self.assertEqual(returned, 'it worked')
def test_security_group_rule_get_by_security_group(self):
    """Verify rule lookup is proxied straight through to the DB API."""
    sg_dict = {'id': 'fake-secgroup'}
    self.mox.StubOutWithMock(db, 'security_group_rule_get_by_security_group')
    # Only the id from the dict should reach the DB call.
    db.security_group_rule_get_by_security_group(
        self.context, sg_dict['id']).AndReturn('it worked')
    self.mox.ReplayAll()
    outcome = self.conductor.security_group_rule_get_by_security_group(
        self.context, sg_dict)
    self.assertEqual(outcome, 'it worked')
def get_by_security_group_id(cls, context, secgroup_id):
    """Fetch the rules of one security group as an object list.

    The grantee group is joined at the DB layer so each resulting
    rule object carries it as a preloaded attribute.
    """
    joined = ['grantee_group']
    raw_rules = db.security_group_rule_get_by_security_group(
        context, secgroup_id, columns_to_join=joined)
    return base.obj_make_list(context, cls(context),
                              objects.SecurityGroupRule, raw_rules,
                              expected_attrs=joined)
def prepare_instance_filter(self, instance, network_info):
    """Prepare filters for the instance. At this point, the instance
    isn't running yet.

    For every attached network a default-drop filter plus one filter
    per security-group rule is created for each fixed IP; filters left
    over from a previous call for this instance are removed afterwards.
    """
    LOG.debug("prepare_instance_filter: %s", locals())
    tenant_id = instance['project_id']
    ctxt = context.get_admin_context()
    new_filters = {}     # network_uuid -> ids of filters created below
    not_to_delete = {}   # network_uuid -> ids of duplicates to keep
    for (network, mapping) in network_info:
        LOG.debug("handling network=%s mapping=%s", network, mapping)
        info = _from_network_info(network, mapping, tenant_id)
        if not info:
            # Network not handled by this driver; nothing to filter.
            LOG.debug("skip this network_info")
            continue
        vifinfo_uuid, network_uuid, ips = info
        filter_bodys = []
        for ip in ips:
            # Default drop first, then per-rule accepts for each
            # security group the instance belongs to.
            dd_f = _build_default_drop_filter(ip + "/32")
            filter_bodys.extend(dd_f)
            for sg in instance.security_groups:
                LOG.debug("security_group.id=%s", sg.id)
                rules = db.security_group_rule_get_by_security_group(
                    ctxt, sg.id)
                for rule in rules:
                    rule_f = _build_sg_rule_filter(
                        ip + "/32", rule, SECURITY_GROUP_PRIORITY)
                    filter_bodys.extend(rule_f)
        #TODO(NTTdocomo) add duplicated id to list
        ids, dup_ids = _create_filters(self._connection, tenant_id,
                                       network_uuid, filter_bodys)
        new_filters[network_uuid] = ids
        not_to_delete[network_uuid] = dup_ids
    LOG.debug("new_filters = %s", new_filters)
    # delete old filters, keeping any reported as duplicates of the
    # ones just created.
    # NOTE(review): if an old network_uuid is no longer present in
    # network_info, not_to_delete[network_uuid] raises KeyError —
    # confirm whether a network can be detached between calls.
    for (network_uuid, filter_ids)\
            in self._filters.get(instance.id, {}).iteritems():
        fid_dict = {}
        for fid in filter_ids:
            fid_dict[fid] = None
        for excl_fid in not_to_delete[network_uuid]:
            del (fid_dict[excl_fid])
        _delete_filters(self._connection, tenant_id, network_uuid,
                        fid_dict.keys())
    self._filters[instance.id] = new_filters
    self._network_infos[instance.id] = network_info
    LOG.debug("prepare_instance_filter: end")
def prepare_instance_filter(self, instance, network_info):
    """Prepare filters for the instance. At this point, the instance
    isn't running yet.

    Builds, per attached network and fixed IP, a default-drop filter
    followed by one filter for every security-group rule, then removes
    stale filters from the previous invocation for this instance.
    """
    LOG.debug("prepare_instance_filter: %s", locals())
    tenant_id = instance['project_id']
    ctxt = context.get_admin_context()
    # Track what we create now vs. pre-existing duplicate ids that the
    # create call reports (those must survive the cleanup below).
    new_filters = {}
    not_to_delete = {}
    for (network, mapping) in network_info:
        LOG.debug("handling network=%s mapping=%s", network, mapping)
        info = _from_network_info(network, mapping, tenant_id)
        if not info:
            LOG.debug("skip this network_info")
            continue
        vifinfo_uuid, network_uuid, ips = info
        filter_bodys = []
        for ip in ips:
            dd_f = _build_default_drop_filter(ip + "/32")
            filter_bodys.extend(dd_f)
            for sg in instance.security_groups:
                LOG.debug("security_group.id=%s", sg.id)
                rules = db.security_group_rule_get_by_security_group(
                    ctxt, sg.id)
                for rule in rules:
                    rule_f = _build_sg_rule_filter(
                        ip + "/32", rule, SECURITY_GROUP_PRIORITY)
                    filter_bodys.extend(rule_f)
        #TODO(NTTdocomo) add duplicated id to list
        ids, dup_ids = _create_filters(self._connection, tenant_id,
                                       network_uuid, filter_bodys)
        new_filters[network_uuid] = ids
        not_to_delete[network_uuid] = dup_ids
    LOG.debug("new_filters = %s", new_filters)
    # delete old filters
    # NOTE(review): a network_uuid present in the previous call but
    # absent from this network_info would make
    # not_to_delete[network_uuid] raise KeyError — verify networks
    # cannot be detached between invocations.
    for (network_uuid, filter_ids)\
            in self._filters.get(instance.id, {}).iteritems():
        fid_dict = {}
        for fid in filter_ids:
            fid_dict[fid] = None
        for excl_fid in not_to_delete[network_uuid]:
            del(fid_dict[excl_fid])
        _delete_filters(self._connection, tenant_id, network_uuid,
                        fid_dict.keys())
    self._filters[instance.id] = new_filters
    self._network_infos[instance.id] = network_info
    LOG.debug("prepare_instance_filter: end")
def instance_rules(self, instance, network_info):
    """Compile the instance's security-group rules into iptables
    argument strings.

    Returns a tuple (ipv4_rules, ipv6_rules) of strings ready to be
    loaded into the instance's chains.
    """
    # make sure this is legacy nw_info
    network_info = self._handle_network_info_model(network_info)
    ctxt = context.get_admin_context()
    ipv4_rules = []
    ipv6_rules = []
    # Initialize with basic rules
    self._do_basic_rules(ipv4_rules, ipv6_rules, network_info)
    # Set up rules to allow traffic to/from DHCP server
    self._do_dhcp_rules(ipv4_rules, network_info)
    # Allow project network traffic
    if FLAGS.allow_same_net_traffic:
        self._do_project_network_rules(ipv4_rules, ipv6_rules,
                                       network_info)
    # We wrap these in FLAGS.use_ipv6 because they might cause
    # a DB lookup. The other ones are just list operations, so
    # they're not worth the clutter.
    if FLAGS.use_ipv6:
        # Allow RA responses
        self._do_ra_rules(ipv6_rules, network_info)
    security_groups = db.security_group_get_by_instance(ctxt,
                                                        instance["id"])
    # then, security group chains and rules
    for security_group in security_groups:
        rules = db.security_group_rule_get_by_security_group(
            ctxt, security_group["id"])
        for rule in rules:
            LOG.debug(_("Adding security group rule: %r"), rule,
                      instance=instance)
            # A rule with no CIDR grants via grantee_group (handled in
            # the else branch below); default chain selection to IPv4.
            if not rule.cidr:
                version = 4
            else:
                version = netutils.get_ip_version(rule.cidr)
            if version == 4:
                fw_rules = ipv4_rules
            else:
                fw_rules = ipv6_rules
            protocol = rule.protocol
            if protocol:
                protocol = rule.protocol.lower()
            if version == 6 and protocol == "icmp":
                protocol = "icmpv6"
            args = ["-j ACCEPT"]
            if protocol:
                args += ["-p", protocol]
            if protocol in ["udp", "tcp"]:
                args += self._build_tcp_udp_rule(rule, version)
            elif protocol == "icmp":
                args += self._build_icmp_rule(rule, version)
            if rule.cidr:
                LOG.debug("Using cidr %r", rule.cidr, instance=instance)
                args += ["-s", rule.cidr]
                fw_rules += [" ".join(args)]
            else:
                if rule["grantee_group"]:
                    # FIXME(jkoelker) This needs to be ported up into
                    #                 the compute manager which already
                    #                 has access to a nw_api handle,
                    #                 and should be the only one making
                    #                 making rpc calls.
                    nw_api = network.API()
                    # NOTE(review): this loop variable shadows the
                    # outer 'instance' parameter for the remainder of
                    # the function body — confirm intended.
                    for instance in rule["grantee_group"]["instances"]:
                        nw_info = nw_api.get_instance_nw_info(ctxt,
                                                              instance)
                        ips = [ip["address"]
                               for ip in nw_info.fixed_ips()
                               if ip["version"] == version]
                        LOG.debug("ips: %r", ips, instance=instance)
                        for ip in ips:
                            subrule = args + ["-s %s" % ip]
                            fw_rules += [" ".join(subrule)]
            LOG.debug("Using fw_rules: %r", fw_rules, instance=instance)
    # Anything not accepted above falls through to the shared
    # fallback chain.
    ipv4_rules += ["-j $sg-fallback"]
    ipv6_rules += ["-j $sg-fallback"]
    return ipv4_rules, ipv6_rules
def instance_rules(self, instance, network_info=None):
    """Build iptables argument strings for the instance's security
    groups.

    Returns a tuple (ipv4_rules, ipv6_rules).  When *network_info* is
    not supplied it is looked up from the instance.
    """
    if not network_info:
        network_info = netutils.get_network_info(instance)
    ctxt = context.get_admin_context()
    ipv4_rules = []
    ipv6_rules = []
    # Always drop invalid packets
    ipv4_rules += ['-m state --state '
                   'INVALID -j DROP']
    ipv6_rules += ['-m state --state '
                   'INVALID -j DROP']
    # Allow established connections
    ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
    ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
    # Pass through provider-wide drops
    ipv4_rules += ['-j $provider']
    ipv6_rules += ['-j $provider']
    # DHCP replies from each network's gateway are allowed in.
    dhcp_servers = [info['gateway'] for (_n, info) in network_info]
    for dhcp_server in dhcp_servers:
        ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 '
                          '-j ACCEPT' % (dhcp_server,))
    #Allow project network traffic
    if FLAGS.allow_project_net_traffic:
        cidrs = [network['cidr'] for (network, _m) in network_info]
        for cidr in cidrs:
            ipv4_rules.append('-s %s -j ACCEPT' % (cidr,))
    # We wrap these in FLAGS.use_ipv6 because they might cause
    # a DB lookup. The other ones are just list operations, so
    # they're not worth the clutter.
    if FLAGS.use_ipv6:
        # Allow RA responses
        gateways_v6 = [mapping['gateway6'] for (_n, mapping) in
                       network_info]
        for gateway_v6 in gateways_v6:
            ipv6_rules.append(
                '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,))
        #Allow project network traffic
        if FLAGS.allow_project_net_traffic:
            cidrv6s = [network['cidr_v6'] for (network, _m) in
                       network_info]
            for cidrv6 in cidrv6s:
                ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,))
    security_groups = db.security_group_get_by_instance(ctxt,
                                                        instance['id'])
    # then, security group chains and rules
    for security_group in security_groups:
        rules = db.security_group_rule_get_by_security_group(
            ctxt, security_group['id'])
        for rule in rules:
            LOG.debug(_('Adding security group rule: %r'), rule)
            if not rule.cidr:
                # Eventually, a mechanism to grant access for security
                # groups will turn up here. It'll use ipsets.
                continue
            version = netutils.get_ip_version(rule.cidr)
            if version == 4:
                fw_rules = ipv4_rules
            else:
                fw_rules = ipv6_rules
            protocol = rule.protocol
            if version == 6 and rule.protocol == 'icmp':
                protocol = 'icmpv6'
            args = ['-p', protocol, '-s', rule.cidr]
            if rule.protocol in ['udp', 'tcp']:
                if rule.from_port == rule.to_port:
                    args += ['--dport', '%s' % (rule.from_port,)]
                else:
                    args += ['-m', 'multiport', '--dports',
                             '%s:%s' % (rule.from_port, rule.to_port)]
            elif rule.protocol == 'icmp':
                # from_port/to_port double as the icmp type/code;
                # -1 means "any".
                icmp_type = rule.from_port
                icmp_code = rule.to_port
                if icmp_type == -1:
                    icmp_type_arg = None
                else:
                    icmp_type_arg = '%s' % icmp_type
                    if not icmp_code == -1:
                        icmp_type_arg += '/%s' % icmp_code
                if icmp_type_arg:
                    if version == 4:
                        args += ['-m', 'icmp', '--icmp-type',
                                 icmp_type_arg]
                    elif version == 6:
                        args += ['-m', 'icmp6', '--icmpv6-type',
                                 icmp_type_arg]
            args += ['-j ACCEPT']
            fw_rules += [' '.join(args)]
    # Anything not accepted above falls through to the fallback chain.
    ipv4_rules += ['-j $sg-fallback']
    ipv6_rules += ['-j $sg-fallback']
    return ipv4_rules, ipv6_rules
def instance_rules(self, instance, network_info):
    """Translate the instance's security-group rules into iptables
    argument strings.

    Returns a tuple (ipv4_rules, ipv6_rules).
    """
    # make sure this is legacy nw_info
    network_info = self._handle_network_info_model(network_info)
    ctxt = context.get_admin_context()
    ipv4_rules = []
    ipv6_rules = []
    # Initialize with basic rules
    self._do_basic_rules(ipv4_rules, ipv6_rules, network_info)
    # Set up rules to allow traffic to/from DHCP server
    self._do_dhcp_rules(ipv4_rules, network_info)
    #Allow project network traffic
    if FLAGS.allow_same_net_traffic:
        self._do_project_network_rules(ipv4_rules, ipv6_rules,
                                       network_info)
    # We wrap these in FLAGS.use_ipv6 because they might cause
    # a DB lookup. The other ones are just list operations, so
    # they're not worth the clutter.
    if FLAGS.use_ipv6:
        # Allow RA responses
        self._do_ra_rules(ipv6_rules, network_info)
    security_groups = db.security_group_get_by_instance(
        ctxt, instance['id'])
    # then, security group chains and rules
    for security_group in security_groups:
        rules = db.security_group_rule_get_by_security_group(
            ctxt, security_group['id'])
        for rule in rules:
            LOG.debug(_('Adding security group rule: %r'), rule,
                      instance=instance)
            # No CIDR means a group-to-group grant (see the else
            # branch); pick IPv4 chains by default in that case.
            if not rule.cidr:
                version = 4
            else:
                version = netutils.get_ip_version(rule.cidr)
            if version == 4:
                fw_rules = ipv4_rules
            else:
                fw_rules = ipv6_rules
            protocol = rule.protocol
            if protocol:
                protocol = rule.protocol.lower()
            if version == 6 and protocol == 'icmp':
                protocol = 'icmpv6'
            args = ['-j ACCEPT']
            if protocol:
                args += ['-p', protocol]
            if protocol in ['udp', 'tcp']:
                args += self._build_tcp_udp_rule(rule, version)
            elif protocol == 'icmp':
                args += self._build_icmp_rule(rule, version)
            if rule.cidr:
                LOG.debug('Using cidr %r', rule.cidr, instance=instance)
                args += ['-s', rule.cidr]
                fw_rules += [' '.join(args)]
            else:
                if rule['grantee_group']:
                    # FIXME(jkoelker) This needs to be ported up into
                    #                 the compute manager which already
                    #                 has access to a nw_api handle,
                    #                 and should be the only one making
                    #                 making rpc calls.
                    nw_api = network.API()
                    # NOTE(review): loop variable shadows the outer
                    # 'instance' parameter from here on — confirm
                    # intended.
                    for instance in rule['grantee_group']['instances']:
                        nw_info = nw_api.get_instance_nw_info(
                            ctxt, instance)
                        ips = [ip['address']
                               for ip in nw_info.fixed_ips()
                               if ip['version'] == version]
                        LOG.debug('ips: %r', ips, instance=instance)
                        for ip in ips:
                            subrule = args + ['-s %s' % ip]
                            fw_rules += [' '.join(subrule)]
            LOG.debug('Using fw_rules: %r', fw_rules, instance=instance)
    # Unmatched traffic falls through to the shared fallback chain.
    ipv4_rules += ['-j $sg-fallback']
    ipv6_rules += ['-j $sg-fallback']
    return ipv4_rules, ipv6_rules
def get_by_security_group_id(cls, context, secgroup_id):
    """Load every rule of the given security group, grantee group included.

    The DB query joins 'grantee_group' so that attribute arrives
    pre-populated on each SecurityGroupRule object.
    """
    db_rules = db.security_group_rule_get_by_security_group(
        context,
        secgroup_id,
        columns_to_join=['grantee_group'])
    return base.obj_make_list(
        context,
        cls(context),
        objects.SecurityGroupRule,
        db_rules,
        expected_attrs=['grantee_group'])
def security_group_rule_get_by_security_group(self, context, security_group):
    """Return the DB rules belonging to *security_group* (a dict with 'id')."""
    group_id = security_group['id']
    return db.security_group_rule_get_by_security_group(context, group_id)
def instance_rules(self, instance, network_info):
    """Compile the instance's security-group rules into iptables
    argument strings.

    Returns a tuple (ipv4_rules, ipv6_rules).
    """
    ctxt = context.get_admin_context()
    ipv4_rules = []
    ipv6_rules = []
    # Initialize with basic rules
    self._do_basic_rules(ipv4_rules, ipv6_rules, network_info)
    # Set up rules to allow traffic to/from DHCP server
    self._do_dhcp_rules(ipv4_rules, network_info)
    #Allow project network traffic
    if FLAGS.allow_same_net_traffic:
        self._do_project_network_rules(ipv4_rules, ipv6_rules,
                                       network_info)
    # We wrap these in FLAGS.use_ipv6 because they might cause
    # a DB lookup. The other ones are just list operations, so
    # they're not worth the clutter.
    if FLAGS.use_ipv6:
        # Allow RA responses
        self._do_ra_rules(ipv6_rules, network_info)
    security_groups = db.security_group_get_by_instance(ctxt,
                                                        instance['id'])
    # then, security group chains and rules
    for security_group in security_groups:
        rules = db.security_group_rule_get_by_security_group(
            ctxt, security_group['id'])
        for rule in rules:
            LOG.debug(_('Adding security group rule: %r'), rule)
            # Rules without a CIDR grant via grantee_group (else
            # branch below); default chain selection to IPv4.
            if not rule.cidr:
                version = 4
            else:
                version = netutils.get_ip_version(rule.cidr)
            if version == 4:
                fw_rules = ipv4_rules
            else:
                fw_rules = ipv6_rules
            protocol = rule.protocol
            if version == 6 and rule.protocol == 'icmp':
                protocol = 'icmpv6'
            args = ['-j ACCEPT']
            if protocol:
                args += ['-p', protocol]
            if protocol in ['udp', 'tcp']:
                args += self._build_tcp_udp_rule(rule, version)
            elif protocol == 'icmp':
                args += self._build_icmp_rule(rule, version)
            if rule.cidr:
                LOG.info('Using cidr %r', rule.cidr)
                args += ['-s', rule.cidr]
                fw_rules += [' '.join(args)]
            else:
                if rule['grantee_group']:
                    # FIXME(jkoelker) This needs to be ported up into
                    #                 the compute manager which already
                    #                 has access to a nw_api handle,
                    #                 and should be the only one making
                    #                 making rpc calls.
                    import nova.network
                    nw_api = nova.network.API()
                    # NOTE(review): loop variable shadows the outer
                    # 'instance' parameter for the rest of the body.
                    for instance in rule['grantee_group']['instances']:
                        LOG.info('instance: %r', instance)
                        ips = []
                        nw_info = nw_api.get_instance_nw_info(ctxt,
                                                              instance)
                        for net in nw_info:
                            ips.extend(net[1]['ips'])
                        LOG.info('ips: %r', ips)
                        for ip in ips:
                            subrule = args + ['-s %s' % ip['ip']]
                            fw_rules += [' '.join(subrule)]
            LOG.info('Using fw_rules: %r', fw_rules)
    # Unmatched traffic falls through to the shared fallback chain.
    ipv4_rules += ['-j $sg-fallback']
    ipv6_rules += ['-j $sg-fallback']
    return ipv4_rules, ipv6_rules
def instance_rules(self, instance, network_info=None):
    """Build iptables argument strings for the instance's security
    groups.

    Returns a tuple (ipv4_rules, ipv6_rules).  *network_info* is
    looked up from the instance when not given.
    """
    if not network_info:
        network_info = netutils.get_network_info(instance)
    ctxt = context.get_admin_context()
    ipv4_rules = []
    ipv6_rules = []
    # Always drop invalid packets
    ipv4_rules += ["-m state --state "
                   "INVALID -j DROP"]
    ipv6_rules += ["-m state --state "
                   "INVALID -j DROP"]
    # Allow established connections
    ipv4_rules += ["-m state --state ESTABLISHED,RELATED -j ACCEPT"]
    ipv6_rules += ["-m state --state ESTABLISHED,RELATED -j ACCEPT"]
    # Pass through provider-wide drops
    ipv4_rules += ["-j $provider"]
    ipv6_rules += ["-j $provider"]
    # DHCP replies from each network's gateway are allowed in.
    dhcp_servers = [info["gateway"] for (_n, info) in network_info]
    for dhcp_server in dhcp_servers:
        ipv4_rules.append("-s %s -p udp --sport 67 --dport 68 "
                          "-j ACCEPT" % (dhcp_server,))
    # Allow project network traffic
    if FLAGS.allow_project_net_traffic:
        cidrs = [network["cidr"] for (network, _m) in network_info]
        for cidr in cidrs:
            ipv4_rules.append("-s %s -j ACCEPT" % (cidr,))
    # We wrap these in FLAGS.use_ipv6 because they might cause
    # a DB lookup. The other ones are just list operations, so
    # they're not worth the clutter.
    if FLAGS.use_ipv6:
        # Allow RA responses
        gateways_v6 = [mapping["gateway6"] for (_n, mapping) in
                       network_info]
        for gateway_v6 in gateways_v6:
            ipv6_rules.append("-s %s/128 -p icmpv6 -j ACCEPT" %
                              (gateway_v6,))
        # Allow project network traffic
        if FLAGS.allow_project_net_traffic:
            cidrv6s = [network["cidr_v6"] for (network, _m) in
                       network_info]
            for cidrv6 in cidrv6s:
                ipv6_rules.append("-s %s -j ACCEPT" % (cidrv6,))
    security_groups = db.security_group_get_by_instance(ctxt,
                                                        instance["id"])
    # then, security group chains and rules
    for security_group in security_groups:
        rules = db.security_group_rule_get_by_security_group(
            ctxt, security_group["id"])
        for rule in rules:
            LOG.debug(_("Adding security group rule: %r"), rule)
            if not rule.cidr:
                # Eventually, a mechanism to grant access for security
                # groups will turn up here. It'll use ipsets.
                continue
            version = netutils.get_ip_version(rule.cidr)
            if version == 4:
                fw_rules = ipv4_rules
            else:
                fw_rules = ipv6_rules
            protocol = rule.protocol
            if version == 6 and rule.protocol == "icmp":
                protocol = "icmpv6"
            args = ["-p", protocol, "-s", rule.cidr]
            if rule.protocol in ["udp", "tcp"]:
                if rule.from_port == rule.to_port:
                    args += ["--dport", "%s" % (rule.from_port,)]
                else:
                    args += ["-m", "multiport", "--dports",
                             "%s:%s" % (rule.from_port, rule.to_port)]
            elif rule.protocol == "icmp":
                # from_port/to_port double as the icmp type/code;
                # -1 means "any".
                icmp_type = rule.from_port
                icmp_code = rule.to_port
                if icmp_type == -1:
                    icmp_type_arg = None
                else:
                    icmp_type_arg = "%s" % icmp_type
                    if not icmp_code == -1:
                        icmp_type_arg += "/%s" % icmp_code
                if icmp_type_arg:
                    if version == 4:
                        args += ["-m", "icmp", "--icmp-type",
                                 icmp_type_arg]
                    elif version == 6:
                        args += ["-m", "icmp6", "--icmpv6-type",
                                 icmp_type_arg]
            args += ["-j ACCEPT"]
            fw_rules += [" ".join(args)]
    # Unmatched traffic falls through to the fallback chain.
    ipv4_rules += ["-j $sg-fallback"]
    ipv6_rules += ["-j $sg-fallback"]
    return ipv4_rules, ipv6_rules
def security_group_rule_get_by_security_group(self, context, security_group_id):
    """Fetch all rules attached to the security group with the given id."""
    # Straight pass-through to the DB API; no transformation is applied.
    return db.security_group_rule_get_by_security_group(
        context, security_group_id)
def create_for_vif(self, tenant_id, instance, network, vif_chains,
                   allow_same_net_traffic):
    """Populate the in/out MidoNet chains for one VIF and wire them to
    the bridge port.

    Ingress gets anti-spoofing plus conntrack rules; egress gets one
    jump rule per Nova security group, a return-flow accept and a
    final drop (ARP excepted).  The VIF's port is then added to the
    matching port groups.
    """
    LOG.debug('tenant_id=%r, instance=%r, network=%r, vif_chains=%r',
              tenant_id, instance['id'], network, vif_chains)
    bridge_uuid = network[0]['id']
    net_cidr = network[0]['cidr']
    vif_uuid = network[1]['vif_uuid']
    mac = network[1]['mac']
    ip = network[1]['ips'][0]['ip']
    #
    # ingress
    #
    position = 1
    in_chain = vif_chains['in']
    out_chain = vif_chains['out']
    # mac spoofing protection
    in_chain.add_rule().type('drop')\
        .dl_src(mac)\
        .inv_dl_src(True)\
        .position(position)\
        .create()
    position += 1
    # ip spoofing protection
    in_chain.add_rule().type('drop')\
        .nw_src_address(ip)\
        .nw_src_length(32)\
        .inv_nw_src(True)\
        .dl_type(0x0800)\
        .position(position)\
        .create()
    position += 1
    # conntrack
    in_chain.add_rule().type('accept')\
        .match_forward_flow(True)\
        .position(position)\
        .create()
    position += 1
    #
    # egress
    #
    ctxt = context.get_admin_context()
    # virtapi (when present) takes the instance object; the direct DB
    # path takes the bare id.
    if self.virtapi:
        security_groups = self.virtapi.security_group_get_by_instance(
            ctxt, instance)
    else:
        security_groups = db.security_group_get_by_instance(
            ctxt, instance['id'])
    position = 1
    # get the port groups to match for the rule
    port_groups = self.mido_api.get_port_groups({'tenant_id': tenant_id})
    if allow_same_net_traffic:
        LOG.debug('accept cidr=%r', net_cidr)
        nw_src_address, nw_src_length = net_cidr.split('/')
        out_chain.add_rule().type('accept')\
            .nw_src_address(nw_src_address)\
            .nw_src_length(nw_src_length)\
            .position(position)\
            .create()
        position += 1
    # add rules that correspond to Nova SG
    for sg in security_groups:
        LOG.debug('security group=%r', sg['name'])
        if self.virtapi:
            rules = self.virtapi.security_group_rule_get_by_security_group(
                ctxt, sg)
        else:
            rules = db.security_group_rule_get_by_security_group(
                ctxt, sg['id'])
        LOG.debug('sg_id=%r', sg['id'])
        LOG.debug('sg_project_id=%r', sg['project_id'])
        LOG.debug('name=%r', sg['name'])
        LOG.debug('rules=%r', rules)
        cname = chain_name(sg['id'], sg['name'])
        chains = self.mido_api.get_chains({'tenant_id': tenant_id})
        jump_chain_id = None
        for c in chains:
            if c.get_name() == cname:
                jump_chain_id = c.get_id()
                break
        # sg handler must have missed the event of creating the SG.
        # Now doing the equivalent as a quick workaround.
        if not jump_chain_id:
            def create_sg_resources(tenant_id, sg_id, sg_name):
                self.chain_manager.create_for_sg(tenant_id, sg_id,
                                                 sg_name)
                self.pg_manager.create(tenant_id, sg_id, sg_name)
            create_sg_resources(tenant_id, sg['id'], sg['name'])
            chains = self.mido_api.get_chains({'tenant_id': tenant_id})
            jump_chain_id = None
            for c in chains:
                if c.get_name() == cname:
                    jump_chain_id = c.get_id()
                    break
        assert jump_chain_id
        # NOTE(review): 'rule' is assigned but never used afterwards.
        rule = out_chain.add_rule().type('jump')\
            .position(position)\
            .jump_chain_id(jump_chain_id)\
            .jump_chain_name(cname)\
            .create()
        position += 1
        # Look for the port group that the vif should belong to
        # NOTE(review): removing items from 'port_groups' while
        # iterating it can skip elements — confirm this filtering is
        # behaving as intended.
        for pg in port_groups:
            if pg.get_name() != cname:
                port_groups.remove(pg)
    # add reverse flow matching at the end
    out_chain.add_rule().type('accept')\
        .match_return_flow(True)\
        .position(position)\
        .create()
    position += 1
    # fall back DROP rule at the end except for ARP
    out_chain.add_rule().type('drop')\
        .dl_type(0x0806)\
        .inv_dl_type(True)\
        .position(position)\
        .create()
    #
    # Updating the vport
    #
    bridge = self.mido_api.get_bridge(bridge_uuid)
    bridge_port = self.mido_api.get_port(vif_uuid)
    LOG.debug('bridge_port=%r found', bridge_port)
    # set filters
    bridge_port.inbound_filter_id(in_chain.get_id())
    bridge_port.outbound_filter_id(out_chain.get_id())
    bridge_port.update()
    for pg in port_groups:
        pg.add_port_group_port().port_id(bridge_port.get_id()).create()
def create_for_vif(self, tenant_id, instance, network, vif_chains,
                   allow_same_net_traffic):
    """Fill the VIF's ingress/egress MidoNet chains and attach them to
    its bridge port.

    Ingress: MAC/IP anti-spoofing drops plus a forward-flow accept.
    Egress: optional same-network accept, one jump per Nova security
    group, a return-flow accept and a final non-ARP drop.
    """
    LOG.debug('tenant_id=%r, instance=%r, network=%r, vif_chains=%r',
              tenant_id, instance['id'], network, vif_chains)
    bridge_uuid = network[0]['id']
    net_cidr = network[0]['cidr']
    vif_uuid = network[1]['vif_uuid']
    mac = network[1]['mac']
    ip = network[1]['ips'][0]['ip']
    #
    # ingress
    #
    position = 1
    in_chain = vif_chains['in']
    out_chain = vif_chains['out']
    # mac spoofing protection
    in_chain.add_rule().type('drop')\
        .dl_src(mac)\
        .inv_dl_src(True)\
        .position(position)\
        .create()
    position += 1
    # ip spoofing protection
    in_chain.add_rule().type('drop')\
        .nw_src_address(ip)\
        .nw_src_length(32)\
        .inv_nw_src(True)\
        .dl_type(0x0800)\
        .position(position)\
        .create()
    position += 1
    # conntrack
    in_chain.add_rule().type('accept')\
        .match_forward_flow(True)\
        .position(position)\
        .create()
    position += 1
    #
    # egress
    #
    ctxt = context.get_admin_context()
    # The virtapi path takes the instance object; the direct DB path
    # takes the bare id.
    if self.virtapi:
        security_groups = self.virtapi.security_group_get_by_instance(
            ctxt, instance)
    else:
        security_groups = db.security_group_get_by_instance(ctxt,
                                                            instance['id'])
    position = 1
    # get the port groups to match for the rule
    port_groups = self.mido_api.get_port_groups({'tenant_id': tenant_id})
    if allow_same_net_traffic:
        LOG.debug('accept cidr=%r', net_cidr)
        nw_src_address, nw_src_length = net_cidr.split('/')
        out_chain.add_rule().type('accept')\
            .nw_src_address(nw_src_address)\
            .nw_src_length(nw_src_length)\
            .position(position)\
            .create()
        position += 1
    # add rules that correspond to Nova SG
    for sg in security_groups:
        LOG.debug('security group=%r', sg['name'])
        if self.virtapi:
            rules = self.virtapi.security_group_rule_get_by_security_group(
                ctxt, sg)
        else:
            rules = db.security_group_rule_get_by_security_group(
                ctxt, sg['id'])
        LOG.debug('sg_id=%r', sg['id'])
        LOG.debug('sg_project_id=%r', sg['project_id'])
        LOG.debug('name=%r', sg['name'])
        LOG.debug('rules=%r', rules)
        cname = chain_name(sg['id'], sg['name'])
        chains = self.mido_api.get_chains({'tenant_id': tenant_id})
        jump_chain_id = None
        for c in chains:
            if c.get_name() == cname:
                jump_chain_id = c.get_id()
                break
        # sg handler must have missed the event of creating the SG.
        # Now doing the equivalent as a quick workaround.
        if not jump_chain_id:
            def create_sg_resources(tenant_id, sg_id, sg_name):
                self.chain_manager.create_for_sg(tenant_id, sg_id,
                                                 sg_name)
                self.pg_manager.create(tenant_id, sg_id, sg_name)
            create_sg_resources(tenant_id, sg['id'], sg['name'])
            chains = self.mido_api.get_chains({'tenant_id': tenant_id})
            jump_chain_id = None
            for c in chains:
                if c.get_name() == cname:
                    jump_chain_id = c.get_id()
                    break
        assert jump_chain_id
        # NOTE(review): the 'rule' binding below is never read again.
        rule = out_chain.add_rule().type('jump')\
            .position(position)\
            .jump_chain_id(jump_chain_id)\
            .jump_chain_name(cname)\
            .create()
        position += 1
        # Look for the port group that the vif should belong to
        # NOTE(review): mutating 'port_groups' while iterating it can
        # skip elements — verify the filtering behaves as intended.
        for pg in port_groups:
            if pg.get_name() != cname:
                port_groups.remove(pg)
    # add reverse flow matching at the end
    out_chain.add_rule().type('accept')\
        .match_return_flow(True)\
        .position(position)\
        .create()
    position += 1
    # fall back DROP rule at the end except for ARP
    out_chain.add_rule().type('drop')\
        .dl_type(0x0806)\
        .inv_dl_type(True)\
        .position(position)\
        .create()
    #
    # Updating the vport
    #
    bridge = self.mido_api.get_bridge(bridge_uuid)
    bridge_port = self.mido_api.get_port(vif_uuid)
    LOG.debug('bridge_port=%r found', bridge_port)
    # set filters
    bridge_port.inbound_filter_id(in_chain.get_id())
    bridge_port.outbound_filter_id(out_chain.get_id())
    bridge_port.update()
    for pg in port_groups:
        pg.add_port_group_port().port_id(bridge_port.get_id()).create()
def instance_rules(self, instance, network_info):
    """Compile the instance's security-group rules into iptables
    argument strings, including optional intranet-firewall and
    IP-encap rules.

    Returns a tuple (ipv4_rules, ipv6_rules).
    """
    # make sure this is legacy nw_info
    network_info = self._handle_network_info_model(network_info)
    ctxt = context.get_admin_context()
    ipv4_rules = []
    ipv6_rules = []
    # Initialize with basic rules
    self._do_basic_rules(ipv4_rules, ipv6_rules, network_info)
    # Set up rules to allow traffic to/from DHCP server
    self._do_dhcp_rules(ipv4_rules, network_info)
    # NOTE: (stanzgy) insert intranet firewall rule here
    #
    # iptables -I nova-compute-inst-{inst-id} 4
    #     -m set --match-set {inst-set-name} src -j ACCEPT
    #
    # then drop all other packets not been NATed
    #
    # iptables -I nova-compute-inst-[inst-id] 5 -d fixed_range
    #     -m conntrack ! --ctstate DNAT -j DROP
    if FLAGS.intranet_firewall_mode == 'tenant':
        # Project id truncated to 31 chars — presumably to fit the
        # ipset name-length limit; TODO confirm.
        setname = instance['project_id'][0:31]
        self._do_intranet_firewall_rules(ipv4_rules, ipv6_rules,
                                         setname)
    #Allow project network traffic
    if FLAGS.allow_same_net_traffic:
        self._do_project_network_rules(ipv4_rules, ipv6_rules,
                                       network_info)
    # We wrap these in FLAGS.use_ipv6 because they might cause
    # a DB lookup. The other ones are just list operations, so
    # they're not worth the clutter.
    if FLAGS.use_ipv6:
        # Allow RA responses
        self._do_ra_rules(ipv6_rules, network_info)
    # Allow IP encapsulated in IP packets?
    if FLAGS.allow_ipencap:
        self._do_ipencap_rules(ipv4_rules, ipv6_rules)
    security_groups = db.security_group_get_by_instance(ctxt,
                                                        instance['id'])
    # then, security group chains and rules
    for security_group in security_groups:
        rules = db.security_group_rule_get_by_security_group(
            ctxt, security_group['id'])
        for rule in rules:
            LOG.debug(_('Adding security group rule: %r'), rule,
                      instance=instance)
            # No CIDR means a group-to-group grant (else branch);
            # default chain selection to IPv4.
            if not rule.cidr:
                version = 4
            else:
                version = netutils.get_ip_version(rule.cidr)
            if version == 4:
                fw_rules = ipv4_rules
            else:
                fw_rules = ipv6_rules
            protocol = rule.protocol
            if protocol:
                protocol = rule.protocol.lower()
            if version == 6 and protocol == 'icmp':
                protocol = 'icmpv6'
            args = ['-j ACCEPT']
            if protocol:
                args += ['-p', protocol]
            if protocol in ['udp', 'tcp']:
                args += self._build_tcp_udp_rule(rule, version)
            elif protocol == 'icmp':
                args += self._build_icmp_rule(rule, version)
            if rule.cidr:
                LOG.debug('Using cidr %r', rule.cidr, instance=instance)
                args += ['-s', rule.cidr]
                fw_rules += [' '.join(args)]
            else:
                if rule['grantee_group']:
                    # FIXME(jkoelker) This needs to be ported up into
                    #                 the compute manager which already
                    #                 has access to a nw_api handle,
                    #                 and should be the only one making
                    #                 making rpc calls.
                    import nova.network
                    nw_api = nova.network.API()
                    # NOTE(review): loop variable shadows the outer
                    # 'instance' parameter from here on.
                    for instance in rule['grantee_group']['instances']:
                        nw_info = nw_api.get_instance_nw_info(ctxt,
                                                              instance)
                        ips = [ip['address']
                               for ip in nw_info.fixed_ips()
                               if ip['version'] == version]
                        LOG.debug('ips: %r', ips, instance=instance)
                        for ip in ips:
                            subrule = args + ['-s %s' % ip]
                            fw_rules += [' '.join(subrule)]
            LOG.debug('Using fw_rules: %r', fw_rules, instance=instance)
    # Unmatched traffic falls through to the shared fallback chain.
    ipv4_rules += ['-j $sg-fallback']
    ipv6_rules += ['-j $sg-fallback']
    return ipv4_rules, ipv6_rules
def _fullbuild(conn):
    """Rebuild every tenant/network filter set from scratch.

    Walks all physical hosts, accumulates DHCP, external-to-instance
    and instance-to-instance filter bodies per (tenant, network), then
    replaces the existing filters on *conn* wholesale.
    """
    # tenant_id -> network_id -> accumulated filter bodies
    tenants_networks_filters = {}

    def _extend(tenant_id, network_id, filter_bodys):
        # Append bodies under the (tenant, network) bucket, creating
        # intermediate dicts/lists on first use.
        if not tenants_networks_filters.has_key(tenant_id):
            tenants_networks_filters[tenant_id] = {}
        if not tenants_networks_filters[tenant_id].has_key(network_id):
            tenants_networks_filters[tenant_id][network_id] = []
        tenants_networks_filters[tenant_id][network_id].extend(
            filter_bodys)

    ctxt = context.get_admin_context()
    hosts = db.phy_host_get_all(ctxt)
    for t in hosts:
        LOG.debug('to.id=%s', t.id)
        LOG.debug('to=%s', t.__dict__)
        if not t.instance_id:
            # Host has no instance assigned; nothing to filter.
            continue
        ti = db.instance_get(ctxt, t.instance_id)
        LOG.debug('to.instance=%s', ti.__dict__)
        # DHCP from the instance
        for (in_port, network_uuid, mac, _) in _from_phy_host(
                ti.id, ti.project_id):
            filter_bodys = []
            filter_bodys.extend(_build_allow_dhcp_client(in_port, mac))
            filter_bodys.extend(_build_deny_dhcp_server(in_port))
            _extend(ti.project_id, network_uuid, filter_bodys)
        # from external host to the instance
        LOG.debug('from=* to.id=%s', t.id)
        for (_, network_uuid, _, t_ips) in _from_phy_host(
                ti.id, ti.project_id):
            filter_bodys = []
            for t_ip in t_ips:
                # Security-group accepts first, then a default drop
                # for this destination IP.
                for sg in db.security_group_get_by_instance(ctxt,
                                                            ti.id):
                    rules = db.security_group_rule_get_by_security_group(
                        ctxt, sg.id)
                    for rule in rules:
                        rule_f = _build_security_group_rule_filter(
                            t_ip + "/32", rule,
                            EXTERNAL_SECURITY_GROUP_PRIORITY)
                        filter_bodys.extend(rule_f)
                rule_f = _build_default_drop_filter(t_ip + "/32")
                filter_bodys.extend(rule_f)
            _extend(ti.project_id, network_uuid, filter_bodys)
        # from other instances to the instance
        for f in hosts:
            LOG.debug('from.id=%s to.id=%s', f.id, t.id)
            if f.id == t.id:
                continue
            if not f.instance_id:
                continue
            fi = db.instance_get(ctxt, f.instance_id)
            LOG.debug('from.instance=%s', fi.__dict__)
            for (in_port, network_uuid, mac, f_ips) in _from_phy_host(
                    fi.id, fi.project_id):
                filter_bodys = []
                for (_, _, _, t_ips) in _from_phy_host(ti.id,
                                                       ti.project_id):
                    for f_ip in f_ips:
                        for t_ip in t_ips:
                            for sg in db.security_group_get_by_instance(
                                    ctxt, ti.id):
                                rules = db.security_group_rule_get_by_security_group(
                                    ctxt, sg.id)
                                for rule in rules:
                                    # Skip rules whose source CIDR
                                    # does not cover the sender.
                                    if rule.cidr and not _in_cidr(
                                            f_ip, rule.cidr):
                                        continue
                                    rule_f = _build_full_security_group_rule_filter(
                                        in_port, mac, f_ip + "/32",
                                        t_ip + "/32", rule)
                                    filter_bodys.extend(rule_f)
                            rule_f = _build_full_default_drop_filter(
                                in_port, mac, f_ip + "/32",
                                t_ip + "/32")
                            filter_bodys.extend(rule_f)
                _extend(fi.project_id, network_uuid, filter_bodys)
    LOG.debug('begin update filters')
    # Replace existing filters per (tenant, network): delete all old
    # ids, then create the freshly accumulated bodies.
    for (tenant_id, nf) in tenants_networks_filters.iteritems():
        for (network_id, filter_bodys) in nf.iteritems():
            old_fids = _list_filters(conn, tenant_id, network_id)
            LOG.debug("delete filters tenant_id=%s network_id=%s ids=\n%s",
                      tenant_id, network_id, _pp(old_fids))
            _delete_filters(conn, tenant_id, network_id, old_fids)
            LOG.debug("create filters tenant_id=%s network_id=%s bodys=\n%s",
                      tenant_id, network_id, _pp(filter_bodys))
            _create_filters(conn, tenant_id, network_id, filter_bodys)
    LOG.debug('end update filters')
def instance_rules(self, instance, network_info):
    """Build the iptables rule lists for an instance.

    :param instance: instance dict/model; only ``instance['id']`` is
        read here, to look up its security groups.
    :param network_info: list of (network, mapping) pairs; the
        mapping supplies 'dhcp_server'/'gateway6' and the network
        supplies 'cidr'/'cidr_v6'.
    :returns: (ipv4_rules, ipv6_rules) — two lists of iptables rule
        argument strings.
    """
    ctxt = context.get_admin_context()

    ipv4_rules = []
    ipv6_rules = []

    # Always drop invalid packets
    ipv4_rules += ['-m state --state ' 'INVALID -j DROP']
    ipv6_rules += ['-m state --state ' 'INVALID -j DROP']

    # Allow established connections
    ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
    ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']

    # Pass through provider-wide drops
    ipv4_rules += ['-j $provider']
    ipv6_rules += ['-j $provider']

    dhcp_servers = [info['dhcp_server'] for (_n, info) in network_info]

    for dhcp_server in dhcp_servers:
        ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 '
                          '-j ACCEPT' % (dhcp_server,))

    # Allow project network traffic
    if FLAGS.allow_same_net_traffic:
        cidrs = [network['cidr'] for (network, _m) in network_info]
        for cidr in cidrs:
            ipv4_rules.append('-s %s -j ACCEPT' % (cidr,))

    # We wrap these in FLAGS.use_ipv6 because they might cause
    # a DB lookup. The other ones are just list operations, so
    # they're not worth the clutter.
    if FLAGS.use_ipv6:
        # Allow RA responses
        gateways_v6 = [mapping['gateway6'] for (_n, mapping)
                       in network_info]
        for gateway_v6 in gateways_v6:
            ipv6_rules.append('-s %s/128 -p icmpv6 -j ACCEPT'
                              % (gateway_v6,))

        # Allow project network traffic
        if FLAGS.allow_same_net_traffic:
            cidrv6s = [network['cidr_v6'] for (network, _m)
                       in network_info]
            for cidrv6 in cidrv6s:
                ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,))

    security_groups = db.security_group_get_by_instance(
        ctxt, instance['id'])

    # then, security group chains and rules
    for security_group in security_groups:
        rules = db.security_group_rule_get_by_security_group(
            ctxt, security_group['id'])

        for rule in rules:
            LOG.debug(_('Adding security group rule: %r'), rule)

            # A rule with no CIDR is a grantee-group rule; fixed
            # addresses looked up below are IPv4, so default to v4.
            if not rule.cidr:
                version = 4
            else:
                version = netutils.get_ip_version(rule.cidr)

            if version == 4:
                fw_rules = ipv4_rules
            else:
                fw_rules = ipv6_rules

            protocol = rule.protocol
            if version == 6 and rule.protocol == 'icmp':
                protocol = 'icmpv6'

            args = ['-j ACCEPT']
            if protocol:
                args += ['-p', protocol]

            if protocol in ['udp', 'tcp']:
                if rule.from_port == rule.to_port:
                    args += ['--dport', '%s' % (rule.from_port,)]
                else:
                    args += ['-m', 'multiport', '--dports',
                             '%s:%s' % (rule.from_port, rule.to_port)]
            elif protocol == 'icmp':
                icmp_type = rule.from_port
                icmp_code = rule.to_port

                # -1 means "any type"; a concrete type may carry an
                # optional "/code" suffix.
                if icmp_type == -1:
                    icmp_type_arg = None
                else:
                    icmp_type_arg = '%s' % icmp_type
                    if icmp_code != -1:
                        icmp_type_arg += '/%s' % icmp_code

                if icmp_type_arg:
                    if version == 4:
                        args += ['-m', 'icmp', '--icmp-type',
                                 icmp_type_arg]
                    elif version == 6:
                        args += ['-m', 'icmp6', '--icmpv6-type',
                                 icmp_type_arg]

            if rule.cidr:
                LOG.info('Using cidr %r', rule.cidr)
                args += ['-s', rule.cidr]
                fw_rules += [' '.join(args)]
            else:
                if rule['grantee_group']:
                    # NOTE: use `inst` (not `instance`) so the loop
                    # does not shadow this method's parameter.
                    for inst in rule['grantee_group']['instances']:
                        LOG.info('instance: %r', inst)
                        ips = db.instance_get_fixed_addresses(
                            ctxt, inst['id'])
                        LOG.info('ips: %r', ips)
                        for ip in ips:
                            subrule = args + ['-s %s' % ip]
                            fw_rules += [' '.join(subrule)]

            LOG.info('Using fw_rules: %r', fw_rules)

    # Anything not matched above falls through to the fallback chain.
    ipv4_rules += ['-j $sg-fallback']
    ipv6_rules += ['-j $sg-fallback']

    return ipv4_rules, ipv6_rules
def _fullbuild(conn):
    # Rebuild every tenant/network filter set from scratch: walk all
    # bare-metal nodes, accumulate filter bodies per (tenant, network),
    # then delete the old filters and create the new ones on `conn`.
    LOG.debug('_fullbuild begin')
    # Accumulator: {tenant_id: {network_id: [filter_body, ...]}}
    tenants_networks_filters = {}

    def _extend(tenant_id, network_id, filter_bodys):
        # Append filter bodies under (tenant_id, network_id), creating
        # the nested dict/list entries on first use.
        if tenant_id not in tenants_networks_filters:
            tenants_networks_filters[tenant_id] = {}
        if network_id not in tenants_networks_filters[tenant_id]:
            tenants_networks_filters[tenant_id][network_id] = []
        tenants_networks_filters[tenant_id][network_id].extend(filter_bodys)

    ctxt = context.get_admin_context()
    hosts = bmdb.bm_node_get_all(ctxt)
    for t in hosts:
        # Nodes without an instance need no filters.
        if not t.instance_id:
            continue
        LOG.debug('to id=%s instance_id=%s', t.id, t.instance_id)
        ti = db.instance_get(ctxt, t.instance_id)

        # DHCP from the instance: allow it to act as a DHCP client,
        # deny it from acting as a DHCP server.
        for (in_port, network_uuid, mac, _) \
                in _from_bm_node(ti.id, ti.project_id):
            filter_bodys = []
            filter_bodys.extend(_build_allow_dhcp_client(in_port, mac))
            filter_bodys.extend(_build_deny_dhcp_server(in_port))
            LOG.debug("filter_bodys: %s", filter_bodys)
            _extend(ti.project_id, network_uuid, filter_bodys)

        # from external host to the instance: per instance IP, apply
        # each security group rule, then a default drop for that IP.
        LOG.debug('from=* to.id=%s', t.id)
        for (_, network_uuid, _, t_ips) \
                in _from_bm_node(ti.id, ti.project_id):
            filter_bodys = []
            for t_ip in t_ips:
                for sg in db.security_group_get_by_instance(ctxt, ti.id):
                    rules = db.security_group_rule_get_by_security_group(
                        ctxt, sg.id)
                    for rule in rules:
                        rule_f = _build_sg_rule_filter(
                            t_ip + "/32", rule,
                            EXTERNAL_SECURITY_GROUP_PRIORITY)
                        filter_bodys.extend(rule_f)
                rule_f = _build_default_drop_filter(t_ip + "/32")
                filter_bodys.extend(rule_f)
            LOG.debug("filter_bodys: %s", filter_bodys)
            _extend(ti.project_id, network_uuid, filter_bodys)

        # Just to make lines short...
        _sg_rules = db.security_group_rule_get_by_security_group
        _build = _build_full_sg_rule_filter

        # from other instances to the instance: pairwise
        # (source IP, dest IP) rules for every other node's instance,
        # filtered by rule CIDR, plus a pairwise default drop.
        for f in hosts:
            LOG.debug('from.id=%s to.id=%s', f.id, t.id)
            if f.id == t.id:
                continue
            if not f.instance_id:
                continue
            fi = db.instance_get(ctxt, f.instance_id)
            LOG.debug('from id=%s instance_id=%s', f.id, f.instance_id)
            for (in_port, network_uuid, mac, f_ips) \
                    in _from_bm_node(fi.id, fi.project_id):
                filter_bodys = []
                for (_, _, _, t_ips) \
                        in _from_bm_node(ti.id, ti.project_id):
                    for f_ip in f_ips:
                        for t_ip in t_ips:
                            for sg in db.security_group_get_by_instance(
                                    ctxt, ti.id):
                                rules = _sg_rules(ctxt, sg.id)
                                for rule in rules:
                                    # Skip rules whose CIDR does not
                                    # cover the source IP.
                                    if rule.cidr and not _in_cidr(
                                            f_ip, rule.cidr):
                                        continue
                                    rule_f = _build(in_port, mac,
                                                    f_ip + "/32",
                                                    t_ip + "/32", rule)
                                    filter_bodys.extend(rule_f)
                            rule_f = _build_full_default_drop_filter(
                                in_port, mac, f_ip + "/32", t_ip + "/32")
                            filter_bodys.extend(rule_f)
                LOG.debug("filter_bodys: %s", filter_bodys)
                _extend(fi.project_id, network_uuid, filter_bodys)

    # Swap in the new filters: delete everything currently registered
    # for each (tenant, network), then create the freshly built set.
    LOG.debug('begin update filters')
    for (tenant_id, nf) in tenants_networks_filters.iteritems():
        for (network_id, filter_bodys) in nf.iteritems():
            old_fids = _list_filters(conn, tenant_id, network_id)
            LOG.debug("delete filters tenant_id=%s network_id=%s ids=\n%s",
                      tenant_id, network_id, _pp(old_fids))
            _delete_filters(conn, tenant_id, network_id, old_fids)
            LOG.debug("create filters tenant_id=%s network_id=%s bodys=\n%s",
                      tenant_id, network_id, _pp(filter_bodys))
            _create_filters(conn, tenant_id, network_id, filter_bodys)
    LOG.debug('end update filters')
    LOG.debug('_fullbuild end')