Example 1
 def test_too_many_networks(self):
     """Ensure error is raised if we run out of networks"""
     projects = []
     networks_left = (FLAGS.num_networks -
                      db.network_count(context.get_admin_context()))
     for i in range(networks_left):
         project = self.manager.create_project('many%s' % i, self.user)
         projects.append(project)
         db.project_get_network(context.get_admin_context(), project.id)
     project = self.manager.create_project('last', self.user)
     projects.append(project)
     self.assertRaises(db.NoMoreNetworks, db.project_get_network,
                       context.get_admin_context(), project.id)
     for project in projects:
         self.manager.delete_project(project)
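The pattern Examples 1 and 6 exercise reduces to a single lookup: db.project_get_network either returns the network associated with a project or raises db.NoMoreNetworks once the pool sized by FLAGS.num_networks is used up. Below is a minimal sketch of that lookup, assuming the nova.context and nova.db modules these tests use; the helper name and the None fallback are illustrative, not taken from this page.

    from nova import context
    from nova import db


    def project_network_or_none(project_id):
        """Return the project's network record, or None if the network pool
        is exhausted. Illustrative helper; the fallback is an assumption."""
        admin_context = context.get_admin_context()
        try:
            return db.project_get_network(admin_context, project_id)
        except db.NoMoreNetworks:
            # No free network is left to associate with the project.
            return None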
Example 2
    def test_too_many_addresses(self):
        """Test for a NoMoreAddresses exception when all fixed ips are used.
        """
        admin_context = context.get_admin_context()
        network = db.project_get_network(admin_context, self.projects[0].id)
        num_available_ips = db.network_count_available_ips(admin_context,
                                                           network['id'])
        addresses = []
        instance_ids = []
        for i in range(num_available_ips):
            instance_ref = self._create_instance(0)
            instance_ids.append(instance_ref['id'])
            address = self._create_address(0, instance_ref['id'])
            addresses.append(address)
            lease_ip(address)

        ip_count = db.network_count_available_ips(context.get_admin_context(),
                                                  network['id'])
        self.assertEqual(ip_count, 0)
        self.assertRaises(db.NoMoreAddresses,
                          self.network.allocate_fixed_ip,
                          self.context,
                          'foo')

        for i in range(num_available_ips):
            self.network.deallocate_fixed_ip(self.context, addresses[i])
            release_ip(addresses[i])
            db.instance_destroy(context.get_admin_context(), instance_ids[i])
        ip_count = db.network_count_available_ips(context.get_admin_context(),
                                                  network['id'])
        self.assertEqual(ip_count, num_available_ips)
Example 3
    def test_spawn_with_network_info(self):
        # Skip if non-libvirt environment
        if not self.lazy_load_library_exists():
            return

        # Preparing mocks
        def fake_none(self, instance):
            return

        self.create_fake_libvirt_mock()
        instance = db.instance_create(self.context, self.test_instance)

        # Start test
        self.mox.ReplayAll()
        conn = libvirt_conn.LibvirtConnection(False)
        conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
        conn.firewall_driver.setattr('prepare_instance_filter', fake_none)

        network = db.project_get_network(context.get_admin_context(),
                                         self.project.id)
        ip_dict = {'ip': self.test_ip,
                   'netmask': network['netmask'],
                   'enabled': '1'}
        mapping = {'label': network['label'],
                   'gateway': network['gateway'],
                   'mac': instance['mac_address'],
                   'dns': [network['dns']],
                   'ips': [ip_dict]}
        network_info = [(network, mapping)]

        try:
            conn.spawn(instance, network_info)
        except Exception, e:
            count = (0 <= str(e.message).find('Unexpected method call'))
Example 4
    def test_too_many_addresses(self):
        """Test for a NoMoreAddresses exception when all fixed ips are used.
        """
        admin_context = context.get_admin_context()
        network = db.project_get_network(admin_context, self.projects[0].id)
        num_available_ips = db.network_count_available_ips(
            admin_context, network['id'])
        addresses = []
        instance_ids = []
        for i in range(num_available_ips):
            instance_ref = self._create_instance(0)
            instance_ids.append(instance_ref['id'])
            address = self._create_address(0, instance_ref['id'])
            addresses.append(address)
            lease_ip(address)

        ip_count = db.network_count_available_ips(context.get_admin_context(),
                                                  network['id'])
        self.assertEqual(ip_count, 0)
        self.assertRaises(db.NoMoreAddresses, self.network.allocate_fixed_ip,
                          self.context, 'foo')

        for i in range(num_available_ips):
            self.network.deallocate_fixed_ip(self.context, addresses[i])
            release_ip(addresses[i])
            db.instance_destroy(context.get_admin_context(), instance_ids[i])
        ip_count = db.network_count_available_ips(context.get_admin_context(),
                                                  network['id'])
        self.assertEqual(ip_count, num_available_ips)
Example 5
def is_allocated_in_project(address, project_id):
    """Returns true if address is in specified project"""
    project_net = db.project_get_network(context.get_admin_context(),
                                         project_id)
    network = db.fixed_ip_get_network(context.get_admin_context(), address)
    instance = db.fixed_ip_get_instance(context.get_admin_context(), address)
    # instance exists until release
    return instance is not None and network['id'] == project_net['id']
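Example 5's helper chains three admin-context lookups: the project's network, the network that owns the fixed IP, and the instance currently holding that IP. The address only counts as allocated in the project while an instance still exists and the two network ids match. A hedged call sketch follows; the address and project object are placeholders, not from this page.

    # Hypothetical call site; '10.0.0.5' and project are placeholders.
    if is_allocated_in_project('10.0.0.5', project.id):
        # An instance in this project's network still holds the fixed IP.
        pass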
Example 6
 def test_too_many_networks(self):
     """Ensure error is raised if we run out of networks"""
     projects = []
     networks_left = (FLAGS.num_networks -
                      db.network_count(context.get_admin_context()))
     for i in range(networks_left):
         project = self.manager.create_project('many%s' % i, self.user)
         projects.append(project)
         db.project_get_network(context.get_admin_context(), project.id)
     project = self.manager.create_project('last', self.user)
     projects.append(project)
     self.assertRaises(db.NoMoreNetworks,
                       db.project_get_network,
                       context.get_admin_context(),
                       project.id)
     for project in projects:
         self.manager.delete_project(project)
Example 7
 def tearDown(self):
     network_ref = db.project_get_network(self.context, self.project.id)
     db.network_disassociate(self.context, network_ref['id'])
     self.manager.delete_project(self.project)
     self.manager.delete_user(self.user)
     self.compute.kill()
     self.network.kill()
     super(CloudTestCase, self).tearDown()
Example 8
 def tearDown(self):
     network_ref = db.project_get_network(self.context,
                                          self.project.id)
     db.network_disassociate(self.context, network_ref['id'])
     self.manager.delete_project(self.project)
     self.manager.delete_user(self.user)
     self.compute.kill()
     self.network.kill()
     super(CloudTestCase, self).tearDown()
Example 9
 def _is_allocated_in_project(self, address, project_id):
     """Returns true if address is in specified project"""
     project_net = db.project_get_network(context.get_admin_context(),
                                          project_id)
     network = db.fixed_ip_get_network(context.get_admin_context(), address)
     instance = db.fixed_ip_get_instance(context.get_admin_context(),
                                         address)
     # instance exists until release
     return instance is not None and network['id'] == project_net['id']
Example 10
    def get_project_vpn_data(project):
        """Gets vpn ip and port for project

        @type project: Project or project_id
        @param project: Project from which to get associated vpn data

        @rvalue: tuple of (str, str)
        @return: A tuple containing (ip, port) or None, None if vpn has
        not been allocated for user.
        """

        network_ref = db.project_get_network(context.get_admin_context(),
                                             Project.safe_id(project), False)

        if not network_ref:
            return (None, None)
        return (network_ref['vpn_public_address'],
                network_ref['vpn_public_port'])
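Example 10 passes a third positional argument (False) to db.project_get_network. Judging from the surrounding code, this appears to suppress the usual raise-on-missing behaviour so the call can come back falsy and get_project_vpn_data can return (None, None); that reading is inferred from the snippet, not confirmed here. A hedged usage sketch with a placeholder project id follows (in the snippet the function sits inside a class, so the real call would go through that object):

    vpn_ip, vpn_port = get_project_vpn_data('my-project')
    if vpn_ip is None:
        # No VPN (and hence no address/port) has been allocated yet.
        pass
    else:
        print 'VPN endpoint: %s:%s' % (vpn_ip, vpn_port)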
Example 11
    def test_available_ips(self):
        """Make sure the number of available ips for the network is correct

        The number of available IP addresses depends on the test
        environment's setup.

        Network size is set in test fixture's setUp method.

        There are ips reserved at the bottom and top of the range.
        services (network, gateway, CloudPipe, broadcast)
        """
        network = db.project_get_network(context.get_admin_context(),
                                         self.projects[0].id)
        net_size = flags.FLAGS.network_size
        admin_context = context.get_admin_context()
        total_ips = (
            db.network_count_available_ips(admin_context, network['id']) +
            db.network_count_reserved_ips(admin_context, network['id']) +
            db.network_count_allocated_ips(admin_context, network['id']))
        self.assertEqual(total_ips, net_size)
Example 12
 def test_private_ipv6(self):
     """Make sure ipv6 is OK"""
     if FLAGS.use_ipv6:
         instance_ref = self._create_instance(0)
         address = self._create_address(0, instance_ref['id'])
         network_ref = db.project_get_network(
                                              context.get_admin_context(),
                                              self.context.project_id)
         address_v6 = db.instance_get_fixed_address_v6(
                                              context.get_admin_context(),
                                              instance_ref['id'])
         self.assertEqual(instance_ref['mac_address'],
                          utils.to_mac(address_v6))
         instance_ref2 = db.fixed_ip_get_instance_v6(
                                              context.get_admin_context(),
                                              address_v6)
         self.assertEqual(instance_ref['id'], instance_ref2['id'])
         self.assertEqual(address_v6,
                          utils.to_global_ipv6(
                                              network_ref['cidr_v6'],
                                              instance_ref['mac_address']))
Example 13
 def test_private_ipv6(self):
     """Make sure ipv6 is OK"""
     if FLAGS.use_ipv6:
         instance_ref = self._create_instance(0)
         address = self._create_address(0, instance_ref['id'])
         network_ref = db.project_get_network(context.get_admin_context(),
                                              self.context.project_id)
         address_v6 = db.instance_get_fixed_address_v6(
             context.get_admin_context(), instance_ref['id'])
         self.assertEqual(instance_ref['mac_address'],
                          utils.to_mac(address_v6))
         instance_ref2 = db.fixed_ip_get_instance_v6(
             context.get_admin_context(), address_v6)
         self.assertEqual(instance_ref['id'], instance_ref2['id'])
         self.assertEqual(
             address_v6,
             utils.to_global_ipv6(network_ref['cidr_v6'],
                                  instance_ref['mac_address']))
         self._deallocate_address(0, address)
         db.instance_destroy(context.get_admin_context(),
                             instance_ref['id'])
Example 14
    def test_available_ips(self):
        """Make sure the number of available ips for the network is correct

        The number of available IP addresses depends on the test
        environment's setup.

        Network size is set in test fixture's setUp method.

        There are ips reserved at the bottom and top of the range.
        services (network, gateway, CloudPipe, broadcast)
        """
        network = db.project_get_network(context.get_admin_context(),
                                         self.projects[0].id)
        net_size = flags.FLAGS.network_size
        admin_context = context.get_admin_context()
        total_ips = (db.network_count_available_ips(admin_context,
                                                    network['id']) +
                     db.network_count_reserved_ips(admin_context,
                                                   network['id']) +
                     db.network_count_allocated_ips(admin_context,
                                                    network['id']))
        self.assertEqual(total_ips, net_size)
Example 15
    def _check_xml_and_container(self, instance):
        user_context = context.RequestContext(project=self.project,
                                              user=self.user)
        instance_ref = db.instance_create(user_context, instance)
        host = self.network.get_network_host(user_context.elevated())
        network_ref = db.project_get_network(context.get_admin_context(),
                                             self.project.id)

        fixed_ip = {'address': self.test_ip,
                    'network_id': network_ref['id']}

        ctxt = context.get_admin_context()
        fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip)
        db.fixed_ip_update(ctxt, self.test_ip,
                                 {'allocated': True,
                                  'instance_id': instance_ref['id']})

        self.flags(libvirt_type='lxc')
        conn = libvirt_conn.LibvirtConnection(True)

        uri = conn.get_uri()
        self.assertEquals(uri, 'lxc:///')

        xml = conn.to_xml(instance_ref)
        tree = xml_to_tree(xml)

        check = [
        (lambda t: t.find('.').get('type'), 'lxc'),
        (lambda t: t.find('./os/type').text, 'exe'),
        (lambda t: t.find('./devices/filesystem/target').get('dir'), '/')]

        for i, (check, expected_result) in enumerate(check):
            self.assertEqual(check(tree),
                             expected_result,
                             '%s failed common check %d' % (xml, i))

        target = tree.find('./devices/filesystem/source').get('dir')
        self.assertTrue(len(target) > 0)
Example 16
    def test_spawn_with_network_info(self):
        # Skip if non-libvirt environment
        if not self.lazy_load_library_exists():
            return

        # Preparing mocks
        def fake_none(self, instance):
            return

        self.create_fake_libvirt_mock()
        instance = db.instance_create(self.context, self.test_instance)

        # Start test
        self.mox.ReplayAll()
        conn = libvirt_conn.LibvirtConnection(False)
        conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
        conn.firewall_driver.setattr('prepare_instance_filter', fake_none)

        network = db.project_get_network(context.get_admin_context(),
                                         self.project.id)
        ip_dict = {
            'ip': self.test_ip,
            'netmask': network['netmask'],
            'enabled': '1'
        }
        mapping = {
            'label': network['label'],
            'gateway': network['gateway'],
            'mac': instance['mac_address'],
            'dns': [network['dns']],
            'ips': [ip_dict]
        }
        network_info = [(network, mapping)]

        try:
            conn.spawn(instance, network_info)
        except Exception, e:
            count = (0 <= e.message.find('Unexpected method call'))
Example 17
    def _check_xml_and_container(self, instance):
        user_context = context.RequestContext(project=self.project,
                                              user=self.user)
        instance_ref = db.instance_create(user_context, instance)
        host = self.network.get_network_host(user_context.elevated())
        network_ref = db.project_get_network(context.get_admin_context(),
                                             self.project.id)

        fixed_ip = {'address': self.test_ip, 'network_id': network_ref['id']}

        ctxt = context.get_admin_context()
        fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip)
        db.fixed_ip_update(ctxt, self.test_ip, {
            'allocated': True,
            'instance_id': instance_ref['id']
        })

        self.flags(libvirt_type='lxc')
        conn = libvirt_conn.LibvirtConnection(True)

        uri = conn.get_uri()
        self.assertEquals(uri, 'lxc:///')

        xml = conn.to_xml(instance_ref)
        tree = xml_to_tree(xml)

        check = [(lambda t: t.find('.').get('type'), 'lxc'),
                 (lambda t: t.find('./os/type').text, 'exe'),
                 (lambda t: t.find('./devices/filesystem/target').get('dir'),
                  '/')]

        for i, (check, expected_result) in enumerate(check):
            self.assertEqual(check(tree), expected_result,
                             '%s failed common check %d' % (xml, i))

        target = tree.find('./devices/filesystem/source').get('dir')
        self.assertTrue(len(target) > 0)
Example 18
    def _check_xml_and_uri(self, instance, expect_ramdisk, expect_kernel,
                           rescue=False):
        user_context = context.RequestContext(project=self.project,
                                              user=self.user)
        instance_ref = db.instance_create(user_context, instance)
        host = self.network.get_network_host(user_context.elevated())
        network_ref = db.project_get_network(context.get_admin_context(),
                                             self.project.id)

        fixed_ip = {'address':    self.test_ip,
                    'network_id': network_ref['id']}

        ctxt = context.get_admin_context()
        fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip)
        db.fixed_ip_update(ctxt, self.test_ip,
                                 {'allocated':   True,
                                  'instance_id': instance_ref['id']})

        type_uri_map = {'qemu': ('qemu:///system',
                             [(lambda t: t.find('.').get('type'), 'qemu'),
                              (lambda t: t.find('./os/type').text, 'hvm'),
                              (lambda t: t.find('./devices/emulator'), None)]),
                        'kvm': ('qemu:///system',
                             [(lambda t: t.find('.').get('type'), 'kvm'),
                              (lambda t: t.find('./os/type').text, 'hvm'),
                              (lambda t: t.find('./devices/emulator'), None)]),
                        'uml': ('uml:///system',
                             [(lambda t: t.find('.').get('type'), 'uml'),
                              (lambda t: t.find('./os/type').text, 'uml')]),
                        'xen': ('xen:///',
                             [(lambda t: t.find('.').get('type'), 'xen'),
                              (lambda t: t.find('./os/type').text, 'linux')]),
                              }

        for hypervisor_type in ['qemu', 'kvm', 'xen']:
            check_list = type_uri_map[hypervisor_type][1]

            if rescue:
                check = (lambda t: t.find('./os/kernel').text.split('/')[1],
                         'kernel.rescue')
                check_list.append(check)
                check = (lambda t: t.find('./os/initrd').text.split('/')[1],
                         'ramdisk.rescue')
                check_list.append(check)
            else:
                if expect_kernel:
                    check = (lambda t: t.find('./os/kernel').text.split(
                        '/')[1], 'kernel')
                else:
                    check = (lambda t: t.find('./os/kernel'), None)
                check_list.append(check)

                if expect_ramdisk:
                    check = (lambda t: t.find('./os/initrd').text.split(
                        '/')[1], 'ramdisk')
                else:
                    check = (lambda t: t.find('./os/initrd'), None)
                check_list.append(check)

        parameter = './devices/interface/filterref/parameter'
        common_checks = [
            (lambda t: t.find('.').tag, 'domain'),
            (lambda t: t.find(parameter).get('name'), 'IP'),
            (lambda t: t.find(parameter).get('value'), '10.11.12.13'),
            (lambda t: t.findall(parameter)[1].get('name'), 'DHCPSERVER'),
            (lambda t: t.findall(parameter)[1].get('value'), '10.0.0.1'),
            (lambda t: t.find('./devices/serial/source').get(
                'path').split('/')[1], 'console.log'),
            (lambda t: t.find('./memory').text, '2097152')]
        if rescue:
            common_checks += [
                (lambda t: t.findall('./devices/disk/source')[0].get(
                    'file').split('/')[1], 'disk.rescue'),
                (lambda t: t.findall('./devices/disk/source')[1].get(
                    'file').split('/')[1], 'disk')]
        else:
            common_checks += [(lambda t: t.findall(
                './devices/disk/source')[0].get('file').split('/')[1],
                               'disk')]
            common_checks += [(lambda t: t.findall(
                './devices/disk/source')[1].get('file').split('/')[1],
                               'disk.local')]

        for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
            FLAGS.libvirt_type = libvirt_type
            conn = libvirt_conn.LibvirtConnection(True)

            uri = conn.get_uri()
            self.assertEquals(uri, expected_uri)

            xml = conn.to_xml(instance_ref, rescue)
            tree = xml_to_tree(xml)
            for i, (check, expected_result) in enumerate(checks):
                self.assertEqual(check(tree),
                                 expected_result,
                                 '%s failed check %d' % (xml, i))

            for i, (check, expected_result) in enumerate(common_checks):
                self.assertEqual(check(tree),
                                 expected_result,
                                 '%s failed common check %d' % (xml, i))

        # This test is supposed to make sure we don't
        # override a specifically set uri
        #
        # Deliberately not just assigning this string to FLAGS.libvirt_uri and
        # checking against that later on. This way we make sure the
        # implementation doesn't fiddle around with the FLAGS.
        testuri = 'something completely different'
        FLAGS.libvirt_uri = testuri
        for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
            FLAGS.libvirt_type = libvirt_type
            conn = libvirt_conn.LibvirtConnection(True)
            uri = conn.get_uri()
            self.assertEquals(uri, testuri)
        db.instance_destroy(user_context, instance_ref['id'])
Example 19
    def test_static_filters(self):
        instance_ref = db.instance_create(
            self.context, {"user_id": "fake", "project_id": "fake", "mac_address": "56:12:12:12:12:12"}
        )
        ip = "10.11.12.13"

        network_ref = db.project_get_network(self.context, "fake")

        fixed_ip = {"address": ip, "network_id": network_ref["id"]}

        admin_ctxt = context.get_admin_context()
        db.fixed_ip_create(admin_ctxt, fixed_ip)
        db.fixed_ip_update(admin_ctxt, ip, {"allocated": True, "instance_id": instance_ref["id"]})

        secgroup = db.security_group_create(
            admin_ctxt, {"user_id": "fake", "project_id": "fake", "name": "testgroup", "description": "test group"}
        )

        db.security_group_rule_create(
            admin_ctxt,
            {
                "parent_group_id": secgroup["id"],
                "protocol": "icmp",
                "from_port": -1,
                "to_port": -1,
                "cidr": "192.168.11.0/24",
            },
        )

        db.security_group_rule_create(
            admin_ctxt,
            {
                "parent_group_id": secgroup["id"],
                "protocol": "icmp",
                "from_port": 8,
                "to_port": -1,
                "cidr": "192.168.11.0/24",
            },
        )

        db.security_group_rule_create(
            admin_ctxt,
            {
                "parent_group_id": secgroup["id"],
                "protocol": "tcp",
                "from_port": 80,
                "to_port": 81,
                "cidr": "192.168.10.0/24",
            },
        )

        db.instance_add_security_group(admin_ctxt, instance_ref["id"], secgroup["id"])
        instance_ref = db.instance_get(admin_ctxt, instance_ref["id"])

        #        self.fw.add_instance(instance_ref)
        def fake_iptables_execute(cmd, process_input=None):
            if cmd == "sudo ip6tables-save -t filter":
                return "\n".join(self.in6_rules), None
            if cmd == "sudo iptables-save -t filter":
                return "\n".join(self.in_rules), None
            if cmd == "sudo iptables-restore":
                self.out_rules = process_input.split("\n")
                return "", ""
            if cmd == "sudo ip6tables-restore":
                self.out6_rules = process_input.split("\n")
                return "", ""

        self.fw.execute = fake_iptables_execute

        self.fw.prepare_instance_filter(instance_ref)
        self.fw.apply_instance_filter(instance_ref)

        in_rules = filter(lambda l: not l.startswith("#"), self.in_rules)
        for rule in in_rules:
            if not "nova" in rule:
                self.assertTrue(rule in self.out_rules, "Rule went missing: %s" % rule)

        instance_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            if "-d 10.11.12.13 -j" in rule:
                instance_chain = rule.split(" ")[-1]
                break
        self.assertTrue(instance_chain, "The instance chain wasn't added")

        security_group_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            if "-A %s -j" % instance_chain in rule:
                security_group_chain = rule.split(" ")[-1]
                break
        self.assertTrue(security_group_chain, "The security group chain wasn't added")

        self.assertTrue(
            "-A %s -p icmp -s 192.168.11.0/24 -j ACCEPT" % security_group_chain in self.out_rules,
            "ICMP acceptance rule wasn't added",
        )

        self.assertTrue(
            "-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type "
            "8 -j ACCEPT" % security_group_chain in self.out_rules,
            "ICMP Echo Request acceptance rule wasn't added",
        )

        self.assertTrue(
            "-A %s -p tcp -s 192.168.10.0/24 -m multiport "
            "--dports 80:81 -j ACCEPT" % security_group_chain in self.out_rules,
            "TCP port 80/81 acceptance rule wasn't added",
        )
        db.instance_destroy(admin_ctxt, instance_ref["id"])
Example 20
    def test_creates_base_rule_first(self):
        # These come pre-defined by libvirt
        self.defined_filters = [
            'no-mac-spoofing', 'no-ip-spoofing', 'no-arp-spoofing',
            'allow-dhcp-server'
        ]

        self.recursive_depends = {}
        for f in self.defined_filters:
            self.recursive_depends[f] = []

        def _filterDefineXMLMock(xml):
            dom = xml_to_dom(xml)
            name = dom.firstChild.getAttribute('name')
            self.recursive_depends[name] = []
            for f in dom.getElementsByTagName('filterref'):
                ref = f.getAttribute('filter')
                self.assertTrue(
                    ref in self.defined_filters,
                    ('%s referenced filter that does ' + 'not yet exist: %s') %
                    (name, ref))
                dependencies = [ref] + self.recursive_depends[ref]
                self.recursive_depends[name] += dependencies

            self.defined_filters.append(name)
            return True

        self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock

        instance_ref = db.instance_create(self.context, {
            'user_id': 'fake',
            'project_id': 'fake'
        })
        inst_id = instance_ref['id']

        ip = '10.11.12.13'

        network_ref = db.project_get_network(self.context, 'fake')

        fixed_ip = {'address': ip, 'network_id': network_ref['id']}

        admin_ctxt = context.get_admin_context()
        db.fixed_ip_create(admin_ctxt, fixed_ip)
        db.fixed_ip_update(admin_ctxt, ip, {
            'allocated': True,
            'instance_id': instance_ref['id']
        })

        def _ensure_all_called():
            instance_filter = 'nova-instance-%s' % instance_ref['name']
            secgroup_filter = 'nova-secgroup-%s' % self.security_group['id']
            for required in [
                    secgroup_filter, 'allow-dhcp-server', 'no-arp-spoofing',
                    'no-ip-spoofing', 'no-mac-spoofing'
            ]:
                self.assertTrue(
                    required in self.recursive_depends[instance_filter],
                    "Instance's filter does not include %s" % required)

        self.security_group = self.setup_and_return_security_group()

        db.instance_add_security_group(self.context, inst_id,
                                       self.security_group.id)
        instance = db.instance_get(self.context, inst_id)

        self.fw.setup_basic_filtering(instance)
        self.fw.prepare_instance_filter(instance)
        self.fw.apply_instance_filter(instance)
        _ensure_all_called()
        self.teardown_security_group()
        db.instance_destroy(admin_ctxt, instance_ref['id'])
Example 21
    def test_static_filters(self):
        instance_ref = db.instance_create(
            self.context, {
                'user_id': 'fake',
                'project_id': 'fake',
                'mac_address': '56:12:12:12:12:12'
            })
        ip = '10.11.12.13'

        network_ref = db.project_get_network(self.context, 'fake')

        fixed_ip = {'address': ip, 'network_id': network_ref['id']}

        admin_ctxt = context.get_admin_context()
        db.fixed_ip_create(admin_ctxt, fixed_ip)
        db.fixed_ip_update(admin_ctxt, ip, {
            'allocated': True,
            'instance_id': instance_ref['id']
        })

        secgroup = db.security_group_create(
            admin_ctxt, {
                'user_id': 'fake',
                'project_id': 'fake',
                'name': 'testgroup',
                'description': 'test group'
            })

        db.security_group_rule_create(
            admin_ctxt, {
                'parent_group_id': secgroup['id'],
                'protocol': 'icmp',
                'from_port': -1,
                'to_port': -1,
                'cidr': '192.168.11.0/24'
            })

        db.security_group_rule_create(
            admin_ctxt, {
                'parent_group_id': secgroup['id'],
                'protocol': 'icmp',
                'from_port': 8,
                'to_port': -1,
                'cidr': '192.168.11.0/24'
            })

        db.security_group_rule_create(
            admin_ctxt, {
                'parent_group_id': secgroup['id'],
                'protocol': 'tcp',
                'from_port': 80,
                'to_port': 81,
                'cidr': '192.168.10.0/24'
            })

        db.instance_add_security_group(admin_ctxt, instance_ref['id'],
                                       secgroup['id'])
        instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])

        #        self.fw.add_instance(instance_ref)
        def fake_iptables_execute(*cmd, **kwargs):
            process_input = kwargs.get('process_input', None)
            if cmd == ('sudo', 'ip6tables-save', '-t', 'filter'):
                return '\n'.join(self.in6_filter_rules), None
            if cmd == ('sudo', 'iptables-save', '-t', 'filter'):
                return '\n'.join(self.in_filter_rules), None
            if cmd == ('sudo', 'iptables-save', '-t', 'nat'):
                return '\n'.join(self.in_nat_rules), None
            if cmd == ('sudo', 'iptables-restore'):
                lines = process_input.split('\n')
                if '*filter' in lines:
                    self.out_rules = lines
                return '', ''
            if cmd == ('sudo', 'ip6tables-restore'):
                lines = process_input.split('\n')
                if '*filter' in lines:
                    self.out6_rules = lines
                return '', ''
            print cmd, kwargs

        from nova.network import linux_net
        linux_net.iptables_manager.execute = fake_iptables_execute

        self.fw.prepare_instance_filter(instance_ref)
        self.fw.apply_instance_filter(instance_ref)

        in_rules = filter(lambda l: not l.startswith('#'),
                          self.in_filter_rules)
        for rule in in_rules:
            if not 'nova' in rule:
                self.assertTrue(rule in self.out_rules,
                                'Rule went missing: %s' % rule)

        instance_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            if '-d 10.11.12.13 -j' in rule:
                instance_chain = rule.split(' ')[-1]
                break
        self.assertTrue(instance_chain, "The instance chain wasn't added")

        security_group_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            if '-A %s -j' % instance_chain in rule:
                security_group_chain = rule.split(' ')[-1]
                break
        self.assertTrue(security_group_chain,
                        "The security group chain wasn't added")

        regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -j ACCEPT')
        self.assertTrue(
            len(filter(regex.match, self.out_rules)) > 0,
            "ICMP acceptance rule wasn't added")

        regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -m icmp '
                           '--icmp-type 8 -j ACCEPT')
        self.assertTrue(
            len(filter(regex.match, self.out_rules)) > 0,
            "ICMP Echo Request acceptance rule wasn't added")

        regex = re.compile('-A .* -p tcp -s 192.168.10.0/24 -m multiport '
                           '--dports 80:81 -j ACCEPT')
        self.assertTrue(
            len(filter(regex.match, self.out_rules)) > 0,
            "TCP port 80/81 acceptance rule wasn't added")
        db.instance_destroy(admin_ctxt, instance_ref['id'])
Example 22
    def _check_xml_and_uri(self,
                           instance,
                           expect_ramdisk,
                           expect_kernel,
                           rescue=False):
        user_context = context.RequestContext(project=self.project,
                                              user=self.user)
        instance_ref = db.instance_create(user_context, instance)
        host = self.network.get_network_host(user_context.elevated())
        network_ref = db.project_get_network(context.get_admin_context(),
                                             self.project.id)

        fixed_ip = {'address': self.test_ip, 'network_id': network_ref['id']}

        ctxt = context.get_admin_context()
        fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip)
        db.fixed_ip_update(ctxt, self.test_ip, {
            'allocated': True,
            'instance_id': instance_ref['id']
        })

        type_uri_map = {
            'qemu': ('qemu:///system',
                     [(lambda t: t.find('.').get('type'), 'qemu'),
                      (lambda t: t.find('./os/type').text, 'hvm'),
                      (lambda t: t.find('./devices/emulator'), None)]),
            'kvm': ('qemu:///system',
                    [(lambda t: t.find('.').get('type'), 'kvm'),
                     (lambda t: t.find('./os/type').text, 'hvm'),
                     (lambda t: t.find('./devices/emulator'), None)]),
            'uml':
            ('uml:///system', [(lambda t: t.find('.').get('type'), 'uml'),
                               (lambda t: t.find('./os/type').text, 'uml')]),
            'xen':
            ('xen:///', [(lambda t: t.find('.').get('type'), 'xen'),
                         (lambda t: t.find('./os/type').text, 'linux')]),
        }

        for hypervisor_type in ['qemu', 'kvm', 'xen']:
            check_list = type_uri_map[hypervisor_type][1]

            if rescue:
                check = (lambda t: t.find('./os/kernel').text.split('/')[1],
                         'kernel.rescue')
                check_list.append(check)
                check = (lambda t: t.find('./os/initrd').text.split('/')[1],
                         'ramdisk.rescue')
                check_list.append(check)
            else:
                if expect_kernel:
                    check = (
                        lambda t: t.find('./os/kernel').text.split('/')[1],
                        'kernel')
                else:
                    check = (lambda t: t.find('./os/kernel'), None)
                check_list.append(check)

                if expect_ramdisk:
                    check = (
                        lambda t: t.find('./os/initrd').text.split('/')[1],
                        'ramdisk')
                else:
                    check = (lambda t: t.find('./os/initrd'), None)
                check_list.append(check)

        common_checks = [
            (lambda t: t.find('.').tag, 'domain'),
            (lambda t: t.find('./devices/interface/filterref/parameter').get(
                'name'), 'IP'),
            (lambda t: t.find('./devices/interface/filterref/parameter').get(
                'value'), '10.11.12.13'),
            (lambda t: t.findall('./devices/interface/filterref/parameter')[1].
             get('name'), 'DHCPSERVER'),
            (lambda t: t.findall('./devices/interface/filterref/parameter')[1].
             get('value'), '10.0.0.1'),
            (lambda t: t.find('./devices/serial/source').get('path').split(
                '/')[1], 'console.log'),
            (lambda t: t.find('./memory').text, '2097152')
        ]
        if rescue:
            common_checks += [(lambda t: t.findall('./devices/disk/source')[0].
                               get('file').split('/')[1], 'disk.rescue'),
                              (lambda t: t.findall('./devices/disk/source')[1].
                               get('file').split('/')[1], 'disk')]
        else:
            common_checks += [(lambda t: t.findall('./devices/disk/source')[0].
                               get('file').split('/')[1], 'disk')]
            common_checks += [(lambda t: t.findall('./devices/disk/source')[1].
                               get('file').split('/')[1], 'disk.local')]

        for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
            FLAGS.libvirt_type = libvirt_type
            conn = libvirt_conn.LibvirtConnection(True)

            uri = conn.get_uri()
            self.assertEquals(uri, expected_uri)

            xml = conn.to_xml(instance_ref, rescue)
            tree = xml_to_tree(xml)
            for i, (check, expected_result) in enumerate(checks):
                self.assertEqual(check(tree), expected_result,
                                 '%s failed check %d' % (xml, i))

            for i, (check, expected_result) in enumerate(common_checks):
                self.assertEqual(check(tree), expected_result,
                                 '%s failed common check %d' % (xml, i))

        # This test is supposed to make sure we don't
        # override a specifically set uri
        #
        # Deliberately not just assigning this string to FLAGS.libvirt_uri and
        # checking against that later on. This way we make sure the
        # implementation doesn't fiddle around with the FLAGS.
        testuri = 'something completely different'
        FLAGS.libvirt_uri = testuri
        for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
            FLAGS.libvirt_type = libvirt_type
            conn = libvirt_conn.LibvirtConnection(True)
            uri = conn.get_uri()
            self.assertEquals(uri, testuri)
        db.instance_destroy(user_context, instance_ref['id'])
Example 23
    def _check_xml_and_uri(self, instance, expect_ramdisk, expect_kernel, rescue=False):
        user_context = context.RequestContext(project=self.project, user=self.user)
        instance_ref = db.instance_create(user_context, instance)
        host = self.network.get_network_host(user_context.elevated())
        network_ref = db.project_get_network(context.get_admin_context(), self.project.id)

        fixed_ip = {"address": self.test_ip, "network_id": network_ref["id"]}

        ctxt = context.get_admin_context()
        fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip)
        db.fixed_ip_update(ctxt, self.test_ip, {"allocated": True, "instance_id": instance_ref["id"]})

        type_uri_map = {
            "qemu": (
                "qemu:///system",
                [
                    (lambda t: t.find(".").get("type"), "qemu"),
                    (lambda t: t.find("./os/type").text, "hvm"),
                    (lambda t: t.find("./devices/emulator"), None),
                ],
            ),
            "kvm": (
                "qemu:///system",
                [
                    (lambda t: t.find(".").get("type"), "kvm"),
                    (lambda t: t.find("./os/type").text, "hvm"),
                    (lambda t: t.find("./devices/emulator"), None),
                ],
            ),
            "uml": (
                "uml:///system",
                [(lambda t: t.find(".").get("type"), "uml"), (lambda t: t.find("./os/type").text, "uml")],
            ),
            "xen": (
                "xen:///",
                [(lambda t: t.find(".").get("type"), "xen"), (lambda t: t.find("./os/type").text, "linux")],
            ),
        }

        for hypervisor_type in ["qemu", "kvm", "xen"]:
            check_list = type_uri_map[hypervisor_type][1]

            if rescue:
                check = (lambda t: t.find("./os/kernel").text.split("/")[1], "kernel.rescue")
                check_list.append(check)
                check = (lambda t: t.find("./os/initrd").text.split("/")[1], "ramdisk.rescue")
                check_list.append(check)
            else:
                if expect_kernel:
                    check = (lambda t: t.find("./os/kernel").text.split("/")[1], "kernel")
                else:
                    check = (lambda t: t.find("./os/kernel"), None)
                check_list.append(check)

                if expect_ramdisk:
                    check = (lambda t: t.find("./os/initrd").text.split("/")[1], "ramdisk")
                else:
                    check = (lambda t: t.find("./os/initrd"), None)
                check_list.append(check)

        common_checks = [
            (lambda t: t.find(".").tag, "domain"),
            (lambda t: t.find("./devices/interface/filterref/parameter").get("name"), "IP"),
            (lambda t: t.find("./devices/interface/filterref/parameter").get("value"), "10.11.12.13"),
            (lambda t: t.findall("./devices/interface/filterref/parameter")[1].get("name"), "DHCPSERVER"),
            (lambda t: t.findall("./devices/interface/filterref/parameter")[1].get("value"), "10.0.0.1"),
            (lambda t: t.find("./devices/serial/source").get("path").split("/")[1], "console.log"),
            (lambda t: t.find("./memory").text, "2097152"),
        ]
        if rescue:
            common_checks += [
                (lambda t: t.findall("./devices/disk/source")[0].get("file").split("/")[1], "disk.rescue"),
                (lambda t: t.findall("./devices/disk/source")[1].get("file").split("/")[1], "disk"),
            ]
        else:
            common_checks += [(lambda t: t.findall("./devices/disk/source")[0].get("file").split("/")[1], "disk")]
            common_checks += [(lambda t: t.findall("./devices/disk/source")[1].get("file").split("/")[1], "disk.local")]

        for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
            FLAGS.libvirt_type = libvirt_type
            conn = libvirt_conn.LibvirtConnection(True)

            uri = conn.get_uri()
            self.assertEquals(uri, expected_uri)

            xml = conn.to_xml(instance_ref, rescue)
            tree = xml_to_tree(xml)
            for i, (check, expected_result) in enumerate(checks):
                self.assertEqual(check(tree), expected_result, "%s failed check %d" % (xml, i))

            for i, (check, expected_result) in enumerate(common_checks):
                self.assertEqual(check(tree), expected_result, "%s failed common check %d" % (xml, i))

        # This test is supposed to make sure we don't override a specifically
        # set uri
        #
        # Deliberately not just assigning this string to FLAGS.libvirt_uri and
        # checking against that later on. This way we make sure the
        # implementation doesn't fiddle around with the FLAGS.
        testuri = "something completely different"
        FLAGS.libvirt_uri = testuri
        for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
            FLAGS.libvirt_type = libvirt_type
            conn = libvirt_conn.LibvirtConnection(True)
            uri = conn.get_uri()
            self.assertEquals(uri, testuri)
        db.instance_destroy(user_context, instance_ref["id"])
Example 24
    def test_creates_base_rule_first(self):
        # These come pre-defined by libvirt
        self.defined_filters = ["no-mac-spoofing", "no-ip-spoofing", "no-arp-spoofing", "allow-dhcp-server"]

        self.recursive_depends = {}
        for f in self.defined_filters:
            self.recursive_depends[f] = []

        def _filterDefineXMLMock(xml):
            dom = xml_to_dom(xml)
            name = dom.firstChild.getAttribute("name")
            self.recursive_depends[name] = []
            for f in dom.getElementsByTagName("filterref"):
                ref = f.getAttribute("filter")
                self.assertTrue(
                    ref in self.defined_filters, ("%s referenced filter that does " + "not yet exist: %s") % (name, ref)
                )
                dependencies = [ref] + self.recursive_depends[ref]
                self.recursive_depends[name] += dependencies

            self.defined_filters.append(name)
            return True

        self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock

        instance_ref = db.instance_create(self.context, {"user_id": "fake", "project_id": "fake"})
        inst_id = instance_ref["id"]

        ip = "10.11.12.13"

        network_ref = db.project_get_network(self.context, "fake")

        fixed_ip = {"address": ip, "network_id": network_ref["id"]}

        admin_ctxt = context.get_admin_context()
        db.fixed_ip_create(admin_ctxt, fixed_ip)
        db.fixed_ip_update(admin_ctxt, ip, {"allocated": True, "instance_id": instance_ref["id"]})

        def _ensure_all_called():
            instance_filter = "nova-instance-%s" % instance_ref["name"]
            secgroup_filter = "nova-secgroup-%s" % self.security_group["id"]
            for required in [
                secgroup_filter,
                "allow-dhcp-server",
                "no-arp-spoofing",
                "no-ip-spoofing",
                "no-mac-spoofing",
            ]:
                self.assertTrue(
                    required in self.recursive_depends[instance_filter],
                    "Instance's filter does not include %s" % required,
                )

        self.security_group = self.setup_and_return_security_group()

        db.instance_add_security_group(self.context, inst_id, self.security_group.id)
        instance = db.instance_get(self.context, inst_id)

        self.fw.setup_basic_filtering(instance)
        self.fw.prepare_instance_filter(instance)
        self.fw.apply_instance_filter(instance)
        _ensure_all_called()
        self.teardown_security_group()
        db.instance_destroy(admin_ctxt, instance_ref["id"])
Example 25
    def test_static_filters(self):
        instance_ref = self._create_instance_ref()
        ip = '10.11.12.13'

        network_ref = db.project_get_network(self.context,
                                             'fake')

        fixed_ip = {'address': ip,
                    'network_id': network_ref['id']}

        admin_ctxt = context.get_admin_context()
        db.fixed_ip_create(admin_ctxt, fixed_ip)
        db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
                                            'instance_id': instance_ref['id']})

        secgroup = db.security_group_create(admin_ctxt,
                                            {'user_id': 'fake',
                                             'project_id': 'fake',
                                             'name': 'testgroup',
                                             'description': 'test group'})

        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'icmp',
                                       'from_port': -1,
                                       'to_port': -1,
                                       'cidr': '192.168.11.0/24'})

        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'icmp',
                                       'from_port': 8,
                                       'to_port': -1,
                                       'cidr': '192.168.11.0/24'})

        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'tcp',
                                       'from_port': 80,
                                       'to_port': 81,
                                       'cidr': '192.168.10.0/24'})

        db.instance_add_security_group(admin_ctxt, instance_ref['id'],
                                       secgroup['id'])
        instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])

#        self.fw.add_instance(instance_ref)
        def fake_iptables_execute(*cmd, **kwargs):
            process_input = kwargs.get('process_input', None)
            if cmd == ('sudo', 'ip6tables-save', '-t', 'filter'):
                return '\n'.join(self.in6_filter_rules), None
            if cmd == ('sudo', 'iptables-save', '-t', 'filter'):
                return '\n'.join(self.in_filter_rules), None
            if cmd == ('sudo', 'iptables-save', '-t', 'nat'):
                return '\n'.join(self.in_nat_rules), None
            if cmd == ('sudo', 'iptables-restore'):
                lines = process_input.split('\n')
                if '*filter' in lines:
                    self.out_rules = lines
                return '', ''
            if cmd == ('sudo', 'ip6tables-restore'):
                lines = process_input.split('\n')
                if '*filter' in lines:
                    self.out6_rules = lines
                return '', ''
            print cmd, kwargs

        from nova.network import linux_net
        linux_net.iptables_manager.execute = fake_iptables_execute

        self.fw.prepare_instance_filter(instance_ref)
        self.fw.apply_instance_filter(instance_ref)

        in_rules = filter(lambda l: not l.startswith('#'),
                          self.in_filter_rules)
        for rule in in_rules:
            if not 'nova' in rule:
                self.assertTrue(rule in self.out_rules,
                                'Rule went missing: %s' % rule)

        instance_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            if '-d 10.11.12.13 -j' in rule:
                instance_chain = rule.split(' ')[-1]
                break
        self.assertTrue(instance_chain, "The instance chain wasn't added")

        security_group_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            if '-A %s -j' % instance_chain in rule:
                security_group_chain = rule.split(' ')[-1]
                break
        self.assertTrue(security_group_chain,
                        "The security group chain wasn't added")

        regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -j ACCEPT')
        self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                        "ICMP acceptance rule wasn't added")

        regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -m icmp '
                           '--icmp-type 8 -j ACCEPT')
        self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                        "ICMP Echo Request acceptance rule wasn't added")

        regex = re.compile('-A .* -p tcp -s 192.168.10.0/24 -m multiport '
                           '--dports 80:81 -j ACCEPT')
        self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                        "TCP port 80/81 acceptance rule wasn't added")
        db.instance_destroy(admin_ctxt, instance_ref['id'])
Example 26
    def test_creates_base_rule_first(self):
        # These come pre-defined by libvirt
        self.defined_filters = ['no-mac-spoofing',
                                'no-ip-spoofing',
                                'no-arp-spoofing',
                                'allow-dhcp-server']

        self.recursive_depends = {}
        for f in self.defined_filters:
            self.recursive_depends[f] = []

        def _filterDefineXMLMock(xml):
            dom = xml_to_dom(xml)
            name = dom.firstChild.getAttribute('name')
            self.recursive_depends[name] = []
            for f in dom.getElementsByTagName('filterref'):
                ref = f.getAttribute('filter')
                self.assertTrue(ref in self.defined_filters,
                                ('%s referenced filter that does ' +
                                'not yet exist: %s') % (name, ref))
                dependencies = [ref] + self.recursive_depends[ref]
                self.recursive_depends[name] += dependencies

            self.defined_filters.append(name)
            return True

        self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock

        instance_ref = self._create_instance()
        inst_id = instance_ref['id']

        ip = '10.11.12.13'

        network_ref = db.project_get_network(self.context, 'fake')
        fixed_ip = {'address': ip, 'network_id': network_ref['id']}

        admin_ctxt = context.get_admin_context()
        db.fixed_ip_create(admin_ctxt, fixed_ip)
        db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
                                            'instance_id': inst_id})

        def _ensure_all_called():
            instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'],
                                                       '00A0C914C829')
            secgroup_filter = 'nova-secgroup-%s' % self.security_group['id']
            for required in [secgroup_filter, 'allow-dhcp-server',
                             'no-arp-spoofing', 'no-ip-spoofing',
                             'no-mac-spoofing']:
                self.assertTrue(required in
                                self.recursive_depends[instance_filter],
                                "Instance's filter does not include %s" %
                                required)

        self.security_group = self.setup_and_return_security_group()

        db.instance_add_security_group(self.context, inst_id,
                                       self.security_group.id)
        instance = db.instance_get(self.context, inst_id)

        self.fw.setup_basic_filtering(instance)
        self.fw.prepare_instance_filter(instance)
        self.fw.apply_instance_filter(instance)
        _ensure_all_called()
        self.teardown_security_group()
        db.instance_destroy(admin_ctxt, instance_ref['id'])
Example 27
    def test_static_filters(self):
        instance_ref = db.instance_create(self.context,
                                          {'user_id': 'fake',
                                          'project_id': 'fake',
                                          'mac_address': '56:12:12:12:12:12'})
        ip = '10.11.12.13'

        network_ref = db.project_get_network(self.context,
                                             'fake')

        fixed_ip = {'address': ip,
                    'network_id': network_ref['id']}

        admin_ctxt = context.get_admin_context()
        db.fixed_ip_create(admin_ctxt, fixed_ip)
        db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
                                            'instance_id': instance_ref['id']})

        secgroup = db.security_group_create(admin_ctxt,
                                            {'user_id': 'fake',
                                             'project_id': 'fake',
                                             'name': 'testgroup',
                                             'description': 'test group'})

        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'icmp',
                                       'from_port': -1,
                                       'to_port': -1,
                                       'cidr': '192.168.11.0/24'})

        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'icmp',
                                       'from_port': 8,
                                       'to_port': -1,
                                       'cidr': '192.168.11.0/24'})

        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'tcp',
                                       'from_port': 80,
                                       'to_port': 81,
                                       'cidr': '192.168.10.0/24'})

        db.instance_add_security_group(admin_ctxt, instance_ref['id'],
                                       secgroup['id'])
        instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])

#        self.fw.add_instance(instance_ref)
        def fake_iptables_execute(cmd, process_input=None):
            if cmd == 'sudo ip6tables-save -t filter':
                return '\n'.join(self.in6_rules), None
            if cmd == 'sudo iptables-save -t filter':
                return '\n'.join(self.in_rules), None
            if cmd == 'sudo iptables-restore':
                self.out_rules = process_input.split('\n')
                return '', ''
            if cmd == 'sudo ip6tables-restore':
                self.out6_rules = process_input.split('\n')
                return '', ''
        self.fw.execute = fake_iptables_execute

        self.fw.prepare_instance_filter(instance_ref)
        self.fw.apply_instance_filter(instance_ref)

        in_rules = filter(lambda l: not l.startswith('#'), self.in_rules)
        for rule in in_rules:
            if not 'nova' in rule:
                self.assertTrue(rule in self.out_rules,
                                'Rule went missing: %s' % rule)

        instance_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            if '-d 10.11.12.13 -j' in rule:
                instance_chain = rule.split(' ')[-1]
                break
        self.assertTrue(instance_chain, "The instance chain wasn't added")

        security_group_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            if '-A %s -j' % instance_chain in rule:
                security_group_chain = rule.split(' ')[-1]
                break
        self.assertTrue(security_group_chain,
                        "The security group chain wasn't added")

        self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -j ACCEPT' % \
                               security_group_chain in self.out_rules,
                        "ICMP acceptance rule wasn't added")

        self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type '
                        '8 -j ACCEPT' % security_group_chain in self.out_rules,
                        "ICMP Echo Request acceptance rule wasn't added")

        self.assertTrue('-A %s -p tcp -s 192.168.10.0/24 -m multiport '
                        '--dports 80:81 -j ACCEPT' % security_group_chain \
                            in self.out_rules,
                        "TCP port 80/81 acceptance rule wasn't added")