Code Example #1
 def deallocate_ips_by_vif(self, context, tenant_id, net_id, vif_ref):
     """Deallocate all fixed IPs associated with the specified
        virtual interface.
     """
     admin_context = context.elevated()
     fixed_ips = db.fixed_ips_by_virtual_interface(admin_context,
                                                   vif_ref['id'])
     # NOTE(s0mik): Sets fixed-ip to deallocated, but leaves the entry
     # associated with the instance-id.  This prevents us from handing it
     # out again immediately, as allocating it to a new instance before
     # a DHCP lease has timed-out is bad.  Instead, the fixed-ip will
     # be disassociated with the instance-id by a call to one of two
     # methods inherited from FlatManager:
     # - if DHCP is in use, a lease expiring in dnsmasq triggers
     #   a call to release_fixed_ip in the network manager.
     # - otherwise, _disassociate_stale_fixed_ips is called periodically
     #   to disassociate all fixed ips that are unallocated
     #   but still associated with an instance-id.
     for fixed_ip in fixed_ips:
         db.fixed_ip_update(admin_context, fixed_ip['address'],
                            {'allocated': False,
                             'virtual_interface_id': None})
     if len(fixed_ips) == 0:
         LOG.error(_('No fixed IPs to deallocate for vif %s') %
                   vif_ref['id'])
Code Example #2
File: test_network.py Project: yamahata/nova
    def test_instance_dns(self):
        fixedip = '192.168.0.101'
        self.mox.StubOutWithMock(db, 'network_get')
        self.mox.StubOutWithMock(db, 'network_update')
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(
            db, 'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')

        db.fixed_ip_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
        db.virtual_interface_get_by_instance_and_network(
            mox.IgnoreArg(), mox.IgnoreArg(),
            mox.IgnoreArg()).AndReturn({'id': 0})

        db.instance_get(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            {'security_groups': [{
                'id': 0
            }]})

        db.instance_get(self.context, 1).AndReturn({'display_name': HOST})
        db.fixed_ip_associate_pool(mox.IgnoreArg(), mox.IgnoreArg(),
                                   mox.IgnoreArg()).AndReturn(fixedip)
        db.network_get(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(networks[0])
        db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.network.add_fixed_ip_to_instance(self.context, 1, HOST,
                                              networks[0]['id'])
        addresses = self.network.instance_dns_manager.get_entries_by_name(HOST)
        self.assertEqual(len(addresses), 1)
        self.assertEqual(addresses[0], fixedip)
Code Example #3
File: test_network.py Project: yamahata/nova
    def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
        self.mox.StubOutWithMock(db, 'network_get')
        self.mox.StubOutWithMock(db, 'network_update')
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(
            db, 'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')

        db.fixed_ip_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
        db.virtual_interface_get_by_instance_and_network(
            mox.IgnoreArg(), mox.IgnoreArg(),
            mox.IgnoreArg()).AndReturn({'id': 0})

        db.instance_get(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            {'security_groups': [{
                'id': 0
            }]})
        db.instance_get(self.context, 1).AndReturn({'display_name': HOST})
        db.fixed_ip_associate_pool(mox.IgnoreArg(), mox.IgnoreArg(),
                                   mox.IgnoreArg()).AndReturn('192.168.0.101')
        db.network_get(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(networks[0])
        db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.network.add_fixed_ip_to_instance(self.context, 1, HOST,
                                              networks[0]['id'])
Code Example #4
File: test_network.py Project: swapniltamse/nova
    def test_instance_dns(self):
        fixedip = '192.168.0.101'
        self.mox.StubOutWithMock(db, 'network_get')
        self.mox.StubOutWithMock(db, 'network_update')
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(db,
                              'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')

        db.fixed_ip_update(mox.IgnoreArg(),
                           mox.IgnoreArg(),
                           mox.IgnoreArg())
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})

        db.instance_get(mox.IgnoreArg(),
                        mox.IgnoreArg()).AndReturn({'security_groups':
                                                             [{'id': 0}]})

        db.instance_get(self.context,
                        1).AndReturn({'display_name': HOST})
        db.fixed_ip_associate_pool(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   mox.IgnoreArg()).AndReturn(fixedip)
        db.network_get(mox.IgnoreArg(),
                       mox.IgnoreArg()).AndReturn(networks[0])
        db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.network.add_fixed_ip_to_instance(self.context, 1, HOST,
                                              networks[0]['id'])
        addresses = self.network.instance_dns_manager.get_entries_by_name(HOST)
        self.assertEqual(len(addresses), 1)
        self.assertEqual(addresses[0], fixedip)
Code Example #5
File: fixed_ip.py Project: vincentpanqi/nova
 def save(self, context):
     updates = self.obj_get_changes()
     if 'address' in updates:
         raise exception.ObjectActionError(action='save',
                                           reason='address is not mutable')
     db.fixed_ip_update(context, str(self.address), updates)
     self.obj_reset_changes()
Code Example #6
File: test_network.py Project: CaptTofu/reddwarf
    def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
        self.mox.StubOutWithMock(db, 'network_get')
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(db,
                              'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')

        db.fixed_ip_update(mox.IgnoreArg(),
                           mox.IgnoreArg(),
                           mox.IgnoreArg())
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})

        db.instance_get(mox.IgnoreArg(),
                        mox.IgnoreArg()).AndReturn({'security_groups':
                                                             [{'id': 0}]})
        db.fixed_ip_associate_pool(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   mox.IgnoreArg()).AndReturn('192.168.0.101')
        db.network_get(mox.IgnoreArg(),
                       mox.IgnoreArg()).AndReturn(networks[0])
        self.mox.ReplayAll()
        self.network.add_fixed_ip_to_instance(self.context, 1, HOST,
                                              networks[0]['id'])
Code Example #7
 def deallocate_ips_by_vif(self, context, tenant_id, net_id, vif_ref):
     """Deallocate all fixed IPs associated with the specified
        virtual interface.
     """
     admin_context = context.elevated()
     fixed_ips = db.fixed_ips_by_virtual_interface(admin_context,
                                                   vif_ref['id'])
     # NOTE(s0mik): Sets fixed-ip to deallocated, but leaves the entry
     # associated with the instance-id.  This prevents us from handing it
     # out again immediately, as allocating it to a new instance before
     # a DHCP lease has timed-out is bad.  Instead, the fixed-ip will
     # be disassociated with the instance-id by a call to one of two
     # methods inherited from FlatManager:
     # - if DHCP is in use, a lease expiring in dnsmasq triggers
     #   a call to release_fixed_ip in the network manager.
     # - otherwise, _disassociate_stale_fixed_ips is called periodically
     #   to disassociate all fixed ips that are unallocated
     #   but still associated with an instance-id.
     for fixed_ip in fixed_ips:
         db.fixed_ip_update(admin_context, fixed_ip['address'], {
             'allocated': False,
             'virtual_interface_id': None
         })
     if len(fixed_ips) == 0:
         LOG.error(
             _('No fixed IPs to deallocate for vif %s') % vif_ref['id'])
Code Example #8
 def test_unreserve(self):
     db.fixed_ip_update(context.get_admin_context(), '10.0.0.100',
                        {'reserved': True})
     self.commands.unreserve('10.0.0.100')
     address = db.fixed_ip_get_by_address(context.get_admin_context(),
                                          '10.0.0.100')
     self.assertEqual(address['reserved'], False)
Code Example #9
 def delete(self, context):
     """
     PF9 function
     :param context:
     :return:
     """
     db.fixed_ip_update(context, str(self.address), dict(deleted=True))
     self.deleted = True
     self.obj_reset_changes(['deleted'])
Code Example #10
 def allocate_fixed_ip(self, context, tenant_id, quantum_net_id, vif_rec):
     """Allocates a single fixed IPv4 address for a virtual interface."""
     admin_context = context.elevated()
     network = db.network_get_by_uuid(admin_context, quantum_net_id)
     if network['cidr']:
         address = db.fixed_ip_associate_pool(admin_context, network['id'],
                                              vif_rec['instance_id'])
         values = {'allocated': True, 'virtual_interface_id': vif_rec['id']}
         db.fixed_ip_update(admin_context, address, values)
Code Example #11
File: fixed_ips.py Project: sdague/nova
    def _set_reserved(self, context, address, reserved):
        try:
            fixed_ip = db.fixed_ip_get_by_address(context, address)
            db.fixed_ip_update(context, fixed_ip["address"], {"reserved": reserved})
        except exception.FixedIpNotFoundForAddress:
            msg = _("Fixed IP %s not found") % address
            raise webob.exc.HTTPNotFound(explanation=msg)

        return webob.exc.HTTPAccepted()
Code Example #12
File: nova_ipam_lib.py Project: pknouff/nova
 def allocate_fixed_ips(self, context, tenant_id, quantum_net_id, network_tenant_id, vif_rec):
     """Allocates a single fixed IPv4 address for a virtual interface."""
     admin_context = context.elevated()
     network = db.network_get_by_uuid(admin_context, quantum_net_id)
     address = None
     if network["cidr"]:
         address = db.fixed_ip_associate_pool(admin_context, network["id"], vif_rec["instance_id"])
         values = {"allocated": True, "virtual_interface_id": vif_rec["id"]}
         db.fixed_ip_update(admin_context, address, values)
     return [address]
Code Example #13
    def _set_reserved(self, context, address, reserved):
        try:
            fixed_ip = db.fixed_ip_get_by_address(context, address)
            db.fixed_ip_update(context, fixed_ip['address'],
                               {'reserved': reserved})
        except (exception.FixedIpNotFoundForAddress, exception.FixedIpInvalid):
            msg = _("Fixed IP %s not found") % address
            raise webob.exc.HTTPNotFound(explanation=msg)

        return webob.exc.HTTPAccepted()
Code Example #14
File: nova_ipam_lib.py Project: pknouff/nova
 def deallocate_ips_by_vif(self, context, tenant_id, net_id, vif_ref):
     """Deallocate all fixed IPs associated with the specified
        virtual interface.
     """
     admin_context = context.elevated()
     fixed_ips = db.fixed_ips_by_virtual_interface(admin_context, vif_ref["id"])
     for fixed_ip in fixed_ips:
         db.fixed_ip_update(admin_context, fixed_ip["address"], {"allocated": False, "virtual_interface_id": None})
     if len(fixed_ips) == 0:
         LOG.error(_("No fixed IPs to deallocate for vif %s" % vif_ref["id"]))
Code Example #15
 def allocate_fixed_ip(self, context, tenant_id, quantum_net_id, vif_rec):
     """Allocates a single fixed IPv4 address for a virtual interface."""
     admin_context = context.elevated()
     network = db.network_get_by_uuid(admin_context, quantum_net_id)
     if network['cidr']:
         address = db.fixed_ip_associate_pool(admin_context,
                                              network['id'],
                                              vif_rec['instance_id'])
         values = {'allocated': True,
                   'virtual_interface_id': vif_rec['id']}
         db.fixed_ip_update(admin_context, address, values)
Code Example #16
File: manage.py Project: comstud/nova
    def _set_reserved(self, address, reserved):
        ctxt = context.get_admin_context()

        try:
            fixed_ip = db.fixed_ip_get_by_address(ctxt, address)
            if fixed_ip is None:
                raise exception.NotFound("Could not find address")
            db.fixed_ip_update(ctxt, fixed_ip["address"], {"reserved": reserved})
        except exception.NotFound as ex:
            print _("error: %s") % ex
            return 2
Code Example #17
    def _set_reserved(self, address, reserved):
        ctxt = context.get_admin_context()

        try:
            fixed_ip = db.fixed_ip_get_by_address(ctxt, address)
            if fixed_ip is None:
                raise exception.NotFound('Could not find address')
            db.fixed_ip_update(ctxt, fixed_ip['address'],
                                {'reserved': reserved})
        except exception.NotFound as ex:
            print _("error: %s") % ex
            return 2
Code Example #18
File: manage.py Project: RibeiroAna/nova
    def _set_reserved(self, address, reserved):
        ctxt = context.get_admin_context()

        try:
            fixed_ip = db.fixed_ip_get_by_address(ctxt, address)
            if fixed_ip is None:
                raise exception.NotFound('Could not find address')
            db.fixed_ip_update(ctxt, fixed_ip['address'],
                                {'reserved': reserved})
        except exception.NotFound as ex:
            print(_("error: %s") % ex)
            return 2
Code Example #19
File: nova_ipam_lib.py Project: Cygnet/nova
 def deallocate_ips_by_vif(self, context, tenant_id, net_id, vif_ref):
     """Deallocate all fixed IPs associated with the specified
        virtual interface.
     """
     admin_context = context.elevated()
     fixed_ips = db.fixed_ips_by_virtual_interface(admin_context,
                                                   vif_ref['id'])
     for fixed_ip in fixed_ips:
         db.fixed_ip_update(admin_context, fixed_ip['address'],
                            {'allocated': False,
                             'virtual_interface_id': None})
     if len(fixed_ips) == 0:
         LOG.error(_('No fixed IPs to deallocate for vif %s'),
                   vif_ref['id'])
Code Example #20
File: test_db_api.py Project: hiteshwadekar/nova
 def test_network_delete_safe(self):
     ctxt = context.get_admin_context()
     values = {"host": "localhost", "project_id": "project1"}
     network = db.network_create_safe(ctxt, values)
     db_network = db.network_get(ctxt, network.id)
     values = {"network_id": network["id"], "address": "fake1"}
     address1 = db.fixed_ip_create(ctxt, values)
     values = {"network_id": network["id"], "address": "fake2", "allocated": True}
     address2 = db.fixed_ip_create(ctxt, values)
     self.assertRaises(exception.NetworkInUse, db.network_delete_safe, ctxt, network["id"])
     db.fixed_ip_update(ctxt, address2, {"allocated": False})
     network = db.network_delete_safe(ctxt, network["id"])
     ctxt = ctxt.elevated(read_deleted="yes")
     fixed_ip = db.fixed_ip_get_by_address(ctxt, address1)
     self.assertTrue(fixed_ip["deleted"])
Code Example #21
File: nova_ipam_lib.py Project: BillTheBest/nova
 def deallocate_ips_by_vif(self, context, tenant_id, net_id, vif_ref):
     """Deallocate all fixed IPs associated with the specified
        virtual interface.
     """
     try:
         admin_context = context.elevated()
         fixed_ips = db.fixed_ip_get_by_virtual_interface(admin_context,
                                                          vif_ref['id'])
         for fixed_ip in fixed_ips:
             db.fixed_ip_update(admin_context, fixed_ip['address'],
                                {'allocated': False,
                                 'virtual_interface_id': None})
     except exception.FixedIpNotFoundForInstance:
         LOG.error(_('No fixed IPs to deallocate for vif %s') %
                   vif_ref['id'])
Code Example #22
File: test_network.py Project: rlz/osc-build-nova
    def test_vpn_allocate_fixed_ip(self):
        self.mox.StubOutWithMock(db, "fixed_ip_associate")
        self.mox.StubOutWithMock(db, "fixed_ip_update")
        self.mox.StubOutWithMock(db, "virtual_interface_get_by_instance_and_network")

        db.fixed_ip_associate(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), reserved=True).AndReturn("192.168.0.1")
        db.fixed_ip_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            {"id": 0}
        )
        self.mox.ReplayAll()

        network = dict(networks[0])
        network["vpn_private_address"] = "192.168.0.2"
        self.network.allocate_fixed_ip(None, 0, network, vpn=True)
Code Example #23
 def deallocate_ips_by_vif(self, context, tenant_id, net_id, vif_ref):
     """Deallocate all fixed IPs associated with the specified
        virtual interface.
     """
     admin_context = context.elevated()
     fixed_ips = db.fixed_ips_by_virtual_interface(admin_context,
                                                   vif_ref['id'])
     for fixed_ip in fixed_ips:
         db.fixed_ip_update(admin_context, fixed_ip['address'], {
             'allocated': False,
             'virtual_interface_id': None
         })
     if len(fixed_ips) == 0:
         LOG.error(
             _('No fixed IPs to deallocate for vif %s') % vif_ref['id'])
Code Example #24
 def deallocate_ips_by_vif(self, context, tenant_id, net_id, vif_ref):
     """Deallocate all fixed IPs associated with the specified
        virtual interface.
     """
     try:
         admin_context = context.elevated()
         fixed_ips = db.fixed_ip_get_by_virtual_interface(
             admin_context, vif_ref['id'])
         for fixed_ip in fixed_ips:
             db.fixed_ip_update(admin_context, fixed_ip['address'], {
                 'allocated': False,
                 'virtual_interface_id': None
             })
     except exception.FixedIpNotFoundForInstance:
         LOG.error(
             _('No fixed IPs to deallocate for vif %s') % vif_ref['id'])
Code Example #25
    def deallocate_ips_by_vif(self, context, tenant_id, net_id, vif_ref):
        """Deallocate all fixed IPs associated with the specified
           virtual interface.
        """
        admin_context = context.elevated()
        fixed_ips = db.fixed_ips_by_virtual_interface(admin_context,
                                                      vif_ref['id'])
        # NOTE(s0mik): Sets fixed-ip to deallocated, but leaves the entry
        # associated with the instance-id.  This prevents us from handing it
        # out again immediately, as allocating it to a new instance before
        # a DHCP lease has timed-out is bad.  Instead, the fixed-ip will
        # be disassociated with the instance-id by a call to one of two
        # methods inherited from FlatManager:
        # - if DHCP is in use, a lease expiring in dnsmasq triggers
        #   a call to release_fixed_ip in the network manager, or it will
        #   be timed out periodically if the lease fails.
        # - otherwise, we release the ip immediately

        read_deleted_context = admin_context.elevated(read_deleted='yes')
        for fixed_ip in fixed_ips:
            fixed_id = fixed_ip['id']
            floating_ips = self.net_manager.db.floating_ip_get_by_fixed_ip_id(
                admin_context, fixed_id)
            # disassociate floating ips related to fixed_ip
            for floating_ip in floating_ips:
                address = floating_ip['address']
                manager.FloatingIP.disassociate_floating_ip(
                    self.net_manager,
                    read_deleted_context,
                    address,
                    affect_auto_assigned=True)
                # deallocate if auto_assigned
                if floating_ip['auto_assigned']:
                    manager.FloatingIP.deallocate_floating_ip(
                        read_deleted_context,
                        address,
                        affect_auto_assigned=True)
            db.fixed_ip_update(admin_context, fixed_ip['address'], {
                'allocated': False,
                'virtual_interface_id': None
            })
            if not self.net_manager.DHCP:
                db.fixed_ip_disassociate(admin_context, fixed_ip['address'])

        if len(fixed_ips) == 0:
            LOG.error(_('No fixed IPs to deallocate for vif %s'),
                      vif_ref['id'])
Code Example #26
File: nova_ipam_lib.py Project: A7Zulu/nova
    def deallocate_ips_by_vif(self, context, tenant_id, net_id, vif_ref):
        """Deallocate all fixed IPs associated with the specified
           virtual interface.
        """
        admin_context = context.elevated()
        fixed_ips = db.fixed_ips_by_virtual_interface(admin_context,
                                                      vif_ref['id'])
        # NOTE(s0mik): Sets fixed-ip to deallocated, but leaves the entry
        # associated with the instance-id.  This prevents us from handing it
        # out again immediately, as allocating it to a new instance before
        # a DHCP lease has timed-out is bad.  Instead, the fixed-ip will
        # be disassociated with the instance-id by a call to one of two
        # methods inherited from FlatManager:
        # - if DHCP is in use, a lease expiring in dnsmasq triggers
        #   a call to release_fixed_ip in the network manager, or it will
        #   be timed out periodically if the lease fails.
        # - otherwise, we release the ip immediately

        read_deleted_context = admin_context.elevated(read_deleted='yes')
        for fixed_ip in fixed_ips:
            fixed_id = fixed_ip['id']
            floating_ips = self.net_manager.db.floating_ip_get_by_fixed_ip_id(
                                admin_context,
                                fixed_id)
            # disassociate floating ips related to fixed_ip
            for floating_ip in floating_ips:
                address = floating_ip['address']
                manager.FloatingIP.disassociate_floating_ip(
                    self.net_manager,
                    read_deleted_context,
                    address,
                    affect_auto_assigned=True)
                # deallocate if auto_assigned
                if floating_ip['auto_assigned']:
                    manager.FloatingIP.deallocate_floating_ip(
                        read_deleted_context,
                        address,
                        affect_auto_assigned=True)
            db.fixed_ip_update(admin_context, fixed_ip['address'],
                               {'allocated': False,
                                'virtual_interface_id': None})
            if not self.net_manager.DHCP:
                db.fixed_ip_disassociate(admin_context, fixed_ip['address'])

        if len(fixed_ips) == 0:
            LOG.error(_('No fixed IPs to deallocate for vif %s'),
                      vif_ref['id'])
Code Example #27
File: test_network.py Project: nicoleLiu/nova
    def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
        self.mox.StubOutWithMock(db, "network_get")
        self.mox.StubOutWithMock(db, "fixed_ip_associate_pool")
        self.mox.StubOutWithMock(db, "instance_get")
        self.mox.StubOutWithMock(db, "virtual_interface_get_by_instance_and_network")
        self.mox.StubOutWithMock(db, "fixed_ip_update")

        db.fixed_ip_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            {"id": 0}
        )

        db.instance_get(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({"security_groups": [{"id": 0}]})
        db.fixed_ip_associate_pool(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn("192.168.0.101")
        db.network_get(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(networks[0])
        self.mox.ReplayAll()
        self.network.add_fixed_ip_to_instance(self.context, 1, HOST, networks[0]["id"])
Code Example #28
File: test_network.py Project: termie/nova
    def test_allocate_fixed_ip(self):
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        self.mox.StubOutWithMock(
            db, 'virtual_interface_get_by_instance_and_network')

        db.fixed_ip_associate_pool(mox.IgnoreArg(), mox.IgnoreArg(),
                                   mox.IgnoreArg()).AndReturn('192.168.0.1')
        db.fixed_ip_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
        db.virtual_interface_get_by_instance_and_network(
            mox.IgnoreArg(), mox.IgnoreArg(),
            mox.IgnoreArg()).AndReturn({'id': 0})
        self.mox.ReplayAll()

        network = dict(networks[0])
        network['vpn_private_address'] = '192.168.0.2'
        self.network.allocate_fixed_ip(None, 0, network)
Code Example #29
File: test_network.py Project: nicoleLiu/nova
    def test_allocate_fixed_ip(self):
        self.mox.StubOutWithMock(db, "fixed_ip_associate_pool")
        self.mox.StubOutWithMock(db, "fixed_ip_update")
        self.mox.StubOutWithMock(db, "virtual_interface_get_by_instance_and_network")
        self.mox.StubOutWithMock(db, "instance_get")

        db.instance_get(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({"security_groups": [{"id": 0}]})
        db.fixed_ip_associate_pool(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn("192.168.0.1")
        db.fixed_ip_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            {"id": 0}
        )
        self.mox.ReplayAll()

        network = dict(networks[0])
        network["vpn_private_address"] = "192.168.0.2"
        self.network.allocate_fixed_ip(self.context, 0, network)
Code Example #30
 def test_network_delete_safe(self):
     ctxt = context.get_admin_context()
     values = {'host': 'localhost', 'project_id': 'project1'}
     network = db.network_create_safe(ctxt, values)
     db_network = db.network_get(ctxt, network.id)
     values = {'network_id': network['id'], 'address': 'fake1'}
     address1 = db.fixed_ip_create(ctxt, values)
     values = {'network_id': network['id'],
               'address': 'fake2',
               'allocated': True}
     address2 = db.fixed_ip_create(ctxt, values)
     self.assertRaises(exception.NetworkInUse,
                       db.network_delete_safe, ctxt, network['id'])
     db.fixed_ip_update(ctxt, address2, {'allocated': False})
     network = db.network_delete_safe(ctxt, network['id'])
     ctxt = ctxt.elevated(read_deleted='yes')
     fixed_ip = db.fixed_ip_get_by_address(ctxt, address1)
     self.assertTrue(fixed_ip['deleted'])
Code Example #31
File: test_network.py Project: lovocas/nova
    def test_allocate_fixed_ip(self):
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        self.mox.StubOutWithMock(db,
                              'virtual_interface_get_by_instance_and_network')

        db.fixed_ip_associate_pool(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   mox.IgnoreArg()).AndReturn('192.168.0.1')
        db.fixed_ip_update(mox.IgnoreArg(),
                           mox.IgnoreArg(),
                           mox.IgnoreArg())
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
        self.mox.ReplayAll()

        network = dict(networks[0])
        network['vpn_private_address'] = '192.168.0.2'
        self.network.allocate_fixed_ip(None, 0, network)
Code Example #32
File: test_db_api.py Project: scpham/nova
 def test_network_delete_safe(self):
     ctxt = context.get_admin_context()
     values = {'host': 'localhost', 'project_id': 'project1'}
     network = db.network_create_safe(ctxt, values)
     db_network = db.network_get(ctxt, network.id)
     values = {'network_id': network['id'], 'address': 'fake1'}
     address1 = db.fixed_ip_create(ctxt, values)
     values = {
         'network_id': network['id'],
         'address': 'fake2',
         'allocated': True
     }
     address2 = db.fixed_ip_create(ctxt, values)
     self.assertRaises(exception.NetworkInUse, db.network_delete_safe, ctxt,
                       network['id'])
     db.fixed_ip_update(ctxt, address2, {'allocated': False})
     network = db.network_delete_safe(ctxt, network['id'])
     ctxt = ctxt.elevated(read_deleted='yes')
     fixed_ip = db.fixed_ip_get_by_address(ctxt, address1)
     self.assertTrue(fixed_ip['deleted'])
Code Example #33
File: test_virt.py Project: superstack/nova
    def _check_xml_and_container(self, instance):
        user_context = context.RequestContext(project=self.project,
                                              user=self.user)
        instance_ref = db.instance_create(user_context, instance)
        host = self.network.get_network_host(user_context.elevated())
        network_ref = db.project_get_network(context.get_admin_context(),
                                             self.project.id)

        fixed_ip = {'address': self.test_ip,
                    'network_id': network_ref['id']}

        ctxt = context.get_admin_context()
        fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip)
        db.fixed_ip_update(ctxt, self.test_ip,
                                 {'allocated': True,
                                  'instance_id': instance_ref['id']})

        self.flags(libvirt_type='lxc')
        conn = libvirt_conn.LibvirtConnection(True)

        uri = conn.get_uri()
        self.assertEquals(uri, 'lxc:///')

        xml = conn.to_xml(instance_ref)
        tree = xml_to_tree(xml)

        check = [
            (lambda t: t.find('.').get('type'), 'lxc'),
            (lambda t: t.find('./os/type').text, 'exe'),
            (lambda t: t.find('./devices/filesystem/target').get('dir'), '/')]

        for i, (check, expected_result) in enumerate(check):
            self.assertEqual(check(tree),
                             expected_result,
                             '%s failed common check %d' % (xml, i))

        target = tree.find('./devices/filesystem/source').get('dir')
        self.assertTrue(len(target) > 0)
Code Example #34
    def _check_xml_and_container(self, instance):
        user_context = context.RequestContext(project=self.project,
                                              user=self.user)
        instance_ref = db.instance_create(user_context, instance)
        host = self.network.get_network_host(user_context.elevated())
        network_ref = db.project_get_network(context.get_admin_context(),
                                             self.project.id)

        fixed_ip = {'address': self.test_ip, 'network_id': network_ref['id']}

        ctxt = context.get_admin_context()
        fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip)
        db.fixed_ip_update(ctxt, self.test_ip, {
            'allocated': True,
            'instance_id': instance_ref['id']
        })

        self.flags(libvirt_type='lxc')
        conn = libvirt_conn.LibvirtConnection(True)

        uri = conn.get_uri()
        self.assertEquals(uri, 'lxc:///')

        xml = conn.to_xml(instance_ref)
        tree = xml_to_tree(xml)

        check = [(lambda t: t.find('.').get('type'), 'lxc'),
                 (lambda t: t.find('./os/type').text, 'exe'),
                 (lambda t: t.find('./devices/filesystem/target').get('dir'),
                  '/')]

        for i, (check, expected_result) in enumerate(check):
            self.assertEqual(check(tree), expected_result,
                             '%s failed common check %d' % (xml, i))

        target = tree.find('./devices/filesystem/source').get('dir')
        self.assertTrue(len(target) > 0)
Code Example #35
    def test_creates_base_rule_first(self):
        # These come pre-defined by libvirt
        self.defined_filters = [
            'no-mac-spoofing', 'no-ip-spoofing', 'no-arp-spoofing',
            'allow-dhcp-server'
        ]

        self.recursive_depends = {}
        for f in self.defined_filters:
            self.recursive_depends[f] = []

        def _filterDefineXMLMock(xml):
            dom = xml_to_dom(xml)
            name = dom.firstChild.getAttribute('name')
            self.recursive_depends[name] = []
            for f in dom.getElementsByTagName('filterref'):
                ref = f.getAttribute('filter')
                self.assertTrue(
                    ref in self.defined_filters,
                    ('%s referenced filter that does ' + 'not yet exist: %s') %
                    (name, ref))
                dependencies = [ref] + self.recursive_depends[ref]
                self.recursive_depends[name] += dependencies

            self.defined_filters.append(name)
            return True

        self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock

        instance_ref = db.instance_create(self.context, {
            'user_id': 'fake',
            'project_id': 'fake'
        })
        inst_id = instance_ref['id']

        ip = '10.11.12.13'

        network_ref = db.project_get_network(self.context, 'fake')

        fixed_ip = {'address': ip, 'network_id': network_ref['id']}

        admin_ctxt = context.get_admin_context()
        db.fixed_ip_create(admin_ctxt, fixed_ip)
        db.fixed_ip_update(admin_ctxt, ip, {
            'allocated': True,
            'instance_id': instance_ref['id']
        })

        def _ensure_all_called():
            instance_filter = 'nova-instance-%s' % instance_ref['name']
            secgroup_filter = 'nova-secgroup-%s' % self.security_group['id']
            for required in [
                    secgroup_filter, 'allow-dhcp-server', 'no-arp-spoofing',
                    'no-ip-spoofing', 'no-mac-spoofing'
            ]:
                self.assertTrue(
                    required in self.recursive_depends[instance_filter],
                    "Instance's filter does not include %s" % required)

        self.security_group = self.setup_and_return_security_group()

        db.instance_add_security_group(self.context, inst_id,
                                       self.security_group.id)
        instance = db.instance_get(self.context, inst_id)

        self.fw.setup_basic_filtering(instance)
        self.fw.prepare_instance_filter(instance)
        self.fw.apply_instance_filter(instance)
        _ensure_all_called()
        self.teardown_security_group()
        db.instance_destroy(admin_ctxt, instance_ref['id'])
Code Example #36
File: test_virt.py Project: yosh/nova
    def test_creates_base_rule_first(self):
        # These come pre-defined by libvirt
        self.defined_filters = ["no-mac-spoofing", "no-ip-spoofing", "no-arp-spoofing", "allow-dhcp-server"]

        self.recursive_depends = {}
        for f in self.defined_filters:
            self.recursive_depends[f] = []

        def _filterDefineXMLMock(xml):
            dom = xml_to_dom(xml)
            name = dom.firstChild.getAttribute("name")
            self.recursive_depends[name] = []
            for f in dom.getElementsByTagName("filterref"):
                ref = f.getAttribute("filter")
                self.assertTrue(
                    ref in self.defined_filters, ("%s referenced filter that does " + "not yet exist: %s") % (name, ref)
                )
                dependencies = [ref] + self.recursive_depends[ref]
                self.recursive_depends[name] += dependencies

            self.defined_filters.append(name)
            return True

        self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock

        instance_ref = db.instance_create(self.context, {"user_id": "fake", "project_id": "fake"})
        inst_id = instance_ref["id"]

        ip = "10.11.12.13"

        network_ref = db.project_get_network(self.context, "fake")

        fixed_ip = {"address": ip, "network_id": network_ref["id"]}

        admin_ctxt = context.get_admin_context()
        db.fixed_ip_create(admin_ctxt, fixed_ip)
        db.fixed_ip_update(admin_ctxt, ip, {"allocated": True, "instance_id": instance_ref["id"]})

        def _ensure_all_called():
            instance_filter = "nova-instance-%s" % instance_ref["name"]
            secgroup_filter = "nova-secgroup-%s" % self.security_group["id"]
            for required in [
                secgroup_filter,
                "allow-dhcp-server",
                "no-arp-spoofing",
                "no-ip-spoofing",
                "no-mac-spoofing",
            ]:
                self.assertTrue(
                    required in self.recursive_depends[instance_filter],
                    "Instance's filter does not include %s" % required,
                )

        self.security_group = self.setup_and_return_security_group()

        db.instance_add_security_group(self.context, inst_id, self.security_group.id)
        instance = db.instance_get(self.context, inst_id)

        self.fw.setup_basic_filtering(instance)
        self.fw.prepare_instance_filter(instance)
        self.fw.apply_instance_filter(instance)
        _ensure_all_called()
        self.teardown_security_group()
        db.instance_destroy(admin_ctxt, instance_ref["id"])
Code Example #37
File: test_virt.py Project: yosh/nova
    def _check_xml_and_uri(self, instance, expect_ramdisk, expect_kernel, rescue=False):
        user_context = context.RequestContext(project=self.project, user=self.user)
        instance_ref = db.instance_create(user_context, instance)
        host = self.network.get_network_host(user_context.elevated())
        network_ref = db.project_get_network(context.get_admin_context(), self.project.id)

        fixed_ip = {"address": self.test_ip, "network_id": network_ref["id"]}

        ctxt = context.get_admin_context()
        fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip)
        db.fixed_ip_update(ctxt, self.test_ip, {"allocated": True, "instance_id": instance_ref["id"]})

        type_uri_map = {
            "qemu": (
                "qemu:///system",
                [
                    (lambda t: t.find(".").get("type"), "qemu"),
                    (lambda t: t.find("./os/type").text, "hvm"),
                    (lambda t: t.find("./devices/emulator"), None),
                ],
            ),
            "kvm": (
                "qemu:///system",
                [
                    (lambda t: t.find(".").get("type"), "kvm"),
                    (lambda t: t.find("./os/type").text, "hvm"),
                    (lambda t: t.find("./devices/emulator"), None),
                ],
            ),
            "uml": (
                "uml:///system",
                [(lambda t: t.find(".").get("type"), "uml"), (lambda t: t.find("./os/type").text, "uml")],
            ),
            "xen": (
                "xen:///",
                [(lambda t: t.find(".").get("type"), "xen"), (lambda t: t.find("./os/type").text, "linux")],
            ),
        }

        for hypervisor_type in ["qemu", "kvm", "xen"]:
            check_list = type_uri_map[hypervisor_type][1]

            if rescue:
                check = (lambda t: t.find("./os/kernel").text.split("/")[1], "kernel.rescue")
                check_list.append(check)
                check = (lambda t: t.find("./os/initrd").text.split("/")[1], "ramdisk.rescue")
                check_list.append(check)
            else:
                if expect_kernel:
                    check = (lambda t: t.find("./os/kernel").text.split("/")[1], "kernel")
                else:
                    check = (lambda t: t.find("./os/kernel"), None)
                check_list.append(check)

                if expect_ramdisk:
                    check = (lambda t: t.find("./os/initrd").text.split("/")[1], "ramdisk")
                else:
                    check = (lambda t: t.find("./os/initrd"), None)
                check_list.append(check)

        common_checks = [
            (lambda t: t.find(".").tag, "domain"),
            (lambda t: t.find("./devices/interface/filterref/parameter").get("name"), "IP"),
            (lambda t: t.find("./devices/interface/filterref/parameter").get("value"), "10.11.12.13"),
            (lambda t: t.findall("./devices/interface/filterref/parameter")[1].get("name"), "DHCPSERVER"),
            (lambda t: t.findall("./devices/interface/filterref/parameter")[1].get("value"), "10.0.0.1"),
            (lambda t: t.find("./devices/serial/source").get("path").split("/")[1], "console.log"),
            (lambda t: t.find("./memory").text, "2097152"),
        ]
        if rescue:
            common_checks += [
                (lambda t: t.findall("./devices/disk/source")[0].get("file").split("/")[1], "disk.rescue"),
                (lambda t: t.findall("./devices/disk/source")[1].get("file").split("/")[1], "disk"),
            ]
        else:
            common_checks += [(lambda t: t.findall("./devices/disk/source")[0].get("file").split("/")[1], "disk")]
            common_checks += [(lambda t: t.findall("./devices/disk/source")[1].get("file").split("/")[1], "disk.local")]

        for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
            FLAGS.libvirt_type = libvirt_type
            conn = libvirt_conn.LibvirtConnection(True)

            uri = conn.get_uri()
            self.assertEquals(uri, expected_uri)

            xml = conn.to_xml(instance_ref, rescue)
            tree = xml_to_tree(xml)
            for i, (check, expected_result) in enumerate(checks):
                self.assertEqual(check(tree), expected_result, "%s failed check %d" % (xml, i))

            for i, (check, expected_result) in enumerate(common_checks):
                self.assertEqual(check(tree), expected_result, "%s failed common check %d" % (xml, i))

        # This test is supposed to make sure we don't override a specifically
        # set uri
        #
        # Deliberately not just assigning this string to FLAGS.libvirt_uri and
        # checking against that later on. This way we make sure the
        # implementation doesn't fiddle around with the FLAGS.
        testuri = "something completely different"
        FLAGS.libvirt_uri = testuri
        for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
            FLAGS.libvirt_type = libvirt_type
            conn = libvirt_conn.LibvirtConnection(True)
            uri = conn.get_uri()
            self.assertEquals(uri, testuri)
        db.instance_destroy(user_context, instance_ref["id"])
Code Example #38
    def _check_xml_and_uri(self,
                           instance,
                           expect_ramdisk,
                           expect_kernel,
                           rescue=False):
        user_context = context.RequestContext(project=self.project,
                                              user=self.user)
        instance_ref = db.instance_create(user_context, instance)
        host = self.network.get_network_host(user_context.elevated())
        network_ref = db.project_get_network(context.get_admin_context(),
                                             self.project.id)

        fixed_ip = {'address': self.test_ip, 'network_id': network_ref['id']}

        ctxt = context.get_admin_context()
        fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip)
        db.fixed_ip_update(ctxt, self.test_ip, {
            'allocated': True,
            'instance_id': instance_ref['id']
        })

        type_uri_map = {
            'qemu': ('qemu:///system',
                     [(lambda t: t.find('.').get('type'), 'qemu'),
                      (lambda t: t.find('./os/type').text, 'hvm'),
                      (lambda t: t.find('./devices/emulator'), None)]),
            'kvm': ('qemu:///system',
                    [(lambda t: t.find('.').get('type'), 'kvm'),
                     (lambda t: t.find('./os/type').text, 'hvm'),
                     (lambda t: t.find('./devices/emulator'), None)]),
            'uml':
            ('uml:///system', [(lambda t: t.find('.').get('type'), 'uml'),
                               (lambda t: t.find('./os/type').text, 'uml')]),
            'xen':
            ('xen:///', [(lambda t: t.find('.').get('type'), 'xen'),
                         (lambda t: t.find('./os/type').text, 'linux')]),
        }

        for hypervisor_type in ['qemu', 'kvm', 'xen']:
            check_list = type_uri_map[hypervisor_type][1]

            if rescue:
                check = (lambda t: t.find('./os/kernel').text.split('/')[1],
                         'kernel.rescue')
                check_list.append(check)
                check = (lambda t: t.find('./os/initrd').text.split('/')[1],
                         'ramdisk.rescue')
                check_list.append(check)
            else:
                if expect_kernel:
                    check = (
                        lambda t: t.find('./os/kernel').text.split('/')[1],
                        'kernel')
                else:
                    check = (lambda t: t.find('./os/kernel'), None)
                check_list.append(check)

                if expect_ramdisk:
                    check = (
                        lambda t: t.find('./os/initrd').text.split('/')[1],
                        'ramdisk')
                else:
                    check = (lambda t: t.find('./os/initrd'), None)
                check_list.append(check)

        common_checks = [
            (lambda t: t.find('.').tag, 'domain'),
            (lambda t: t.find('./devices/interface/filterref/parameter').get(
                'name'), 'IP'),
            (lambda t: t.find('./devices/interface/filterref/parameter').get(
                'value'), '10.11.12.13'),
            (lambda t: t.findall('./devices/interface/filterref/parameter')[1].
             get('name'), 'DHCPSERVER'),
            (lambda t: t.findall('./devices/interface/filterref/parameter')[1].
             get('value'), '10.0.0.1'),
            (lambda t: t.find('./devices/serial/source').get('path').split(
                '/')[1], 'console.log'),
            (lambda t: t.find('./memory').text, '2097152')
        ]
        if rescue:
            common_checks += [(lambda t: t.findall('./devices/disk/source')[0].
                               get('file').split('/')[1], 'disk.rescue'),
                              (lambda t: t.findall('./devices/disk/source')[1].
                               get('file').split('/')[1], 'disk')]
        else:
            common_checks += [(lambda t: t.findall('./devices/disk/source')[0].
                               get('file').split('/')[1], 'disk')]
            common_checks += [(lambda t: t.findall('./devices/disk/source')[1].
                               get('file').split('/')[1], 'disk.local')]

        for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
            FLAGS.libvirt_type = libvirt_type
            conn = libvirt_conn.LibvirtConnection(True)

            uri = conn.get_uri()
            self.assertEquals(uri, expected_uri)

            xml = conn.to_xml(instance_ref, rescue)
            tree = xml_to_tree(xml)
            for i, (check, expected_result) in enumerate(checks):
                self.assertEqual(check(tree), expected_result,
                                 '%s failed check %d' % (xml, i))

            for i, (check, expected_result) in enumerate(common_checks):
                self.assertEqual(check(tree), expected_result,
                                 '%s failed common check %d' % (xml, i))

        # This test is supposed to make sure we don't
        # override a specifically set uri
        #
        # Deliberately not just assigning this string to FLAGS.libvirt_uri and
        # checking against that later on. This way we make sure the
        # implementation doesn't fiddle around with the FLAGS.
        testuri = 'something completely different'
        FLAGS.libvirt_uri = testuri
        for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
            FLAGS.libvirt_type = libvirt_type
            conn = libvirt_conn.LibvirtConnection(True)
            uri = conn.get_uri()
            self.assertEquals(uri, testuri)
        db.instance_destroy(user_context, instance_ref['id'])
Code Example #39
    def test_static_filters(self):
        instance_ref = db.instance_create(
            self.context, {
                'user_id': 'fake',
                'project_id': 'fake',
                'mac_address': '56:12:12:12:12:12'
            })
        ip = '10.11.12.13'

        network_ref = db.project_get_network(self.context, 'fake')

        fixed_ip = {'address': ip, 'network_id': network_ref['id']}

        admin_ctxt = context.get_admin_context()
        db.fixed_ip_create(admin_ctxt, fixed_ip)
        db.fixed_ip_update(admin_ctxt, ip, {
            'allocated': True,
            'instance_id': instance_ref['id']
        })

        secgroup = db.security_group_create(
            admin_ctxt, {
                'user_id': 'fake',
                'project_id': 'fake',
                'name': 'testgroup',
                'description': 'test group'
            })

        db.security_group_rule_create(
            admin_ctxt, {
                'parent_group_id': secgroup['id'],
                'protocol': 'icmp',
                'from_port': -1,
                'to_port': -1,
                'cidr': '192.168.11.0/24'
            })

        db.security_group_rule_create(
            admin_ctxt, {
                'parent_group_id': secgroup['id'],
                'protocol': 'icmp',
                'from_port': 8,
                'to_port': -1,
                'cidr': '192.168.11.0/24'
            })

        db.security_group_rule_create(
            admin_ctxt, {
                'parent_group_id': secgroup['id'],
                'protocol': 'tcp',
                'from_port': 80,
                'to_port': 81,
                'cidr': '192.168.10.0/24'
            })

        db.instance_add_security_group(admin_ctxt, instance_ref['id'],
                                       secgroup['id'])
        instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])

        #        self.fw.add_instance(instance_ref)
        def fake_iptables_execute(*cmd, **kwargs):
            process_input = kwargs.get('process_input', None)
            if cmd == ('sudo', 'ip6tables-save', '-t', 'filter'):
                return '\n'.join(self.in6_filter_rules), None
            if cmd == ('sudo', 'iptables-save', '-t', 'filter'):
                return '\n'.join(self.in_filter_rules), None
            if cmd == ('sudo', 'iptables-save', '-t', 'nat'):
                return '\n'.join(self.in_nat_rules), None
            if cmd == ('sudo', 'iptables-restore'):
                lines = process_input.split('\n')
                if '*filter' in lines:
                    self.out_rules = lines
                return '', ''
            if cmd == ('sudo', 'ip6tables-restore'):
                lines = process_input.split('\n')
                if '*filter' in lines:
                    self.out6_rules = lines
                return '', ''
            print cmd, kwargs

        from nova.network import linux_net
        linux_net.iptables_manager.execute = fake_iptables_execute

        self.fw.prepare_instance_filter(instance_ref)
        self.fw.apply_instance_filter(instance_ref)

        in_rules = filter(lambda l: not l.startswith('#'),
                          self.in_filter_rules)
        for rule in in_rules:
            if not 'nova' in rule:
                self.assertTrue(rule in self.out_rules,
                                'Rule went missing: %s' % rule)

        instance_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            if '-d 10.11.12.13 -j' in rule:
                instance_chain = rule.split(' ')[-1]
                break
        self.assertTrue(instance_chain, "The instance chain wasn't added")

        security_group_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            if '-A %s -j' % instance_chain in rule:
                security_group_chain = rule.split(' ')[-1]
                break
        self.assertTrue(security_group_chain,
                        "The security group chain wasn't added")

        regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -j ACCEPT')
        self.assertTrue(
            len(filter(regex.match, self.out_rules)) > 0,
            "ICMP acceptance rule wasn't added")

        regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -m icmp '
                           '--icmp-type 8 -j ACCEPT')
        self.assertTrue(
            len(filter(regex.match, self.out_rules)) > 0,
            "ICMP Echo Request acceptance rule wasn't added")

        regex = re.compile('-A .* -p tcp -s 192.168.10.0/24 -m multiport '
                           '--dports 80:81 -j ACCEPT')
        self.assertTrue(
            len(filter(regex.match, self.out_rules)) > 0,
            "TCP port 80/81 acceptance rule wasn't added")
        db.instance_destroy(admin_ctxt, instance_ref['id'])
Code Example #40
File: test_virt.py Project: superstack/nova
    def test_creates_base_rule_first(self):
        # These come pre-defined by libvirt
        self.defined_filters = ['no-mac-spoofing',
                                'no-ip-spoofing',
                                'no-arp-spoofing',
                                'allow-dhcp-server']

        self.recursive_depends = {}
        for f in self.defined_filters:
            self.recursive_depends[f] = []

        def _filterDefineXMLMock(xml):
            dom = xml_to_dom(xml)
            name = dom.firstChild.getAttribute('name')
            self.recursive_depends[name] = []
            for f in dom.getElementsByTagName('filterref'):
                ref = f.getAttribute('filter')
                self.assertTrue(ref in self.defined_filters,
                                ('%s referenced filter that does ' +
                                'not yet exist: %s') % (name, ref))
                dependencies = [ref] + self.recursive_depends[ref]
                self.recursive_depends[name] += dependencies

            self.defined_filters.append(name)
            return True

        self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock

        instance_ref = self._create_instance()
        inst_id = instance_ref['id']

        ip = '10.11.12.13'

        network_ref = db.project_get_network(self.context, 'fake')
        fixed_ip = {'address': ip, 'network_id': network_ref['id']}

        admin_ctxt = context.get_admin_context()
        db.fixed_ip_create(admin_ctxt, fixed_ip)
        db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
                                            'instance_id': inst_id})

        def _ensure_all_called():
            instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'],
                                                       '00A0C914C829')
            secgroup_filter = 'nova-secgroup-%s' % self.security_group['id']
            for required in [secgroup_filter, 'allow-dhcp-server',
                             'no-arp-spoofing', 'no-ip-spoofing',
                             'no-mac-spoofing']:
                self.assertTrue(required in
                                self.recursive_depends[instance_filter],
                                "Instance's filter does not include %s" %
                                required)

        self.security_group = self.setup_and_return_security_group()

        db.instance_add_security_group(self.context, inst_id,
                                       self.security_group.id)
        instance = db.instance_get(self.context, inst_id)

        self.fw.setup_basic_filtering(instance)
        self.fw.prepare_instance_filter(instance)
        self.fw.apply_instance_filter(instance)
        _ensure_all_called()
        self.teardown_security_group()
        db.instance_destroy(admin_ctxt, instance_ref['id'])
Code example #41
File: test_virt.py Project: superstack/nova
    def test_static_filters(self):
        instance_ref = self._create_instance_ref()
        ip = '10.11.12.13'

        network_ref = db.project_get_network(self.context,
                                             'fake')

        fixed_ip = {'address': ip,
                    'network_id': network_ref['id']}

        admin_ctxt = context.get_admin_context()
        db.fixed_ip_create(admin_ctxt, fixed_ip)
        db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
                                            'instance_id': instance_ref['id']})

        secgroup = db.security_group_create(admin_ctxt,
                                            {'user_id': 'fake',
                                             'project_id': 'fake',
                                             'name': 'testgroup',
                                             'description': 'test group'})

        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'icmp',
                                       'from_port': -1,
                                       'to_port': -1,
                                       'cidr': '192.168.11.0/24'})

        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'icmp',
                                       'from_port': 8,
                                       'to_port': -1,
                                       'cidr': '192.168.11.0/24'})

        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'tcp',
                                       'from_port': 80,
                                       'to_port': 81,
                                       'cidr': '192.168.10.0/24'})

        db.instance_add_security_group(admin_ctxt, instance_ref['id'],
                                       secgroup['id'])
        instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])

#        self.fw.add_instance(instance_ref)
        def fake_iptables_execute(*cmd, **kwargs):
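            # Stub for iptables_manager.execute: feed canned *-save output back
            # to the manager and capture whatever it tries to restore into
            # self.out_rules / self.out6_rules for the assertions below.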
            process_input = kwargs.get('process_input', None)
            if cmd == ('sudo', 'ip6tables-save', '-t', 'filter'):
                return '\n'.join(self.in6_filter_rules), None
            if cmd == ('sudo', 'iptables-save', '-t', 'filter'):
                return '\n'.join(self.in_filter_rules), None
            if cmd == ('sudo', 'iptables-save', '-t', 'nat'):
                return '\n'.join(self.in_nat_rules), None
            if cmd == ('sudo', 'iptables-restore'):
                lines = process_input.split('\n')
                if '*filter' in lines:
                    self.out_rules = lines
                return '', ''
            if cmd == ('sudo', 'ip6tables-restore'):
                lines = process_input.split('\n')
                if '*filter' in lines:
                    self.out6_rules = lines
                return '', ''
            print cmd, kwargs

        from nova.network import linux_net
        linux_net.iptables_manager.execute = fake_iptables_execute

        self.fw.prepare_instance_filter(instance_ref)
        self.fw.apply_instance_filter(instance_ref)

        in_rules = filter(lambda l: not l.startswith('#'),
                          self.in_filter_rules)
        for rule in in_rules:
            if not 'nova' in rule:
                self.assertTrue(rule in self.out_rules,
                                'Rule went missing: %s' % rule)

        instance_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            if '-d 10.11.12.13 -j' in rule:
                instance_chain = rule.split(' ')[-1]
                break
        self.assertTrue(instance_chain, "The instance chain wasn't added")

        security_group_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            if '-A %s -j' % instance_chain in rule:
                security_group_chain = rule.split(' ')[-1]
                break
        self.assertTrue(security_group_chain,
                        "The security group chain wasn't added")

        regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -j ACCEPT')
        self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                        "ICMP acceptance rule wasn't added")

        regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -m icmp '
                           '--icmp-type 8 -j ACCEPT')
        self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                        "ICMP Echo Request acceptance rule wasn't added")

        regex = re.compile('-A .* -p tcp -s 192.168.10.0/24 -m multiport '
                           '--dports 80:81 -j ACCEPT')
        self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                        "TCP port 80/81 acceptance rule wasn't added")
        db.instance_destroy(admin_ctxt, instance_ref['id'])
Code example #42
File: test_virt.py Project: superstack/nova
    def _check_xml_and_uri(self, instance, expect_ramdisk, expect_kernel,
                           rescue=False):
        user_context = context.RequestContext(project=self.project,
                                              user=self.user)
        instance_ref = db.instance_create(user_context, instance)
        host = self.network.get_network_host(user_context.elevated())
        network_ref = db.project_get_network(context.get_admin_context(),
                                             self.project.id)

        fixed_ip = {'address':    self.test_ip,
                    'network_id': network_ref['id']}

        ctxt = context.get_admin_context()
        fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip)
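        # Tie self.test_ip to the instance so it shows up as the 'IP' filterref
        # parameter in the generated domain XML.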
        db.fixed_ip_update(ctxt, self.test_ip,
                                 {'allocated':   True,
                                  'instance_id': instance_ref['id']})

        type_uri_map = {'qemu': ('qemu:///system',
                             [(lambda t: t.find('.').get('type'), 'qemu'),
                              (lambda t: t.find('./os/type').text, 'hvm'),
                              (lambda t: t.find('./devices/emulator'), None)]),
                        'kvm': ('qemu:///system',
                             [(lambda t: t.find('.').get('type'), 'kvm'),
                              (lambda t: t.find('./os/type').text, 'hvm'),
                              (lambda t: t.find('./devices/emulator'), None)]),
                        'uml': ('uml:///system',
                             [(lambda t: t.find('.').get('type'), 'uml'),
                              (lambda t: t.find('./os/type').text, 'uml')]),
                        'xen': ('xen:///',
                             [(lambda t: t.find('.').get('type'), 'xen'),
                              (lambda t: t.find('./os/type').text, 'linux')]),
                              }

        for hypervisor_type in ['qemu', 'kvm', 'xen']:
            check_list = type_uri_map[hypervisor_type][1]
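            # Rescue boots must reference kernel.rescue/ramdisk.rescue; normal
            # boots expect kernel/initrd elements only when the image actually
            # provides a kernel or ramdisk.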

            if rescue:
                check = (lambda t: t.find('./os/kernel').text.split('/')[1],
                         'kernel.rescue')
                check_list.append(check)
                check = (lambda t: t.find('./os/initrd').text.split('/')[1],
                         'ramdisk.rescue')
                check_list.append(check)
            else:
                if expect_kernel:
                    check = (lambda t: t.find('./os/kernel').text.split(
                        '/')[1], 'kernel')
                else:
                    check = (lambda t: t.find('./os/kernel'), None)
                check_list.append(check)

                if expect_ramdisk:
                    check = (lambda t: t.find('./os/initrd').text.split(
                        '/')[1], 'ramdisk')
                else:
                    check = (lambda t: t.find('./os/initrd'), None)
                check_list.append(check)

        parameter = './devices/interface/filterref/parameter'
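        # Checks shared by every hypervisor type: the filterref parameters
        # carry the instance's IP and DHCP server, the serial console logs to
        # console.log, and the memory and disk sources are as expected.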
        common_checks = [
            (lambda t: t.find('.').tag, 'domain'),
            (lambda t: t.find(parameter).get('name'), 'IP'),
            (lambda t: t.find(parameter).get('value'), '10.11.12.13'),
            (lambda t: t.findall(parameter)[1].get('name'), 'DHCPSERVER'),
            (lambda t: t.findall(parameter)[1].get('value'), '10.0.0.1'),
            (lambda t: t.find('./devices/serial/source').get(
                'path').split('/')[1], 'console.log'),
            (lambda t: t.find('./memory').text, '2097152')]
        if rescue:
            common_checks += [
                (lambda t: t.findall('./devices/disk/source')[0].get(
                    'file').split('/')[1], 'disk.rescue'),
                (lambda t: t.findall('./devices/disk/source')[1].get(
                    'file').split('/')[1], 'disk')]
        else:
            common_checks += [(lambda t: t.findall(
                './devices/disk/source')[0].get('file').split('/')[1],
                               'disk')]
            common_checks += [(lambda t: t.findall(
                './devices/disk/source')[1].get('file').split('/')[1],
                               'disk.local')]

        for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
            FLAGS.libvirt_type = libvirt_type
            conn = libvirt_conn.LibvirtConnection(True)

            uri = conn.get_uri()
            self.assertEquals(uri, expected_uri)

            xml = conn.to_xml(instance_ref, rescue)
            tree = xml_to_tree(xml)
            for i, (check, expected_result) in enumerate(checks):
                self.assertEqual(check(tree),
                                 expected_result,
                                 '%s failed check %d' % (xml, i))

            for i, (check, expected_result) in enumerate(common_checks):
                self.assertEqual(check(tree),
                                 expected_result,
                                 '%s failed common check %d' % (xml, i))

        # This test is supposed to make sure we don't
        # override a specifically set uri
        #
        # Deliberately not just assigning this string to FLAGS.libvirt_uri and
        # checking against that later on. This way we make sure the
        # implementation doesn't fiddle around with the FLAGS.
        testuri = 'something completely different'
        FLAGS.libvirt_uri = testuri
        for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
            FLAGS.libvirt_type = libvirt_type
            conn = libvirt_conn.LibvirtConnection(True)
            uri = conn.get_uri()
            self.assertEquals(uri, testuri)
        db.instance_destroy(user_context, instance_ref['id'])
Code example #43
File: test_virt.py Project: yosh/nova
    def test_static_filters(self):
        instance_ref = db.instance_create(
            self.context, {"user_id": "fake", "project_id": "fake", "mac_address": "56:12:12:12:12:12"}
        )
        ip = "10.11.12.13"

        network_ref = db.project_get_network(self.context, "fake")

        fixed_ip = {"address": ip, "network_id": network_ref["id"]}

        admin_ctxt = context.get_admin_context()
        db.fixed_ip_create(admin_ctxt, fixed_ip)
        db.fixed_ip_update(admin_ctxt, ip, {"allocated": True, "instance_id": instance_ref["id"]})

        secgroup = db.security_group_create(
            admin_ctxt, {"user_id": "fake", "project_id": "fake", "name": "testgroup", "description": "test group"}
        )

        db.security_group_rule_create(
            admin_ctxt,
            {
                "parent_group_id": secgroup["id"],
                "protocol": "icmp",
                "from_port": -1,
                "to_port": -1,
                "cidr": "192.168.11.0/24",
            },
        )

        db.security_group_rule_create(
            admin_ctxt,
            {
                "parent_group_id": secgroup["id"],
                "protocol": "icmp",
                "from_port": 8,
                "to_port": -1,
                "cidr": "192.168.11.0/24",
            },
        )

        db.security_group_rule_create(
            admin_ctxt,
            {
                "parent_group_id": secgroup["id"],
                "protocol": "tcp",
                "from_port": 80,
                "to_port": 81,
                "cidr": "192.168.10.0/24",
            },
        )

        db.instance_add_security_group(admin_ctxt, instance_ref["id"], secgroup["id"])
        instance_ref = db.instance_get(admin_ctxt, instance_ref["id"])

        #        self.fw.add_instance(instance_ref)
        def fake_iptables_execute(cmd, process_input=None):
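            # Here the execute hook receives the whole command as a single
            # string (older driver interface) rather than an argv tuple.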
            if cmd == "sudo ip6tables-save -t filter":
                return "\n".join(self.in6_rules), None
            if cmd == "sudo iptables-save -t filter":
                return "\n".join(self.in_rules), None
            if cmd == "sudo iptables-restore":
                self.out_rules = process_input.split("\n")
                return "", ""
            if cmd == "sudo ip6tables-restore":
                self.out6_rules = process_input.split("\n")
                return "", ""

        self.fw.execute = fake_iptables_execute

        self.fw.prepare_instance_filter(instance_ref)
        self.fw.apply_instance_filter(instance_ref)

        in_rules = filter(lambda l: not l.startswith("#"), self.in_rules)
        for rule in in_rules:
            if not "nova" in rule:
                self.assertTrue(rule in self.out_rules, "Rule went missing: %s" % rule)

        instance_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            if "-d 10.11.12.13 -j" in rule:
                instance_chain = rule.split(" ")[-1]
                break
        self.assertTrue(instance_chain, "The instance chain wasn't added")

        security_group_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            if "-A %s -j" % instance_chain in rule:
                security_group_chain = rule.split(" ")[-1]
                break
        self.assertTrue(security_group_chain, "The security group chain wasn't added")

        self.assertTrue(
            "-A %s -p icmp -s 192.168.11.0/24 -j ACCEPT" % security_group_chain in self.out_rules,
            "ICMP acceptance rule wasn't added",
        )

        self.assertTrue(
            "-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type "
            "8 -j ACCEPT" % security_group_chain in self.out_rules,
            "ICMP Echo Request acceptance rule wasn't added",
        )

        self.assertTrue(
            "-A %s -p tcp -s 192.168.10.0/24 -m multiport "
            "--dports 80:81 -j ACCEPT" % security_group_chain in self.out_rules,
            "TCP port 80/81 acceptance rule wasn't added",
        )
        db.instance_destroy(admin_ctxt, instance_ref["id"])
Code example #44
File: test_virt.py Project: anotherjesse/nova
    def test_static_filters(self):
        instance_ref = db.instance_create(self.context,
                                          {'user_id': 'fake',
                                          'project_id': 'fake',
                                          'mac_address': '56:12:12:12:12:12'})
        ip = '10.11.12.13'

        network_ref = db.project_get_network(self.context,
                                             'fake')

        fixed_ip = {'address': ip,
                    'network_id': network_ref['id']}

        admin_ctxt = context.get_admin_context()
        db.fixed_ip_create(admin_ctxt, fixed_ip)
        db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
                                            'instance_id': instance_ref['id']})

        secgroup = db.security_group_create(admin_ctxt,
                                            {'user_id': 'fake',
                                             'project_id': 'fake',
                                             'name': 'testgroup',
                                             'description': 'test group'})

        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'icmp',
                                       'from_port': -1,
                                       'to_port': -1,
                                       'cidr': '192.168.11.0/24'})

        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'icmp',
                                       'from_port': 8,
                                       'to_port': -1,
                                       'cidr': '192.168.11.0/24'})

        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'tcp',
                                       'from_port': 80,
                                       'to_port': 81,
                                       'cidr': '192.168.10.0/24'})

        db.instance_add_security_group(admin_ctxt, instance_ref['id'],
                                       secgroup['id'])
        instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])

#        self.fw.add_instance(instance_ref)
        def fake_iptables_execute(cmd, process_input=None):
            if cmd == 'sudo ip6tables-save -t filter':
                return '\n'.join(self.in6_rules), None
            if cmd == 'sudo iptables-save -t filter':
                return '\n'.join(self.in_rules), None
            if cmd == 'sudo iptables-restore':
                self.out_rules = process_input.split('\n')
                return '', ''
            if cmd == 'sudo ip6tables-restore':
                self.out6_rules = process_input.split('\n')
                return '', ''
        self.fw.execute = fake_iptables_execute

        self.fw.prepare_instance_filter(instance_ref)
        self.fw.apply_instance_filter(instance_ref)

        in_rules = filter(lambda l: not l.startswith('#'), self.in_rules)
        for rule in in_rules:
            if not 'nova' in rule:
                self.assertTrue(rule in self.out_rules,
                                'Rule went missing: %s' % rule)

        instance_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            if '-d 10.11.12.13 -j' in rule:
                instance_chain = rule.split(' ')[-1]
                break
        self.assertTrue(instance_chain, "The instance chain wasn't added")

        security_group_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            if '-A %s -j' % instance_chain in rule:
                security_group_chain = rule.split(' ')[-1]
                break
        self.assertTrue(security_group_chain,
                        "The security group chain wasn't added")

        self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -j ACCEPT' % \
                               security_group_chain in self.out_rules,
                        "ICMP acceptance rule wasn't added")

        self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type '
                        '8 -j ACCEPT' % security_group_chain in self.out_rules,
                        "ICMP Echo Request acceptance rule wasn't added")

        self.assertTrue('-A %s -p tcp -s 192.168.10.0/24 -m multiport '
                        '--dports 80:81 -j ACCEPT' % security_group_chain \
                            in self.out_rules,
                        "TCP port 80/81 acceptance rule wasn't added")