Code Example #1
File: driver.py Project: ttx/nova
    def mounted_on_same_shared_storage(self, context, instance_ref, dest):
        """Check if the src and dest host mount same shared storage.

        At first, dest host creates temp file, and src host can see
        it if they mounts same shared storage. Then src host erase it.

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host

        """

        src = instance_ref['host']
        dst_t = rpc.queue_get_for(context, FLAGS.compute_topic, dest)
        src_t = rpc.queue_get_for(context, FLAGS.compute_topic, src)

        filename = rpc.call(context, dst_t,
                            {"method": 'create_shared_storage_test_file'})

        try:
            # make sure existence at src host.
            ret = rpc.call(context, src_t,
                        {"method": 'check_shared_storage_test_file',
                        "args": {'filename': filename}})

        finally:
            rpc.cast(context, dst_t,
                    {"method": 'cleanup_shared_storage_test_file',
                    "args": {'filename': filename}})

        return ret
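
The snippet above follows the pattern that recurs throughout this page: resolve a per-host queue with rpc.queue_get_for, then use rpc.call for a synchronous round-trip and rpc.cast for a fire-and-forget message. The stand-alone sketch below illustrates that contract with a toy stand-in for the rpc module; the FakeRPC class, its queue naming, and the host names are illustrative assumptions, not nova's actual implementation.

class FakeRPC(object):
    # Toy stand-in for nova.rpc, only to show the call/cast contract.
    def queue_get_for(self, context, topic, host):
        # A per-host queue name, e.g. 'compute.dest_host'.
        return '%s.%s' % (topic, host)

    def call(self, context, queue, msg):
        # Synchronous: blocks until the worker on `queue` returns a result.
        print('call %s -> %s' % (queue, msg['method']))
        return 'fake-filename'

    def cast(self, context, queue, msg):
        # Asynchronous: enqueues the message and returns immediately.
        print('cast %s -> %s' % (queue, msg['method']))

rpc = FakeRPC()
dst_t = rpc.queue_get_for(None, 'compute', 'dest_host')
filename = rpc.call(None, dst_t, {'method': 'create_shared_storage_test_file'})
rpc.cast(None, dst_t, {'method': 'cleanup_shared_storage_test_file',
                       'args': {'filename': filename}})
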
Code Example #2
File: test_scheduler.py Project: kiall/nova
    def test_block_migration_dest_check_service_lack_disk(self):
        """Confirms exception raises when dest doesn't have enough disk."""

        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        self.mox.StubOutWithMock(utils, 'service_is_up')
        self.mox.StubOutWithMock(self.driver,
                'assert_compute_node_has_enough_memory')
        self.mox.StubOutWithMock(self.driver, '_get_compute_info')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(rpc, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')

        dest = 'fake_host2'
        block_migration = True
        disk_over_commit = True
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)

        self.driver._live_migration_src_check(self.context, instance)
        db.service_get_all_compute_by_host(self.context,
                dest).AndReturn(['fake_service3'])
        utils.service_is_up('fake_service3').AndReturn(True)

        # Enough memory
        self.driver.assert_compute_node_has_enough_memory(self.context,
                instance, dest)

        # Not enough disk
        self.driver._get_compute_info(self.context, dest,
                'disk_available_least').AndReturn(1023)
        rpc.queue_get_for(self.context, FLAGS.compute_topic,
                instance['host']).AndReturn('src_queue')
        instance_disk_info_msg = {
            'method': 'get_instance_disk_info',
            'args': {
                'instance_name': instance['name'],
            },
            'version': compute_rpcapi.ComputeAPI.RPC_API_VERSION,
        }
        instance_disk_info = [{'disk_size': 1024 * (1024 ** 3)}]
        rpc.call(self.context,
                 'src_queue',
                 instance_disk_info_msg,
                 None).AndReturn(jsonutils.dumps(instance_disk_info))

        self.mox.ReplayAll()
        self.assertRaises(exception.MigrationError,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)
Code Example #3
    def test_block_migration_dest_check_service_lack_disk(self):
        """Confirms exception raises when dest doesn't have enough disk."""

        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        self.mox.StubOutWithMock(utils, 'service_is_up')
        self.mox.StubOutWithMock(self.driver,
                'assert_compute_node_has_enough_memory')
        self.mox.StubOutWithMock(self.driver, '_get_compute_info')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(rpc, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')

        dest = 'fake_host2'
        block_migration = True
        disk_over_commit = True
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)

        self.driver._live_migration_src_check(self.context, instance)
        db.service_get_all_compute_by_host(self.context,
                dest).AndReturn(['fake_service3'])
        utils.service_is_up('fake_service3').AndReturn(True)

        # Enough memory
        self.driver.assert_compute_node_has_enough_memory(self.context,
                instance, dest)

        # Not enough disk
        self.driver._get_compute_info(self.context, dest,
                'disk_available_least').AndReturn(1023)
        rpc.queue_get_for(self.context, FLAGS.compute_topic,
                instance['host']).AndReturn('src_queue')
        instance_disk_info_msg = {
            'method': 'get_instance_disk_info',
            'args': {
                'instance_name': instance['name'],
            },
            'version': compute_rpcapi.ComputeAPI.RPC_API_VERSION,
        }
        instance_disk_info = [{'disk_size': 1024 * (1024 ** 3)}]
        rpc.call(self.context,
                 'src_queue',
                 instance_disk_info_msg,
                 None).AndReturn(jsonutils.dumps(instance_disk_info))

        self.mox.ReplayAll()
        self.assertRaises(exception.MigrationError,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)
Code Example #4
    def delete(self, context, volume):
        volume_id = volume['id']
        if not volume['host']:
            # NOTE(vish): scheduling failed, so delete it
            self.db.volume_destroy(context, volume_id)
            return
        if volume['status'] not in ["available", "error"]:
            msg = _("Volume status must be available or error")
            raise exception.InvalidVolume(reason=msg)

        snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
        if len(snapshots):
            msg = _("Volume still has %d dependent snapshots") % len(snapshots)
            raise exception.InvalidVolume(reason=msg)

        now = timeutils.utcnow()
        self.db.volume_update(context, volume_id, {
            'status': 'deleting',
            'terminated_at': now
        })
        host = volume['host']
        rpc.cast(context, rpc.queue_get_for(context, FLAGS.volume_topic, host),
                 {
                     "method": "delete_volume",
                     "args": {
                         "volume_id": volume_id
                     }
                 })
Code Example #5
 def delete_console(self, context, instance_uuid, console_uuid):
     instance_uuid = self._translate_uuid_if_necessary(context, instance_uuid)
     console = self.db.console_get(context, console_uuid, instance_uuid)
     # The console record carries its pool, which names the serving host.
     pool = console['pool']
     topic = rpc.queue_get_for(context, FLAGS.console_topic,
                               pool['host'])
     rpcapi = console_rpcapi.ConsoleAPI(topic=topic)
     rpcapi.remove_console(context, console['id'])
Code Example #6
def cast_to_network_host(context, host, method, update_db=False, **kwargs):
    """Cast request to a network host queue"""

    rpc.cast(context,
             rpc.queue_get_for(context, 'network', host),
             {"method": method, "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to network '%(host)s'") % locals())
Code Example #7
File: api.py Project: MirantisDellCrowbar/nova
 def initialize_connection(self, context, volume, connector):
     host = volume['host']
     queue = rpc.queue_get_for(context, FLAGS.volume_topic, host)
     return rpc.call(context, queue,
                     {"method": "initialize_connection",
                      "args": {"volume_id": volume['id'],
                               "connector": connector}})
Code Example #8
File: driver.py Project: ttx/nova
def cast_to_network_host(context, host, method, update_db=False, **kwargs):
    """Cast request to a network host queue"""

    rpc.cast(context,
             rpc.queue_get_for(context, 'network', host),
             {"method": method, "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to network '%(host)s'") % locals())
Code Example #9
File: api.py Project: MirantisDellCrowbar/nova
    def _create_snapshot(self, context, volume, name, description,
                         force=False):
        check_policy(context, 'create_snapshot', volume)

        if ((not force) and (volume['status'] != "available")):
            msg = _("must be available")
            raise exception.InvalidVolume(reason=msg)

        options = {
            'volume_id': volume['id'],
            'user_id': context.user_id,
            'project_id': context.project_id,
            'status': "creating",
            'progress': '0%',
            'volume_size': volume['size'],
            'display_name': name,
            'display_description': description}

        snapshot = self.db.snapshot_create(context, options)
        host = volume['host']
        rpc.cast(context,
                 rpc.queue_get_for(context, FLAGS.volume_topic, host),
                 {"method": "create_snapshot",
                  "args": {"volume_id": volume['id'],
                           "snapshot_id": snapshot['id']}})
        return snapshot
Code Example #10
    def _create_snapshot(self,
                         context,
                         volume,
                         name,
                         description,
                         force=False):
        check_policy(context, 'create_snapshot', volume)

        if ((not force) and (volume['status'] != "available")):
            msg = _("must be available")
            raise exception.InvalidVolume(reason=msg)

        options = {
            'volume_id': volume['id'],
            'user_id': context.user_id,
            'project_id': context.project_id,
            'status': "creating",
            'progress': '0%',
            'volume_size': volume['size'],
            'display_name': name,
            'display_description': description
        }

        snapshot = self.db.snapshot_create(context, options)
        host = volume['host']
        rpc.cast(
            context, rpc.queue_get_for(context, FLAGS.volume_topic, host), {
                "method": "create_snapshot",
                "args": {
                    "volume_id": volume['id'],
                    "snapshot_id": snapshot['id']
                }
            })
        return snapshot
Code Example #11
File: vmrc_manager.py Project: pulchart/nova
 def get_pool_for_instance_host(self, context, instance_host):
     """Gets console pool info for the instance."""
     context = context.elevated()
     console_type = self.driver.console_type
     try:
         pool = self.db.console_pool_get_by_host_type(context,
                                                      instance_host,
                                                      self.host,
                                                      console_type)
     except exception.NotFound:
         pool_info = rpc.call(context,
                              rpc.queue_get_for(context,
                                                FLAGS.compute_topic,
                                                instance_host),
                              {'method': 'get_console_pool_info',
                               'args': {'console_type': console_type}})
         pool_info['password'] = self.driver.fix_pool_password(
                                                 pool_info['password'])
         pool_info['host'] = self.host
         # ESX Address or Proxy Address
         public_host_name = pool_info['address']
         if FLAGS.console_public_hostname:
             public_host_name = FLAGS.console_public_hostname
         pool_info['public_hostname'] = public_host_name
         pool_info['console_type'] = console_type
         pool_info['compute_host'] = instance_host
         pool = self.db.console_pool_create(context, pool_info)
     return pool
Code Example #12
    def test_cast_to_network_host(self):
        host = 'fake_host1'
        method = 'fake_method'
        fake_kwargs = {'extra_arg': 'meow'}
        queue = 'fake_queue'

        self.mox.StubOutWithMock(rpc, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'cast')

        rpc.queue_get_for(self.context, 'network', host).AndReturn(queue)
        rpc.cast(self.context, queue,
                {'method': method,
                 'args': fake_kwargs})

        self.mox.ReplayAll()
        driver.cast_to_network_host(self.context, host, method,
                update_db=True, **fake_kwargs)
Code Example #13
File: test_scheduler.py Project: wja300/nova
    def test_cast_to_volume_host_update_db_without_volume_id(self):
        host = 'fake_host1'
        method = 'fake_method'
        fake_kwargs = {'extra_arg': 'meow'}
        queue = 'fake_queue'

        self.mox.StubOutWithMock(rpc, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'cast')

        rpc.queue_get_for(self.context, 'volume', host).AndReturn(queue)
        rpc.cast(self.context, queue,
                {'method': method,
                 'args': fake_kwargs})

        self.mox.ReplayAll()
        driver.cast_to_volume_host(self.context, host, method,
                update_db=True, **fake_kwargs)
Code Example #14
File: api.py Project: MirantisDellCrowbar/nova
 def attach(self, context, volume, instance_uuid, mountpoint):
     host = volume['host']
     queue = rpc.queue_get_for(context, FLAGS.volume_topic, host)
     return rpc.call(context, queue,
                     {"method": "attach_volume",
                      "args": {"volume_id": volume['id'],
                               "instance_uuid": instance_uuid,
                               "mountpoint": mountpoint}})
Code Example #15
File: api.py Project: MirantisDellCrowbar/nova
 def terminate_connection(self, context, volume, connector):
     self.unreserve_volume(context, volume)
     host = volume['host']
     queue = rpc.queue_get_for(context, FLAGS.volume_topic, host)
     return rpc.call(context, queue,
                     {"method": "terminate_connection",
                      "args": {"volume_id": volume['id'],
                               "connector": connector}})
Code Example #16
    def test_cast_to_host_unknown_topic(self):
        host = 'fake_host1'
        method = 'fake_method'
        fake_kwargs = {'extra_arg': 'meow'}
        topic = 'unknown'
        queue = 'fake_queue'

        self.mox.StubOutWithMock(rpc, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'cast')

        rpc.queue_get_for(self.context, topic, host).AndReturn(queue)
        rpc.cast(self.context, queue,
                {'method': method,
                 'args': fake_kwargs})

        self.mox.ReplayAll()
        driver.cast_to_host(self.context, topic, host, method,
                update_db=False, **fake_kwargs)
Code Example #17
File: test_scheduler.py Project: wja300/nova
    def test_cast_to_host_unknown_topic(self):
        host = 'fake_host1'
        method = 'fake_method'
        fake_kwargs = {'extra_arg': 'meow'}
        topic = 'unknown'
        queue = 'fake_queue'

        self.mox.StubOutWithMock(rpc, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'cast')

        rpc.queue_get_for(self.context, topic, host).AndReturn(queue)
        rpc.cast(self.context, queue,
                {'method': method,
                 'args': fake_kwargs})

        self.mox.ReplayAll()
        driver.cast_to_host(self.context, topic, host, method,
                update_db=False, **fake_kwargs)
Code Example #18
 def detach(self, context, volume):
     host = volume['host']
     queue = rpc.queue_get_for(context, FLAGS.volume_topic, host)
     return rpc.call(context, queue, {
         "method": "detach_volume",
         "args": {
             "volume_id": volume['id']
         }
     })
Code Example #19
File: test_scheduler.py Project: wja300/nova
    def test_live_migration_dest_host_incompatable_cpu_raises(self):
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
        self.mox.StubOutWithMock(rpc, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')
        self.mox.StubOutWithMock(rpc, 'cast')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')

        dest = 'fake_host2'
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)

        self.driver._live_migration_src_check(self.context, instance)
        self.driver._live_migration_dest_check(self.context, instance,
                dest, block_migration, disk_over_commit)

        self._check_shared_storage(dest, instance, True)

        db.service_get_all_compute_by_host(self.context, dest).AndReturn(
                [{'compute_node': [{'hypervisor_type': 'xen',
                                    'hypervisor_version': 1}]}])
        db.service_get_all_compute_by_host(self.context,
            instance['host']).AndReturn(
                    [{'compute_node': [{'hypervisor_type': 'xen',
                                        'hypervisor_version': 1,
                                        'cpu_info': 'fake_cpu_info'}]}])
        rpc.queue_get_for(self.context, FLAGS.compute_topic,
                dest).AndReturn('dest_queue')
        rpc.call(self.context, 'dest_queue',
                {'method': 'compare_cpu',
                 'args': {'cpu_info': 'fake_cpu_info'},
                 'version': compute_rpcapi.ComputeAPI.RPC_API_VERSION}, None
                ).AndRaise(rpc_common.RemoteError())

        self.mox.ReplayAll()
        self.assertRaises(rpc_common.RemoteError,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration)
Code Example #20
    def test_live_migration_dest_host_incompatable_cpu_raises(self):
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
        self.mox.StubOutWithMock(rpc, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')
        self.mox.StubOutWithMock(rpc, 'cast')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')

        dest = 'fake_host2'
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)

        self.driver._live_migration_src_check(self.context, instance)
        self.driver._live_migration_dest_check(self.context, instance,
                dest, block_migration, disk_over_commit)

        self._check_shared_storage(dest, instance, True)

        db.service_get_all_compute_by_host(self.context, dest).AndReturn(
                [{'compute_node': [{'hypervisor_type': 'xen',
                                    'hypervisor_version': 1}]}])
        db.service_get_all_compute_by_host(self.context,
            instance['host']).AndReturn(
                    [{'compute_node': [{'hypervisor_type': 'xen',
                                        'hypervisor_version': 1,
                                        'cpu_info': 'fake_cpu_info'}]}])
        rpc.queue_get_for(self.context, FLAGS.compute_topic,
                dest).AndReturn('dest_queue')
        rpc.call(self.context, 'dest_queue',
                {'method': 'compare_cpu',
                 'args': {'cpu_info': 'fake_cpu_info'},
                 'version': compute_rpcapi.ComputeAPI.RPC_API_VERSION}, None
                ).AndRaise(rpc_common.RemoteError())

        self.mox.ReplayAll()
        self.assertRaises(rpc_common.RemoteError,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration)
Code Example #21
    def assert_compute_node_has_enough_disk(self, context, instance_ref, dest,
                                            disk_over_commit):
        """Checks if destination host has enough disk for block migration.

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host
        :param disk_over_commit: if True, consider real(not virtual)
                                 disk size.

        """

        # Libvirt supports the qcow2 disk format, which is usually compressed
        # on compute nodes.
        # The real (compressed) disk image may be enlarged up to the
        # "virtual disk size", which is specified as the maximum disk size.
        # (See qemu-img -f path-to-disk)
        # The scheduler considers the destination host to have enough disk
        # space if real disk size < available disk size when
        # disk_over_commit is True, otherwise if
        # virtual disk size < available disk size.

        # Getting total available disk of host
        available_gb = self._get_compute_info(context, dest,
                                              'disk_available_least')
        available = available_gb * (1024**3)

        # Getting necessary disk size
        topic = rpc.queue_get_for(context, FLAGS.compute_topic,
                                  instance_ref['host'])
        ret = rpc.call(
            context, topic, {
                "method": 'get_instance_disk_info',
                "args": {
                    'instance_name': instance_ref['name']
                }
            })
        disk_infos = jsonutils.loads(ret)

        necessary = 0
        if disk_over_commit:
            for info in disk_infos:
                necessary += int(info['disk_size'])
        else:
            for info in disk_infos:
                necessary += int(info['virt_disk_size'])

        # Check that available disk > necessary disk
        if (available - necessary) < 0:
            instance_uuid = instance_ref['uuid']
            reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
                       "Lack of disk(host:%(available)s "
                       "<= instance:%(necessary)s)")
            raise exception.MigrationError(reason=reason % locals())
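
Plugging in the values from the failing test in Code Examples #2 and #3 shows why this check raises MigrationError there; the arithmetic below is a quick illustration, not part of the original source.

# disk_available_least is reported in GB and converted to bytes, while the
# test's single disk is 1024 GiB, so the destination comes up short.
available = 1023 * (1024 ** 3)      # 1,098,437,885,952 bytes free on dest
necessary = 1024 * (1024 ** 3)      # 1,099,511,627,776 bytes required
assert (available - necessary) < 0  # -> exception.MigrationError is raised
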
Code Example #22
 def initialize_connection(self, context, volume, connector):
     host = volume['host']
     queue = rpc.queue_get_for(context, FLAGS.volume_topic, host)
     return rpc.call(
         context, queue, {
             "method": "initialize_connection",
             "args": {
                 "volume_id": volume['id'],
                 "connector": connector
             }
         })
Code Example #23
File: manager.py Project: kiall/nova
    def delete_network(self, context, fixed_range, uuid):
        """Lookup network by uuid, delete both the IPAM
           subnet and the corresponding Quantum network.

           The fixed_range parameter is kept here for interface compatibility
           but is not used.
        """
        net_ref = db.network_get_by_uuid(context.elevated(), uuid)
        project_id = net_ref['project_id']
        q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
        net_uuid = net_ref['uuid']

        # Check for any attached ports on the network and fail the deletion if
        # there is anything but the gateway port attached.  If it is only the
        # gateway port, unattach and delete it.
        ports = self.q_conn.get_attached_ports(q_tenant_id, net_uuid)
        num_ports = len(ports)
        gw_interface_id = self.driver.get_dev(net_ref)
        gw_port_uuid = None
        if gw_interface_id is not None:
            gw_port_uuid = self.q_conn.get_port_by_attachment(
                q_tenant_id, net_uuid, gw_interface_id)

        if gw_port_uuid:
            num_ports -= 1

        if num_ports > 0:
            raise exception.NetworkBusy(network=net_uuid)

        # only delete gw ports if we are going to finish deleting network
        if gw_port_uuid:
            self.q_conn.detach_and_delete_port(q_tenant_id, net_uuid,
                                               gw_port_uuid)
            self.l3driver.remove_gateway(net_ref)

        # Now we can delete the network
        self.q_conn.delete_network(q_tenant_id, net_uuid)
        LOG.debug("Deleting network %s for tenant: %s" %
                  (net_uuid, q_tenant_id))
        self.ipam.delete_subnets_by_net_id(context, net_uuid, project_id)
        # Get rid of dnsmasq
        if FLAGS.quantum_use_dhcp:
            if net_ref['host'] == self.host:
                self.kill_dhcp(net_ref)
            else:
                topic = rpc.queue_get_for(context, FLAGS.network_topic,
                                          net_ref['host'])

                rpc.call(context, topic, {
                    'method': 'kill_dhcp',
                    'args': {
                        'net_ref': net_ref
                    }
                })
Code Example #24
 def attach(self, context, volume, instance_uuid, mountpoint):
     host = volume['host']
     queue = rpc.queue_get_for(context, FLAGS.volume_topic, host)
     return rpc.call(
         context, queue, {
             "method": "attach_volume",
             "args": {
                 "volume_id": volume['id'],
                 "instance_uuid": instance_uuid,
                 "mountpoint": mountpoint
             }
         })
Code Example #25
File: api.py Project: MirantisDellCrowbar/nova
 def delete_snapshot(self, context, snapshot):
     if snapshot['status'] not in ["available", "error"]:
         msg = _("Volume Snapshot status must be available or error")
         raise exception.InvalidVolume(reason=msg)
     self.db.snapshot_update(context, snapshot['id'],
                             {'status': 'deleting'})
     volume = self.db.volume_get(context, snapshot['volume_id'])
     host = volume['host']
     rpc.cast(context,
              rpc.queue_get_for(context, FLAGS.volume_topic, host),
              {"method": "delete_snapshot",
               "args": {"snapshot_id": snapshot['id']}})
Code Example #26
 def terminate_connection(self, context, volume, connector):
     self.unreserve_volume(context, volume)
     host = volume['host']
     queue = rpc.queue_get_for(context, FLAGS.volume_topic, host)
     return rpc.call(
         context, queue, {
             "method": "terminate_connection",
             "args": {
                 "volume_id": volume['id'],
                 "connector": connector
             }
         })
Code Example #27
File: manager.py Project: MirantisDellCrowbar/nova
    def delete_network(self, context, fixed_range, uuid):
        """Lookup network by uuid, delete both the IPAM
           subnet and the corresponding Quantum network.

           The fixed_range parameter is kept here for interface compatibility
           but is not used.
        """
        net_ref = db.network_get_by_uuid(context.elevated(), uuid)
        project_id = net_ref['project_id']
        q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
        net_uuid = net_ref['uuid']

        # Check for any attached ports on the network and fail the deletion if
        # there is anything but the gateway port attached.  If it is only the
        # gateway port, unattach and delete it.
        ports = self.q_conn.get_attached_ports(q_tenant_id, net_uuid)
        num_ports = len(ports)
        gw_interface_id = self.driver.get_dev(net_ref)
        gw_port_uuid = None
        if gw_interface_id is not None:
            gw_port_uuid = self.q_conn.get_port_by_attachment(q_tenant_id,
                                        net_uuid, gw_interface_id)

        if gw_port_uuid:
            num_ports -= 1

        if num_ports > 0:
            raise exception.NetworkBusy(network=net_uuid)

        # only delete gw ports if we are going to finish deleting network
        if gw_port_uuid:
            self.q_conn.detach_and_delete_port(q_tenant_id,
                                                   net_uuid,
                                                   gw_port_uuid)
            self.l3driver.remove_gateway(net_ref)

        # Now we can delete the network
        self.q_conn.delete_network(q_tenant_id, net_uuid)
        LOG.debug("Deleting network %s for tenant: %s" %
                  (net_uuid, q_tenant_id))
        self.ipam.delete_subnets_by_net_id(context, net_uuid, project_id)
        # Get rid of dnsmasq
        if FLAGS.quantum_use_dhcp:
            if net_ref['host'] == self.host:
                self.kill_dhcp(net_ref)
            else:
                topic = rpc.queue_get_for(context,
                        FLAGS.network_topic,
                        net_ref['host'])

                rpc.call(context, topic, {'method': 'kill_dhcp',
                        'args': {'net_ref': net_ref}})
Code Example #28
    def mounted_on_same_shared_storage(self, context, instance_ref, dest):
        """Check if the src and dest host mount same shared storage.

        At first, dest host creates temp file, and src host can see
        it if they mounts same shared storage. Then src host erase it.

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host

        """

        src = instance_ref['host']
        dst_t = rpc.queue_get_for(context, FLAGS.compute_topic, dest)
        src_t = rpc.queue_get_for(context, FLAGS.compute_topic, src)

        filename = rpc.call(context, dst_t,
                            {"method": 'create_shared_storage_test_file'})

        try:
            # make sure existence at src host.
            ret = rpc.call(
                context, src_t, {
                    "method": 'check_shared_storage_test_file',
                    "args": {
                        'filename': filename
                    }
                })

        finally:
            rpc.cast(
                context, dst_t, {
                    "method": 'cleanup_shared_storage_test_file',
                    "args": {
                        'filename': filename
                    }
                })

        return ret
Code Example #29
def cast_to_volume_host(context, host, method, update_db=True, **kwargs):
    """Cast request to a volume host queue"""

    if update_db:
        volume_id = kwargs.get('volume_id', None)
        if volume_id is not None:
            now = utils.utcnow()
            db.volume_update(context, volume_id,
                    {'host': host, 'scheduled_at': now})
    rpc.cast(context,
             rpc.queue_get_for(context, 'volume', host),
             {"method": method, "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to volume '%(host)s'") % locals())
Code Example #30
File: driver.py Project: ttx/nova
def cast_to_volume_host(context, host, method, update_db=True, **kwargs):
    """Cast request to a volume host queue"""

    if update_db:
        volume_id = kwargs.get('volume_id', None)
        if volume_id is not None:
            now = utils.utcnow()
            db.volume_update(context, volume_id,
                    {'host': host, 'scheduled_at': now})
    rpc.cast(context,
             rpc.queue_get_for(context, 'volume', host),
             {"method": method, "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to volume '%(host)s'") % locals())
Code Example #31
File: test_scheduler.py Project: wja300/nova
    def test_cast_to_volume_host_update_db_with_volume_id(self):
        host = 'fake_host1'
        method = 'fake_method'
        fake_kwargs = {'volume_id': 31337,
                       'extra_arg': 'meow'}
        queue = 'fake_queue'

        self.mox.StubOutWithMock(utils, 'utcnow')
        self.mox.StubOutWithMock(db, 'volume_update')
        self.mox.StubOutWithMock(rpc, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'cast')

        utils.utcnow().AndReturn('fake-now')
        db.volume_update(self.context, 31337,
                {'host': host, 'scheduled_at': 'fake-now'})
        rpc.queue_get_for(self.context, 'volume', host).AndReturn(queue)
        rpc.cast(self.context, queue,
                {'method': method,
                 'args': fake_kwargs})

        self.mox.ReplayAll()
        driver.cast_to_volume_host(self.context, host, method,
                update_db=True, **fake_kwargs)
Code Example #32
    def test_cast_to_volume_host_update_db_with_volume_id(self):
        host = 'fake_host1'
        method = 'fake_method'
        fake_kwargs = {'volume_id': 31337,
                       'extra_arg': 'meow'}
        queue = 'fake_queue'

        self.mox.StubOutWithMock(utils, 'utcnow')
        self.mox.StubOutWithMock(db, 'volume_update')
        self.mox.StubOutWithMock(rpc, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'cast')

        utils.utcnow().AndReturn('fake-now')
        db.volume_update(self.context, 31337,
                {'host': host, 'scheduled_at': 'fake-now'})
        rpc.queue_get_for(self.context, 'volume', host).AndReturn(queue)
        rpc.cast(self.context, queue,
                {'method': method,
                 'args': fake_kwargs})

        self.mox.ReplayAll()
        driver.cast_to_volume_host(self.context, host, method,
                update_db=True, **fake_kwargs)
Code Example #33
File: driver.py Project: ttx/nova
    def assert_compute_node_has_enough_disk(self, context, instance_ref, dest,
                                            disk_over_commit):
        """Checks if destination host has enough disk for block migration.

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host
        :param disk_over_commit: if True, consider real(not virtual)
                                 disk size.

        """

        # Libvirt supports the qcow2 disk format, which is usually compressed
        # on compute nodes.
        # The real (compressed) disk image may be enlarged up to the
        # "virtual disk size", which is specified as the maximum disk size.
        # (See qemu-img -f path-to-disk)
        # The scheduler considers the destination host to have enough disk
        # space if real disk size < available disk size when
        # disk_over_commit is True, otherwise if
        # virtual disk size < available disk size.

        # Getting total available disk of host
        available_gb = self._get_compute_info(context,
                                              dest, 'disk_available_least')
        available = available_gb * (1024 ** 3)

        # Getting necessary disk size
        topic = rpc.queue_get_for(context, FLAGS.compute_topic,
                                          instance_ref['host'])
        ret = rpc.call(context, topic,
                       {"method": 'get_instance_disk_info',
                        "args": {'instance_name': instance_ref['name']}})
        disk_infos = jsonutils.loads(ret)

        necessary = 0
        if disk_over_commit:
            for info in disk_infos:
                necessary += int(info['disk_size'])
        else:
            for info in disk_infos:
                necessary += int(info['virt_disk_size'])

        # Check that available disk > necessary disk
        if (available - necessary) < 0:
            instance_uuid = instance_ref['uuid']
            reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
                       "Lack of disk(host:%(available)s "
                       "<= instance:%(necessary)s)")
            raise exception.MigrationError(reason=reason % locals())
Code Example #34
File: driver.py Project: ttx/nova
def cast_to_compute_host(context, host, method, update_db=True, **kwargs):
    """Cast request to a compute host queue"""

    if update_db:
        # fall back on the id if the uuid is not present
        instance_id = kwargs.get('instance_id', None)
        instance_uuid = kwargs.get('instance_uuid', instance_id)
        if instance_uuid is not None:
            now = utils.utcnow()
            db.instance_update(context, instance_uuid,
                    {'host': host, 'scheduled_at': now})
    rpc.cast(context,
             rpc.queue_get_for(context, 'compute', host),
             {"method": method, "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to compute '%(host)s'") % locals())
Code Example #35
def cast_to_compute_host(context, host, method, update_db=True, **kwargs):
    """Cast request to a compute host queue"""

    if update_db:
        # fall back on the id if the uuid is not present
        instance_id = kwargs.get('instance_id', None)
        instance_uuid = kwargs.get('instance_uuid', instance_id)
        if instance_uuid is not None:
            now = utils.utcnow()
            db.instance_update(context, instance_uuid,
                    {'host': host, 'scheduled_at': now})
    rpc.cast(context,
             rpc.queue_get_for(context, 'compute', host),
             {"method": method, "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to compute '%(host)s'") % locals())
Code Example #36
 def delete_snapshot(self, context, snapshot):
     if snapshot['status'] not in ["available", "error"]:
         msg = _("Volume Snapshot status must be available or error")
         raise exception.InvalidVolume(reason=msg)
     self.db.snapshot_update(context, snapshot['id'],
                             {'status': 'deleting'})
     volume = self.db.volume_get(context, snapshot['volume_id'])
     host = volume['host']
     rpc.cast(context, rpc.queue_get_for(context, FLAGS.volume_topic, host),
              {
                  "method": "delete_snapshot",
                  "args": {
                      "snapshot_id": snapshot['id']
                  }
              })
Code Example #37
File: pool.py Project: kiall/nova
def forward_request(context, request_type, master, aggregate_id,
                    slave_compute, slave_address, slave_uuid):
    """Casts add/remove requests to the pool master."""
    # replace the address from the xenapi connection url
    # because this might be 169.254.0.1, i.e. xenapi
    # NOTE: password in clear is not great, but it'll do for now
    sender_url = swap_xapi_host(FLAGS.xenapi_connection_url, slave_address)
    rpc.cast(context, rpc.queue_get_for(context, FLAGS.compute_topic, master),
             {"method": request_type,
              "args": {"aggregate_id": aggregate_id,
                       "host": slave_compute,
                       "url": sender_url,
                       "user": FLAGS.xenapi_connection_username,
                       "passwd": FLAGS.xenapi_connection_password,
                       "compute_uuid": vm_utils.get_this_vm_uuid(),
                       "xenhost_uuid": slave_uuid, },
             })
Code Example #38
def cast_to_host(context, topic, host, method, update_db=True, **kwargs):
    """Generic cast to host"""

    topic_mapping = {
            "compute": cast_to_compute_host,
            "volume": cast_to_volume_host,
            'network': cast_to_network_host}

    func = topic_mapping.get(topic)
    if func:
        func(context, host, method, update_db=update_db, **kwargs)
    else:
        rpc.cast(context,
                 rpc.queue_get_for(context, topic, host),
                 {"method": method, "args": kwargs})
        LOG.debug(_("Casted '%(method)s' to %(topic)s '%(host)s'")
                % locals())
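
A self-contained sketch of the same dispatch idea follows; the stubbed helpers and the host/method names are illustrative only and stand in for the real cast_to_*_host functions and rpc module.

def _compute_helper(ctx, host, method, update_db=True, **kwargs):
    # Stand-in for cast_to_compute_host: may update the DB, then casts.
    print('compute helper:', host, method, kwargs)

def _generic_cast(ctx, topic, host, method, **kwargs):
    # Stand-in for the rpc.cast fallback used for unknown topics.
    print('generic cast to %s.%s:' % (topic, host), method, kwargs)

def cast_to_host_sketch(ctx, topic, host, method, update_db=True, **kwargs):
    # Known topics route through a specialised helper; anything else
    # falls through to a plain cast on the '<topic>.<host>' queue.
    mapping = {'compute': _compute_helper}
    func = mapping.get(topic)
    if func:
        func(ctx, host, method, update_db=update_db, **kwargs)
    else:
        _generic_cast(ctx, topic, host, method, **kwargs)

cast_to_host_sketch(None, 'compute', 'node-7', 'run_instance',
                    instance_uuid='fake-uuid')
cast_to_host_sketch(None, 'unknown', 'node-7', 'fake_method',
                    extra_arg='meow')
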
Code Example #39
File: driver.py Project: ttx/nova
def cast_to_host(context, topic, host, method, update_db=True, **kwargs):
    """Generic cast to host"""

    topic_mapping = {
            "compute": cast_to_compute_host,
            "volume": cast_to_volume_host,
            'network': cast_to_network_host}

    func = topic_mapping.get(topic)
    if func:
        func(context, host, method, update_db=update_db, **kwargs)
    else:
        rpc.cast(context,
                 rpc.queue_get_for(context, topic, host),
                 {"method": method, "args": kwargs})
        LOG.debug(_("Casted '%(method)s' to %(topic)s '%(host)s'")
                % locals())
Code Example #40
File: rpcapi.py Project: pulchart/nova
    def _compute_topic(self, ctxt, host, instance):
        '''Get the topic to use for a message.

        :param ctxt: request context
        :param host: explicit host to send the message to.
        :param instance: If an explicit host was not specified, use
                         instance['host']

        :returns: A topic string
        '''
        if not host:
            if not instance:
                raise exception.NovaException(_('No compute host specified'))
            host = instance['host']
        if not host:
            raise exception.NovaException(_('Unable to find host for '
                                            'Instance %s') % instance['uuid'])
        return rpc.queue_get_for(ctxt, self.topic, host)
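
The resolution order above is: an explicit host wins, then instance['host'], and anything else is an error. Below is a small stand-alone sketch of just that ordering; the function name and the use of ValueError are illustrative (nova raises NovaException).

def resolve_host(host, instance):
    # Explicit host first, then the instance's recorded host.
    if not host:
        if not instance:
            raise ValueError('No compute host specified')
        host = instance['host']
    if not host:
        raise ValueError('Unable to find host for instance %s'
                         % instance['uuid'])
    return host

assert resolve_host('node-1', None) == 'node-1'
assert resolve_host(None, {'host': 'node-2', 'uuid': 'fake-uuid'}) == 'node-2'
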
Code Example #41
    def _compute_topic(self, ctxt, host, instance):
        '''Get the topic to use for a message.

        :param ctxt: request context
        :param host: explicit host to send the message to.
        :param instance: If an explicit host was not specified, use
                         instance['host']

        :returns: A topic string
        '''
        if not host:
            if not instance:
                raise exception.NovaException(_('No compute host specified'))
            host = instance['host']
        if not host:
            raise exception.NovaException(
                _('Unable to find host for '
                  'Instance %s') % instance['uuid'])
        return rpc.queue_get_for(ctxt, self.topic, host)
Code Example #42
File: manager.py Project: kiall/nova
    def deallocate_for_instance(self, context, **kwargs):
        """Called when a VM is terminated.  Loop through each virtual
           interface in the Nova DB and remove the Quantum port and
           clear the IP allocation using the IPAM.  Finally, remove
           the virtual interfaces from the Nova DB.
        """
        instance_id = kwargs.get('instance_id')
        project_id = kwargs.pop('project_id', None)

        admin_context = context.elevated()
        vifs = db.virtual_interface_get_by_instance(admin_context, instance_id)

        for vif in vifs:
            network = db.network_get(admin_context, vif['network_id'])

            self.deallocate_port(vif['uuid'], network['uuid'], project_id,
                                 instance_id)

            ipam_tenant_id = self.deallocate_ip_address(
                context, network['uuid'], project_id, vif, instance_id)

            if FLAGS.quantum_use_dhcp:
                if network['host'] == self.host:
                    self.update_dhcp(context, ipam_tenant_id, network, vif,
                                     project_id)
                else:
                    topic = rpc.queue_get_for(context, FLAGS.network_topic,
                                              network['host'])
                    rpc.call(
                        context, topic, {
                            'method': 'update_dhcp',
                            'args': {
                                'ipam_tenant_id': ipam_tenant_id,
                                'network_ref': network,
                                'vif_ref': vif,
                                'project_id': network['project_id']
                            }
                        })

            db.virtual_interface_delete(admin_context, vif['id'])
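
The DHCP branch above shows a pattern several of these managers share: act locally when this service owns the network, otherwise RPC to the host that does. The minimal sketch below captures that decision with its dependencies injected so it stays self-contained; the names are illustrative, not nova API.

def update_dhcp_on_owner(context, network, local_host, local_update, rpc):
    # If this manager owns the network, update dnsmasq in-process;
    # otherwise send the request to the owning network host's queue.
    if network['host'] == local_host:
        return local_update(context, network)
    topic = rpc.queue_get_for(context, 'network', network['host'])
    return rpc.call(context, topic,
                    {'method': 'update_dhcp',
                     'args': {'network_ref': network}})
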
Code Example #43
File: api.py Project: MirantisDellCrowbar/nova
    def delete(self, context, volume):
        volume_id = volume['id']
        if not volume['host']:
            # NOTE(vish): scheduling failed, so delete it
            self.db.volume_destroy(context, volume_id)
            return
        if volume['status'] not in ["available", "error"]:
            msg = _("Volume status must be available or error")
            raise exception.InvalidVolume(reason=msg)

        snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
        if len(snapshots):
            msg = _("Volume still has %d dependent snapshots") % len(snapshots)
            raise exception.InvalidVolume(reason=msg)

        now = utils.utcnow()
        self.db.volume_update(context, volume_id, {'status': 'deleting',
                                                   'terminated_at': now})
        host = volume['host']
        rpc.cast(context,
                 rpc.queue_get_for(context, FLAGS.volume_topic, host),
                 {"method": "delete_volume",
                  "args": {"volume_id": volume_id}})
Code Example #44
File: manager.py Project: pulchart/nova
 def get_pool_for_instance_host(self, context, instance_host):
     context = context.elevated()
     console_type = self.driver.console_type
     try:
         pool = self.db.console_pool_get_by_host_type(context, instance_host, self.host, console_type)
     except exception.NotFound:
         # NOTE(mdragon): Right now, the only place this info exists is the
         #               compute worker's flagfile, at least for
         #               xenserver. Thus we need to ask.
         if FLAGS.stub_compute:
             pool_info = {"address": "127.0.0.1", "username": "******", "password": "******"}
         else:
             pool_info = rpc.call(
                 context,
                 rpc.queue_get_for(context, FLAGS.compute_topic, instance_host),
                 {"method": "get_console_pool_info", "args": {"console_type": console_type}},
             )
         pool_info["password"] = self.driver.fix_pool_password(pool_info["password"])
         pool_info["host"] = self.host
         pool_info["public_hostname"] = FLAGS.console_public_hostname
         pool_info["console_type"] = self.driver.console_type
         pool_info["compute_host"] = instance_host
         pool = self.db.console_pool_create(context, pool_info)
     return pool
Code Example #45
File: manager.py Project: MirantisDellCrowbar/nova
    def deallocate_for_instance(self, context, **kwargs):
        """Called when a VM is terminated.  Loop through each virtual
           interface in the Nova DB and remove the Quantum port and
           clear the IP allocation using the IPAM.  Finally, remove
           the virtual interfaces from the Nova DB.
        """
        instance_id = kwargs.get('instance_id')
        project_id = kwargs.pop('project_id', None)

        admin_context = context.elevated()
        vifs = db.virtual_interface_get_by_instance(admin_context,
                                                    instance_id)

        for vif in vifs:
            network = db.network_get(admin_context, vif['network_id'])

            self.deallocate_port(vif['uuid'], network['uuid'], project_id,
                                 instance_id)

            ipam_tenant_id = self.deallocate_ip_address(context,
                                network['uuid'], project_id, vif, instance_id)

            if FLAGS.quantum_use_dhcp:
                if network['host'] == self.host:
                    self.update_dhcp(context, ipam_tenant_id, network,
                                 vif, project_id)
                else:
                    topic = rpc.queue_get_for(context,
                                FLAGS.network_topic, network['host'])
                    rpc.call(context, topic, {'method': 'update_dhcp',
                        'args': {'ipam_tenant_id': ipam_tenant_id,
                        'network_ref': network,
                        'vif_ref': vif,
                        'project_id': network['project_id']}})

            db.virtual_interface_delete(admin_context, vif['id'])
Code Example #46
File: test_scheduler.py Project: wja300/nova
 def _check_shared_storage(self, dest, instance, check_result):
     tmp_filename = 'test-filename'
     rpc.queue_get_for(self.context, FLAGS.compute_topic,
             dest).AndReturn('dest_queue')
     rpc.call(self.context, 'dest_queue',
             {'method': 'create_shared_storage_test_file',
              'args': {},
              'version': compute_rpcapi.ComputeAPI.RPC_API_VERSION}, None
             ).AndReturn(tmp_filename)
     rpc.queue_get_for(self.context, FLAGS.compute_topic,
             instance['host']).AndReturn('src_queue')
     rpc.call(self.context, 'src_queue',
             {'method': 'check_shared_storage_test_file',
              'args': {'filename': tmp_filename},
              'version': compute_rpcapi.ComputeAPI.RPC_API_VERSION}, None
             ).AndReturn(check_result)
     rpc.queue_get_for(self.context, FLAGS.compute_topic,
             dest).AndReturn('dest_queue')
     rpc.cast(self.context, 'dest_queue',
             {'method': 'cleanup_shared_storage_test_file',
              'args': {'filename': tmp_filename},
              'version': compute_rpcapi.ComputeAPI.RPC_API_VERSION})
Code Example #47
 def _check_shared_storage(self, dest, instance, check_result):
     tmp_filename = 'test-filename'
     rpc.queue_get_for(self.context, FLAGS.compute_topic,
             dest).AndReturn('dest_queue')
     rpc.call(self.context, 'dest_queue',
             {'method': 'create_shared_storage_test_file',
              'args': {},
              'version': compute_rpcapi.ComputeAPI.RPC_API_VERSION}, None
             ).AndReturn(tmp_filename)
     rpc.queue_get_for(self.context, FLAGS.compute_topic,
             instance['host']).AndReturn('src_queue')
     rpc.call(self.context, 'src_queue',
             {'method': 'check_shared_storage_test_file',
              'args': {'filename': tmp_filename},
              'version': compute_rpcapi.ComputeAPI.RPC_API_VERSION}, None
             ).AndReturn(check_result)
     rpc.queue_get_for(self.context, FLAGS.compute_topic,
             dest).AndReturn('dest_queue')
     rpc.cast(self.context, 'dest_queue',
             {'method': 'cleanup_shared_storage_test_file',
              'args': {'filename': tmp_filename},
              'version': compute_rpcapi.ComputeAPI.RPC_API_VERSION})
Code Example #48
File: manager.py Project: MirantisDellCrowbar/nova
    def allocate_for_instance(self, context, **kwargs):
        """Called by compute when it is creating a new VM.

           There are three key tasks:
                - Determine the number and order of vNICs to create
                - Allocate IP addresses
                - Create ports on a Quantum network and attach vNICs.

           We support two approaches to determining vNICs:
                - By default, a VM gets a vNIC for any network belonging
                  to the VM's project, and a vNIC for any "global" network
                  that has a NULL project_id.  vNIC order is determined
                  by the network's 'priority' field.
                - If the 'os-create-server-ext' was used to create the VM,
                  only the networks in 'requested_networks' are used to
                  create vNICs, and the vNIC order is determined by the
                  order in the requested_networks array.

           For each vNIC, use the FlatManager to create the entries
           in the virtual_interfaces table, contact Quantum to
           create a port and attach the vNIC, and use the IPAM
           lib to allocate IP addresses.
        """
        instance_id = kwargs['instance_id']
        rxtx_factor = kwargs['rxtx_factor']
        host = kwargs['host']
        project_id = kwargs['project_id']
        LOG.debug(_("network allocations for instance %s"), project_id)
        requested_networks = kwargs.get('requested_networks')
        instance = db.instance_get(context, instance_id)

        net_proj_pairs = self.ipam.get_project_and_global_net_ids(context,
                                                                project_id)
        if requested_networks:
            # need to figure out if a requested network is owned
            # by the tenant, or by the provider
            # Note: these are the only possible options, as the compute
            # API already validated networks using validate_network()
            proj_net_ids = set([p[0] for p in net_proj_pairs if p[1]])
            net_proj_pairs = []
            for net_id, _i in requested_networks:
                if net_id in proj_net_ids:
                    net_proj_pairs.append((net_id, project_id))
                else:
                    net_proj_pairs.append((net_id, None))

        # Create a port via quantum and attach the vif
        for proj_pair in net_proj_pairs:
            network = self.get_network(context, proj_pair)

            # TODO(tr3buchet): broken. Virtual interfaces require an integer
            #                  network ID and it is not nullable
            vif_rec = self.add_virtual_interface(context,
                                                 instance_id,
                                                 network['id'],
                                                 project_id)

            # talk to Quantum API to create and attach port.
            nova_id = self._get_nova_id(instance)
            # Tell the ipam library to allocate an IP
            ips = self.ipam.allocate_fixed_ips(context, project_id,
                    network['quantum_net_id'], network['net_tenant_id'],
                    vif_rec)
            pairs = []
            # Set up port security if enabled
            if FLAGS.quantum_use_port_security:
                if FLAGS.quantum_port_security_include_link_local:
                    mac = netaddr.EUI(vif_rec['address'])
                    ips.append(str(mac.ipv6_link_local()))

                pairs = [{'mac_address': vif_rec['address'],
                          'ip_address': ip} for ip in ips]

            self.q_conn.create_and_attach_port(network['net_tenant_id'],
                                               network['quantum_net_id'],
                                               vif_rec['uuid'],
                                               vm_id=instance['uuid'],
                                               rxtx_factor=rxtx_factor,
                                               nova_id=nova_id,
                                               allowed_address_pairs=pairs)
            # Set up/start the dhcp server for this network if necessary
            if FLAGS.quantum_use_dhcp:
                if network['host'] == self.host:
                    self.enable_dhcp(context, network['quantum_net_id'],
                            network, vif_rec, network['net_tenant_id'])
                else:
                    topic = rpc.queue_get_for(context,
                                FLAGS.network_topic, network['host'])
                    rpc.call(context, topic, {'method': 'enable_dhcp',
                        'args': {'quantum_net_id': network['quantum_net_id'],
                        'network_ref': network,
                        'vif_rec': vif_rec,
                        'project_id': network['net_tenant_id']}})

        return self.get_instance_nw_info(context, instance_id,
                                         instance['uuid'],
                                         rxtx_factor, host,
                                         project_id=project_id)
Code Example #49
File: api.py Project: pulchart/nova
 def _get_console_topic(self, context, instance_host):
     topic = rpc.queue_get_for(context,
                               FLAGS.compute_topic,
                               instance_host)
     return rpc.call(context, topic, {'method': 'get_console_topic',
                                      'args': {'fake': 1}})
Code Example #50
File: driver.py Project: pulchart/nova
    def _live_migration_common_check(self, context, instance_ref, dest,
                                     block_migration, disk_over_commit):
        """Live migration common check routine.

        The checks below follow
        http://wiki.libvirt.org/page/TodoPreMigrationChecks

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host
        :param block_migration: if true, block_migration.
        :param disk_over_commit: if True, consider real(not virtual)
                                 disk size.

        """

        # Checking shared storage connectivity
        # if block migration, instances_paths should not be on shared storage.
        shared = self.mounted_on_same_shared_storage(context, instance_ref,
                                                     dest)
        if block_migration:
            if shared:
                reason = _("Block migration can not be used "
                           "with shared storage.")
                raise exception.InvalidSharedStorage(reason=reason, path=dest)

        elif not shared:
            reason = _("Live migration can not be used "
                       "without shared storage.")
            raise exception.InvalidSharedStorage(reason=reason, path=dest)

        # Check that the destination host exists.
        dservice_refs = db.service_get_all_compute_by_host(context, dest)
        dservice_ref = dservice_refs[0]['compute_node'][0]

        # Check that the original host (where the instance was launched) exists.
        try:
            oservice_refs = db.service_get_all_compute_by_host(context,
                                           instance_ref['host'])
        except exception.NotFound:
            raise exception.SourceHostUnavailable()
        oservice_ref = oservice_refs[0]['compute_node'][0]

        # Check that the hypervisor type is the same.
        orig_hypervisor = oservice_ref['hypervisor_type']
        dest_hypervisor = dservice_ref['hypervisor_type']
        if orig_hypervisor != dest_hypervisor:
            raise exception.InvalidHypervisorType()

        # Check the hypervisor version.
        orig_hypervisor = oservice_ref['hypervisor_version']
        dest_hypervisor = dservice_ref['hypervisor_version']
        if orig_hypervisor > dest_hypervisor:
            raise exception.DestinationHypervisorTooOld()

        # Checking cpuinfo.
        try:
            rpc.call(context,
                     rpc.queue_get_for(context, FLAGS.compute_topic, dest),
                     {"method": 'compare_cpu',
                      "args": {'cpu_info': oservice_ref['cpu_info']}})

        except exception.InvalidCPUInfo:
            src = instance_ref['host']
            LOG.exception(_("host %(dest)s is not compatible with "
                                "original host %(src)s.") % locals())
            raise
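A hedged sketch of where this routine sits in the scheduler driver's live-migration flow; the ordering mirrors what the test cases later on this page expect (db, vm_states and driver being the scheduler module's usual imports), but the body below is an illustration, not the actual driver code.

    def schedule_live_migration(self, context, instance_id, dest,
                                block_migration=False,
                                disk_over_commit=False):
        # Illustrative ordering only: fetch the instance, run the source,
        # destination and common checks, then hand off to the source host.
        instance_ref = db.instance_get(context, instance_id)

        self._live_migration_src_check(context, instance_ref)
        self._live_migration_dest_check(context, instance_ref, dest,
                                        block_migration, disk_over_commit)
        self._live_migration_common_check(context, instance_ref, dest,
                                          block_migration, disk_over_commit)

        # Mark the instance as migrating and cast the actual migration
        # to the compute host currently running it.
        db.instance_update_and_get_original(context, instance_id,
                {'vm_state': vm_states.MIGRATING})
        driver.cast_to_compute_host(context, instance_ref['host'],
                'live_migration', update_db=False,
                instance_id=instance_id, dest=dest,
                block_migration=block_migration)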
Code example #51
0
File: manager.py Project: kiall/nova
    def allocate_for_instance(self, context, **kwargs):
        """Called by compute when it is creating a new VM.

           There are three key tasks:
                - Determine the number and order of vNICs to create
                - Allocate IP addresses
                - Create ports on a Quantum network and attach vNICs.

           We support two approaches to determining vNICs:
                - By default, a VM gets a vNIC for any network belonging
                  to the VM's project, and a vNIC for any "global" network
                  that has a NULL project_id.  vNIC order is determined
                  by the network's 'priority' field.
                - If the 'os-create-server-ext' was used to create the VM,
                  only the networks in 'requested_networks' are used to
                  create vNICs, and the vNIC order is determined by the
                  order in the requested_networks array.

           For each vNIC, use the FlatManager to create the entries
           in the virtual_interfaces table, contact Quantum to
           create a port and attach the vNIC, and use the IPAM
           lib to allocate IP addresses.
        """
        instance_id = kwargs['instance_id']
        rxtx_factor = kwargs['rxtx_factor']
        host = kwargs['host']
        project_id = kwargs['project_id']
        LOG.debug(_("network allocations for instance %s"), project_id)
        requested_networks = kwargs.get('requested_networks')
        instance = db.instance_get(context, instance_id)

        net_proj_pairs = self.ipam.get_project_and_global_net_ids(
            context, project_id)
        if requested_networks:
            # need to figure out if a requested network is owned
            # by the tenant, or by the provider
            # Note: these are the only possible options, as the compute
            # API already validated networks using validate_network()
            proj_net_ids = set([p[0] for p in net_proj_pairs if p[1]])
            net_proj_pairs = []
            for net_id, _i in requested_networks:
                if net_id in proj_net_ids:
                    net_proj_pairs.append((net_id, project_id))
                else:
                    net_proj_pairs.append((net_id, None))

        # Create a port via quantum and attach the vif
        for proj_pair in net_proj_pairs:
            network = self.get_network(context, proj_pair)

            # TODO(tr3buchet): broken. Virtual interfaces require an integer
            #                  network ID and it is not nullable
            vif_rec = self.add_virtual_interface(context, instance_id,
                                                 network['id'], project_id)

            # talk to Quantum API to create and attach port.
            nova_id = self._get_nova_id(instance)
            # Tell the ipam library to allocate an IP
            ips = self.ipam.allocate_fixed_ips(context, project_id,
                                               network['quantum_net_id'],
                                               network['net_tenant_id'],
                                               vif_rec)
            pairs = []
            # Set up port security if enabled
            if FLAGS.quantum_use_port_security:
                if FLAGS.quantum_port_security_include_link_local:
                    mac = netaddr.EUI(vif_rec['address'])
                    ips.append(str(mac.ipv6_link_local()))

                pairs = [{
                    'mac_address': vif_rec['address'],
                    'ip_address': ip
                } for ip in ips]

            self.q_conn.create_and_attach_port(network['net_tenant_id'],
                                               network['quantum_net_id'],
                                               vif_rec['uuid'],
                                               vm_id=instance['uuid'],
                                               rxtx_factor=rxtx_factor,
                                               nova_id=nova_id,
                                               allowed_address_pairs=pairs)
            # Set up/start the dhcp server for this network if necessary
            if FLAGS.quantum_use_dhcp:
                if network['host'] == self.host:
                    self.enable_dhcp(context, network['quantum_net_id'],
                                     network, vif_rec,
                                     network['net_tenant_id'])
                else:
                    topic = rpc.queue_get_for(context, FLAGS.network_topic,
                                              network['host'])
                    rpc.call(
                        context, topic, {
                            'method': 'enable_dhcp',
                            'args': {
                                'quantum_net_id': network['quantum_net_id'],
                                'network_ref': network,
                                'vif_rec': vif_rec,
                                'project_id': network['net_tenant_id']
                            }
                        })

        return self.get_instance_nw_info(context,
                                         instance_id,
                                         instance['uuid'],
                                         rxtx_factor,
                                         host,
                                         project_id=project_id)
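For orientation, a hedged caller-side sketch of the kwargs a compute node would pass to allocate_for_instance; every value is invented, and network_manager / context stand in for the manager instance and request context.

# Illustrative only -- values and handles are placeholders.
kwargs = {
    'instance_id': 42,
    'rxtx_factor': 1.0,
    'host': 'compute-01',
    'project_id': 'demo-project',
    # Optional (network_uuid, fixed_ip) pairs, e.g. from os-create-server-ext.
    'requested_networks': [('11111111-2222-3333-4444-555555555555', None)],
}
nw_info = network_manager.allocate_for_instance(context, **kwargs)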
Code example #52
0
File: api.py Project: MirantisDellCrowbar/nova
    def detach(self, context, volume):
        host = volume['host']
        queue = rpc.queue_get_for(context, FLAGS.volume_topic, host)
        return rpc.call(context, queue,
                        {"method": "detach_volume",
                         "args": {"volume_id": volume['id']}})
Code example #53
0
    def test_live_migration_all_checks_pass(self):
        """Test live migration when all checks pass."""

        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(utils, 'service_is_up')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        self.mox.StubOutWithMock(self.driver, '_get_compute_info')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(rpc, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')
        self.mox.StubOutWithMock(rpc, 'cast')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(driver, 'cast_to_compute_host')

        dest = 'fake_host2'
        block_migration = True
        disk_over_commit = True
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)

        db.service_get_all_compute_by_host(self.context,
                instance['host']).AndReturn(['fake_service2'])
        utils.service_is_up('fake_service2').AndReturn(True)

        # Destination checks (compute is up, enough memory, disk)
        db.service_get_all_compute_by_host(self.context,
                dest).AndReturn(['fake_service3'])
        utils.service_is_up('fake_service3').AndReturn(True)
        # assert_compute_node_has_enough_memory()
        self.driver._get_compute_info(self.context, dest,
                'memory_mb').AndReturn(2048)
        db.instance_get_all_by_host(self.context, dest).AndReturn(
                [dict(memory_mb=256), dict(memory_mb=512)])
        # assert_compute_node_has_enough_disk()
        self.driver._get_compute_info(self.context, dest,
                'disk_available_least').AndReturn(1025)
        rpc.queue_get_for(self.context, FLAGS.compute_topic,
                instance['host']).AndReturn('src_queue1')
        instance_disk_info_msg = {
            'method': 'get_instance_disk_info',
            'args': {
                'instance_name': instance['name'],
            },
            'version': compute_rpcapi.ComputeAPI.RPC_API_VERSION,
        }
        instance_disk_info = [{'disk_size': 1024 * (1024 ** 3)}]
        rpc.call(self.context,
                 'src_queue1',
                 instance_disk_info_msg,
                 None).AndReturn(jsonutils.dumps(instance_disk_info))

        # Common checks (shared storage ok, same hypervisor, etc)
        self._check_shared_storage(dest, instance, False)

        db.service_get_all_compute_by_host(self.context, dest).AndReturn(
                [{'compute_node': [{'hypervisor_type': 'xen',
                                    'hypervisor_version': 1}]}])
        # newer hypervisor version for src
        db.service_get_all_compute_by_host(self.context,
            instance['host']).AndReturn(
                    [{'compute_node': [{'hypervisor_type': 'xen',
                                        'hypervisor_version': 1,
                                        'cpu_info': 'fake_cpu_info'}]}])
        rpc.queue_get_for(self.context, FLAGS.compute_topic,
                dest).AndReturn('dest_queue')
        rpc.call(self.context, 'dest_queue',
                {'method': 'compare_cpu',
                 'args': {'cpu_info': 'fake_cpu_info'},
                 'version': compute_rpcapi.ComputeAPI.RPC_API_VERSION}, None
                ).AndReturn(True)

        db.instance_update_and_get_original(self.context, instance['id'],
                {"vm_state": vm_states.MIGRATING}).AndReturn(
                        (instance, instance))

        driver.cast_to_compute_host(self.context, instance['host'],
                'live_migration', update_db=False,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration)

        self.mox.ReplayAll()
        result = self.driver.schedule_live_migration(self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)
        self.assertEqual(result, None)
Code example #54
0
File: test_scheduler.py Project: wja300/nova
    def test_live_migration_all_checks_pass(self):
        """Test live migration when all checks pass."""

        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(utils, 'service_is_up')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        self.mox.StubOutWithMock(self.driver, '_get_compute_info')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(rpc, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')
        self.mox.StubOutWithMock(rpc, 'cast')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(driver, 'cast_to_compute_host')

        dest = 'fake_host2'
        block_migration = True
        disk_over_commit = True
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)

        db.service_get_all_compute_by_host(self.context,
                instance['host']).AndReturn(['fake_service2'])
        utils.service_is_up('fake_service2').AndReturn(True)

        # Destination checks (compute is up, enough memory, disk)
        db.service_get_all_compute_by_host(self.context,
                dest).AndReturn(['fake_service3'])
        utils.service_is_up('fake_service3').AndReturn(True)
        # assert_compute_node_has_enough_memory()
        self.driver._get_compute_info(self.context, dest,
                'memory_mb').AndReturn(2048)
        db.instance_get_all_by_host(self.context, dest).AndReturn(
                [dict(memory_mb=256), dict(memory_mb=512)])
        # assert_compute_node_has_enough_disk()
        self.driver._get_compute_info(self.context, dest,
                'disk_available_least').AndReturn(1025)
        rpc.queue_get_for(self.context, FLAGS.compute_topic,
                instance['host']).AndReturn('src_queue1')
        rpc.call(self.context, 'src_queue1',
                {'method': 'get_instance_disk_info',
                 'args': {'instance_name': instance['name']},
                 'version': compute_rpcapi.ComputeAPI.RPC_API_VERSION},
                 None).AndReturn(
                        json.dumps([{'disk_size': 1024 * (1024 ** 3)}]))

        # Common checks (shared storage ok, same hypervisor, etc)
        self._check_shared_storage(dest, instance, False)

        db.service_get_all_compute_by_host(self.context, dest).AndReturn(
                [{'compute_node': [{'hypervisor_type': 'xen',
                                    'hypervisor_version': 1}]}])
        # newer hypervisor version for src
        db.service_get_all_compute_by_host(self.context,
            instance['host']).AndReturn(
                    [{'compute_node': [{'hypervisor_type': 'xen',
                                        'hypervisor_version': 1,
                                        'cpu_info': 'fake_cpu_info'}]}])
        rpc.queue_get_for(self.context, FLAGS.compute_topic,
                dest).AndReturn('dest_queue')
        rpc.call(self.context, 'dest_queue',
                {'method': 'compare_cpu',
                 'args': {'cpu_info': 'fake_cpu_info'},
                 'version': compute_rpcapi.ComputeAPI.RPC_API_VERSION}, None
                ).AndReturn(True)

        db.instance_update_and_get_original(self.context, instance['id'],
                {"vm_state": vm_states.MIGRATING}).AndReturn(
                        (instance, instance))

        driver.cast_to_compute_host(self.context, instance['host'],
                'live_migration', update_db=False,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration)

        self.mox.ReplayAll()
        result = self.driver.schedule_live_migration(self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)
        self.assertEqual(result, None)
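The two tests above lean on mox's record/replay model. Below is a tiny self-contained sketch of that pattern, independent of nova; the Calculator class is invented for the example.

import mox


class Calculator(object):
    def add(self, a, b):
        return a + b


def test_add_is_stubbed():
    m = mox.Mox()
    calc = Calculator()
    # Record phase: declare the expected call and its canned return value.
    m.StubOutWithMock(calc, 'add')
    calc.add(2, 3).AndReturn(99)
    # Replay phase: subsequent calls are checked against the recording.
    m.ReplayAll()
    assert calc.add(2, 3) == 99
    # Verify that every recorded expectation was exercised, then clean up.
    m.VerifyAll()
    m.UnsetStubs()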