Example #1
    def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
        """Picks a host that is up and has the fewest volumes."""
        volume_ref = db.volume_get(context, volume_id)
        if (volume_ref['availability_zone']
            and ':' in volume_ref['availability_zone']
            and context.is_admin):
            zone, _x, host = volume_ref['availability_zone'].partition(':')
            service = db.service_get_by_args(context.elevated(), host,
                                             'nova-volume')
            if not self.service_is_up(service):
                raise driver.WillNotSchedule(_("Host %s not available") % host)

            # TODO(vish): this probably belongs in the manager, if we
            #             can generalize this somehow
            now = datetime.datetime.utcnow()
            db.volume_update(context, volume_id, {'host': host,
                                                  'scheduled_at': now})
            return host
        results = db.service_get_all_volume_sorted(context)
        for result in results:
            (service, volume_gigabytes) = result
            if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes:
                raise driver.NoValidHost(_("All hosts have too many "
                                           "gigabytes"))
            if self.service_is_up(service):
                # NOTE(vish): this probably belongs in the manager, if we
                #             can generalize this somehow
                now = datetime.datetime.utcnow()
                db.volume_update(context,
                                 volume_id,
                                 {'host': service['host'],
                                  'scheduled_at': now})
                return service['host']
        raise driver.NoValidHost(_("No hosts found"))
Example #2
    def create_volume(self, context, volume_id, snapshot_id, reservations=None, image_id=None):
        try:
            self.driver.schedule_create_volume(context, volume_id, snapshot_id, image_id)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                LOG.warning(_("Failed to schedule create_volume: %(ex)s") % locals())
                db.volume_update(context, volume_id, {"status": "error"})
Example #3
    def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
        """Picks a host that is up and has the fewest volumes."""
        volume_ref = db.volume_get(context, volume_id)
        if (volume_ref['availability_zone']
                and ':' in volume_ref['availability_zone']
                and context.is_admin):
            zone, _x, host = volume_ref['availability_zone'].partition(':')
            service = db.service_get_by_args(context.elevated(), host,
                                             'nova-volume')
            if not self.service_is_up(service):
                raise driver.WillNotSchedule(_("Host %s not available") % host)

            # TODO(vish): this probably belongs in the manager, if we
            #             can generalize this somehow
            now = datetime.datetime.utcnow()
            db.volume_update(context, volume_id, {
                'host': host,
                'scheduled_at': now
            })
            return host

        results = db.service_get_all_volume_sorted(context)

        for result in results:
            (service, volume_gigabytes) = result

            compute_ref = db.service_get_all_compute_by_host(
                context, service['host'])[0]
            compute_node_ref = compute_ref['compute_node'][0]

            if volume_ref['size'] + volume_gigabytes > compute_node_ref[
                    'local_gb']:
                raise driver.NoValidHost(
                    _("All hosts have too many "
                      "gigabytes"))

            LOG.debug(
                _("requested volume GBs = %s + used compute node GBs = %s "
                  "< total compute node GBs = %s") %
                (volume_ref['size'], volume_gigabytes,
                 compute_node_ref['local_gb']))

            if self.service_is_up(service):
                # NOTE(vish): this probably belongs in the manager, if we
                #             can generalize this somehow
                now = datetime.datetime.utcnow()
                db.volume_update(context, volume_id, {
                    'host': service['host'],
                    'scheduled_at': now
                })

                LOG.debug(
                    _("volume = %s scheduled to host = %s") %
                    (volume_id, service['host']))

                return service['host']
        raise driver.NoValidHost(
            _("Scheduler was unable to locate a host"
              " for this request. Is the appropriate"
              " service running?"))
Example #4
def cast_to_volume_host(context, host, method, **kwargs):
    """Cast request to a volume host queue"""

    volume_id = kwargs.get("volume_id", None)
    if volume_id is not None:
        now = timeutils.utcnow()
        db.volume_update(context, volume_id, {"host": host, "scheduled_at": now})
    rpc.cast(context, rpc.queue_get_for(context, "volume", host), {"method": method, "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to volume '%(host)s'") % locals())
Example #5
    def create_volume(self, context, volume_id, snapshot_id,
                      reservations=None, image_id=None):
        try:
            self.driver.schedule_create_volume(
                context, volume_id, snapshot_id, image_id)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                LOG.warning(_("Failed to schedule create_volume: %(ex)s") %
                            locals())
                db.volume_update(context, volume_id, {'status': 'error'})
Example #6
    def test_create_volume_no_valid_host_puts_volume_in_error(self):
        self._mox_schedule_method_helper('schedule_create_volume')
        self.mox.StubOutWithMock(db, 'volume_update')

        self.manager.driver.schedule_create_volume(self.context, '1', '2',
                None).AndRaise(exception.NoValidHost(reason=''))
        db.volume_update(self.context, '1', {'status': 'error'})

        self.mox.ReplayAll()
        self.assertRaises(exception.NoValidHost, self.manager.create_volume,
                          self.context, '1', '2')
Example #7
    def test_create_volume_no_valid_host_puts_volume_in_error(self):
        self._mox_schedule_method_helper("schedule_create_volume")
        self.mox.StubOutWithMock(db, "volume_update")

        self.manager.driver.schedule_create_volume(self.context, "1", "2", None).AndRaise(
            exception.NoValidHost(reason="")
        )
        db.volume_update(self.context, "1", {"status": "error"})

        self.mox.ReplayAll()
        self.assertRaises(exception.NoValidHost, self.manager.create_volume, self.context, "1", "2")
Example #8
def cast_to_volume_host(context, host, method, **kwargs):
    """Cast request to a volume host queue"""

    volume_id = kwargs.get('volume_id', None)
    if volume_id is not None:
        now = timeutils.utcnow()
        db.volume_update(context, volume_id,
                {'host': host, 'scheduled_at': now})
    rpc.cast(context,
             rpc.queue_get_for(context, 'volume', host),
             {"method": method, "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to volume '%(host)s'") % locals())
Example #9
    def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
        """Picks the best host based on requested drive type capability."""
        volume_ref = db.volume_get(context, volume_id)

        host = self._check_host_enforcement(context,
                                            volume_ref['availability_zone'])
        if host:
            now = utils.utcnow()
            db.volume_update(context, volume_id, {'host': host,
                                                  'scheduled_at': now})
            return host

        volume_type_id = volume_ref['volume_type_id']
        if volume_type_id:
            volume_type = volume_types.get_volume_type(context, volume_type_id)

        if volume_type_id is None or\
           volume_types.is_vsa_volume(volume_type_id, volume_type):

            LOG.debug(_("Non-VSA volume %d"), volume_ref['id'])
            return super(VsaScheduler, self).schedule_create_volume(context,
                        volume_id, *_args, **_kwargs)

        self._print_capabilities_info()

        drive_type = {
            'name': volume_type['extra_specs'].get('drive_name'),
            'type': volume_type['extra_specs'].get('drive_type'),
            'size': int(volume_type['extra_specs'].get('drive_size')),
            'rpm': volume_type['extra_specs'].get('drive_rpm'),
            }

        LOG.debug(_("Spawning volume %(volume_id)s with drive type "\
                    "%(drive_type)s"), locals())

        request_spec = {'size': volume_ref['size'],
                        'drive_type': drive_type}
        hosts = self._filter_hosts("volume", request_spec)

        try:
            (host, qos_cap) = self._select_hosts(request_spec, all_hosts=hosts)
        except:
            if volume_ref['to_vsa_id']:
                db.vsa_update(context, volume_ref['to_vsa_id'],
                                dict(status=VsaState.FAILED))
            raise

        if host:
            now = utils.utcnow()
            db.volume_update(context, volume_id, {'host': host,
                                                  'scheduled_at': now})
            self._consume_resource(qos_cap, volume_ref['size'], -1)
            return host
Example #10
def cast_to_volume_host(context, host, method, update_db=True, **kwargs):
    """Cast request to a volume host queue"""

    if update_db:
        volume_id = kwargs.get('volume_id', None)
        if volume_id is not None:
            now = utils.utcnow()
            db.volume_update(context, volume_id,
                    {'host': host, 'scheduled_at': now})
    rpc.cast(context,
            db.queue_get_for(context, 'volume', host),
            {"method": method, "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to volume '%(host)s'") % locals())
Example #11
File: driver.py  Project: altai/nova
    def schedule_live_migration(self,
                                context,
                                instance_id,
                                dest,
                                block_migration=False,
                                disk_over_commit=False):
        """Live migration scheduling method.

        :param context:
        :param instance_id:
        :param dest: destination host
        :param block_migration: if True, perform a block migration.
        :param disk_over_commit: if True, consider real (not virtual)
                                 disk size.

        :return:
            The host where the instance is currently running.
            The scheduler then sends the request to that host.
        """
        # Whether instance exists and is running.
        instance_ref = db.instance_get(context, instance_id)

        # Checking instance.
        self._live_migration_src_check(context, instance_ref)

        # Checking destination host.
        self._live_migration_dest_check(context, instance_ref, dest,
                                        block_migration, disk_over_commit)
        # Common checking.
        self._live_migration_common_check(context, instance_ref, dest,
                                          block_migration, disk_over_commit)

        # Changing instance_state.
        values = {"vm_state": vm_states.MIGRATING}
        db.instance_update(context, instance_id, values)

        # Changing volume state
        for volume_ref in instance_ref['volumes']:
            db.volume_update(context, volume_ref['id'],
                             {'status': 'migrating'})

        src = instance_ref['host']
        cast_to_compute_host(context,
                             src,
                             'live_migration',
                             update_db=False,
                             instance_id=instance_id,
                             dest=dest,
                             block_migration=block_migration)
Example #12
    def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
        """Picks a host that is up and has the fewest volumes."""
        volume_ref = db.volume_get(context, volume_id)
        if (volume_ref['availability_zone']
            and ':' in volume_ref['availability_zone']
            and context.is_admin):
            zone, _x, host = volume_ref['availability_zone'].partition(':')
            service = db.service_get_by_args(context.elevated(), host,
                                             'nova-volume')
            if not self.service_is_up(service):
                raise driver.WillNotSchedule(_("Host %s not available") % host)

            # TODO(vish): this probably belongs in the manager, if we
            #             can generalize this somehow
            now = datetime.datetime.utcnow()
            db.volume_update(context, volume_id, {'host': host,
                                                  'scheduled_at': now})
            return host

        results = db.service_get_all_volume_sorted(context)

        for result in results:
            (service, volume_gigabytes) = result

            compute_ref = db.service_get_all_compute_by_host(context, service['host'])[0]
            compute_node_ref = compute_ref['compute_node'][0]

            if volume_ref['size'] + volume_gigabytes > compute_node_ref['local_gb']:
                raise driver.NoValidHost(_("All hosts have too many "
                                           "gigabytes"))

            LOG.debug(_("requested volume GBs = %s + used compute node GBs = %s < total compute node GBs = %s") % (volume_ref['size'], volume_gigabytes, compute_node_ref['local_gb']))

            if self.service_is_up(service):
                # NOTE(vish): this probably belongs in the manager, if we
                #             can generalize this somehow
                now = datetime.datetime.utcnow()
                db.volume_update(context,
                                 volume_id,
                                 {'host': service['host'],
                                  'scheduled_at': now})

                LOG.debug(_("volume = %s scheduled to host = %s") % (volume_id, service['host']))

                return service['host']
        raise driver.NoValidHost(_("Scheduler was unable to locate a host"
                                   " for this request. Is the appropriate"
                                   " service running?"))
Example #13
    def schedule_live_migration(self, context, instance_id, dest,
                                block_migration=False,
                                disk_over_commit=False):
        """Live migration scheduling method.

        :param context:
        :param instance_id:
        :param dest: destination host
        :param block_migration: if True, perform a block migration.
        :param disk_over_commit: if True, consider real (not virtual)
                                 disk size.

        :return:
            The host where the instance is currently running.
            The scheduler then sends the request to that host.
        """
        # Whether instance exists and is running.
        instance_ref = db.instance_get(context, instance_id)

        # Checking instance.
        self._live_migration_src_check(context, instance_ref)

        # Checking destination host.
        self._live_migration_dest_check(context, instance_ref,
                                        dest, block_migration,
                                        disk_over_commit)
        # Common checking.
        self._live_migration_common_check(context, instance_ref,
                                          dest, block_migration,
                                          disk_over_commit)

        # Changing instance_state.
        values = {"vm_state": vm_states.MIGRATING}
        db.instance_update(context, instance_id, values)

        # Changing volume state
        for volume_ref in instance_ref['volumes']:
            db.volume_update(context,
                             volume_ref['id'],
                             {'status': 'migrating'})

        src = instance_ref['host']
        cast_to_compute_host(context, src, 'live_migration',
                update_db=False,
                instance_id=instance_id,
                dest=dest,
                block_migration=block_migration)
Example #14
    def test_cast_to_volume_host_update_db_with_volume_id(self):
        host = "fake_host1"
        method = "fake_method"
        fake_kwargs = {"volume_id": 31337, "extra_arg": "meow"}
        queue = "fake_queue"

        self.mox.StubOutWithMock(timeutils, "utcnow")
        self.mox.StubOutWithMock(db, "volume_update")
        self.mox.StubOutWithMock(rpc, "queue_get_for")
        self.mox.StubOutWithMock(rpc, "cast")

        timeutils.utcnow().AndReturn("fake-now")
        db.volume_update(self.context, 31337, {"host": host, "scheduled_at": "fake-now"})
        rpc.queue_get_for(self.context, "volume", host).AndReturn(queue)
        rpc.cast(self.context, queue, {"method": method, "args": fake_kwargs})

        self.mox.ReplayAll()
        driver.cast_to_volume_host(self.context, host, method, **fake_kwargs)
Example #15
    def test_cast_to_volume_host_update_db_with_volume_id(self):
        host = 'fake_host1'
        method = 'fake_method'
        fake_kwargs = {'volume_id': 31337, 'extra_arg': 'meow'}
        queue = 'fake_queue'

        self.mox.StubOutWithMock(timeutils, 'utcnow')
        self.mox.StubOutWithMock(db, 'volume_update')
        self.mox.StubOutWithMock(rpc, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'cast')

        timeutils.utcnow().AndReturn('fake-now')
        db.volume_update(self.context, 31337, {
            'host': host,
            'scheduled_at': 'fake-now'
        })
        rpc.queue_get_for(self.context, 'volume', host).AndReturn(queue)
        rpc.cast(self.context, queue, {'method': method, 'args': fake_kwargs})

        self.mox.ReplayAll()
        driver.cast_to_volume_host(self.context, host, method, **fake_kwargs)
Example #16
    def schedule_live_migration(self, context, instance_id, dest):
        """Live migration scheduling method.

        :param context:
        :param instance_id:
        :param dest: destination host
        :return:
            The host where the instance is currently running.
            The scheduler then sends the request to that host.

        """

        # Whether instance exists and is running.
        instance_ref = db.instance_get(context, instance_id)

        # Checking instance.
        self._live_migration_src_check(context, instance_ref)

        # Checking destination host.
        self._live_migration_dest_check(context, instance_ref, dest)

        # Common checking.
        self._live_migration_common_check(context, instance_ref, dest)

        # Changing instance_state.
        db.instance_set_state(context,
                              instance_id,
                              power_state.PAUSED,
                              'migrating')

        # Changing volume state
        for volume_ref in instance_ref['volumes']:
            db.volume_update(context,
                             volume_ref['id'],
                             {'status': 'migrating'})

        # Return value is necessary to send request to src
        # Check _schedule() in detail.
        src = instance_ref['host']
        return src
Example #17
    def schedule_live_migration(self,
                                context,
                                instance_id,
                                dest,
                                block_migration=False):
        """Live migration scheduling method.

        :param context:
        :param instance_id:
        :param dest: destination host
        :return:
            The host where the instance is currently running.
            The scheduler then sends the request to that host.
        """
        # Whether instance exists and is running.
        instance_ref = db.instance_get(context, instance_id)

        # Checking instance.
        self._live_migration_src_check(context, instance_ref)

        # Checking destination host.
        self._live_migration_dest_check(context, instance_ref, dest,
                                        block_migration)
        # Common checking.
        self._live_migration_common_check(context, instance_ref, dest,
                                          block_migration)

        # Changing instance_state.
        values = {"vm_state": vm_states.MIGRATING}
        db.instance_update(context, instance_id, values)

        # Changing volume state
        for volume_ref in instance_ref['volumes']:
            db.volume_update(context, volume_ref['id'],
                             {'status': 'migrating'})

        # Return value is necessary to send request to src
        # Check _schedule() in detail.
        src = instance_ref['host']
        return src
Example #18
    def test_live_migration_basic(self):
        """Test basic schedule_live_migration functionality"""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_common_check')
        self.mox.StubOutWithMock(db, 'instance_update')
        self.mox.StubOutWithMock(db, 'volume_update')
        self.mox.StubOutWithMock(driver, 'cast_to_compute_host')

        dest = 'fake_host2'
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)

        self.driver._live_migration_src_check(self.context, instance)
        self.driver._live_migration_dest_check(self.context, instance,
                dest, block_migration, disk_over_commit)
        self.driver._live_migration_common_check(self.context, instance,
                dest, block_migration, disk_over_commit)
        db.instance_update(self.context, instance['id'],
                {'vm_state': vm_states.MIGRATING})

        db.volume_update(self.context, instance['volumes'][0]['id'],
                {'status': 'migrating'})
        db.volume_update(self.context, instance['volumes'][1]['id'],
                {'status': 'migrating'})

        driver.cast_to_compute_host(self.context, instance['host'],
                'live_migration', update_db=False,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration)

        self.mox.ReplayAll()
        self.driver.schedule_live_migration(self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)
Example #19
    def schedule_live_migration(self, context, instance_id, dest, block_migration=False):
        """Live migration scheduling method.

        :param context:
        :param instance_id:
        :param dest: destination host
        :return:
            The host where the instance is currently running.
            The scheduler then sends the request to that host.
        """
        # Whether instance exists and is running.
        instance_ref = db.instance_get(context, instance_id)

        # Checking instance.
        self._live_migration_src_check(context, instance_ref)

        # Checking destination host.
        self._live_migration_dest_check(context, instance_ref, dest, block_migration)
        # Common checking.
        self._live_migration_common_check(context, instance_ref, dest, block_migration)

        # Changing instance_state.
        values = {"vm_state": vm_states.MIGRATING}
        db.instance_update(context, instance_id, values)

        # Changing volume state
        for volume_ref in instance_ref["volumes"]:
            db.volume_update(context, volume_ref["id"], {"status": "migrating"})

        src = instance_ref["host"]
        cast_to_compute_host(
            context,
            src,
            "live_migration",
            update_db=False,
            instance_id=instance_id,
            dest=dest,
            block_migration=block_migration,
        )
Example #20
    def test_cast_to_volume_host_update_db_with_volume_id(self):
        host = 'fake_host1'
        method = 'fake_method'
        fake_kwargs = {'volume_id': 31337,
                       'extra_arg': 'meow'}
        queue = 'fake_queue'

        self.mox.StubOutWithMock(utils, 'utcnow')
        self.mox.StubOutWithMock(db, 'volume_update')
        self.mox.StubOutWithMock(db, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'cast')

        utils.utcnow().AndReturn('fake-now')
        db.volume_update(self.context, 31337,
                {'host': host, 'scheduled_at': 'fake-now'})
        db.queue_get_for(self.context, 'volume', host).AndReturn(queue)
        rpc.cast(self.context, queue,
                {'method': method,
                 'args': fake_kwargs})

        self.mox.ReplayAll()
        driver.cast_to_volume_host(self.context, host, method,
                update_db=True, **fake_kwargs)
Example #21
    def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
        """Picks a host that is up and has the fewest volumes."""
        volume_ref = db.volume_get(context, volume_id)
        if (volume_ref['availability_zone']
                and ':' in volume_ref['availability_zone']
                and context.is_admin):
            zone, _x, host = volume_ref['availability_zone'].partition(':')
            service = db.service_get_by_args(context.elevated(), host,
                                             'nova-volume')
            if not self.service_is_up(service):
                raise driver.WillNotSchedule(_("Host %s not available") % host)

            # TODO(vish): this probably belongs in the manager, if we
            #             can generalize this somehow
            now = datetime.datetime.utcnow()
            db.volume_update(context, volume_id, {
                'host': host,
                'scheduled_at': now
            })
            return host
        results = db.service_get_all_volume_sorted(context)
        for result in results:
            (service, volume_gigabytes) = result
            if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes:
                raise driver.NoValidHost(
                    _("All hosts have too many "
                      "gigabytes"))
            if self.service_is_up(service):
                # NOTE(vish): this probably belongs in the manager, if we
                #             can generalize this somehow
                now = datetime.datetime.utcnow()
                db.volume_update(context, volume_id, {
                    'host': service['host'],
                    'scheduled_at': now
                })
                return service['host']
        raise driver.NoValidHost(_("No hosts found"))
Example #22
    def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
        """Picks a host that is up and has the fewest volumes."""
        elevated = context.elevated()
        volume_ref = db.volume_get(context, volume_id)
        if (volume_ref['availability_zone']
            and ':' in volume_ref['availability_zone']
            and context.is_admin):
            zone, _x, host = volume_ref['availability_zone'].partition(':')
            service = db.service_get_by_args(elevated, host,
                                             'nova-volume')
            if not self.service_is_up(service):
                raise driver.WillNotSchedule(_("Host %s not available") % host)

            # TODO(vish): this probably belongs in the manager, if we
            #             can generalize this somehow
            now = utils.utcnow()
            db.volume_update(context, volume_id, {'host': host,
                                                  'scheduled_at': now})
            return host
        results = db.service_get_all_volume_sorted(elevated)
        for result in results:
            (service, volume_gigabytes) = result
            if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes:
                raise driver.NoValidHost(_("Not enough allocatable volume"
                                           "gigabytes remaining"))
            if self.service_is_up(service):
                # NOTE(vish): this probably belongs in the manager, if we
                #             can generalize this somehow
                now = utils.utcnow()
                db.volume_update(context,
                                 volume_id,
                                 {'host': service['host'],
                                  'scheduled_at': now})
                return service['host']
        raise driver.NoValidHost(_("Scheduler was unable to locate a host"
                                   " for this request. Is the appropriate"
                                   " service running?"))
Example #23
    def _update(self, *args, **kwargs):
        db.volume_update(*args, **kwargs)
Example #24
    def test_live_migration_all_checks_pass(self):
        """Test live migration when all checks pass."""

        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
        self.mox.StubOutWithMock(utils, 'service_is_up')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        self.mox.StubOutWithMock(self.driver, '_get_compute_info')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(db, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')
        self.mox.StubOutWithMock(rpc, 'cast')
        self.mox.StubOutWithMock(db, 'instance_update')
        self.mox.StubOutWithMock(db, 'volume_update')
        self.mox.StubOutWithMock(driver, 'cast_to_compute_host')

        dest = 'fake_host2'
        block_migration = True
        disk_over_commit = True
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)

        # Source checks (volume and source compute are up)
        db.service_get_all_by_topic(self.context, 'volume').AndReturn(
                ['fake_service'])
        utils.service_is_up('fake_service').AndReturn(True)
        db.service_get_all_compute_by_host(self.context,
                instance['host']).AndReturn(['fake_service2'])
        utils.service_is_up('fake_service2').AndReturn(True)

        # Destination checks (compute is up, enough memory, disk)
        db.service_get_all_compute_by_host(self.context,
                dest).AndReturn(['fake_service3'])
        utils.service_is_up('fake_service3').AndReturn(True)
        # assert_compute_node_has_enough_memory()
        self.driver._get_compute_info(self.context, dest,
                'memory_mb').AndReturn(2048)
        db.instance_get_all_by_host(self.context, dest).AndReturn(
                [dict(memory_mb=256), dict(memory_mb=512)])
        # assert_compute_node_has_enough_disk()
        db.queue_get_for(self.context, FLAGS.compute_topic,
                dest).AndReturn('dest_queue1')
        rpc.call(self.context, 'dest_queue1',
                {'method': 'update_available_resource'})
        self.driver._get_compute_info(self.context, dest,
                'disk_available_least').AndReturn(1025)
        db.queue_get_for(self.context, FLAGS.compute_topic,
                instance['host']).AndReturn('src_queue1')
        rpc.call(self.context, 'src_queue1',
                {'method': 'get_instance_disk_info',
                 'args': {'instance_name': instance['name']}}).AndReturn(
                        json.dumps([{'disk_size': 1024 * (1024 ** 3)}]))

        # Common checks (shared storage ok, same hypervisor, etc.)
        db.queue_get_for(self.context, FLAGS.compute_topic,
                dest).AndReturn('dest_queue')
        db.queue_get_for(self.context, FLAGS.compute_topic,
                instance['host']).AndReturn('src_queue')
        tmp_filename = 'test-filename'
        rpc.call(self.context, 'dest_queue',
                {'method': 'create_shared_storage_test_file'}
                ).AndReturn(tmp_filename)
        rpc.call(self.context, 'src_queue',
                {'method': 'check_shared_storage_test_file',
                 'args': {'filename': tmp_filename}}).AndReturn(False)
        rpc.call(self.context, 'dest_queue',
                {'method': 'cleanup_shared_storage_test_file',
                 'args': {'filename': tmp_filename}})
        db.service_get_all_compute_by_host(self.context, dest).AndReturn(
                [{'compute_node': [{'hypervisor_type': 'xen',
                                    'hypervisor_version': 1}]}])
        # newer hypervisor version for src
        db.service_get_all_compute_by_host(self.context,
            instance['host']).AndReturn(
                    [{'compute_node': [{'hypervisor_type': 'xen',
                                        'hypervisor_version': 1,
                                        'cpu_info': 'fake_cpu_info'}]}])
        db.queue_get_for(self.context, FLAGS.compute_topic,
                dest).AndReturn('dest_queue')
        rpc.call(self.context, 'dest_queue',
                {'method': 'compare_cpu',
                 'args': {'cpu_info': 'fake_cpu_info'}}).AndReturn(True)

        db.instance_update(self.context, instance['id'],
                {'vm_state': vm_states.MIGRATING})
        db.volume_update(self.context, instance['volumes'][0]['id'],
                {'status': 'migrating'})
        db.volume_update(self.context, instance['volumes'][1]['id'],
                {'status': 'migrating'})

        driver.cast_to_compute_host(self.context, instance['host'],
                'live_migration', update_db=False,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration)

        self.mox.ReplayAll()
        result = self.driver.schedule_live_migration(self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)
        self.assertEqual(result, None)
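Taken together, the examples above share one pattern: a scheduler picks a host, records the decision on the volume row via db.volume_update (setting 'host' and 'scheduled_at'), and then casts the create request to that host's queue. Below is a minimal, self-contained sketch of that flow for illustration only; fake_volume_update and fake_cast are hypothetical stand-ins for nova.db.volume_update and nova.rpc.cast, not real Nova APIs.

import datetime

# Hypothetical in-memory stand-in for the volumes table.
VOLUMES = {42: {'size': 10, 'status': 'creating'}}


def fake_volume_update(context, volume_id, values):
    # Stand-in for db.volume_update(): merge new values into the volume row.
    VOLUMES[volume_id].update(values)


def fake_cast(context, queue, msg):
    # Stand-in for rpc.cast(): just show what would be sent to the host queue.
    print("cast to %s: %s" % (queue, msg))


def schedule_and_cast(context, host, volume_id, **kwargs):
    # 1. Stamp the volume row with the chosen host and scheduling time,
    #    mirroring the db.volume_update calls in the examples above.
    now = datetime.datetime.utcnow()
    fake_volume_update(context, volume_id,
                       {'host': host, 'scheduled_at': now})
    # 2. Cast the create request to the chosen host's volume queue.
    kwargs['volume_id'] = volume_id
    fake_cast(context, 'volume.%s' % host,
              {'method': 'create_volume', 'args': kwargs})


schedule_and_cast(None, 'fake_host1', 42, snapshot_id=None)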