Example #1
    def _schedule(self, context, topic, request_spec, filter_properties):
        """Picks a host that is up at random."""

        elevated = context.elevated()
        hosts = self.hosts_up(elevated, topic)
        if not hosts:
            msg = _("Is the appropriate service running?")
            raise exception.NoValidHost(reason=msg)

        hosts = self._filter_hosts(request_spec, hosts, filter_properties)
        if not hosts:
            msg = _("Could not find another compute")
            raise exception.NoValidHost(reason=msg)

        return random.choice(hosts)
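For reference, _filter_hosts in this chance scheduler simply drops any hosts listed under 'ignore_hosts' in the filter properties; a minimal sketch of that helper (not the verbatim source) could look like:

    def _filter_hosts(self, request_spec, hosts, filter_properties):
        """Drop hosts the caller asked to ignore."""
        ignore_hosts = filter_properties.get('ignore_hosts', [])
        return [host for host in hosts if host not in ignore_hosts]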
Example #2
    def select_destinations(self, context, request_spec, filter_properties):
        """Selects a filtered set of hosts and nodes."""
        self.notifier.info(context, 'scheduler.select_destinations.start',
                           dict(request_spec=request_spec))

        num_instances = request_spec['num_instances']
        selected_hosts = self._schedule(context, request_spec,
                                        filter_properties)

        # Couldn't fulfill the request_spec
        if len(selected_hosts) < num_instances:
            # Log the details but don't put those into the reason since
            # we don't want to give away too much information about our
            # actual environment.
            LOG.debug(
                'There are %(hosts)d hosts available but '
                '%(num_instances)d instances requested to build.', {
                    'hosts': len(selected_hosts),
                    'num_instances': num_instances
                })

            reason = _('There are not enough hosts available.')
            raise exception.NoValidHost(reason=reason)

        dests = [
            dict(host=host.obj.host,
                 nodename=host.obj.nodename,
                 limits=host.obj.limits) for host in selected_hosts
        ]

        self.notifier.info(context, 'scheduler.select_destinations.end',
                           dict(request_spec=request_spec))
        return dests
Example #3
def populate_retry(filter_properties, instance_uuid):
    max_attempts = _max_attempts()
    force_hosts = filter_properties.get('force_hosts', [])
    force_nodes = filter_properties.get('force_nodes', [])

    # In the case of multiple force hosts/nodes, scheduler should not
    # disable retry filter but traverse all force hosts/nodes one by
    # one till scheduler gets a valid target host.
    if (max_attempts == 1 or len(force_hosts) == 1 or len(force_nodes) == 1):
        # re-scheduling is disabled.
        return

    # retry is enabled, update attempt count:
    retry = filter_properties.setdefault(
        'retry',
        {
            'num_attempts': 0,
            'hosts': []  # list of compute hosts tried
        })
    retry['num_attempts'] += 1

    _log_compute_error(instance_uuid, retry)
    exc = retry.pop('exc', None)

    if retry['num_attempts'] > max_attempts:
        msg = (_('Exceeded max scheduling attempts %(max_attempts)d '
                 'for instance %(instance_uuid)s. '
                 'Last exception: %(exc)s') % {
                     'max_attempts': max_attempts,
                     'instance_uuid': instance_uuid,
                     'exc': exc
                 })
        raise exception.NoValidHost(reason=msg)
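A minimal usage sketch (hypothetical values, and assuming the configured max attempts is greater than two): the conductor passes the same filter_properties dict into every reschedule, so the 'retry' entry accumulates attempts until NoValidHost is raised.

    filter_properties = {}
    populate_retry(filter_properties, 'fake-uuid')    # initial schedule
    filter_properties['retry']['hosts'].append(['host1', 'node1'])
    populate_retry(filter_properties, 'fake-uuid')    # re-schedule after a failure
    # filter_properties['retry'] is now
    # {'num_attempts': 2, 'hosts': [['host1', 'node1']]}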
Example #4
    def test_setup_instance_group_with_filter_not_configured(self, mock_ggd):
        mock_ggd.side_effect = exception.NoValidHost(reason='whatever')
        spec = {'instance_properties': {'uuid': 'fake-uuid'}}
        filter_props = {'group_hosts': ['hostC']}

        self.assertRaises(exception.NoValidHost,
                          scheduler_utils.setup_instance_group, self.context,
                          spec, filter_props)
Example #5
    def _check_not_over_max_retries(self, attempted_hosts):
        if CONF.migrate_max_retries == -1:
            return

        retries = len(attempted_hosts) - 1
        if retries > CONF.migrate_max_retries:
            msg = (_('Exceeded max scheduling retries %(max_retries)d for '
                     'instance %(instance_uuid)s during live migration') % {
                         'max_retries': CONF.migrate_max_retries,
                         'instance_uuid': self.instance.uuid
                     })
            raise exception.NoValidHost(reason=msg)
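To illustrate the arithmetic (host names invented for illustration): attempted_hosts starts with the instance's current host, so N entries mean N - 1 migration retries.

    attempted_hosts = ['source-host', 'dest-1', 'dest-2']
    retries = len(attempted_hosts) - 1    # two retries so far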
Example #6
    def select_destinations(self, context, request_spec, filter_properties):
        """Selects random destinations."""
        num_instances = request_spec['num_instances']
        # NOTE(timello): Returns a list of dicts with 'host', 'nodename' and
        # 'limits' as keys for compatibility with filter_scheduler.
        dests = []
        for i in range(num_instances):
            host = self._schedule(context, CONF.compute_topic,
                    request_spec, filter_properties)
            host_state = dict(host=host, nodename=None, limits=None)
            dests.append(host_state)

        if len(dests) < num_instances:
            raise exception.NoValidHost(reason='')
        return dests
Example #7
    def test_find_destination_when_runs_out_of_hosts(self):
        self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        compute_utils.get_image_metadata(self.context, self.task.image_api,
                                         self.instance_image,
                                         self.instance).AndReturn("image")
        scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                           mox.IgnoreArg()).AndReturn({})
        scheduler_utils.setup_instance_group(
            self.context, {}, {'ignore_hosts': [self.instance_host]})
        self.task.scheduler_client.select_destinations(
            self.context, mox.IgnoreArg(),
            mox.IgnoreArg()).AndRaise(exception.NoValidHost(reason=""))

        self.mox.ReplayAll()
        self.assertRaises(exception.NoValidHost, self.task._find_destination)
Example #8
def _host_find(context, session, src_aggregate, host_ref):
    """Return the host from the xenapi host reference.

    :param src_aggregate: the aggregate that the compute host being put in
                          maintenance (source of VMs) belongs to
    :param host_ref: the hypervisor host reference (destination of VMs)

    :return: the compute host that manages host_ref
    """
    # NOTE: this would be a lot simpler if patron-compute stored
    # CONF.host in the XenServer host's other-config map.
    # TODO(armando-migliaccio): improve according to the note above
    uuid = session.host.get_uuid(host_ref)
    for compute_host, host_uuid in src_aggregate.metadetails.iteritems():
        if host_uuid == uuid:
            return compute_host
    raise exception.NoValidHost(reason='Host %(host_uuid)s could not be found '
                                'from aggregate metadata: %(metadata)s.' % {
                                    'host_uuid': uuid,
                                    'metadata': src_aggregate.metadetails
                                })
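To make the lookup concrete, a hypothetical sketch (host names and UUIDs invented): src_aggregate.metadetails maps compute host names to XenServer host UUIDs, and _host_find returns the compute host whose UUID matches the hypervisor reference, raising NoValidHost when nothing matches.

    metadetails = {'compute-1': '9a0c-...', 'compute-2': '4f7e-...'}
    host_uuid = '4f7e-...'  # what session.host.get_uuid(host_ref) might return
    match = next((h for h, u in metadetails.items() if u == host_uuid), None)
    # match == 'compute-2'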
Example #9
    def _cold_migrate(self, context, instance, flavor, filter_properties,
                      reservations, clean_shutdown):
        image_ref = instance.image_ref
        image = compute_utils.get_image_metadata(
            context, self.image_api, image_ref, instance)

        request_spec = scheduler_utils.build_request_spec(
            context, image, [instance], instance_type=flavor)

        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)
        try:
            scheduler_utils.setup_instance_group(context, request_spec,
                                                 filter_properties)
            scheduler_utils.populate_retry(filter_properties, instance['uuid'])
            hosts = self.scheduler_client.select_destinations(
                    context, request_spec, filter_properties)
            host_state = hosts[0]
        except exception.NoValidHost as ex:
            vm_state = instance.vm_state
            if not vm_state:
                vm_state = vm_states.ACTIVE
            updates = {'vm_state': vm_state, 'task_state': None}
            self._set_vm_state_and_notify(context, instance.uuid,
                                          'migrate_server',
                                          updates, ex, request_spec)
            quotas.rollback()

            # if the flavor IDs match, it's migrate; otherwise resize
            if flavor['id'] == instance['instance_type_id']:
                msg = _("No valid host found for cold migrate")
            else:
                msg = _("No valid host found for resize")
            raise exception.NoValidHost(reason=msg)
        except exception.UnsupportedPolicyException as ex:
            with excutils.save_and_reraise_exception():
                vm_state = instance.vm_state
                if not vm_state:
                    vm_state = vm_states.ACTIVE
                updates = {'vm_state': vm_state, 'task_state': None}
                self._set_vm_state_and_notify(context, instance.uuid,
                                              'migrate_server',
                                              updates, ex, request_spec)
                quotas.rollback()

        try:
            scheduler_utils.populate_filter_properties(filter_properties,
                                                       host_state)
            # context is not serializable
            filter_properties.pop('context', None)

            (host, node) = (host_state['host'], host_state['nodename'])
            self.compute_rpcapi.prep_resize(
                context, image, instance,
                flavor, host,
                reservations, request_spec=request_spec,
                filter_properties=filter_properties, node=node,
                clean_shutdown=clean_shutdown)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                updates = {'vm_state': instance.vm_state,
                           'task_state': None}
                self._set_vm_state_and_notify(context, instance.uuid,
                                              'migrate_server',
                                              updates, ex, request_spec)
                quotas.rollback()
Example #10
    def test_migrate_live_no_valid_host(self):
        self._test_migrate_live_failed_with_exception(
            exception.NoValidHost(reason=''))
Example #11
    def host_maintenance_mode(self, host, mode):
        """Start/Stop host maintenance window. On start, it triggers
        guest VMs evacuation.
        """
        if not mode:
            return 'off_maintenance'
        host_list = [
            host_ref for host_ref in self._session.host.get_all()
            if host_ref != self._session.host_ref
        ]
        migrations_counter = vm_counter = 0
        ctxt = context.get_admin_context()
        for vm_ref, vm_rec in vm_utils.list_vms(self._session):
            for host_ref in host_list:
                try:
                    # Ensure only guest instances are migrated
                    uuid = vm_rec['other_config'].get('patron_uuid')
                    if not uuid:
                        name = vm_rec['name_label']
                        uuid = _uuid_find(ctxt, host, name)
                        if not uuid:
                            LOG.info(
                                _LI('Instance %(name)s running on '
                                    '%(host)s could not be found in '
                                    'the database: assuming it is a '
                                    'worker VM and skipping migration '
                                    'to a new host'), {
                                        'name': name,
                                        'host': host
                                    })
                            continue
                    instance = objects.Instance.get_by_uuid(ctxt, uuid)
                    vm_counter = vm_counter + 1

                    aggregate = objects.AggregateList.get_by_host(
                        ctxt, host, key=pool_states.POOL_FLAG)
                    if not aggregate:
                        msg = _('Aggregate for host %(host)s could not be'
                                ' found.') % dict(host=host)
                        raise exception.NotFound(msg)

                    dest = _host_find(ctxt, self._session, aggregate[0],
                                      host_ref)
                    instance.host = dest
                    instance.task_state = task_states.MIGRATING
                    instance.save()

                    self._session.VM.pool_migrate(vm_ref, host_ref,
                                                  {"live": "true"})
                    migrations_counter = migrations_counter + 1

                    instance.vm_state = vm_states.ACTIVE
                    instance.save()

                    break
                except self._session.XenAPI.Failure:
                    LOG.exception(
                        _LE('Unable to migrate VM %(vm_ref)s '
                            'from %(host)s'), {
                                'vm_ref': vm_ref,
                                'host': host
                            })
                    instance.host = host
                    instance.vm_state = vm_states.ACTIVE
                    instance.save()

        if vm_counter == migrations_counter:
            return 'on_maintenance'
        else:
            raise exception.NoValidHost(reason='Unable to find suitable '
                                        'host for VMs evacuation')