Example #1
 def select_hosts(self, context, request_spec, filter_properties):
     """Selects a filtered set of hosts."""
     instance_uuids = request_spec.get('instance_uuids')
     hosts = [
         host.obj.host for host in self._schedule(
             context, request_spec, filter_properties, instance_uuids)
     ]
     if not hosts:
         raise exception.NoValidHost(reason="")
     return hosts
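
For context, callers of these select_hosts entry points generally catch NoValidHost rather than letting it escape raw. A minimal sketch of such a call site (the wrapper function and logger are illustrative, not part of Nova):

import logging

from nova import exception

LOG = logging.getLogger(__name__)


def schedule_or_log(driver, context, request_spec, filter_properties):
    # Hypothetical wrapper: log the scheduling failure, then re-raise so
    # the RPC layer can report it to the original caller.
    try:
        return driver.select_hosts(context, request_spec, filter_properties)
    except exception.NoValidHost as ex:
        LOG.warning("No valid host found: %s", ex)
        raise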
Example #2
    def test_select_hosts_throws_rpc_clientexception(self):
        self.mox.StubOutWithMock(self.manager.driver, 'select_hosts')

        self.manager.driver.select_hosts(self.context, {}, {}).AndRaise(
                exception.NoValidHost(reason=""))

        self.mox.ReplayAll()
        self.assertRaises(rpc_common.ClientException,
                          self.manager.select_hosts,
                          self.context, {}, {})
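
mox has long been retired; for comparison, roughly the same test written with the standard library's unittest.mock (a sketch, assuming the same test-class fixtures and a module-level 'from unittest import mock') could look like this:

    def test_select_hosts_throws_rpc_clientexception(self):
        # Stub the driver method to raise, then assert the manager wraps
        # the failure in the RPC-facing exception type.
        with mock.patch.object(self.manager.driver, 'select_hosts',
                               side_effect=exception.NoValidHost(reason="")):
            self.assertRaises(rpc_common.ClientException,
                              self.manager.select_hosts,
                              self.context, {}, {})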
Example #3
def _get_group_details(context, instance_uuid, user_group_hosts=None):
    """Provide group_hosts and group_policies sets related to instances if
    those instances are belonging to a group and if corresponding filters are
    enabled.

    :param instance_uuid: UUID of the instance to check
    :param user_group_hosts: Hosts from the group or empty set

    :returns: None or namedtuple GroupDetails
    """
    global _SUPPORTS_AFFINITY
    if _SUPPORTS_AFFINITY is None:
        _SUPPORTS_AFFINITY = validate_filter(
            'ServerGroupAffinityFilter')
    global _SUPPORTS_ANTI_AFFINITY
    if _SUPPORTS_ANTI_AFFINITY is None:
        _SUPPORTS_ANTI_AFFINITY = validate_filter(
            'ServerGroupAntiAffinityFilter')
    _supports_server_groups = any((_SUPPORTS_AFFINITY,
                                   _SUPPORTS_ANTI_AFFINITY))
    if not _supports_server_groups or not instance_uuid:
        return

    try:
        group = objects.InstanceGroup.get_by_instance_uuid(context,
                                                           instance_uuid)
    except exception.InstanceGroupNotFound:
        return

    policies = set(('anti-affinity', 'affinity'))
    if any((policy in policies) for policy in group.policies):
        if (not _SUPPORTS_AFFINITY and 'affinity' in group.policies):
            msg = _("ServerGroupAffinityFilter not configured")
            LOG.error(msg)
            raise exception.NoValidHost(reason=msg)
        if (not _SUPPORTS_ANTI_AFFINITY and 'anti-affinity' in group.policies):
            msg = _("ServerGroupAntiAffinityFilter not configured")
            LOG.error(msg)
            raise exception.NoValidHost(reason=msg)
        group_hosts = set(group.get_hosts(context))
        user_hosts = set(user_group_hosts) if user_group_hosts else set()
        return GroupDetails(hosts=user_hosts | group_hosts,
                            policies=group.policies)
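
A sketch of how the returned GroupDetails namedtuple might be consumed: a hypothetical caller folds the group information into the scheduler's filter properties, skipping instances that belong to no group (when the function returns None):

def apply_group_details(context, instance_uuid, filter_properties):
    # Illustrative helper built on _get_group_details above; not Nova code.
    group_info = _get_group_details(context, instance_uuid)
    if group_info is not None:
        filter_properties['group_hosts'] = group_info.hosts
        filter_properties['group_policies'] = group_info.policies
    return filter_properties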
Example #4
    def test_select_hosts_throws_rpc_clientexception(self):
        self.mox.StubOutWithMock(self.manager.driver, 'select_destinations')

        self.manager.driver.select_destinations(self.context, {}, {}).AndRaise(
                exception.NoValidHost(reason=""))

        self.mox.ReplayAll()
        self.assertRaises(messaging.ExpectedException,
                          self.manager.select_hosts,
                          self.context, {}, {})
Example #5
 def select_hosts(self, context, request_spec, filter_properties):
     """Selects a set of random hosts."""
     hosts = [
         self._schedule(context, CONF.compute_topic, request_spec,
                        filter_properties)
         for instance_uuid in request_spec.get('instance_uuids', [])
     ]
     if not hosts:
         raise exception.NoValidHost(reason="")
     return hosts
Example #6
    def schedule_run_instance(self, context, request_spec,
                              admin_password, injected_files,
                              requested_networks, is_first_time,
                              filter_properties):
        """This method is called from nova.compute.api to provision
        an instance.  We first create a build plan (a list of WeightedHosts)
        and then provision.

        Returns a list of the instances created.
        """
        instance_uuids = request_spec.get('instance_uuids')
        num_instances = len(instance_uuids)
        LOG.debug(_("Attempting to build %(num_instances)d instance(s)") %
                locals())

        payload = dict(request_spec=request_spec)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.start', notifier.INFO, payload)

        weighed_hosts = self._schedule(context, request_spec,
                filter_properties, instance_uuids)

        # NOTE(comstud): Make sure we do not pass this through.  It
        # contains an instance of RpcContext that cannot be serialized.
        filter_properties.pop('context', None)

        for num, instance_uuid in enumerate(instance_uuids):
            request_spec['instance_properties']['launch_index'] = num

            try:
                try:
                    weighed_host = weighed_hosts.pop(0)
                except IndexError:
                    raise exception.NoValidHost(reason="")

                self._provision_resource(context, weighed_host,
                                         request_spec,
                                         filter_properties,
                                         requested_networks,
                                         injected_files, admin_password,
                                         is_first_time,
                                         instance_uuid=instance_uuid)
            except Exception as ex:
                # NOTE(vish): we don't reraise the exception here to make sure
                #             that all instances in the request get set to
                #             error properly
                driver.handle_schedule_error(context, ex, instance_uuid,
                                             request_spec)
            # scrub retry host list in case we're scheduling multiple
            # instances:
            retry = filter_properties.get('retry', {})
            retry['hosts'] = []

        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.end', notifier.INFO, payload)
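
The retry scrub near the end works through aliasing: the dict returned by filter_properties.get('retry', {}) is the very object stored in filter_properties, so replacing its 'hosts' list in place is visible on the next loop iteration. A standalone illustration:

filter_properties = {'retry': {'num_attempts': 1,
                               'hosts': [['host1', 'node1']]}}
retry = filter_properties.get('retry', {})
retry['hosts'] = []   # mutates the same dict held by filter_properties
assert filter_properties['retry']['hosts'] == []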
Example #7
    def _schedule(self, context, topic, spec_obj, instance_uuids):
        """Picks a host that is up at random."""

        elevated = context.elevated()
        hosts = self.hosts_up(elevated, topic)
        if not hosts:
            msg = _("Is the appropriate service running?")
            raise exception.NoValidHost(reason=msg)

        hosts = self._filter_hosts(hosts, spec_obj)
        if not hosts:
            msg = _("Could not find another compute")
            raise exception.NoValidHost(reason=msg)

        num_instances = len(instance_uuids)
        selected_host_lists = []
        # If possible, we'd like to return distinct hosts for each instance.
        # But when there are fewer available hosts than requested instances, we
        # will need to return some duplicates.
        if len(hosts) >= num_instances:
            selected_hosts = random.sample(hosts, num_instances)
        else:
            selected_hosts = [
                random.choice(hosts) for i in range(num_instances)
            ]

        # We can't return dupes as alternates, since alternates are used when
        # building to the selected host fails.
        alts_per_instance = min(len(hosts), CONF.scheduler.max_attempts)
        for sel_host in selected_hosts:
            sel_plus_alts = [sel_host]
            while len(sel_plus_alts) < alts_per_instance:
                candidate = random.choice(hosts)
                if (candidate not in sel_plus_alts and
                        candidate not in selected_hosts):
                    # We don't want to include a selected host as an alternate,
                    # as it will have a high likelihood of not having enough
                    # resources left after it has an instance built on it.
                    sel_plus_alts.append(candidate)
            selected_host_lists.append(sel_plus_alts)
        return selected_host_lists
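
The two branches above differ only in sampling with or without replacement; a quick standalone illustration of the distinction:

import random

hosts = ['host-a', 'host-b', 'host-c']

# Enough hosts for every instance: sample without replacement, all distinct.
print(random.sample(hosts, 2))                    # e.g. ['host-c', 'host-a']

# More instances than hosts: pick with replacement, duplicates are possible.
print([random.choice(hosts) for _ in range(5)])   # entries may repeat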
Example #8
    def test_create_volume_no_valid_host_puts_volume_in_error(self):
        self._mox_schedule_method_helper('schedule_create_volume')
        self.mox.StubOutWithMock(db, 'volume_update')

        self.manager.driver.schedule_create_volume(self.context, '1', '2',
                None).AndRaise(exception.NoValidHost(reason=''))
        db.volume_update(self.context, '1', {'status': 'error'})

        self.mox.ReplayAll()
        self.assertRaises(exception.NoValidHost, self.manager.create_volume,
                          self.context, '1', '2')
Example #9
    def _check_not_over_max_retries(self, attempted_hosts):
        if CONF.migrate_max_retries == -1:
            return

        retries = len(attempted_hosts) - 1
        if retries > CONF.migrate_max_retries:
            msg = (_('Exceeded max scheduling retries %(max_retries)d for '
                     'instance %(instance_uuid)s during live migration')
                   % {'max_retries': retries,
                      'instance_uuid': self.instance.uuid})
            raise exception.NoValidHost(reason=msg)
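
The subtraction is deliberate: the first entry in attempted_hosts is the original placement attempt, so only later entries count as retries. A quick worked check of the arithmetic (values fabricated):

# Suppose migrate_max_retries is 2 and three more hosts failed after the
# original placement:
attempted_hosts = ['orig', 'retry1', 'retry2', 'retry3']
retries = len(attempted_hosts) - 1   # 3; the first entry is not a retry
assert retries > 2                   # limit exceeded, NoValidHost is raised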
Example #10
    def schedule_run_instance(self, context, request_spec, admin_password,
                              injected_files, requested_networks,
                              is_first_time, filter_properties, reservations):
        """This method is called from nova.compute.api to provision
        an instance.  We first create a build plan (a list of WeightedHosts)
        and then provision.

        Returns a list of the instances created.
        """
        elevated = context.elevated()
        num_instances = request_spec.get('num_instances', 1)
        LOG.debug(
            _("Attempting to build %(num_instances)d instance(s)") % locals())

        payload = dict(request_spec=request_spec)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.start', notifier.INFO, payload)

        weighted_hosts = self._schedule(context, "compute", request_spec,
                                        filter_properties)

        if not weighted_hosts:
            raise exception.NoValidHost(reason="")

        # NOTE(comstud): Make sure we do not pass this through.  It
        # contains an instance of RpcContext that cannot be serialized.
        filter_properties.pop('context', None)

        instances = []
        for num in xrange(num_instances):
            if not weighted_hosts:
                break
            weighted_host = weighted_hosts.pop(0)

            request_spec['instance_properties']['launch_index'] = num

            instance = self._provision_resource(elevated, weighted_host,
                                                request_spec, reservations,
                                                filter_properties,
                                                requested_networks,
                                                injected_files, admin_password,
                                                is_first_time)
            # scrub retry host list in case we're scheduling multiple
            # instances:
            retry = filter_properties.get('retry', {})
            retry['hosts'] = []

            if instance:
                instances.append(instance)

        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.end', notifier.INFO, payload)

        return instances
Example #11
    def _cold_migrate(self, context, instance, flavor, filter_properties,
                      reservations):
        image_ref = instance.image_ref
        image = compute_utils.get_image_metadata(
            context, self.image_api, image_ref, instance)

        request_spec = scheduler_utils.build_request_spec(
            context, image, [instance], instance_type=flavor)

        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)
        try:
            scheduler_utils.populate_retry(filter_properties, instance['uuid'])
            hosts = self.scheduler_rpcapi.select_destinations(
                    context, request_spec, filter_properties)
            host_state = hosts[0]
        except exception.NoValidHost as ex:
            vm_state = instance['vm_state']
            if not vm_state:
                vm_state = vm_states.ACTIVE
            updates = {'vm_state': vm_state, 'task_state': None}
            self._set_vm_state_and_notify(context, 'migrate_server',
                                          updates, ex, request_spec)
            quotas.rollback()

            msg = _("No valid host found for cold migrate")
            raise exception.NoValidHost(reason=msg)

        try:
            scheduler_utils.populate_filter_properties(filter_properties,
                                                       host_state)
            # context is not serializable
            filter_properties.pop('context', None)

            # TODO(timello): originally, instance_type in request_spec
            # on compute.api.resize does not have 'extra_specs', so we
            # remove it for now to keep tests backward compatibility.
            request_spec['instance_type'].pop('extra_specs')

            (host, node) = (host_state['host'], host_state['nodename'])
            self.compute_rpcapi.prep_resize(
                context, image, instance,
                flavor, host,
                reservations, request_spec=request_spec,
                filter_properties=filter_properties, node=node)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                updates = {'vm_state': instance['vm_state'],
                           'task_state': None}
                self._set_vm_state_and_notify(context, 'migrate_server',
                                              updates, ex, request_spec)
                quotas.rollback()
Example #12
    def test_find_destination_when_runs_out_of_hosts(self):
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        scheduler_utils.setup_instance_group(self.context, self.fake_spec)
        self.task.scheduler_client.select_destinations(self.context,
                self.fake_spec, [self.instance.uuid], return_objects=True,
                return_alternates=False).AndRaise(
                exception.NoValidHost(reason=""))

        self.mox.ReplayAll()
        self.assertRaises(exception.NoValidHost, self.task._find_destination)
Example #13
    def _cold_migrate(self, context, instance, flavor, filter_properties,
                      reservations, clean_shutdown, request_spec):
        image = utils.get_image_from_system_metadata(
            instance.system_metadata)

        # NOTE(sbauza): If a reschedule occurs when prep_resize(), then
        # it only provides filter_properties legacy dict back to the
        # conductor with no RequestSpec part of the payload.
        if not request_spec:
            request_spec = objects.RequestSpec.from_components(
                context, instance.uuid, image,
                instance.flavor, instance.numa_topology, instance.pci_requests,
                filter_properties, None, instance.availability_zone)

        task = self._build_cold_migrate_task(context, instance, flavor,
                                             request_spec,
                                             reservations, clean_shutdown)
        # TODO(sbauza): Provide directly the RequestSpec object once
        # _set_vm_state_and_notify() accepts it
        legacy_spec = request_spec.to_legacy_request_spec_dict()
        try:
            task.execute()
        except exception.NoValidHost as ex:
            vm_state = instance.vm_state
            if not vm_state:
                vm_state = vm_states.ACTIVE
            updates = {'vm_state': vm_state, 'task_state': None}
            self._set_vm_state_and_notify(context, instance.uuid,
                                          'migrate_server',
                                          updates, ex, legacy_spec)

            # if the flavor IDs match, it's migrate; otherwise resize
            if flavor.id == instance.instance_type_id:
                msg = _("No valid host found for cold migrate")
            else:
                msg = _("No valid host found for resize")
            raise exception.NoValidHost(reason=msg)
        except exception.UnsupportedPolicyException as ex:
            with excutils.save_and_reraise_exception():
                vm_state = instance.vm_state
                if not vm_state:
                    vm_state = vm_states.ACTIVE
                updates = {'vm_state': vm_state, 'task_state': None}
                self._set_vm_state_and_notify(context, instance.uuid,
                                              'migrate_server',
                                              updates, ex, legacy_spec)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                updates = {'vm_state': instance.vm_state,
                           'task_state': None}
                self._set_vm_state_and_notify(context, instance.uuid,
                                              'migrate_server',
                                              updates, ex, legacy_spec)
Example #14
    def host_maintenance_mode(self, host, mode):
        """Start/Stop host maintenance window. On start, it triggers
        guest VMs evacuation."""
        if mode:
            host_list = [host_ref for host_ref in
                         self._session.call_xenapi('host.get_all') \
                         if host_ref != self._session.get_xenapi_host()]
            migrations_counter = vm_counter = 0
            ctxt = context.get_admin_context()
            for vm_ref, vm_rec in vm_utils.VMHelper.list_vms(self._session):
                for host_ref in host_list:
                    try:
                        # Ensure only guest instances are migrated
                        uuid = vm_rec['other_config'].get('nova_uuid')
                        if not uuid:
                            name = vm_rec['name_label']
                            uuid = _uuid_find(ctxt, host, name)
                            if not uuid:
                                msg = _('Instance %(name)s running on %(host)s'
                                        ' could not be found in the database:'
                                        ' assuming it is a worker VM and skip'
                                        'ping migration to a new host')
                                LOG.info(msg % locals())
                                continue
                        instance = db.instance_get_by_uuid(ctxt, uuid)
                        vm_counter = vm_counter + 1

                        dest = _host_find(ctxt, self._session, host, host_ref)
                        db.instance_update(ctxt, instance.id, {
                            'host': dest,
                            'vm_state': vm_states.MIGRATING
                        })
                        self._session.call_xenapi('VM.pool_migrate', vm_ref,
                                                  host_ref, {})
                        migrations_counter = migrations_counter + 1
                        db.instance_update(ctxt, instance.id,
                                           {'vm_state': vm_states.ACTIVE})
                        break
                    except self.XenAPI.Failure:
                        LOG.exception('Unable to migrate VM %(vm_ref)s '
                                      'from %(host)s' % locals())
                        db.instance_update(ctxt, instance.id, {
                            'host': host,
                            'vm_state': vm_states.ACTIVE
                        })
            if vm_counter == migrations_counter:
                return 'on_maintenance'
            else:
                raise exception.NoValidHost(reason='Unable to find suitable '
                                            'host for VMs evacuation')
        else:
            return 'off_maintenance'
Example #15
    def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
        """Picks a host that is up and has the fewest volumes."""
        elevated = context.elevated()

        volume_ref = db.volume_get(context, volume_id)
        availability_zone = volume_ref.get('availability_zone')

        zone, host = None, None
        if availability_zone:
            zone, _x, host = availability_zone.partition(':')
        if host and context.is_admin:
            service = db.service_get_by_args(elevated, host, 'nova-volume')
            if not utils.service_is_up(service):
                raise exception.WillNotSchedule(host=host)
            driver.cast_to_volume_host(context,
                                       host,
                                       'create_volume',
                                       volume_id=volume_id,
                                       **_kwargs)
            return None

        results = db.service_get_all_volume_sorted(elevated)
        if zone:
            results = [(service, gigs) for (service, gigs) in results
                       if service['availability_zone'] == zone]
        for result in results:
            (service, volume_gigabytes) = result
            if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes:
                msg = _("Not enough allocatable volume gigabytes remaining")
                raise exception.NoValidHost(reason=msg)
            if utils.service_is_up(service) and not service['disabled']:
                driver.cast_to_volume_host(context,
                                           service['host'],
                                           'create_volume',
                                           volume_id=volume_id,
                                           **_kwargs)
                return None
        msg = _("Is the appropriate service running?")
        raise exception.NoValidHost(reason=msg)
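
The zone:host split relies on str.partition, which always returns a 3-tuple even when the separator is absent, so the unpacking never fails. A quick illustration:

zone, _sep, host = 'nova:volume-1'.partition(':')
assert (zone, host) == ('nova', 'volume-1')

# No separator: everything lands in the first element and host stays empty,
# which is why the 'if host' branch is skipped for plain zone names.
zone, _sep, host = 'nova'.partition(':')
assert (zone, host) == ('nova', '')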
Example #16
    def _schedule_instance(self, context, instance_opts, *_args, **_kwargs):
        """Picks a host that is up and has the fewest running instances."""
        elevated = context.elevated()

        availability_zone = instance_opts.get('availability_zone')

        zone, host = FLAGS.default_schedule_zone, None
        if availability_zone:
            zone, _x, host = availability_zone.partition(':')

        if host and context.is_admin:
            service = db.service_get_by_args(elevated, host, 'nova-compute')
            if not utils.service_is_up(service):
                raise exception.WillNotSchedule(host=host)
            return host

        results = db.service_get_all_compute_sorted(elevated)
        in_isolation = instance_opts['image_ref'] in FLAGS.isolated_images
        check_cores = not in_isolation or not FLAGS.skip_isolated_core_check
        if zone:
            results = [(service, cores) for (service, cores) in results
                       if service['availability_zone'] == zone]
        for result in results:
            (service, instance_cores) = result
            if in_isolation and service['host'] not in FLAGS.isolated_hosts:
                # isolated images run on isolated hosts
                continue
            if service['host'] in FLAGS.isolated_hosts and not in_isolation:
                # images that aren't isolated only run on general hosts
                continue
            if (check_cores and
                    instance_cores + instance_opts['vcpus'] > FLAGS.max_cores):
                msg = _("Not enough allocatable CPU cores remaining")
                raise exception.NoValidHost(reason=msg)
            if utils.service_is_up(service) and not service['disabled']:
                return service['host']
        msg = _("Is the appropriate service running?")
        raise exception.NoValidHost(reason=msg)
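
The two continue guards in the loop reduce to a single equivalence: a host is eligible only when the image's isolation flag matches the host's. Distilled into a standalone predicate (illustrative, not Nova code):

def host_allowed(in_isolation, host_is_isolated):
    # Isolated images run only on isolated hosts; ordinary images run only
    # on general hosts (mirrors the two 'continue' guards above).
    return in_isolation == host_is_isolated

assert host_allowed(True, True)
assert not host_allowed(True, False)    # isolated image, general host
assert not host_allowed(False, True)    # ordinary image, isolated host
assert host_allowed(False, False)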
Example #17
    def schedule_run_instance(self, context, request_spec, *args, **kwargs):
        """This method is called from nova.compute.api to provision
        an instance. However we need to look at the parameters being
        passed in to see if this is a request to:
        1. Create build plan (a list of WeightedHosts) and then provision, or
        2. Use the WeightedHost information in the request parameters
           to simply create the instance (either in this zone or
           a child zone).

        returns a list of the instances created.
        """

        elevated = context.elevated()
        num_instances = request_spec.get('num_instances', 1)
        LOG.debug(_("Attempting to build %(num_instances)d instance(s)") %
                locals())

        weighted_hosts = []

        # Having a 'blob' hint means we've already provided a build plan.
        # We need to turn this back into a WeightedHost object.
        blob = request_spec.get('blob', None)
        if blob:
            weighted_hosts.append(self._make_weighted_host_from_blob(blob))
        else:
            # No plan ... better make one.
            weighted_hosts = self._schedule(elevated, "compute", request_spec,
                                        *args, **kwargs)

        if not weighted_hosts:
            raise exception.NoValidHost(reason=_(""))

        instances = []
        for num in xrange(num_instances):
            if not weighted_hosts:
                break
            weighted_host = weighted_hosts.pop(0)

            instance = None
            if weighted_host.host:
                instance = self._provision_resource_locally(elevated,
                                        weighted_host, request_spec, kwargs)
            else:
                instance = self._ask_child_zone_to_create_instance(elevated,
                                        weighted_host, request_spec, kwargs)

            if instance:
                instances.append(instance)

        return instances
Example #18
    def select_destinations(self,
                            ctxt,
                            request_spec=None,
                            filter_properties=None,
                            spec_obj=_sentinel,
                            instance_uuids=None):
        """Returns destinations(s) best suited for this RequestSpec.

        The result should be a list of dicts with 'host', 'nodename' and
        'limits' as keys.
        """
        LOG.debug("Starting to schedule for instances: %s", instance_uuids)

        # TODO(sbauza): Change the method signature to only accept a spec_obj
        # argument once API v5 is provided.
        if spec_obj is self._sentinel:
            spec_obj = objects.RequestSpec.from_primitives(
                ctxt, request_spec, filter_properties)
        resources = utils.resources_from_request_spec(spec_obj)
        alloc_reqs_by_rp_uuid, provider_summaries = None, None
        if self.driver.USES_ALLOCATION_CANDIDATES:
            res = self.placement_client.get_allocation_candidates(resources)
            if res is None:
                # We have to handle the case that we failed to connect to the
                # Placement service and the safe_connect decorator on
                # get_allocation_candidates returns None.
                alloc_reqs, provider_summaries = None, None
            else:
                alloc_reqs, provider_summaries = res
            if not alloc_reqs:
                LOG.debug("Got no allocation candidates from the Placement "
                          "API. This may be a temporary occurrence as compute "
                          "nodes start up and begin reporting inventory to "
                          "the Placement service.")
                raise exception.NoValidHost(reason="")
            else:
                # Build a dict of lists of allocation requests, keyed by
                # provider UUID, so that when we attempt to claim resources for
                # a host, we can grab an allocation request easily
                alloc_reqs_by_rp_uuid = collections.defaultdict(list)
                for ar in alloc_reqs:
                    for rr in ar['allocations']:
                        rp_uuid = rr['resource_provider']['uuid']
                        alloc_reqs_by_rp_uuid[rp_uuid].append(ar)

        dests = self.driver.select_destinations(ctxt, spec_obj, instance_uuids,
                                                alloc_reqs_by_rp_uuid,
                                                provider_summaries)
        dest_dicts = [_host_state_obj_to_dict(d) for d in dests]
        return jsonutils.to_primitive(dest_dicts)
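
The grouping step at the end builds an inverted index from resource-provider UUID to every allocation request that references it; a toy run with fabricated data in the same shape:

import collections

alloc_reqs = [
    {'allocations': [{'resource_provider': {'uuid': 'rp-1'}},
                     {'resource_provider': {'uuid': 'rp-2'}}]},
    {'allocations': [{'resource_provider': {'uuid': 'rp-1'}}]},
]

alloc_reqs_by_rp_uuid = collections.defaultdict(list)
for ar in alloc_reqs:
    for rr in ar['allocations']:
        alloc_reqs_by_rp_uuid[rr['resource_provider']['uuid']].append(ar)

assert len(alloc_reqs_by_rp_uuid['rp-1']) == 2   # both requests touch rp-1
assert len(alloc_reqs_by_rp_uuid['rp-2']) == 1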
Example #19
    def select_destinations(self, context, request_spec, filter_properties):
        """Selects a filtered set of hosts and nodes."""
        num_instances = request_spec['num_instances']
        instance_uuids = request_spec.get('instance_uuids')
        selected_hosts = self._schedule(context, request_spec,
                                        filter_properties, instance_uuids)

        # Couldn't fulfill the request_spec
        if len(selected_hosts) < num_instances:
            raise exception.NoValidHost(reason='')

        dests = [dict(host=host.obj.host, nodename=host.obj.nodename,
                      limits=host.obj.limits) for host in selected_hosts]
        return dests
Example #20
    def test_find_destination_when_runs_out_of_hosts(self):
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        scheduler_utils.setup_instance_group(self.context, self.fake_spec)
        self.task.scheduler_client.select_destinations(
            self.context, self.fake_spec,
            [self.instance.uuid]).AndRaise(exception.NoValidHost(reason=""))

        self.mox.ReplayAll()
        self.assertRaises(exception.NoValidHost, self.task._find_destination)
Example #21
    def select_destinations(self, context, request_spec, filter_properties):
        try:
            return super(HuaweiFilterScheduler,
                         self).select_destinations(context, request_spec,
                                                   filter_properties)
        except exception.NoValidHost:
            reason = ''

            if '__tracker' in filter_properties:
                LOG.debug('generate trace before raise')
                tracker = filter_properties.pop('__tracker')
                reason = ('Filter traceback: %s' % tracker.to_string())

            raise exception.NoValidHost(reason=reason)
Example #22
    def test_migrate_live_unexpected_error(self):
        exc = exception.NoValidHost(reason="No valid host found")
        self.mox.StubOutWithMock(self.compute_api, 'live_migrate')
        instance = self._stub_instance_get()
        self.compute_api.live_migrate(self.context, instance, None,
                                      self.disk_over_commit, 'hostname',
                                      self.force, self.async).AndRaise(exc)

        self.mox.ReplayAll()
        body = {'os-migrateLive':
                {'host': 'hostname', 'block_migration': 'auto'}}

        self.assertRaises(webob.exc.HTTPInternalServerError,
                          self.controller._migrate_live,
                          self.req, instance.uuid, body=body)
Example #23
    def select_destinations(self, context, request_spec, filter_properties):
        """Selects random destinations."""
        num_instances = request_spec['num_instances']
        # NOTE(timello): Returns a list of dicts with 'host', 'nodename' and
        # 'limits' as keys for compatibility with filter_scheduler.
        dests = []
        for i in range(num_instances):
            host = self._schedule(context, CONF.compute_topic, request_spec,
                                  filter_properties)
            host_state = dict(host=host, nodename=None, limits=None)
            dests.append(host_state)

        if len(dests) < num_instances:
            raise exception.NoValidHost(reason='')
        return dests
Example #24
    def select_destinations(self, context, spec_obj):
        """Selects random destinations."""
        num_instances = spec_obj.num_instances
        # NOTE(timello): Returns a list of dicts with 'host', 'nodename' and
        # 'limits' as keys for compatibility with filter_scheduler.
        dests = []
        for i in range(num_instances):
            host = self._schedule(context, CONF.compute_topic, spec_obj)
            host_state = dict(host=host, nodename=None, limits=None)
            dests.append(host_state)

        if len(dests) < num_instances:
            reason = _('There are not enough hosts available.')
            raise exception.NoValidHost(reason=reason)
        return dests
Example #25
    def select_destinations(self, ctxt, request_spec, filter_properties):
        self.host_manager.reload_aggregates(ctxt)
        availability_zone = self.host_manager.get_availability_zone(
            request_spec[u'instance_properties'][u'availability_zone'])

        for site in availability_zone.valid_sites:
            site.prepare_for_instance(request_spec, filter_properties)
            return [{
                'host': site.name,
                'nodename': site.get_nodes()[0].hypervisor_hostname,
                'limits': None,
            }]
        else:
            raise exception.NoValidHost(
                "No sites match requested availability zone")
Example #26
def get_host_ref(session, cluster=None):
    """Get reference to a host within the cluster specified."""
    if cluster is None:
        results = session._call_method(vim_util, "get_objects", "HostSystem")
        _cancel_retrieve_if_necessary(session, results)
        host_mor = results.objects[0].obj
    else:
        host_ret = session._call_method(vim_util, "get_dynamic_property",
                                        cluster, "ClusterComputeResource",
                                        "host")
        if not host_ret or not host_ret.ManagedObjectReference:
            msg = _('No host available on cluster')
            raise exception.NoValidHost(reason=msg)
        host_mor = host_ret.ManagedObjectReference[0]

    return host_mor
Example #27
    def test_find_destination_when_runs_out_of_hosts(self):
        self.mox.StubOutWithMock(self.task.image_service, 'show')
        self.mox.StubOutWithMock(flavors, 'extract_flavor')
        self.mox.StubOutWithMock(self.task.scheduler_rpcapi, 'select_hosts')
        self.mox.StubOutWithMock(self.task,
                '_check_compatible_with_source_hypervisor')
        self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

        self.task.image_service.show(self.context,
                                     self.instance_image).AndReturn("image")
        flavors.extract_flavor(self.instance).AndReturn("inst_type")
        self.task.scheduler_rpcapi.select_hosts(self.context, mox.IgnoreArg(),
                mox.IgnoreArg()).AndRaise(exception.NoValidHost(reason=""))

        self.mox.ReplayAll()
        self.assertRaises(exception.NoValidHost, self.task._find_destination)
Example #28
    def test_find_destination_when_runs_out_of_hosts(self):
        self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(self.task.scheduler_rpcapi,
                                 'select_destinations')
        compute_utils.get_image_metadata(self.context, self.task.image_service,
                                         self.instance_image,
                                         self.instance).AndReturn("image")
        scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                           mox.IgnoreArg()).AndReturn({})
        self.task.scheduler_rpcapi.select_destinations(
            self.context, mox.IgnoreArg(),
            mox.IgnoreArg()).AndRaise(exception.NoValidHost(reason=""))

        self.mox.ReplayAll()
        self.assertRaises(exception.NoValidHost, self.task._find_destination)
Example #29
    def select_destinations(self, context, request_spec, filter_properties):
        """Selects a filtered set of hosts and nodes."""
        # TODO(sbauza): Change the select_destinations method to accept a
        # RequestSpec object directly (and add a new RPC API method for passing
        # a RequestSpec object over the wire)
        spec_obj = objects.RequestSpec.from_primitives(context, request_spec,
                                                       filter_properties)
        self.notifier.info(
            context, 'scheduler.select_destinations.start',
            dict(request_spec=spec_obj.to_legacy_request_spec_dict()))

        num_instances = spec_obj.num_instances
        selected_hosts = self._schedule(context, spec_obj)

        # Couldn't fulfill the request_spec
        if len(selected_hosts) < num_instances:
            # NOTE(Rui Chen): If multiple creates failed, set the updated time
            # of selected HostState to None so that these HostStates are
            # refreshed according to database in next schedule, and release
            # the resource consumed by instance in the process of selecting
            # host.
            for host in selected_hosts:
                host.obj.updated = None

            # Log the details but don't put those into the reason since
            # we don't want to give away too much information about our
            # actual environment.
            LOG.debug(
                'There are %(hosts)d hosts available but '
                '%(num_instances)d instances requested to build.', {
                    'hosts': len(selected_hosts),
                    'num_instances': num_instances
                })

            reason = _('There are not enough hosts available.')
            raise exception.NoValidHost(reason=reason)

        dests = [
            dict(host=host.obj.host,
                 nodename=host.obj.nodename,
                 limits=host.obj.limits) for host in selected_hosts
        ]

        self.notifier.info(
            context, 'scheduler.select_destinations.end',
            dict(request_spec=spec_obj.to_legacy_request_spec_dict()))
        return dests
Example #30
    def test_handle_schedule_error_adds_instance_fault(self):
        instance = {'uuid': 'fake-uuid'}
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(db, 'instance_fault_create')
        self.mox.StubOutWithMock(notifier, 'notify')
        db.instance_update_and_get_original(self.context, instance['uuid'],
                                            mox.IgnoreArg()).AndReturn(
                                                (None, instance))
        db.instance_fault_create(self.context, mox.IgnoreArg())
        notifier.notify(self.context, mox.IgnoreArg(),
                        'scheduler.run_instance', notifier.ERROR,
                        mox.IgnoreArg())
        self.mox.ReplayAll()

        driver.handle_schedule_error(self.context,
                                     exception.NoValidHost('test'),
                                     instance['uuid'], {})