Code example #1
    def _find_destination(self):
        # TODO(johngarbutt) this retry loop should be shared
        attempted_hosts = [self.source]
        request_spec = self._get_request_spec_for_select_destinations(
            attempted_hosts)

        host = None
        while host is None:
            self._check_not_over_max_retries(attempted_hosts)
            request_spec.ignore_hosts = attempted_hosts
            try:
                hoststate = self.scheduler_client.select_destinations(
                    self.context, request_spec, [self.instance.uuid])[0]
                host = hoststate['host']
            except messaging.RemoteError as ex:
                # TODO(ShaoHe Feng) There may be multiple schedulers, and if
                # the scheduling algorithm is round-robin, we can let another
                # scheduler try.
                # NOTE(ShaoHe Feng) There are several types of RemoteError,
                # such as NoSuchMethod and UnsupportedVersion; we can
                # distinguish them by ex.exc_type.
                raise exception.MigrationSchedulerRPCError(
                    reason=six.text_type(ex))
            try:
                self._check_compatible_with_source_hypervisor(host)
                self._call_livem_checks_on_host(host)
            except (exception.Invalid, exception.MigrationPreCheckError) as e:
                LOG.debug("Skipping host: %(host)s because: %(e)s",
                    {"host": host, "e": e})
                attempted_hosts.append(host)
                # The scheduler would have created allocations against the
                # selected destination host in Placement, so we need to remove
                # those before moving on.
                self._remove_host_allocations(host, hoststate['nodename'])
                host = None
        return host, hoststate['nodename']
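
Every variant on this page repeats the same select/validate/retry shape, which is exactly what the TODO(johngarbutt) comment flags. Below is a minimal, hypothetical sketch of what a shared helper could look like; the names _schedule_with_retries, select_host, and validate_host are illustrative only, not Nova APIs.

def _schedule_with_retries(select_host, validate_host, max_retries,
                           initial_attempts=()):
    # Generic shape of the loop above: keep asking the scheduler for a
    # candidate while ignoring every host already tried, and return the
    # first candidate that passes the pre-live-migration checks.
    # initial_attempts plays the role of [self.source] in the examples.
    attempted = list(initial_attempts)
    while True:
        if len(attempted) > max_retries:
            raise RuntimeError('exceeded %d scheduling retries' % max_retries)
        candidate = select_host(ignore_hosts=attempted)
        try:
            validate_host(candidate)
        except ValueError:
            # Stand-in for exception.Invalid / MigrationPreCheckError above.
            attempted.append(candidate)
            continue
        return candidate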
Code example #2
    def _find_destination(self):
        # TODO(johngarbutt) this retry loop should be shared
        attempted_hosts = [self.source]
        request_spec = self._get_request_spec_for_select_destinations(
            attempted_hosts)

        host = None
        while host is None:
            self._check_not_over_max_retries(attempted_hosts)
            request_spec.ignore_hosts = attempted_hosts
            try:
                selection_lists = self.query_client.select_destinations(
                    self.context,
                    request_spec, [self.instance.uuid],
                    return_objects=True,
                    return_alternates=False)
                # We only need the first item in the first list, as there is
                # only one instance, and we don't care about any alternates.
                selection = selection_lists[0][0]
                host = selection.service_host
            except messaging.RemoteError as ex:
                # TODO(ShaoHe Feng) There may be multiple schedulers, and if
                # the scheduling algorithm is round-robin, we can let another
                # scheduler try.
                # NOTE(ShaoHe Feng) There are several types of RemoteError,
                # such as NoSuchMethod and UnsupportedVersion; we can
                # distinguish them by ex.exc_type.
                raise exception.MigrationSchedulerRPCError(
                    reason=six.text_type(ex))

            scheduler_utils.fill_provider_mapping(request_spec, selection)

            provider_mapping = request_spec.get_request_group_mapping()

            if provider_mapping:
                # NOTE(gibi): this call might update the pci_requests of the
                # instance based on the destination host; if so, the change
                # will be persisted when post_live_migration_at_destination
                # runs.
                compute_utils.\
                    update_pci_request_spec_with_allocated_interface_name(
                        self.context, self.report_client, self.instance,
                        provider_mapping)
            try:
                self._check_compatible_with_source_hypervisor(host)
                self._call_livem_checks_on_host(host, provider_mapping)
            except (exception.Invalid, exception.MigrationPreCheckError) as e:
                LOG.debug("Skipping host: %(host)s because: %(e)s", {
                    "host": host,
                    "e": e
                })
                attempted_hosts.append(host)
                # The scheduler would have created allocations against the
                # selected destination host in Placement, so we need to remove
                # those before moving on.
                self._remove_host_allocations(selection.compute_node_uuid)
                host = None
        # TODO(artom) We should probably just return the whole selection object
        # at this point.
        return (selection.service_host, selection.nodename, selection.limits)
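
Note how this variant consumes the scheduler's newer return shape: with return_objects=True it receives a list of lists of Selection objects (hence selection_lists[0][0]) and reads attributes such as selection.service_host, whereas code examples #1 and #4 index into a list of host-state dicts (hoststate['host'], hoststate['nodename']). A hedged sketch of an adapter tolerating both shapes; the HostPick tuple and normalize_pick helper are hypothetical, not part of Nova.

import collections

# Hypothetical container for the three fields every variant ends up needing.
HostPick = collections.namedtuple('HostPick', ['host', 'nodename', 'limits'])


def normalize_pick(result):
    # Legacy shape: a dict like {'host': ..., 'nodename': ..., 'limits': ...}.
    if isinstance(result, dict):
        return HostPick(result['host'], result['nodename'],
                        result.get('limits'))
    # Selection-style object exposing service_host/nodename/limits attributes.
    return HostPick(result.service_host, result.nodename, result.limits)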
Code example #3
    def _find_destination(self):
        # TODO(johngarbutt) this retry loop should be shared
        attempted_hosts = [self.source]
        image = utils.get_image_from_system_metadata(
            self.instance.system_metadata)
        filter_properties = {'ignore_hosts': attempted_hosts}
        # TODO(sbauza): Remove that once setup_instance_group() accepts a
        # RequestSpec object
        request_spec = {'instance_properties': {'uuid': self.instance.uuid}}
        scheduler_utils.setup_instance_group(
            self.context, request_spec, filter_properties)
        if not self.request_spec:
            # NOTE(sbauza): We were unable to find an original RequestSpec
            # object - probably because the instance is old.
            # We need to mock one up the old way.
            request_spec = objects.RequestSpec.from_components(
                self.context, self.instance.uuid, image,
                self.instance.flavor, self.instance.numa_topology,
                self.instance.pci_requests,
                filter_properties, None, self.instance.availability_zone
            )
        else:
            request_spec = self.request_spec
            # NOTE(sbauza): Force_hosts/nodes needs to be reset
            # if we want to make sure that the next destination
            # is not forced to be the original host
            request_spec.reset_forced_destinations()

        host = None
        while host is None:
            self._check_not_over_max_retries(attempted_hosts)
            request_spec.ignore_hosts = attempted_hosts
            try:
                host = self.scheduler_client.select_destinations(self.context,
                        request_spec, [self.instance.uuid])[0]['host']
            except messaging.RemoteError as ex:
                # TODO(ShaoHe Feng) There may be multiple schedulers, and if
                # the scheduling algorithm is round-robin, we can let another
                # scheduler try.
                # NOTE(ShaoHe Feng) There are several types of RemoteError,
                # such as NoSuchMethod and UnsupportedVersion; we can
                # distinguish them by ex.exc_type.
                raise exception.MigrationSchedulerRPCError(
                    reason=six.text_type(ex))
            try:
                self._check_compatible_with_source_hypervisor(host)
                self._call_livem_checks_on_host(host)
            except (exception.Invalid, exception.MigrationPreCheckError) as e:
                LOG.debug("Skipping host: %(host)s because: %(e)s",
                    {"host": host, "e": e})
                attempted_hosts.append(host)
                host = None
        return host
Code example #4
File: live_migrate.py Project: fp314/for_openstack
    def _find_destination(self):
        # TODO(johngarbutt) this retry loop should be shared
        attempted_hosts = [self.source]
        image = utils.get_image_from_system_metadata(
            self.instance.system_metadata)
        filter_properties = {'ignore_hosts': attempted_hosts}
        if not self.request_spec:
            # NOTE(sbauza): We were unable to find an original RequestSpec
            # object - probably because the instance is old.
            # We need to mock one up the old way.
            request_spec = objects.RequestSpec.from_components(
                self.context, self.instance.uuid, image, self.instance.flavor,
                self.instance.numa_topology, self.instance.pci_requests,
                filter_properties, None, self.instance.availability_zone)
        else:
            request_spec = self.request_spec
            # NOTE(sbauza): Force_hosts/nodes needs to be reset
            # if we want to make sure that the next destination
            # is not forced to be the original host
            request_spec.reset_forced_destinations()
        scheduler_utils.setup_instance_group(self.context, request_spec)

        # We currently only support live migrating to hosts in the same
        # cell that the instance lives in, so we need to tell the scheduler
        # to limit the applicable hosts based on cell.
        cell_mapping = self._get_source_cell_mapping()
        LOG.debug('Requesting cell %(cell)s while live migrating',
                  {'cell': cell_mapping.identity},
                  instance=self.instance)
        if ('requested_destination' in request_spec
                and request_spec.requested_destination):
            request_spec.requested_destination.cell = cell_mapping
        else:
            request_spec.requested_destination = objects.Destination(
                cell=cell_mapping)

        request_spec.ensure_project_id(self.instance)
        host = None
        while host is None:
            self._check_not_over_max_retries(attempted_hosts)
            request_spec.ignore_hosts = attempted_hosts
            try:
                hoststate = self.scheduler_client.select_destinations(
                    self.context, request_spec, [self.instance.uuid])[0]
                host = hoststate['host']
            except messaging.RemoteError as ex:
                # TODO(ShaoHe Feng) There may be multiple schedulers, and if
                # the scheduling algorithm is round-robin, we can let another
                # scheduler try.
                # NOTE(ShaoHe Feng) There are several types of RemoteError,
                # such as NoSuchMethod and UnsupportedVersion; we can
                # distinguish them by ex.exc_type.
                raise exception.MigrationSchedulerRPCError(
                    reason=six.text_type(ex))
            try:
                self._check_compatible_with_source_hypervisor(host)
                self._call_livem_checks_on_host(host)
            except (exception.Invalid, exception.MigrationPreCheckError) as e:
                LOG.debug("Skipping host: %(host)s because: %(e)s", {
                    "host": host,
                    "e": e
                })
                attempted_hosts.append(host)
                # The scheduler would have created allocations against the
                # selected destination host in Placement, so we need to remove
                # those before moving on.
                self._remove_host_allocations(host, hoststate['nodename'])
                host = None
        return host
Code example #5
File: live_migrate.py Project: ychen2u/stx-nova
    def _find_destination(self):
        # TODO(johngarbutt) this retry loop should be shared
        attempted_hosts = [self.source]
        image = utils.get_image_from_system_metadata(
            self.instance.system_metadata)
        filter_properties = {'ignore_hosts': attempted_hosts}
        if not self.request_spec:
            # NOTE(sbauza): We were unable to find an original RequestSpec
            # object - probably because the instance is old.
            # We need to mock one up the old way.

            # WRS: these hints are needed by the vcpu filter
            hints = filter_properties.get('scheduler_hints', {})
            hints['task_state'] = self.instance.task_state or ""
            hints['host'] = self.instance.host or ""
            hints['node'] = self.instance.node or ""
            filter_properties['scheduler_hints'] = hints

            request_spec = objects.RequestSpec.from_components(
                self.context, self.instance.uuid, image, self.instance.flavor,
                self.instance.numa_topology, self.instance.pci_requests,
                filter_properties, None, self.instance.availability_zone)
        else:
            request_spec = self.request_spec
            # WRS: these hints are needed by the vcpu filter
            hints = dict()
            hints['task_state'] = [self.instance.task_state or ""]
            hints['host'] = [self.instance.host or ""]
            hints['node'] = [self.instance.node or ""]
            if (request_spec.obj_attr_is_set('scheduler_hints') and
                    request_spec.scheduler_hints):
                request_spec.scheduler_hints.update(hints)
            else:
                request_spec.scheduler_hints = hints

            # NOTE(sbauza): Force_hosts/nodes needs to be reset
            # if we want to make sure that the next destination
            # is not forced to be the original host
            request_spec.reset_forced_destinations()

            # WRS: The request_spec has stale flavor, so this field must be
            # updated. This occurs when we do a live-migration after a resize.
            request_spec.flavor = self.instance.flavor

            # WRS: The request_spec has stale instance_group information.
            # Update from db to get latest members and metadetails.
            if (hasattr(request_spec, 'instance_group') and
                    request_spec.instance_group):
                request_spec.instance_group = \
                    objects.InstanceGroup.get_by_instance_uuid(
                        self.context, self.instance.uuid)

                # WRS: add hosts to Server group host list for group members
                # that are migrating in progress
                metadetails = request_spec.instance_group['metadetails']
                is_best_effort = strutils.bool_from_string(
                    metadetails.get('wrs-sg:best_effort', 'False'))

                if ('anti-affinity' in request_spec.instance_group['policies']
                        and not is_best_effort):
                    group_members = request_spec.instance_group['members']

                    for member_uuid in group_members:
                        if member_uuid == self.instance.uuid:
                            continue
                        filters = {
                            'migration_type': 'live-migration',
                            'instance_uuid': member_uuid,
                            'status': ['queued', 'accepted', 'pre-migrating',
                                       'preparing', 'running'],
                        }
                        migrations = objects.MigrationList.get_by_filters(
                            self.context, filters)

                        for migration in migrations:
                            if (migration['source_compute'] not in
                                    request_spec.instance_group['hosts']):
                                request_spec.instance_group['hosts'].append(
                                    migration['source_compute'])
                            if (migration['dest_compute'] and
                                    migration['dest_compute'] not in
                                    request_spec.instance_group['hosts']):
                                request_spec.instance_group['hosts'].append(
                                    migration['dest_compute'])

        scheduler_utils.setup_instance_group(self.context, request_spec)

        # We currently only support live migrating to hosts in the same
        # cell that the instance lives in, so we need to tell the scheduler
        # to limit the applicable hosts based on cell.
        cell_mapping = self._get_source_cell_mapping()
        LOG.debug('Requesting cell %(cell)s while live migrating',
                  {'cell': cell_mapping.identity},
                  instance=self.instance)
        if ('requested_destination' in request_spec
                and request_spec.requested_destination):
            request_spec.requested_destination.cell = cell_mapping
        else:
            request_spec.requested_destination = objects.Destination(
                cell=cell_mapping)

        request_spec.ensure_project_id(self.instance)

        # WRS: determine offline cpus due to scaling to be used to calculate
        # placement service resource claim in scheduler
        request_spec.offline_cpus = scheduler_utils.determine_offline_cpus(
            self.instance.flavor, self.instance.numa_topology)
        host = limits = None
        migration_error = {}
        while host is None:
            self._check_not_over_max_retries(attempted_hosts)
            request_spec.ignore_hosts = attempted_hosts
            try:
                # WRS: determine if instance is volume backed and update
                # request spec to avoid allocating local disk resources.
                request_spec_copy = request_spec
                if self.instance.is_volume_backed():
                    LOG.debug('Requesting zero root disk for '
                              'boot-from-volume instance')
                    # Clone this so we don't mutate the RequestSpec that was
                    # passed in
                    request_spec_copy = request_spec.obj_clone()
                    request_spec_copy.flavor.root_gb = 0
                hoststate = self.scheduler_client.select_destinations(
                    self.context, request_spec_copy, [self.instance.uuid])[0]
                host = hoststate['host']
                limits = hoststate['limits']
            except messaging.RemoteError as ex:
                # TODO(ShaoHe Feng) There may be multiple schedulers, and if
                # the scheduling algorithm is round-robin, we can let another
                # scheduler try.
                # NOTE(ShaoHe Feng) There are several types of RemoteError,
                # such as NoSuchMethod and UnsupportedVersion; we can
                # distinguish them by ex.exc_type.
                raise exception.MigrationSchedulerRPCError(
                    reason=six.text_type(ex))
            except exception.NoValidHost as ex:
                if migration_error:
                    # remove duplicated and superfluous info from exception
                    msg = "%s" % ex.message
                    msg = msg.replace("No valid host was found.", "")
                    msg = msg.replace("No filter information", "")
                    fp = {'reject_map': migration_error}
                    scheduler_utils.NoValidHost_extend(fp, reason=msg)
                else:
                    raise
            try:
                self._check_compatible_with_source_hypervisor(host)
                # NOTE(ndipanov): We don't need to pass the node as it's not
                # relevant for drivers that support live migration
                self._call_livem_checks_on_host(host, limits=limits)
            except (exception.Invalid, exception.MigrationPreCheckError) as e:
                # WRS: Change this from 'debug' log to 'info', we need this.
                LOG.info("Skipping host: %(host)s because: %(e)s", {
                    "host": host,
                    "e": e
                })
                migration_error[host] = "%s" % e.message
                attempted_hosts.append(host)
                # The scheduler would have created allocations against the
                # selected destination host in Placement, so we need to remove
                # those before moving on.
                self._remove_host_allocations(host, hoststate['nodename'],
                                              request_spec)
                host = limits = None
        return host, limits
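
The volume-backed branch in this variant clones the RequestSpec before zeroing flavor.root_gb, so the caller's spec is never mutated. A tiny self-contained illustration of why the clone matters; FakeSpec is a stand-in class, not the Nova object.

import copy


class FakeSpec(object):
    # Stand-in for RequestSpec; obj_clone() mirrors the real method's name.
    def __init__(self, root_gb):
        self.root_gb = root_gb

    def obj_clone(self):
        return copy.deepcopy(self)


spec = FakeSpec(root_gb=40)
scheduling_view = spec.obj_clone()  # as in the volume-backed branch above
scheduling_view.root_gb = 0         # tell the scheduler: no local root disk
assert spec.root_gb == 40           # the original spec is untouched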