Example #1
    def test_is_single_unspecified(self):
        requests = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(network_id='123')])
        self.assertFalse(requests.is_single_unspecified)
        requests = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(),
                     objects.NetworkRequest()])
        self.assertFalse(requests.is_single_unspecified)
        requests = objects.NetworkRequestList(
            objects=[objects.NetworkRequest()])
        self.assertTrue(requests.is_single_unspecified)
Example #2
    def build_instances(self, context, instances, image, filter_properties,
            admin_password, injected_files, requested_networks,
            security_groups, block_device_mapping=None, legacy_bdm=True):
        # TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version
        #                 2.0 of the RPC API.
        request_spec = scheduler_utils.build_request_spec(context, image,
                                                          instances)
        scheduler_utils.setup_instance_group(context, request_spec,
                                             filter_properties)
        # TODO(danms): Remove this in version 2.0 of the RPC API
        if (requested_networks and
                not isinstance(requested_networks,
                               objects.NetworkRequestList)):
            requested_networks = objects.NetworkRequestList(
                objects=[objects.NetworkRequest.from_tuple(t)
                         for t in requested_networks])

        try:
            # check retry policy. Rather ugly use of instances[0]...
            # but if we've exceeded max retries... then we really only
            # have a single instance.
            scheduler_utils.populate_retry(filter_properties,
                instances[0].uuid)
            hosts = self.scheduler_client.select_destinations(context,
                    request_spec, filter_properties)
        except Exception as exc:
            for instance in instances:
                scheduler_driver.handle_schedule_error(context, exc,
                        instance.uuid, request_spec)
            return

        for (instance, host) in itertools.izip(instances, hosts):
            try:
                instance.refresh()
            except (exception.InstanceNotFound,
                    exception.InstanceInfoCacheNotFound):
                LOG.debug('Instance deleted during build', instance=instance)
                continue
            LOG.info(_("Choosing host %(host)s "
                       "for instance %(instance_uuid)s"),
                     {'host': host,
                      'instance_uuid': instance['uuid']})
            local_filter_props = copy.deepcopy(filter_properties)
            scheduler_utils.populate_filter_properties(local_filter_props,
                host)
            # The block_device_mapping passed from the api doesn't contain
            # instance specific information
            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                    context, instance.uuid)

            self.compute_rpcapi.build_and_run_instance(context,
                    instance=instance, host=host['host'], image=image,
                    request_spec=request_spec,
                    filter_properties=local_filter_props,
                    admin_password=admin_password,
                    injected_files=injected_files,
                    requested_networks=requested_networks,
                    security_groups=security_groups,
                    block_device_mapping=bdms, node=host['nodename'],
                    limits=local_filter_props.get('limits'))
Example #3
    def test_list_as_tuples(self, is_neutron):
        requests = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(network_id='123'),
                     objects.NetworkRequest(network_id='456')])
        self.assertEqual(
            [('123', None, None, None), ('456', None, None, None)],
            requests.as_tuples())
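Note: Examples #2 and #3 together fix the legacy tuple layout as
(network_id, address, port_id, pci_request_id). The following minimal sketch is
not part of the original listing; it only illustrates, assuming a Neutron-era
Nova where nova.objects is importable, how the from_tuple() and as_tuples()
calls used above round-trip that layout.

from nova import objects

objects.register_all()

# Legacy 4-tuples, matching the expectation asserted in Example #3:
# (network_id, address, port_id, pci_request_id).
legacy = [('123', None, None, None), ('456', None, None, None)]
requests = objects.NetworkRequestList(
    objects=[objects.NetworkRequest.from_tuple(t) for t in legacy])
# Assumption: with Neutron in use (as mocked in Example #3), as_tuples()
# returns the same 4-tuples.
assert requests.as_tuples() == legacy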
Example #4
    def allocate_port_for_instance(self,
                                   context,
                                   instance,
                                   port_id,
                                   network_id=None,
                                   requested_ip=None,
                                   pci_list=None):
        """Allocate a port for the instance."""
        if not network_id and not port_id:
            nets = self._get_available_networks(context, context.project_id)
            if len(nets) > 1:
                msg = _("Multiple possible networks found, use a Network "
                        "ID to be more specific.")
                raise exception.NetworkAmbiguous(msg)
        requested_networks = objects.NetworkRequestList(objects=[
            objects.NetworkRequest(network_id=network_id,
                                   address=requested_ip,
                                   port_id=port_id,
                                   pci_request_id=None)
        ])
        return self.allocate_for_instance(
            context,
            instance,
            requested_networks=requested_networks,
            pci_list=pci_list)
Example #5
    def _get_requested_networks(self, requested_networks):
        """Create a list of requested networks from the networks attribute."""
        networks = []
        network_uuids = []
        for network in requested_networks:
            request = objects.NetworkRequest()
            try:
                try:
                    request.port_id = network.get('port', None)
                except ValueError:
                    msg = _("Bad port format: port uuid is "
                            "not in proper format "
                            "(%s)") % network.get('port')
                    raise exc.HTTPBadRequest(explanation=msg)
                if request.port_id:
                    request.network_id = None
                    if not utils.is_neutron():
                        # port parameter is only for neutron v2.0
                        msg = _("Unknown argument : port")
                        raise exc.HTTPBadRequest(explanation=msg)
                else:
                    request.network_id = network['uuid']

                if (not request.port_id and not
                        uuidutils.is_uuid_like(request.network_id)):
                    br_uuid = request.network_id.split('-', 1)[-1]
                    if not uuidutils.is_uuid_like(br_uuid):
                        msg = _("Bad networks format: network uuid is "
                                "not in proper format "
                                "(%s)") % request.network_id
                        raise exc.HTTPBadRequest(explanation=msg)

                # The fixed IP address is optional. If it is not provided,
                # one of the available IP addresses from the network is used.
                try:
                    request.address = network.get('fixed_ip', None)
                except ValueError:
                    msg = (_("Invalid fixed IP address (%s)") %
                           network.get('fixed_ip'))
                    raise exc.HTTPBadRequest(explanation=msg)

                # duplicate networks are allowed only for neutron v2.0
                if (not utils.is_neutron() and request.network_id and
                        request.network_id in network_uuids):
                    expl = (_("Duplicate networks"
                              " (%s) are not allowed") %
                            request.network_id)
                    raise exc.HTTPBadRequest(explanation=expl)
                network_uuids.append(request.network_id)
                networks.append(request)
            except KeyError as key:
                expl = _('Bad network format: missing %s') % key
                raise exc.HTTPBadRequest(explanation=expl)
            except TypeError:
                expl = _('Bad networks format')
                raise exc.HTTPBadRequest(explanation=expl)

        return objects.NetworkRequestList(objects=networks)
Example #6
    def test_auto_allocate(self):
        # no objects
        requests = objects.NetworkRequestList()
        self.assertFalse(requests.auto_allocate)
        # single object with network uuid
        requests = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(network_id=FAKE_UUID)])
        self.assertFalse(requests.auto_allocate)
        # multiple objects
        requests = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(),
                     objects.NetworkRequest()])
        self.assertFalse(requests.auto_allocate)
        # single object, 'auto' case
        requests = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(
                network_id=network_request.NETWORK_ID_AUTO)])
        self.assertTrue(requests.auto_allocate)
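Note: the following sketch is not part of the original listing; it simply
mirrors the 'auto' case asserted in Example #6 (assuming nova.objects is
importable and registered) to show how a request list is marked as
auto-allocating its network.

from nova import objects
from nova.objects import network_request

objects.register_all()

# A single request whose network_id is the NETWORK_ID_AUTO sentinel makes
# auto_allocate True; a concrete network_id, an empty list, or multiple
# requests do not, as the assertions in Example #6 show.
requests = objects.NetworkRequestList(objects=[
    objects.NetworkRequest(network_id=network_request.NETWORK_ID_AUTO)])
assert requests.auto_allocate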
Example #7
    def test_deallocate_for_instance_auto_allocate(self, mock_rpc_dealloc):
        # Tests that we pass requested_networks=None to the RPC API when
        # we're auto-allocating.
        instance = fake_instance.fake_instance_obj(self.context)
        req_net = objects.NetworkRequest(
            network_id=net_req_obj.NETWORK_ID_AUTO)
        requested_networks = objects.NetworkRequestList(objects=[req_net])
        self.network_api.deallocate_for_instance(
            self.context, instance, requested_networks)
        mock_rpc_dealloc.assert_called_once_with(self.context,
                                                 instance=instance,
                                                 requested_networks=None)
Example #8
    def test_routed_networks_filter_with_requested_net(self,
                                                       mock_get_aggs_network):
        req_net = objects.NetworkRequest(network_id=uuids.net1)
        reqspec = objects.RequestSpec(
            requested_networks=objects.NetworkRequestList(objects=[req_net]))
        mock_get_aggs_network.return_value = [uuids.agg1]

        self.assertTrue(
            request_filter.routed_networks_filter(self.context, reqspec))
        self.assertEqual([uuids.agg1],
                         reqspec.requested_destination.aggregates)
        mock_get_aggs_network.assert_called_once_with(self.context, mock.ANY,
                                                      mock.ANY, uuids.net1)
Example #9
    def allocate_port_for_instance(self,
                                   context,
                                   instance,
                                   port_id,
                                   network_id=None,
                                   requested_ip=None):
        """Allocate a port for the instance."""
        requested_networks = objects.NetworkRequestList(objects=[
            objects.NetworkRequest(network_id=network_id,
                                   address=requested_ip,
                                   port_id=port_id,
                                   pci_request_id=None)
        ])
        return self.allocate_for_instance(
            context, instance, requested_networks=requested_networks)
Example #10
    def test_routed_networks_filter_with_two_requested_nets(
        self, mock_get_aggs_network
    ):
        req_net1 = objects.NetworkRequest(network_id=uuids.net1)
        req_net2 = objects.NetworkRequest(network_id=uuids.net2)
        reqspec = objects.RequestSpec(
            requested_networks=objects.NetworkRequestList(
                objects=[req_net1, req_net2]))
        mock_get_aggs_network.side_effect = ([uuids.agg1, uuids.agg2],
                                             [uuids.agg3])

        self.assertTrue(request_filter.routed_networks_filter(
                        self.context, reqspec))
        # require_aggregates() has specific semantics here: multiple aggregates
        # provided in the same call have their UUIDs joined.
        self.assertEqual([','.join([uuids.agg1, uuids.agg2]), uuids.agg3],
                         reqspec.requested_destination.aggregates)
        mock_get_aggs_network.assert_has_calls([
            mock.call(self.context, mock.ANY, mock.ANY, uuids.net1),
            mock.call(self.context, mock.ANY, mock.ANY, uuids.net2)])
Example #11
File: servers.py Project: xagent003/nova
    def _get_requested_networks(self, requested_networks):
        """Create a list of requested networks from the networks attribute."""
        networks = []
        network_uuids = []
        for network in requested_networks:
            request = objects.NetworkRequest()
            try:
                # The fixed IP address is optional. If it is not provided,
                # one of the available IP addresses from the network is used.
                request.address = network.get('fixed_ip', None)
                request.port_id = network.get('port', None)

                if request.port_id:
                    request.network_id = None
                    if not utils.is_neutron():
                        # port parameter is only for neutron v2.0
                        msg = _("Unknown argument: port")
                        raise exc.HTTPBadRequest(explanation=msg)
                    if request.address is not None:
                        msg = _("Specified Fixed IP '%(addr)s' cannot be used "
                                "with port '%(port)s': port already has "
                                "a Fixed IP allocated.") % {
                                    "addr": request.address,
                                    "port": request.port_id}
                        raise exc.HTTPBadRequest(explanation=msg)
                else:
                    request.network_id = network['uuid']
                    self._validate_network_id(
                        request.network_id, network_uuids)
                    network_uuids.append(request.network_id)

                networks.append(request)
            except KeyError as key:
                expl = _('Bad network format: missing %s') % key
                raise exc.HTTPBadRequest(explanation=expl)
            except TypeError:
                expl = _('Bad networks format')
                raise exc.HTTPBadRequest(explanation=expl)

        return objects.NetworkRequestList(objects=networks)
Example #12
    def test_routed_networks_filter_with_requested_port_deferred(
        self, mock_show_port, mock_get_aggs_network
    ):
        req_net = objects.NetworkRequest(port_id=uuids.port1)
        reqspec = objects.RequestSpec(
            requested_networks=objects.NetworkRequestList(objects=[req_net]))
        # The port was created with a deferred allocation so for the moment,
        # it's not bound to a specific segment.
        mock_show_port.return_value = {
            'port': {
                'fixed_ips': [],
                'network_id': uuids.net1}}
        mock_get_aggs_network.return_value = [uuids.agg1]

        self.assertTrue(request_filter.routed_networks_filter(
                        self.context, reqspec))
        self.assertEqual([uuids.agg1],
                         reqspec.requested_destination.aggregates)
        mock_show_port.assert_called_once_with(self.context, uuids.port1)
        mock_get_aggs_network.assert_called_once_with(
            self.context, mock.ANY, mock.ANY, uuids.net1)
Example #13
    def test_routed_networks_filter_with_requested_port_immediate(
        self, mock_show_port, mock_get_aggs_subnet
    ):
        req_net = objects.NetworkRequest(port_id=uuids.port1)
        reqspec = objects.RequestSpec(
            requested_networks=objects.NetworkRequestList(objects=[req_net]))
        # Check whether the port was already bound to a segment
        mock_show_port.return_value = {
            'port': {
                'fixed_ips': [
                    {'subnet_id': uuids.subnet1}]}}
        mock_get_aggs_subnet.return_value = [uuids.agg1]

        self.assertTrue(request_filter.routed_networks_filter(
                        self.context, reqspec))
        self.assertEqual([uuids.agg1],
                         reqspec.requested_destination.aggregates)
        mock_show_port.assert_called_once_with(self.context, uuids.port1)
        mock_get_aggs_subnet.assert_called_once_with(
            self.context, mock.ANY, mock.ANY, uuids.subnet1)
Example #14
    def build_instances(self,
                        context,
                        instances,
                        image,
                        filter_properties,
                        admin_password,
                        injected_files,
                        requested_networks,
                        security_groups,
                        block_device_mapping=None,
                        legacy_bdm=True):
        # TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version
        #                 2.0 of the RPC API.
        request_spec = scheduler_utils.build_request_spec(
            context, image, instances)
        # TODO(danms): Remove this in version 2.0 of the RPC API
        if (requested_networks and not isinstance(requested_networks,
                                                  objects.NetworkRequestList)):
            requested_networks = objects.NetworkRequestList(objects=[
                objects.NetworkRequest.from_tuple(t)
                for t in requested_networks
            ])
        # TODO(melwitt): Remove this in version 2.0 of the RPC API
        flavor = filter_properties.get('instance_type')
        if flavor and not isinstance(flavor, objects.Flavor):
            # Code downstream may expect extra_specs to be populated since it
            # is receiving an object, so lookup the flavor to ensure this.
            flavor = objects.Flavor.get_by_id(context, flavor['id'])
            filter_properties = dict(filter_properties, instance_type=flavor)

        try:
            scheduler_utils.setup_instance_group(context, request_spec,
                                                 filter_properties)
            # check retry policy. Rather ugly use of instances[0]...
            # but if we've exceeded max retries... then we really only
            # have a single instance.
            scheduler_utils.populate_retry(filter_properties,
                                           instances[0].uuid)
            hosts = self.scheduler_client.select_destinations(
                context, request_spec, filter_properties)
        except Exception as exc:
            updates = {'vm_state': vm_states.ERROR, 'task_state': None}
            for instance in instances:
                self._set_vm_state_and_notify(context, instance.uuid,
                                              'build_instances', updates, exc,
                                              request_spec)
            return

        for (instance, host) in itertools.izip(instances, hosts):
            try:
                instance.refresh()
            except (exception.InstanceNotFound,
                    exception.InstanceInfoCacheNotFound):
                LOG.debug('Instance deleted during build', instance=instance)
                continue
            local_filter_props = copy.deepcopy(filter_properties)
            scheduler_utils.populate_filter_properties(local_filter_props,
                                                       host)
            # The block_device_mapping passed from the api doesn't contain
            # instance specific information
            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance.uuid)

            self.compute_rpcapi.build_and_run_instance(
                context,
                instance=instance,
                host=host['host'],
                image=image,
                request_spec=request_spec,
                filter_properties=local_filter_props,
                admin_password=admin_password,
                injected_files=injected_files,
                requested_networks=requested_networks,
                security_groups=security_groups,
                block_device_mapping=bdms,
                node=host['nodename'],
                limits=host['limits'])
Example #15
    def reschedule(self, context, instance):
        """Rescheduler the given instance."""

        # If the reschedule rebuilds the instance, it may stay in a REBUILD*
        # task state for a long time, or simply get stuck there. A second
        # reschedule request can be triggered while the VM is in ERROR or the
        # host is faulty, so ignore this kind of duplicated request.
        if instance['task_state'] in (task_states.REBUILDING,
                                      task_states.REBUILD_BLOCK_DEVICE_MAPPING,
                                      task_states.REBUILD_SPAWNING,
                                      task_states.DELETING):
            LOG.warning(_('instance task_state is %s, ignore this request'),
                        instance['task_state'],
                        instance=instance)
            return

        LOG.info('reschedule instance', instance=instance)

        orig_image_ref = instance['image_ref'] or ''

        current_instance_type = flavors.extract_flavor(instance)

        # Ignore current host
        filter_properties = {'ignore_hosts': []}
        if not CONF.allow_reschedule_to_same_host:
            filter_properties['ignore_hosts'].append(instance['host'])

        image_ref = instance.image_ref
        image = compute_utils.get_image_metadata(context, self.image_api,
                                                 image_ref, instance)

        request_spec = scheduler_utils.build_request_spec(
            context, image, [instance], instance_type=current_instance_type)

        # Get scheduler_hint info
        inst_extra = objects.HuaweiInstanceExtra.get_by_instance_uuid(
            context, instance.uuid)
        injected_files = self.db.injected_files_get_by_instance_uuid(
            context, instance.uuid)
        request_networks = []
        if inst_extra:
            scheduler_hints = jsonutils.loads(inst_extra.scheduler_hints
                                              or '{}')
            request_networks = jsonutils.loads(inst_extra.request_network
                                               or '[]')
        else:
            scheduler_hints = {}
        pci_requests = objects.InstancePCIRequests.\
            get_by_instance_uuid_and_newness(
                context, instance['uuid'], False)
        if pci_requests:
            filter_properties['pci_requests'] = pci_requests
        filter_properties['scheduler_hints'] = scheduler_hints

        LOG.info("reschedule filter_properties %s",
                 filter_properties,
                 instance=instance)

        self._record_action_start(context, instance, hw_actions.RESCHEDULE)
        try:
            hosts = self._select_destinations(context, instance, request_spec,
                                              filter_properties)
            host_state = hosts[0]['host']
            LOG.info("HA selected host %s", host_state, instance=instance)
        except exception.NoValidHost as ex:
            LOG.warning(_("No valid host found"), instance=instance)

            if instance['host']:
                self._try_local_reboot(context, instance, 'HARD')

            return

        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)
        LOG.info("instance bdms %s",
                 jsonutils.to_primitive(bdms),
                 instance=instance)

        scheduler_utils.populate_filter_properties(filter_properties, hosts[0])

        def _get_network_info(nw_info):
            pci_req_id = None
            if len(nw_info) > 3:
                pci_req_id = nw_info[3]
            return (nw_info[0], nw_info[1], nw_info[2], pci_req_id)

        request_networks = [_get_network_info(n) for n in request_networks]

        alive_dict = {'alive': False, 'fault': False, 'count': 0}

        def async_check_live():
            try:
                self.compute_rpcapi.check_alive(context, host_state,
                                                'nova-api')
                alive_dict['alive'] = True
            except Exception as e:
                LOG.error(_LE('check alive fault, host %s, %s'), host_state, e)
                alive_dict['fault'] = True

        def _loop_check():
            if alive_dict['fault']:
                raise loopingcall.LoopingCallDone()

            if alive_dict['alive']:
                LOG.debug('compute service alive, host %s',
                          host_state,
                          instance=instance)
                raise loopingcall.LoopingCallDone()

            if alive_dict['count'] == 120:
                LOG.debug('check alive timeout, host %s',
                          host_state,
                          instance=instance)
                raise loopingcall.LoopingCallDone()

            alive_dict['count'] += 1

        # Clear the resource of instance on the source host
        if instance['host'] and self.judge_branch(context, instance,
                                                  request_networks):
            instance.task_state = task_states.REBUILDING
            instance.save()

            # In some extreme cases RPC messages can pile up on the HA
            # destination host. To avoid that, the build_and_run_instance RPC
            # was changed from an async 'cast' to a sync 'call'; since the
            # outer request must not block, the RPC is invoked asynchronously.
            def _async_reschedule():
                # check dest compute service is alive
                utils.spawn_n(async_check_live)
                timer = loopingcall.FixedIntervalLoopingCall(_loop_check)
                timer.start(interval=1).wait()

                if not alive_dict['alive']:
                    LOG.warn(
                        '%s compute service seems down, revert instance '
                        'task state',
                        host_state,
                        instance=instance)
                    instance.task_state = None
                    instance.save()
                    return

                LOG.info('reschedule instance to host %s',
                         host_state,
                         instance=instance)
                try:
                    self.compute_rpcapi.sync_reschedule_instance(
                        context,
                        instance=instance,
                        new_pass=None,
                        injected_files=jsonutils.loads(injected_files),
                        image_ref=image_ref,
                        orig_image_ref=orig_image_ref,
                        orig_sys_metadata=None,
                        bdms=bdms,
                        host=host_state,
                        filter_properties=filter_properties)
                except Exception as e:
                    LOG.error(_LE('reschedule call failed: %s'), e)
                    self.db.instance_update(context,
                                            instance.uuid,
                                            task_state=None)

            utils.spawn_n(_async_reschedule)
        else:
            security_groups = self.db.security_group_get_by_instance(
                context, instance.uuid)
            block_device_mapping = \
                self.db.block_device_mapping_get_all_by_instance(
                    context, instance.uuid)
            request_spec.update({
                'block_device_mapping': block_device_mapping,
                'security_group': security_groups
            })

            # TODO(): Remove this in version 2.0 of the RPC API
            if (request_networks and not isinstance(
                    request_networks, objects.NetworkRequestList)):
                request_networks = objects.NetworkRequestList(objects=[
                    objects.NetworkRequest.from_tuple(t)
                    for t in request_networks
                ])

            # In some extreme cases RPC messages can pile up on the HA
            # destination host. To avoid that, the build_and_run_instance RPC
            # was changed from an async 'cast' to a sync 'call'; since the
            # outer request must not block, the RPC is invoked asynchronously.
            def _async_build_and_run_instance():
                # check dest compute service is alive
                utils.spawn_n(async_check_live)
                timer = loopingcall.FixedIntervalLoopingCall(_loop_check)
                timer.start(interval=1).wait()

                if not alive_dict['alive']:
                    LOG.warn(
                        '%s compute service seems down, revert instance '
                        'task state',
                        host_state,
                        instance=instance)
                    instance.task_state = None
                    instance.save()
                    return

                LOG.info('build instance on host %s',
                         host_state,
                         instance=instance)
                self.compute_rpcapi.sync_build_and_run_instance(
                    context,
                    instance=instance,
                    host=host_state,
                    image=image,
                    request_spec=request_spec,
                    filter_properties=filter_properties,
                    admin_password=None,
                    injected_files=jsonutils.loads(injected_files),
                    requested_networks=request_networks,
                    security_groups=security_groups,
                    block_device_mapping=bdms,
                    node=host_state,
                    limits=hosts[0]['limits'])

            utils.spawn_n(_async_build_and_run_instance)
Example #16
    def build_instances(self,
                        context,
                        instances,
                        image,
                        filter_properties,
                        admin_password,
                        injected_files,
                        requested_networks,
                        security_groups,
                        block_device_mapping=None,
                        legacy_bdm=True):
        # TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version
        #                 2.0 of the RPC API.
        request_spec = scheduler_utils.build_request_spec(
            context, image, instances)
        # NOTE(sbauza): filter_properties['hints'] can be None
        hints = filter_properties.get('scheduler_hints', {}) or {}
        group_hint = hints.get('group')
        group_hosts = filter_properties.get('group_hosts')
        group_info = scheduler_utils.setup_instance_group(
            context, group_hint, group_hosts)
        if isinstance(group_info, tuple):
            filter_properties['group_updated'] = True
            (filter_properties['group_hosts'],
             filter_properties['group_policies']) = group_info
        # TODO(danms): Remove this in version 2.0 of the RPC API
        if (requested_networks and not isinstance(requested_networks,
                                                  objects.NetworkRequestList)):
            requested_networks = objects.NetworkRequestList(objects=[
                objects.NetworkRequest.from_tuple(t)
                for t in requested_networks
            ])

        try:
            # check retry policy. Rather ugly use of instances[0]...
            # but if we've exceeded max retries... then we really only
            # have a single instance.
            # (luzhq) Validate the retry policy.
            # Update the retry properties in filter_properties. If this is a
            # retried deployment, also check whether the current retry count
            # exceeds the maximum. Note that instances[0] is used here because,
            # on a retry, only a single instance is being rescheduled.
            scheduler_utils.populate_retry(filter_properties,
                                           instances[0].uuid)
            hosts = self.scheduler_client.select_destinations(
                context, request_spec, filter_properties)
        except Exception as exc:
            for instance in instances:
                scheduler_driver.handle_schedule_error(context, exc,
                                                       instance.uuid,
                                                       request_spec)
            return

        for (instance, host) in itertools.izip(instances, hosts):
            try:
                instance.refresh()
            except (exception.InstanceNotFound,
                    exception.InstanceInfoCacheNotFound):
                LOG.debug('Instance deleted during build', instance=instance)
                continue
            local_filter_props = copy.deepcopy(filter_properties)
            scheduler_utils.populate_filter_properties(local_filter_props,
                                                       host)
            # The block_device_mapping passed from the api doesn't contain
            # instance specific information
            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance.uuid)

            # (luzhq) self.compute_rpcapi = compute_rpcapi.ComputeAPI()
            self.compute_rpcapi.build_and_run_instance(
                context,
                instance=instance,
                host=host['host'],
                image=image,
                request_spec=request_spec,
                filter_properties=local_filter_props,
                admin_password=admin_password,
                injected_files=injected_files,
                requested_networks=requested_networks,
                security_groups=security_groups,
                block_device_mapping=bdms,
                node=host['nodename'],
                limits=host['limits'])
Example #17
    def build_instances(self,
                        context,
                        instances,
                        image,
                        filter_properties,
                        admin_password,
                        injected_files,
                        requested_networks,
                        security_groups,
                        block_device_mapping=None,
                        legacy_bdm=True):
        # TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version
        #                 2.0 of the RPC API.
        # TODO(danms): Remove this in version 2.0 of the RPC API
        if (requested_networks and not isinstance(requested_networks,
                                                  objects.NetworkRequestList)):
            requested_networks = objects.NetworkRequestList(objects=[
                objects.NetworkRequest.from_tuple(t)
                for t in requested_networks
            ])
        # TODO(melwitt): Remove this in version 2.0 of the RPC API
        flavor = filter_properties.get('instance_type')
        if flavor and not isinstance(flavor, objects.Flavor):
            # Code downstream may expect extra_specs to be populated since it
            # is receiving an object, so lookup the flavor to ensure this.
            flavor = objects.Flavor.get_by_id(context, flavor['id'])
            filter_properties = dict(filter_properties, instance_type=flavor)

        request_spec = {}
        try:
            # check retry policy. Rather ugly use of instances[0]...
            # but if we've exceeded max retries... then we really only
            # have a single instance.
            scheduler_utils.populate_retry(filter_properties,
                                           instances[0].uuid)
            request_spec = scheduler_utils.build_request_spec(
                context, image, instances)
            hosts = self._schedule_instances(context, request_spec,
                                             filter_properties)
        except Exception as exc:
            updates = {'vm_state': vm_states.ERROR, 'task_state': None}
            for instance in instances:
                self._set_vm_state_and_notify(context, instance.uuid,
                                              'build_instances', updates, exc,
                                              request_spec)
                self._cleanup_allocated_networks(context, instance,
                                                 requested_networks)
            return

        host_hypervisor = ''
        hosts_info = []
        reselect_flag = self.need_select_image(request_spec)
        if reselect_flag:
            # Normal users need elevated privileges to query the database.
            elevated = context.elevated()
            hosts_info = db.compute_node_get_all(elevated)
            LOG.debug("hosts_info: {0}".format(hosts_info))

        for (instance, host) in six.moves.zip(instances, hosts):
            if reselect_flag:
                for hi in hosts_info:
                    if hi.get('service') and hi['service'].get(
                            'host') == host['host']:
                        host_hypervisor = hi.get('hypervisor_type')
                        LOG.debug(
                            'host_hypervisor: {0}'.format(host_hypervisor))
                        break

                image, instance, request_spec = self.select_image(
                    context, image, host_hypervisor, instance, request_spec,
                    filter_properties)
                LOG.debug("Final image: {0}".format(image.get('id')))

            try:
                instance.save()
                instance.refresh()
            except (exception.InstanceNotFound,
                    exception.InstanceInfoCacheNotFound):
                LOG.debug('Instance deleted during build', instance=instance)
                continue
            local_filter_props = copy.deepcopy(filter_properties)
            scheduler_utils.populate_filter_properties(local_filter_props,
                                                       host)
            # The block_device_mapping passed from the api doesn't contain
            # instance specific information
            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance.uuid)

            self.compute_rpcapi.build_and_run_instance(
                context,
                instance=instance,
                host=host['host'],
                image=image,
                request_spec=request_spec,
                filter_properties=local_filter_props,
                admin_password=admin_password,
                injected_files=injected_files,
                requested_networks=requested_networks,
                security_groups=security_groups,
                block_device_mapping=bdms,
                node=host['nodename'],
                limits=host['limits'])