def schedule_run_instance(self, context, request_spec,
                              admin_password, injected_files,
                              requested_networks, is_first_time,
                              filter_properties):
        """This method is called from nova.compute.api to provision
        an instance.  We first create a build plan (a list of WeightedHosts)
        and then provision.

        Returns a list of the instances created.
        """
        payload = dict(request_spec=request_spec)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.start', notifier.INFO, payload)

        instance_uuids = request_spec.get('instance_uuids')
        num_instances = len(instance_uuids)
        LOG.debug(_("Attempting to build %(num_instances)d instance(s)") %
                locals())

        weighed_hosts = self._schedule(context, request_spec,
                filter_properties, instance_uuids)

        # NOTE: Pop instance_uuids as individual creates do not need the
        # set of uuids. Do not pop before here as the upper exception
        # handler for NoValidHost needs the uuid to set error state
        instance_uuids = request_spec.pop('instance_uuids')

        # NOTE(comstud): Make sure we do not pass this through.  It
        # contains an instance of RpcContext that cannot be serialized.
        filter_properties.pop('context', None)

        for num, instance_uuid in enumerate(instance_uuids):
            request_spec['instance_properties']['launch_index'] = num

            try:
                try:
                    weighed_host = weighed_hosts.pop(0)
                except IndexError:
                    raise exception.NoValidHost(reason="")

                self._provision_resource(context, weighed_host,
                                         request_spec,
                                         filter_properties,
                                         requested_networks,
                                         injected_files, admin_password,
                                         is_first_time,
                                         instance_uuid=instance_uuid)
            except Exception as ex:
                # NOTE(vish): we don't reraise the exception here to make sure
                #             that all instances in the request get set to
                #             error properly
                driver.handle_schedule_error(context, ex, instance_uuid,
                                             request_spec)
            # scrub retry host list in case we're scheduling multiple
            # instances:
            retry = filter_properties.get('retry', {})
            retry['hosts'] = []

        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.end', notifier.INFO, payload)
Example #2
 def schedule_run_instance(self, context, request_spec,
                           admin_password, injected_files,
                           requested_networks, is_first_time,
                           filter_properties):
     """Create and run an instance or instances"""
     instance_uuids = request_spec.get('instance_uuids')
     for num, instance_uuid in enumerate(instance_uuids):
         request_spec['instance_properties']['launch_index'] = num
         try:
             host = self._schedule(context, FLAGS.compute_topic,
                                   request_spec, filter_properties)
             updated_instance = driver.instance_update_db(context,
                     instance_uuid)
             self.compute_rpcapi.run_instance(context,
                     instance=updated_instance, host=host,
                     requested_networks=requested_networks,
                     injected_files=injected_files,
                     admin_password=admin_password,
                     is_first_time=is_first_time,
                     request_spec=request_spec,
                     filter_properties=filter_properties)
         except Exception as ex:
             # NOTE(vish): we don't reraise the exception here to make sure
             #             that all instances in the request get set to
             #             error properly
             driver.handle_schedule_error(context, ex, instance_uuid,
                                          request_spec)
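For context, a rough sketch of the request_spec and filter_properties shapes these methods work against. Only the keys shown ('instance_uuids', 'instance_properties', 'launch_index', 'scheduler_hints', 'retry', 'hosts') are taken from the examples; every value is invented for illustration.

# Illustrative only: the uuids below are made up; the keys are the ones the
# schedulers above read, pop and rewrite.
request_spec = {
    'instance_uuids': ['aaaa-1111', 'bbbb-2222'],  # popped before the per-instance loop
    'instance_properties': {'launch_index': 0},    # rewritten for each instance
}
filter_properties = {
    'scheduler_hints': {},
    'retry': {'hosts': []},                        # scrubbed between instances
}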
Example #3
    def build_instances(self, context, instances, image, filter_properties,
            admin_password, injected_files, requested_networks,
            security_groups, block_device_mapping=None, legacy_bdm=True):
        # TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version
        #                 2.0 of the RPC API.
        request_spec = scheduler_utils.build_request_spec(context, image,
                                                          instances)
        scheduler_utils.setup_instance_group(context, request_spec,
                                             filter_properties)
        # TODO(danms): Remove this in version 2.0 of the RPC API
        if (requested_networks and
                not isinstance(requested_networks,
                               objects.NetworkRequestList)):
            requested_networks = objects.NetworkRequestList(
                objects=[objects.NetworkRequest.from_tuple(t)
                         for t in requested_networks])

        try:
            # check retry policy. Rather ugly use of instances[0]...
            # but if we've exceeded max retries... then we really only
            # have a single instance.
            scheduler_utils.populate_retry(filter_properties,
                instances[0].uuid)
            hosts = self.scheduler_client.select_destinations(context,
                    request_spec, filter_properties)
        except Exception as exc:
            for instance in instances:
                scheduler_driver.handle_schedule_error(context, exc,
                        instance.uuid, request_spec)
            return

        for (instance, host) in itertools.izip(instances, hosts):
            try:
                instance.refresh()
            except (exception.InstanceNotFound,
                    exception.InstanceInfoCacheNotFound):
                LOG.debug('Instance deleted during build', instance=instance)
                continue
            LOG.info(_("Choosing host %(host)s "
                       "for instance %(instance_uuid)s"),
                     {'host': host,
                      'instance_uuid': instance['uuid']})
            local_filter_props = copy.deepcopy(filter_properties)
            scheduler_utils.populate_filter_properties(local_filter_props,
                host)
            # The block_device_mapping passed from the api doesn't contain
            # instance specific information
            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                    context, instance.uuid)

            self.compute_rpcapi.build_and_run_instance(context,
                    instance=instance, host=host['host'], image=image,
                    request_spec=request_spec,
                    filter_properties=local_filter_props,
                    admin_password=admin_password,
                    injected_files=injected_files,
                    requested_networks=requested_networks,
                    security_groups=security_groups,
                    block_device_mapping=bdms, node=host['nodename'],
                    limits=local_filter_props.get('limits'))
 def schedule_run_instance(self, context, request_spec,
                       admin_password, injected_files,
                       requested_networks, is_first_time,
                       filter_properties, legacy_bdm_in_spec):
     """Create and run an instance or instances."""
     instance_uuids = request_spec.get('instance_uuids')
     for num, instance_uuid in enumerate(instance_uuids):
         request_spec['instance_properties']['launch_index'] = num
         try:
             #LOG.info("jach:context = %(context)s" % {'context': context.__dict__})
             #LOG.info("jach:request_spec = %(request_spec)s" % locals())
             #LOG.info("jach:filter_properties = %(filter_properties)s" % locals())
             
             host = self._schedule(context, CONF.compute_topic,
                 request_spec, filter_properties)
             updated_instance = driver.instance_update_db(context,
                                    instance_uuid)
             self.compute_rpcapi.run_instance(context,
                 instance=updated_instance, host=host,
                 requested_networks=requested_networks,
                 injected_files=injected_files,
                 admin_password=admin_password,
                 is_first_time=is_first_time,
                 request_spec=request_spec,
                 filter_properties=filter_properties,
                 legacy_bdm_in_spec=legacy_bdm_in_spec)
         except Exception as ex:
             # NOTE(vish): we don't reraise the exception here to make sure
             #             that all instances in the request get set to
             #             error properly
             driver.handle_schedule_error(context, ex, instance_uuid,
                                          request_spec)
Example #5
 def schedule_run_instance(self, context, request_spec, admin_password,
                           injected_files, requested_networks,
                           is_first_time, filter_properties):
     """Create and run an instance or instances."""
     instance_uuids = request_spec.get('instance_uuids')
     for num, instance_uuid in enumerate(instance_uuids):
         request_spec['instance_properties']['launch_index'] = num
         try:
             host = self._schedule(context, CONF.compute_topic,
                                   request_spec, filter_properties)
             updated_instance = driver.instance_update_db(
                 context, instance_uuid)
             self.compute_rpcapi.run_instance(
                 context,
                 instance=updated_instance,
                 host=host,
                 requested_networks=requested_networks,
                 injected_files=injected_files,
                 admin_password=admin_password,
                 is_first_time=is_first_time,
                 request_spec=request_spec,
                 filter_properties=filter_properties)
         except Exception as ex:
             # NOTE(vish): we don't reraise the exception here to make sure
             #             that all instances in the request get set to
             #             error properly
             driver.handle_schedule_error(context, ex, instance_uuid,
                                          request_spec)
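The host-manager variants above call driver.instance_update_db right before casting run_instance to the chosen compute node, so the compute service picks up a freshly stamped instance record. A rough sketch of that idea follows; the module paths and the exact fields written are assumptions, not something shown in the examples.

from nova import db
from nova.openstack.common import timeutils


def instance_update_db(context, instance_uuid, extra_values=None):
    # Assumption: clear any stale host and record when scheduling happened;
    # the real helper may write different or additional fields.
    values = {'host': None, 'scheduled_at': timeutils.utcnow()}
    if extra_values:
        values.update(extra_values)
    return db.instance_update(context, instance_uuid, values)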
Example #6
    def schedule_run_instance(self, context, request_spec, admin_password,
                              injected_files, requested_networks,
                              is_first_time, filter_properties):
        """This method is called from nova.compute.api to provision
        an instance.  We first create a build plan (a list of WeightedHosts)
        and then provision.

        Returns a list of the instances created.
        """
        elevated = context.elevated()
        instance_uuids = request_spec.get('instance_uuids')
        num_instances = len(instance_uuids)
        LOG.debug(
            _("Attempting to build %(num_instances)d instance(s)") % locals())

        payload = dict(request_spec=request_spec)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.start', notifier.INFO, payload)

        weighted_hosts = self._schedule(context, "compute", request_spec,
                                        filter_properties, instance_uuids)

        # NOTE(comstud): Make sure we do not pass this through.  It
        # contains an instance of RpcContext that cannot be serialized.
        filter_properties.pop('context', None)

        for num, instance_uuid in enumerate(instance_uuids):
            request_spec['instance_properties']['launch_index'] = num

            try:
                try:
                    weighted_host = weighted_hosts.pop(0)
                except IndexError:
                    raise exception.NoValidHost(reason="")

                self._provision_resource(elevated,
                                         weighted_host,
                                         request_spec,
                                         filter_properties,
                                         requested_networks,
                                         injected_files,
                                         admin_password,
                                         is_first_time,
                                         instance_uuid=instance_uuid)
            except Exception as ex:
                # NOTE(vish): we don't reraise the exception here to make sure
                #             that all instances in the request get set to
                #             error properly
                driver.handle_schedule_error(context, ex, instance_uuid,
                                             request_spec)
            # scrub retry host list in case we're scheduling multiple
            # instances:
            retry = filter_properties.get('retry', {})
            retry['hosts'] = []

        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.end', notifier.INFO, payload)
Example #7
    def build_instances(self, context, instances, image, filter_properties,
            admin_password, injected_files, requested_networks,
            security_groups, block_device_mapping=None, legacy_bdm=True):
        # TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version
        #                 2.0 of the RPC API.
        request_spec = scheduler_utils.build_request_spec(context, image,
                                                          instances)
        scheduler_utils.setup_instance_group(context, request_spec,
                                             filter_properties)
        # TODO(danms): Remove this in version 2.0 of the RPC API
        if (requested_networks and
                not isinstance(requested_networks,
                               objects.NetworkRequestList)):
            requested_networks = objects.NetworkRequestList(
                objects=[objects.NetworkRequest.from_tuple(t)
                         for t in requested_networks])

        try:
            # check retry policy. Rather ugly use of instances[0]...
            # but if we've exceeded max retries... then we really only
            # have a single instance.
            scheduler_utils.populate_retry(filter_properties,
                instances[0].uuid)
            hosts = self.scheduler_client.select_destinations(context,
                    request_spec, filter_properties)
        except Exception as exc:
            for instance in instances:
                scheduler_driver.handle_schedule_error(context, exc,
                        instance.uuid, request_spec)
            return

        for (instance, host) in itertools.izip(instances, hosts):
            try:
                instance.refresh()
            except (exception.InstanceNotFound,
                    exception.InstanceInfoCacheNotFound):
                LOG.debug('Instance deleted during build', instance=instance)
                continue
            local_filter_props = copy.deepcopy(filter_properties)
            scheduler_utils.populate_filter_properties(local_filter_props,
                host)
            # The block_device_mapping passed from the api doesn't contain
            # instance specific information
            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                    context, instance.uuid)

            self.compute_rpcapi.build_and_run_instance(context,
                    instance=instance, host=host['host'], image=image,
                    request_spec=request_spec,
                    filter_properties=local_filter_props,
                    admin_password=admin_password,
                    injected_files=injected_files,
                    requested_networks=requested_networks,
                    security_groups=security_groups,
                    block_device_mapping=bdms, node=host['nodename'],
                    limits=host['limits'])
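The build_instances variants index each destination returned by select_destinations as a plain dict. Below is a minimal sketch of that shape; only the keys ('host', 'nodename', 'limits') come from the code above, the values are invented. Note also that itertools.izip pairs instances and hosts positionally and stops at the shorter list, so an instance without a matching host is simply never cast to a compute node.

# Hypothetical destination entry; the keys match how build_instances above
# dereferences each selected host.
selected_host = {
    'host': 'compute-01',
    'nodename': 'compute-01.localdomain',
    'limits': {'memory_mb': 16384, 'disk_gb': 200},
}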
Example #8
    def test_handle_schedule_error_adds_instance_fault(self):
        instance = {"uuid": "fake-uuid"}
        self.mox.StubOutWithMock(db, "instance_update_and_get_original")
        self.mox.StubOutWithMock(db, "instance_fault_create")
        self.mox.StubOutWithMock(notifier, "notify")
        db.instance_update_and_get_original(self.context, instance["uuid"], mox.IgnoreArg()).AndReturn((None, instance))
        db.instance_fault_create(self.context, mox.IgnoreArg())
        notifier.notify(self.context, mox.IgnoreArg(), "scheduler.run_instance", notifier.ERROR, mox.IgnoreArg())
        self.mox.ReplayAll()

        driver.handle_schedule_error(self.context, exception.NoValidHost("test"), instance["uuid"], {})
Example #9
    def test_handle_schedule_error_adds_instance_fault(self):
        instance = {"uuid": "fake-uuid"}
        self.mox.StubOutWithMock(db, "instance_update_and_get_original")
        self.mox.StubOutWithMock(db, "instance_fault_create")
        db.instance_update_and_get_original(self.context, instance["uuid"], mox.IgnoreArg()).AndReturn((None, instance))
        db.instance_fault_create(self.context, mox.IgnoreArg())
        self.mox.StubOutWithMock(rpc, "get_notifier")
        notifier = self.mox.CreateMockAnything()
        rpc.get_notifier("conductor", CONF.host).AndReturn(notifier)
        rpc.get_notifier("scheduler").AndReturn(notifier)
        notifier.error(self.context, "scheduler.run_instance", mox.IgnoreArg())
        self.mox.ReplayAll()

        driver.handle_schedule_error(self.context, exception.NoValidHost("test"), instance["uuid"], {})
Example #10
    def test_handle_schedule_error_adds_instance_fault(self):
        instance = {'uuid': 'fake-uuid'}
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(db, 'instance_fault_create')
        self.mox.StubOutWithMock(notifier, 'notify')
        db.instance_update_and_get_original(self.context, instance['uuid'],
                                            mox.IgnoreArg()).AndReturn(
                                                (None, instance))
        db.instance_fault_create(self.context, mox.IgnoreArg())
        notifier.notify(self.context, mox.IgnoreArg(),
                        'scheduler.run_instance', notifier.ERROR,
                        mox.IgnoreArg())
        self.mox.ReplayAll()

        driver.handle_schedule_error(self.context,
                                     exception.NoValidHost('test'),
                                     instance['uuid'], {})
Example #11
    def test_handle_schedule_error_adds_instance_fault(self):
        instance = {'uuid': 'fake-uuid'}
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(db, 'instance_fault_create')
        self.mox.StubOutWithMock(notifier, 'notify')
        db.instance_update_and_get_original(self.context, instance['uuid'],
                                            mox.IgnoreArg()).AndReturn(
                                                (None, instance))
        db.instance_fault_create(self.context, mox.IgnoreArg())
        notifier.notify(self.context, mox.IgnoreArg(),
                        'scheduler.run_instance',
                        notifier.ERROR, mox.IgnoreArg())
        self.mox.ReplayAll()

        driver.handle_schedule_error(self.context,
                                     exception.NoValidHost('test'),
                                     instance['uuid'], {})
Example #12
    def test_handle_schedule_error_adds_instance_fault(self):
        instance = {'uuid': 'fake-uuid'}
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(db, 'instance_fault_create')
        db.instance_update_and_get_original(self.context, instance['uuid'],
                                            mox.IgnoreArg()).AndReturn(
                                                (None, instance))
        db.instance_fault_create(self.context, mox.IgnoreArg()).AndReturn(
            test_instance_fault.fake_faults['fake-uuid'][0])
        self.mox.StubOutWithMock(rpc, 'get_notifier')
        notifier = self.mox.CreateMockAnything()
        rpc.get_notifier('scheduler').AndReturn(notifier)
        notifier.error(self.context, 'scheduler.run_instance', mox.IgnoreArg())
        self.mox.ReplayAll()

        driver.handle_schedule_error(self.context,
                                     exception.NoValidHost('test'),
                                     instance['uuid'], {})
Example #13
    def test_handle_schedule_error_adds_instance_fault(self):
        instance = {'uuid': 'fake-uuid'}
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(db, 'instance_fault_create')
        db.instance_update_and_get_original(self.context, instance['uuid'],
                                            mox.IgnoreArg()).AndReturn(
                                                (None, instance))
        db.instance_fault_create(self.context, mox.IgnoreArg()).AndReturn(
            test_instance_fault.fake_faults['fake-uuid'][0])
        self.mox.StubOutWithMock(rpc, 'get_notifier')
        notifier = self.mox.CreateMockAnything()
        rpc.get_notifier('scheduler').AndReturn(notifier)
        notifier.error(self.context, 'scheduler.run_instance', mox.IgnoreArg())
        self.mox.ReplayAll()

        driver.handle_schedule_error(self.context,
                                     exception.NoValidHost('test'),
                                     instance['uuid'], {})
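Taken together, the tests above pin down the contract of driver.handle_schedule_error: push the instance into an error state, persist an instance fault, and emit a 'scheduler.run_instance' error notification. A condensed sketch of that contract follows, written against the pre-oslo.messaging notifier used in the earlier tests; the module paths, the vm_states constant and the exact update/fault payloads are assumptions, and this is not the upstream implementation.

# Sketch of the behaviour the tests above assert, not the upstream source.
# Module paths follow the older Nova layout and are assumptions.
from nova.compute import vm_states
from nova import db
from nova.openstack.common.notifier import api as notifier


def handle_schedule_error(context, ex, instance_uuid, request_spec):
    """Set the instance to ERROR, record a fault and notify."""
    state = {'vm_state': vm_states.ERROR, 'task_state': None}
    db.instance_update_and_get_original(context, instance_uuid, state)
    db.instance_fault_create(context, {'instance_uuid': instance_uuid,
                                       'code': 500,
                                       'message': unicode(ex)})
    payload = dict(request_spec=request_spec,
                   instance_id=instance_uuid,
                   state=state,
                   reason=unicode(ex))
    notifier.notify(context, notifier.publisher_id('scheduler'),
                    'scheduler.run_instance', notifier.ERROR, payload)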
    def schedule_run_instance(self, context, request_spec, admin_password,
                              injected_files, requested_networks,
                              is_first_time, filter_properties,
                              legacy_bdm_in_spec):
        """Provisions instances that need to be scheduled.

        Applies filters and weighers to request properties to get a list of
        compute hosts and calls them to spawn instance(s).
        """
        payload = dict(request_spec=request_spec)
        self.notifier.info(context, 'scheduler.run_instance.start', payload)

        instance_uuids = request_spec.get('instance_uuids')
        LOG.info(
            _("Attempting to build %(num_instances)d instance(s) "
              "uuids: %(instance_uuids)s"), {
                  'num_instances': len(instance_uuids),
                  'instance_uuids': instance_uuids
              })
        LOG.debug("Request Spec: %s" % request_spec)

        # check retry policy.  Rather ugly use of instance_uuids[0]...
        # but if we've exceeded max retries... then we really only
        # have a single instance.
        scheduler_utils.populate_retry(filter_properties, instance_uuids[0])
        weighed_hosts = self._schedule(context, request_spec,
                                       filter_properties)

        # NOTE: Pop instance_uuids as individual creates do not need the
        # set of uuids. Do not pop before here as the upper exception
        # handler for NoValidHost needs the uuid to set error state
        instance_uuids = request_spec.pop('instance_uuids')

        # NOTE(comstud): Make sure we do not pass this through.  It
        # contains an instance of RpcContext that cannot be serialized.
        filter_properties.pop('context', None)

        for num, instance_uuid in enumerate(instance_uuids):
            request_spec['instance_properties']['launch_index'] = num

            try:
                try:
                    weighed_host = weighed_hosts.pop(0)
                    LOG.info(
                        _("Choosing host %(weighed_host)s "
                          "for instance %(instance_uuid)s"), {
                              'weighed_host': weighed_host,
                              'instance_uuid': instance_uuid
                          })
                except IndexError:
                    raise exception.NoValidHost(reason="")

                self._provision_resource(context,
                                         weighed_host,
                                         request_spec,
                                         filter_properties,
                                         requested_networks,
                                         injected_files,
                                         admin_password,
                                         is_first_time,
                                         instance_uuid=instance_uuid,
                                         legacy_bdm_in_spec=legacy_bdm_in_spec)
            except Exception as ex:
                # NOTE(vish): we don't reraise the exception here to make sure
                #             that all instances in the request get set to
                #             error properly
                driver.handle_schedule_error(context, ex, instance_uuid,
                                             request_spec)
            # scrub retry host list in case we're scheduling multiple
            # instances:
            retry = filter_properties.get('retry', {})
            retry['hosts'] = []

        self.notifier.info(context, 'scheduler.run_instance.end', payload)
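Several variants above call scheduler_utils.populate_retry before scheduling and then scrub retry['hosts'] between instances so one instance's list of already-tried hosts does not leak into the next one. Below is a rough sketch of the retry bookkeeping those comments describe; the 'num_attempts' key, the default limit and the exception raised on exhaustion are assumptions rather than something shown above.

from nova import exception  # assumed module path


def populate_retry(filter_properties, instance_uuid, max_attempts=3):
    # Track how many times scheduling has been attempted for this request and
    # give up once the limit is exceeded (NoValidHost here is an assumption).
    if max_attempts == 1:
        return  # retries disabled, nothing to track
    retry = filter_properties.setdefault('retry',
                                         {'num_attempts': 0, 'hosts': []})
    retry['num_attempts'] += 1
    if retry['num_attempts'] > max_attempts:
        raise exception.NoValidHost(
            reason="Exceeded max scheduling attempts %d for instance %s"
                   % (max_attempts, instance_uuid))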
    def schedule_run_instance(
        self,
        context,
        request_spec,
        admin_password,
        injected_files,
        requested_networks,
        is_first_time,
        filter_properties,
        legacy_bdm_in_spec,
    ):
        """This method is called from nova.compute.api to provision
        an instance.  We first create a build plan (a list of WeightedHosts)
        and then provision.

        Returns a list of the instances created.
        """
        payload = dict(request_spec=request_spec)
        self.notifier.info(context, "scheduler.run_instance.start", payload)

        instance_uuids = request_spec.get("instance_uuids")
        LOG.info(
            _("Attempting to build %(num_instances)d instance(s) " "uuids: %(instance_uuids)s"),
            {"num_instances": len(instance_uuids), "instance_uuids": instance_uuids},
        )
        LOG.debug(_("Request Spec: %s") % request_spec)

        weighed_hosts = self._schedule(context, request_spec, filter_properties, instance_uuids)

        # NOTE: Pop instance_uuids as individual creates do not need the
        # set of uuids. Do not pop before here as the upper exception
        # handler for NoValidHost needs the uuid to set error state
        instance_uuids = request_spec.pop("instance_uuids")

        # NOTE(comstud): Make sure we do not pass this through.  It
        # contains an instance of RpcContext that cannot be serialized.
        filter_properties.pop("context", None)

        for num, instance_uuid in enumerate(instance_uuids):
            request_spec["instance_properties"]["launch_index"] = num

            try:
                try:
                    weighed_host = weighed_hosts.pop(0)
                    LOG.info(
                        _("Choosing host %(weighed_host)s " "for instance %(instance_uuid)s"),
                        {"weighed_host": weighed_host, "instance_uuid": instance_uuid},
                    )
                except IndexError:
                    raise exception.NoValidHost(reason="")

                self._provision_resource(
                    context,
                    weighed_host,
                    request_spec,
                    filter_properties,
                    requested_networks,
                    injected_files,
                    admin_password,
                    is_first_time,
                    instance_uuid=instance_uuid,
                    legacy_bdm_in_spec=legacy_bdm_in_spec,
                )
            except Exception as ex:
                # NOTE(vish): we don't reraise the exception here to make sure
                #             that all instances in the request get set to
                #             error properly
                driver.handle_schedule_error(context, ex, instance_uuid, request_spec)
            # scrub retry host list in case we're scheduling multiple
            # instances:
            retry = filter_properties.get("retry", {})
            retry["hosts"] = []

        self.notifier.info(context, "scheduler.run_instance.end", payload)
    def schedule_run_instance(self, context, request_spec,
                              admin_password, injected_files,
                              requested_networks, is_first_time,
                              filter_properties, legacy_bdm_in_spec):
        
        payload = dict(request_spec=request_spec)
        self.notifier.info(context, 'scheduler.run_instance.start', payload)

        instance_uuids = request_spec.get('instance_uuids')
        LOG.info(_("Attempting to build %(num_instances)d instance(s) "
                    "uuids: %(instance_uuids)s"),
                  {'num_instances': len(instance_uuids),
                   'instance_uuids': instance_uuids})
        LOG.debug(_("Request Spec: %s") % request_spec)
        weighed_hosts = self._schedule(context, request_spec,
                                       filter_properties, instance_uuids)

        # EPA_QA part start
        # No Multitenancy algorithm
        if request_spec['instance_properties']['workload_type'] and \
                        filter_properties.has_key('pci_requests') and  weighed_hosts:
            selected_hosts = [ host.obj.nodename for host in  weighed_hosts]

            tenant_id = filter_properties['project_id']

            pci_requests = filter_properties['pci_requests']
            nmt = NoMultiTenancy() 
            node_cck_dict = nmt.execute_no_multitenancy(selected_hosts, tenant_id)
            node_cck_values = node_cck_dict.values()
            node_cck_list = []

            for i in range(len(node_cck_values)):
                for j in range(len(node_cck_values[i])):
                    node_cck_list.append(node_cck_values[i][j])
            cns = ComputeNodeSelection(weighed_hosts, pci_requests)
            if node_cck_list:
                # Compute node selection
                weighed_hosts = cns.execute_compute_node_selection(node_cck_dict)
            else:
                raise exception.NoValidHost(reason="Other tenancy found on all cave creeks")

        # EPA QA part end

        # NOTE: Pop instance_uuids as individual creates do not need the
        # set of uuids. Do not pop before here as the upper exception
        # handler for NoValidHost needs the uuid to set error state
        instance_uuids = request_spec.pop('instance_uuids')

        # NOTE(comstud): Make sure we do not pass this through.  It
        # contains an instance of RpcContext that cannot be serialized.
        filter_properties.pop('context', None)

        for num, instance_uuid in enumerate(instance_uuids):
            request_spec['instance_properties']['launch_index'] = num

            try:
                try:
                    weighed_host = weighed_hosts.pop(0)
                    # VF allocation algorithm
                    if request_spec['instance_properties']['policy'] and request_spec['instance_properties']['workload_type'] \
                                               and filter_properties.has_key('pci_requests') and weighed_host:
                        weighed_host.obj.limits['selected_vfs'] = {}
                        req_vf = self.get_requested_number_of_vf_from_pci_requests(filter_properties['pci_requests'])
                        los =  request_spec['instance_properties']['policy']
                        req_work_load_type = request_spec['instance_properties']['workload_type']
                        cck_list = node_cck_dict[weighed_host.obj.nodename]
                        address_list =  vf_allocation.execute_vf_allocation(req_vf, los, req_work_load_type, cck_list)
                        if address_list == []:
                            raise exception.NoValidHost(reason="No suitable vf's found for the instance")
                        else:
                            weighed_host.obj.limits['selected_vfs'][PCI_ALIAS_NAME] = address_list
                    LOG.info(_("Choosing host %(weighed_host)s "
                                "for instance %(instance_uuid)s"),
                              {'weighed_host': weighed_host,
                               'instance_uuid': instance_uuid})
                except IndexError:
                    raise exception.NoValidHost(reason="")
                except Exception as ex:
                    LOG.exception(ex)
                    raise
                
                self._provision_resource(context, weighed_host,
                                         request_spec,
                                         filter_properties,
                                         requested_networks,
                                         injected_files, admin_password,
                                         is_first_time,
                                         instance_uuid=instance_uuid,
                                         legacy_bdm_in_spec=legacy_bdm_in_spec)
            except Exception as ex:
                # NOTE(vish): we don't reraise the exception here to make sure
                #             that all instances in the request get set to
                #             error properly
                driver.handle_schedule_error(context, ex, instance_uuid,
                                             request_spec)
            # scrub retry host list in case we're scheduling multiple
            # instances:
            retry = filter_properties.get('retry', {})
            retry['hosts'] = []

        self.notifier.info(context, 'scheduler.run_instance.end', payload)
    def schedule_run_instance(self, context, request_spec,
                              admin_password, injected_files,
                              requested_networks, is_first_time,
                              filter_properties, legacy_bdm_in_spec):
        """This method is called from nova.compute.api to provision
        an instance.  We first create a build plan (a list of WeightedHosts)
        and then provision.

        Returns a list of the instances created.
        """
        payload = dict(request_spec=request_spec)
        self.notifier.info(context, 'scheduler.run_instance.start', payload)

        instance_uuids = request_spec.get('instance_uuids')
        LOG.info(_("Attempting to build %(num_instances)d instance(s) "
                    "uuids: %(instance_uuids)s"),
                  {'num_instances': len(instance_uuids),
                   'instance_uuids': instance_uuids})
        LOG.debug(_("Request Spec: %s") % request_spec)

        orig_filter_properties = copy.deepcopy(filter_properties)
        try:
            weighed_hosts = self._schedule(context, request_spec,
                                            filter_properties, instance_uuids)
        except solver_scheduler_exception.SolverFailed:
            if CONF.solver_scheduler.enable_fallback_scheduler:
                LOG.warn(_("Fallback scheduler used."))
                filter_properties = orig_filter_properties
                weighed_hosts = self.fallback_scheduler._schedule(context,
                            request_spec, filter_properties, instance_uuids)
            else:
                weighed_hosts = []

        # NOTE: Pop instance_uuids as individual creates do not need the
        # set of uuids. Do not pop before here as the upper exception
        # handler for NoValidHost needs the uuid to set error state
        instance_uuids = request_spec.pop('instance_uuids')

        # NOTE(comstud): Make sure we do not pass this through.  It
        # contains an instance of RpcContext that cannot be serialized.
        filter_properties.pop('context', None)

        for num, instance_uuid in enumerate(instance_uuids):
            request_spec['instance_properties']['launch_index'] = num

            try:
                try:
                    weighed_host = weighed_hosts.pop(0)
                    LOG.info(_("Choosing host %(weighed_host)s "
                                "for instance %(instance_uuid)s"),
                              {'weighed_host': weighed_host,
                               'instance_uuid': instance_uuid})
                except IndexError:
                    raise exception.NoValidHost(reason="")

                self._provision_resource(context, weighed_host,
                                         request_spec,
                                         filter_properties,
                                         requested_networks,
                                         injected_files, admin_password,
                                         is_first_time,
                                         instance_uuid=instance_uuid,
                                         legacy_bdm_in_spec=legacy_bdm_in_spec)
            except Exception as ex:
                # NOTE(vish): we don't reraise the exception here to make sure
                #             that all instances in the request get set to
                #             error properly
                driver.handle_schedule_error(context, ex, instance_uuid,
                                             request_spec)
            # scrub retry host list in case we're scheduling multiple
            # instances:
            retry = filter_properties.get('retry', {})
            retry['hosts'] = []

        self.notifier.info(context, 'scheduler.run_instance.end', payload)
    def build_instances(self,
                        context,
                        instances,
                        image,
                        filter_properties,
                        admin_password,
                        injected_files,
                        requested_networks,
                        security_groups,
                        block_device_mapping=None,
                        legacy_bdm=True):
        # TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version
        #                 2.0 of the RPC API.
        request_spec = scheduler_utils.build_request_spec(
            context, image, instances)
        try:
            # check retry policy. Rather ugly use of instances[0]...
            # but if we've exceeded max retries... then we really only
            # have a single instance.
            scheduler_utils.populate_retry(filter_properties,
                                           instances[0].uuid)
            hosts = self.scheduler_rpcapi.select_destinations(
                context, request_spec, filter_properties)
        except Exception as exc:
            for instance in instances:
                scheduler_driver.handle_schedule_error(context, exc,
                                                       instance.uuid,
                                                       request_spec)
            return

        for (instance, host) in itertools.izip(instances, hosts):
            try:
                instance.refresh()
            except (exception.InstanceNotFound,
                    exception.InstanceInfoCacheNotFound):
                LOG.debug('Instance deleted during build', instance=instance)
                continue
            local_filter_props = copy.deepcopy(filter_properties)
            scheduler_utils.populate_filter_properties(local_filter_props,
                                                       host)
            LOG.debug("Conductor/manager: local_filter_props: %s",
                      local_filter_props)
            # The block_device_mapping passed from the api doesn't contain
            # instance specific information
            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance.uuid)

            self.compute_rpcapi.build_and_run_instance(
                context,
                instance=instance,
                host=host['host'],
                image=image,
                request_spec=request_spec,
                filter_properties=local_filter_props,
                admin_password=admin_password,
                injected_files=injected_files,
                requested_networks=requested_networks,
                security_groups=security_groups,
                block_device_mapping=bdms,
                node=host['nodename'],
                limits=host['limits'])
Example #19
    def schedule_run_instance(self, context, request_spec,
                              admin_password, injected_files,
                              requested_networks, is_first_time,
                              filter_properties, legacy_bdm_in_spec):
        """Provisions instances that need to be scheduled.

        Applies filters and weighers to request properties to get a list of
        compute hosts and calls them to spawn instance(s).
        """
        payload = dict(request_spec=request_spec)
        self.notifier.info(context, 'scheduler.run_instance.start', payload)

        instance_uuids = request_spec.get('instance_uuids')
        LOG.info(_("Attempting to build %(num_instances)d instance(s) "
                    "uuids: %(instance_uuids)s"),
                  {'num_instances': len(instance_uuids),
                   'instance_uuids': instance_uuids})
        LOG.debug("Request Spec: %s" % request_spec)

        # check retry policy.  Rather ugly use of instance_uuids[0]...
        # but if we've exceeded max retries... then we really only
        # have a single instance.
        scheduler_utils.populate_retry(filter_properties,
                                       instance_uuids[0])
        weighed_hosts = self._schedule(context, request_spec,
                                       filter_properties)

        # NOTE: Pop instance_uuids as individual creates do not need the
        # set of uuids. Do not pop before here as the upper exception
        # handler for NoValidHost needs the uuid to set error state
        instance_uuids = request_spec.pop('instance_uuids')

        # NOTE(comstud): Make sure we do not pass this through.  It
        # contains an instance of RpcContext that cannot be serialized.
        filter_properties.pop('context', None)

        for num, instance_uuid in enumerate(instance_uuids):
            request_spec['instance_properties']['launch_index'] = num

            try:
                try:
                    weighed_host = weighed_hosts.pop(0)
                    LOG.info(_("Choosing host %(weighed_host)s "
                                "for instance %(instance_uuid)s"),
                              {'weighed_host': weighed_host,
                               'instance_uuid': instance_uuid})
                except IndexError:
                    raise exception.NoValidHost(reason="")

                self._provision_resource(context, weighed_host,
                                         request_spec,
                                         filter_properties,
                                         requested_networks,
                                         injected_files, admin_password,
                                         is_first_time,
                                         instance_uuid=instance_uuid,
                                         legacy_bdm_in_spec=legacy_bdm_in_spec)
            except Exception as ex:
                # NOTE(vish): we don't reraise the exception here to make sure
                #             that all instances in the request get set to
                #             error properly
                driver.handle_schedule_error(context, ex, instance_uuid,
                                             request_spec)
            # scrub retry host list in case we're scheduling multiple
            # instances:
            retry = filter_properties.get('retry', {})
            retry['hosts'] = []

        self.notifier.info(context, 'scheduler.run_instance.end', payload)
    def build_instances(self,
                        context,
                        instances,
                        image,
                        filter_properties,
                        admin_password,
                        injected_files,
                        requested_networks,
                        security_groups,
                        block_device_mapping=None,
                        legacy_bdm=True):
        # TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version
        #                 2.0 of the RPC API.
        request_spec = scheduler_utils.build_request_spec(
            context, image, instances)
        # NOTE(sbauza): filter_properties['hints'] can be None
        hints = filter_properties.get('scheduler_hints', {}) or {}
        group_hint = hints.get('group')
        group_hosts = filter_properties.get('group_hosts')
        group_info = scheduler_utils.setup_instance_group(
            context, group_hint, group_hosts)
        if isinstance(group_info, tuple):
            filter_properties['group_updated'] = True
            (filter_properties['group_hosts'],
             filter_properties['group_policies']) = group_info
        # TODO(danms): Remove this in version 2.0 of the RPC API
        if (requested_networks and not isinstance(requested_networks,
                                                  objects.NetworkRequestList)):
            requested_networks = objects.NetworkRequestList(objects=[
                objects.NetworkRequest.from_tuple(t)
                for t in requested_networks
            ])

        try:
            # check retry policy. Rather ugly use of instances[0]...
            # but if we've exceeded max retries... then we really only
            # have a single instance.
            # (luzhq) Validate the retry policy: update the retry properties
            # in filter_properties, and if this is a retried deployment also
            # check whether the retry count has exceeded the maximum. Note
            # that instances[0] is used here because a retry only ever
            # involves a single instance.
            scheduler_utils.populate_retry(filter_properties,
                                           instances[0].uuid)
            hosts = self.scheduler_client.select_destinations(
                context, request_spec, filter_properties)
        except Exception as exc:
            for instance in instances:
                scheduler_driver.handle_schedule_error(context, exc,
                                                       instance.uuid,
                                                       request_spec)
            return

        for (instance, host) in itertools.izip(instances, hosts):
            try:
                instance.refresh()
            except (exception.InstanceNotFound,
                    exception.InstanceInfoCacheNotFound):
                LOG.debug('Instance deleted during build', instance=instance)
                continue
            local_filter_props = copy.deepcopy(filter_properties)
            scheduler_utils.populate_filter_properties(local_filter_props,
                                                       host)
            # The block_device_mapping passed from the api doesn't contain
            # instance specific information
            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance.uuid)

            # (luzhq) self.compute_rpcapi = compute_rpcapi.ComputeAPI()
            self.compute_rpcapi.build_and_run_instance(
                context,
                instance=instance,
                host=host['host'],
                image=image,
                request_spec=request_spec,
                filter_properties=local_filter_props,
                admin_password=admin_password,
                injected_files=injected_files,
                requested_networks=requested_networks,
                security_groups=security_groups,
                block_device_mapping=bdms,
                node=host['nodename'],
                limits=host['limits'])
Example #21
    def schedule_run_instance(self, context, request_spec, admin_password,
                              injected_files, requested_networks,
                              is_first_time, filter_properties,
                              legacy_bdm_in_spec):
        """This method is called from nova.compute.api to provision
        an instance.  We first create a build plan (a list of WeightedHosts)
        and then provision.

        Returns a list of the instances created.
        """
        payload = dict(request_spec=request_spec)
        self.notifier.info(context, 'scheduler.run_instance.start', payload)

        instance_uuids = request_spec.get('instance_uuids')
        LOG.info(
            _("Attempting to build %(num_instances)d instance(s) "
              "uuids: %(instance_uuids)s"), {
                  'num_instances': len(instance_uuids),
                  'instance_uuids': instance_uuids
              })
        LOG.debug(_("Request Spec: %s") % request_spec)

        weighed_hosts = self._schedule(context, request_spec,
                                       filter_properties, instance_uuids)

        # NOTE: Pop instance_uuids as individual creates do not need the
        # set of uuids. Do not pop before here as the upper exception
        # handler for NoValidHost needs the uuid to set error state
        instance_uuids = request_spec.pop('instance_uuids')

        # NOTE(comstud): Make sure we do not pass this through.  It
        # contains an instance of RpcContext that cannot be serialized.
        filter_properties.pop('context', None)

        for num, instance_uuid in enumerate(instance_uuids):
            request_spec['instance_properties']['launch_index'] = num

            try:
                try:
                    weighed_host = weighed_hosts.pop(0)
                    LOG.info(
                        _("Choosing host %(weighed_host)s "
                          "for instance %(instance_uuid)s"), {
                              'weighed_host': weighed_host,
                              'instance_uuid': instance_uuid
                          })
                except IndexError:
                    raise exception.NoValidHost(reason="")

                self._provision_resource(context,
                                         weighed_host,
                                         request_spec,
                                         filter_properties,
                                         requested_networks,
                                         injected_files,
                                         admin_password,
                                         is_first_time,
                                         instance_uuid=instance_uuid,
                                         legacy_bdm_in_spec=legacy_bdm_in_spec)
            except Exception as ex:
                # NOTE(vish): we don't reraise the exception here to make sure
                #             that all instances in the request get set to
                #             error properly
                driver.handle_schedule_error(context, ex, instance_uuid,
                                             request_spec)
            # scrub retry host list in case we're scheduling multiple
            # instances:
            retry = filter_properties.get('retry', {})
            retry['hosts'] = []

        self.notifier.info(context, 'scheduler.run_instance.end', payload)
Example #22
    def schedule_run_instance(
        self,
        context,
        request_spec,
        admin_password,
        injected_files,
        requested_networks,
        is_first_time,
        filter_properties,
    ):
        """This method is called from nova.compute.api to provision
        an instance.  We first create a build plan (a list of WeightedHosts)
        and then provision.

        Returns a list of the instances created.
        """
        elevated = context.elevated()
        instance_uuids = request_spec.get("instance_uuids")
        num_instances = len(instance_uuids)
        LOG.debug(_("Attempting to build %(num_instances)d instance(s)") % locals())

        payload = dict(request_spec=request_spec)
        notifier.notify(
            context, notifier.publisher_id("scheduler"), "scheduler.run_instance.start", notifier.INFO, payload
        )

        weighted_hosts = self._schedule(context, "compute", request_spec, filter_properties, instance_uuids)

        # NOTE(comstud): Make sure we do not pass this through.  It
        # contains an instance of RpcContext that cannot be serialized.
        filter_properties.pop("context", None)

        for num, instance_uuid in enumerate(instance_uuids):
            request_spec["instance_properties"]["launch_index"] = num

            try:
                try:
                    weighted_host = weighted_hosts.pop(0)
                except IndexError:
                    raise exception.NoValidHost(reason="")

                self._provision_resource(
                    elevated,
                    weighted_host,
                    request_spec,
                    filter_properties,
                    requested_networks,
                    injected_files,
                    admin_password,
                    is_first_time,
                    instance_uuid=instance_uuid,
                )
            except Exception as ex:
                # NOTE(vish): we don't reraise the exception here to make sure
                #             that all instances in the request get set to
                #             error properly
                driver.handle_schedule_error(context, ex, instance_uuid, request_spec)
            # scrub retry host list in case we're scheduling multiple
            # instances:
            retry = filter_properties.get("retry", {})
            retry["hosts"] = []

        notifier.notify(
            context, notifier.publisher_id("scheduler"), "scheduler.run_instance.end", notifier.INFO, payload
        )
Example #23
    def my_schedule_run_instance(self, context, request_spec,
                              admin_password, injected_files,
                              requested_networks, is_first_time,
                              filter_properties):
        ## die if no db connection
        if not self.db_ok:
            return None
        
        ## Same as in the original method
        payload = dict(request_spec=request_spec)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.start', notifier.INFO, payload)

        instance_uuids = request_spec.pop('instance_uuids')
        num_instances = len(instance_uuids)
        LOG.debug(_("Attempting to build %(num_instances)d instance(s)") %
                locals())
#        #query database for hosts 
#        if self.first_time:
#            cur = self.db.cursor()
#            cur.execute("select hypervisor_hostname from compute_nodes")
#            results = cur.fetchall()
#            hostnames = []
#            URIs = []
#            for result in results:
#                hostnames.append(result[0])
#                URIs.append("qemu+ssh://"+result[0]+"/system")
#            
#            self.cloud = hypervisor_info.HypervisorInfo(URIs, hostnames)
        
        ##
        ## Now get hosts
        elevated = context.elevated()
        
        hosts = self.host_manager.get_all_host_states_copy(elevated)
        if (self.first_time):
            # create those physicalHost objects from my model in order to user algorithm in the future
            hostnames = []
            URIs = []
            for host in hosts:
                hostnames.append(host.nodename)
                URIs.append("qemu+ssh://"+host.nodename+"/system")
            self.cloud = hypervisor_info.HypervisorInfo(URIs, hostnames)
        
        self.algorithm.hosts = self.cloud.hosts
        
        ## Now should get the first most loaded one
        ## Now need to determine the sort of vm from metadata
        image = request_spec.pop('image_ref')
        vm_type = self.images_meta[image]
        cur = self.db.cursor()
        cur.execute("select id_hostname from instances where uuid='%s'"
                    % instance_uuids[0])
        results = cur.fetchall()
        id = results[0]
        vm_name = "vm"+id[0]
        vm = vm_instance.VMInstance(vm_type, vm_name)
        
        selected_host = self.algorithm.first_fit(vm)
        success = False
        weighed_host = None
        for host in hosts:
            if host.nodename == selected_host.host_name:
                weighed_host = host
                success = True
        
        if not success:
           raise exception.NoValidHost(reason="")
        
        instance_uuid = instance_uuids[0]
        try:
            self._provision_resource(context, weighed_host,
                                         request_spec,
                                         filter_properties,
                                         requested_networks,
                                         injected_files, admin_password,
                                         is_first_time,
                                         instance_uuid=instance_uuid)
        except Exception as ex:
                # NOTE(vish): we don't reraise the exception here to make sure
                #             that all instances in the request get set to
                #             error properly
            driver.handle_schedule_error(context, ex, instance_uuid,
                                             request_spec)
        
        filter_properties.pop('context', None)

        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.end', notifier.INFO, payload)
    def run_scheduled_instance(self, context,
                                         request_spec,
                                         filter_properties,
                                         requested_networks,
                                         injected_files, admin_password,
                                         is_first_time,
                                         legacy_bdm_in_spec):
        # pdb.set_trace()
        print 'BBBBBBBBBBBBBBBBBBBBBBBBBBBBbb'

        scheduler_hints = filter_properties.get('scheduler_hints')
        # partner = DbAPI.partners_get_by_shortname(context, partner_shortname)
        partners = DbAPI.partners_get_all(context)

        if not partners:
            print "There is no partner!"
            return

        for partner in partners:
            print "Trying to send request to %s" % partner['shortname']
            partner_client = client.Client(partner['username'], partner['password'], 'demo', partner['auth_url'],
                                   service_type='compute', extensions=[
                                        Extension('scheduler_partner', scheduler_partner)
                                    ])

            data = {}
            data['flavor'] = request_spec['instance_type']['flavorid']
            data['num_instances'] = scheduler_hints['num_instances']

            result = partner_client.scheduler_partner.create(data)
            if not result or u'success' not in result:
                print "%s is now offline!" % partner['shortname']
                continue

            if result[u'success'] == 1:
                DbAPI.partners_update(context, partner['shortname'], {
                    'requested': result['points']
                })
                print result
                break

            print result

        return

        payload = dict(request_spec=request_spec)
        self.notifier.info(context, 'scheduler.run_instance.start', payload)

        instance_uuids = request_spec.get('instance_uuids')
        LOG.info(_("Attempting to build %(num_instances)d instance(s) "
                    "uuids: %(instance_uuids)s"),
                  {'num_instances': len(instance_uuids),
                   'instance_uuids': instance_uuids})
        LOG.debug(_("Request Spec: %s") % request_spec)

        weighed_hosts = self._schedule(context, request_spec,
                                       filter_properties, instance_uuids)

        # NOTE: Pop instance_uuids as individual creates do not need the
        # set of uuids. Do not pop before here as the upper exception
        # handler for NoValidHost needs the uuid to set error state
        instance_uuids = request_spec.pop('instance_uuids')

        # NOTE(comstud): Make sure we do not pass this through.  It
        # contains an instance of RpcContext that cannot be serialized.
        filter_properties.pop('context', None)

        for num, instance_uuid in enumerate(instance_uuids):
            request_spec['instance_properties']['launch_index'] = num

            try:
                try:
                    weighed_host = weighed_hosts.pop(0)
                    LOG.info(_("Choosing host %(weighed_host)s "
                                "for instance %(instance_uuid)s"),
                              {'weighed_host': weighed_host,
                               'instance_uuid': instance_uuid})
                except IndexError:
                    raise exception.NoValidHost(reason="")

                self._provision_resource(context, weighed_host,
                                         request_spec,
                                         filter_properties,
                                         requested_networks,
                                         injected_files, admin_password,
                                         is_first_time,
                                         instance_uuid=instance_uuid,
                                         legacy_bdm_in_spec=legacy_bdm_in_spec)
            except Exception as ex:
                # NOTE(vish): we don't reraise the exception here to make sure
                #             that all instances in the request get set to
                #             error properly
                driver.handle_schedule_error(context, ex, instance_uuid,
                                             request_spec)
            # scrub retry host list in case we're scheduling multiple
            # instances:
            retry = filter_properties.get('retry', {})
            retry['hosts'] = []

        self.notifier.info(context, 'scheduler.run_instance.end', payload)