def test_basic_schedule_run_instance(self):
    """Chance scheduler picks a random host per instance and casts run_instance.

    Records the expected mox call sequence for two instances: elevate the
    context, list hosts that are up, pick one via random.choice, update the
    instance DB row, then RPC-cast run_instance to the chosen host.
    """
    ctxt = context.RequestContext('fake', 'fake', False)
    ctxt_elevated = 'fake-context-elevated'
    # launch_index starts at -1 so the first WithSideEffects bump makes it 0.
    instance_opts = {'fake_opt1': 'meow', 'launch_index': -1}
    instance1 = {'uuid': 'fake-uuid1'}
    instance2 = {'uuid': 'fake-uuid2'}
    request_spec = {
        'instance_uuids': ['fake-uuid1', 'fake-uuid2'],
        'instance_properties': instance_opts
    }

    def inc_launch_index(*args):
        # Side effect attached to instance_update_db: mimics the scheduler
        # advancing launch_index once per scheduled instance.
        request_spec['instance_properties']['launch_index'] = (
            request_spec['instance_properties']['launch_index'] + 1)

    self.mox.StubOutWithMock(ctxt, 'elevated')
    self.mox.StubOutWithMock(self.driver, 'hosts_up')
    self.mox.StubOutWithMock(random, 'choice')
    self.mox.StubOutWithMock(driver, 'instance_update_db')
    self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, 'run_instance')
    ctxt.elevated().AndReturn(ctxt_elevated)
    # instance 1
    hosts_full = ['host1', 'host2', 'host3', 'host4']
    self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
    random.choice(hosts_full).AndReturn('host3')
    driver.instance_update_db(ctxt, instance1['uuid']).WithSideEffects(
        inc_launch_index).AndReturn(instance1)
    compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host3',
            instance=instance1, requested_networks=None,
            injected_files=None, admin_password=None, is_first_time=None,
            request_spec=request_spec, filter_properties={},
            legacy_bdm_in_spec=False)
    # instance 2
    ctxt.elevated().AndReturn(ctxt_elevated)
    self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
    random.choice(hosts_full).AndReturn('host1')
    driver.instance_update_db(ctxt, instance2['uuid']).WithSideEffects(
        inc_launch_index).AndReturn(instance2)
    compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host1',
            instance=instance2, requested_networks=None,
            injected_files=None, admin_password=None, is_first_time=None,
            request_spec=request_spec, filter_properties={},
            legacy_bdm_in_spec=False)
    self.mox.ReplayAll()
    # Drive the scheduler; mox verifies the recorded sequence on teardown.
    self.driver.schedule_run_instance(ctxt, request_spec, None, None,
            None, None, {}, False)
def test_basic_schedule_run_instance(self):
    """Chance scheduler picks a random host per instance and casts run_instance.

    Same scenario recorded twice (once per instance): elevate context,
    list up hosts, random.choice a host, update the DB row, RPC-cast.
    """
    ctxt = context.RequestContext("fake", "fake", False)
    ctxt_elevated = "fake-context-elevated"
    # launch_index starts at -1 so the first side-effect bump yields 0.
    instance_opts = {"fake_opt1": "meow", "launch_index": -1}
    instance1 = {"uuid": "fake-uuid1"}
    instance2 = {"uuid": "fake-uuid2"}
    request_spec = {"instance_uuids": ["fake-uuid1", "fake-uuid2"], "instance_properties": instance_opts}

    def inc_launch_index(*args):
        # Attached to instance_update_db: emulates the scheduler advancing
        # launch_index once per scheduled instance.
        request_spec["instance_properties"]["launch_index"] = (
            request_spec["instance_properties"]["launch_index"] + 1
        )

    self.mox.StubOutWithMock(ctxt, "elevated")
    self.mox.StubOutWithMock(self.driver, "hosts_up")
    self.mox.StubOutWithMock(random, "choice")
    self.mox.StubOutWithMock(driver, "instance_update_db")
    self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, "run_instance")
    ctxt.elevated().AndReturn(ctxt_elevated)
    # instance 1
    hosts_full = ["host1", "host2", "host3", "host4"]
    self.driver.hosts_up(ctxt_elevated, "compute").AndReturn(hosts_full)
    random.choice(hosts_full).AndReturn("host3")
    driver.instance_update_db(ctxt, instance1["uuid"]).WithSideEffects(inc_launch_index).AndReturn(instance1)
    compute_rpcapi.ComputeAPI.run_instance(
        ctxt,
        host="host3",
        instance=instance1,
        requested_networks=None,
        injected_files=None,
        admin_password=None,
        is_first_time=None,
        request_spec=request_spec,
        filter_properties={},
        legacy_bdm_in_spec=False,
    )
    # instance 2
    ctxt.elevated().AndReturn(ctxt_elevated)
    self.driver.hosts_up(ctxt_elevated, "compute").AndReturn(hosts_full)
    random.choice(hosts_full).AndReturn("host1")
    driver.instance_update_db(ctxt, instance2["uuid"]).WithSideEffects(inc_launch_index).AndReturn(instance2)
    compute_rpcapi.ComputeAPI.run_instance(
        ctxt,
        host="host1",
        instance=instance2,
        requested_networks=None,
        injected_files=None,
        admin_password=None,
        is_first_time=None,
        request_spec=request_spec,
        filter_properties={},
        legacy_bdm_in_spec=False,
    )
    self.mox.ReplayAll()
    # Drive the scheduler; mox verifies the recorded sequence on teardown.
    self.driver.schedule_run_instance(ctxt, request_spec, None, None, None, None, {}, False)
def test_basic_schedule_run_instance(self):
    """Chance scheduler schedules two instances and returns encoded results.

    This variant stubs random.random (the scheduler presumably maps the
    float onto the host list index -- confirm against _schedule) and
    expects encode_instance to be called on each updated instance.
    """
    ctxt = context.RequestContext('fake', 'fake', False)
    ctxt_elevated = 'fake-context-elevated'
    # NOTE(review): fake_args and reservations appear unused in this test;
    # likely leftovers from an earlier revision.
    fake_args = (1, 2, 3)
    instance_opts = {'fake_opt1': 'meow', 'launch_index': -1}
    instance1 = {'uuid': 'fake-uuid1'}
    instance2 = {'uuid': 'fake-uuid2'}
    request_spec = {'instance_uuids': ['fake-uuid1', 'fake-uuid2'],
                    'instance_properties': instance_opts}
    instance1_encoded = {'uuid': 'fake-uuid1', '_is_precooked': False}
    instance2_encoded = {'uuid': 'fake-uuid2', '_is_precooked': False}
    reservations = ['resv1', 'resv2']

    def inc_launch_index(*args):
        # Side effect on instance_update_db: advances launch_index once
        # per scheduled instance.
        request_spec['instance_properties']['launch_index'] = (
            request_spec['instance_properties']['launch_index'] + 1)

    self.mox.StubOutWithMock(ctxt, 'elevated')
    self.mox.StubOutWithMock(self.driver, 'hosts_up')
    self.mox.StubOutWithMock(random, 'random')
    self.mox.StubOutWithMock(driver, 'encode_instance')
    self.mox.StubOutWithMock(driver, 'instance_update_db')
    self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, 'run_instance')
    ctxt.elevated().AndReturn(ctxt_elevated)
    # instance 1
    self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(
        ['host1', 'host2', 'host3', 'host4'])
    random.random().AndReturn(.5)
    driver.instance_update_db(ctxt, instance1['uuid'],
            'host3').WithSideEffects(inc_launch_index).AndReturn(instance1)
    compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host3',
            instance=instance1, requested_networks=None,
            injected_files=None, admin_password=None, is_first_time=None,
            request_spec=request_spec, filter_properties={})
    driver.encode_instance(instance1).AndReturn(instance1_encoded)
    # instance 2
    ctxt.elevated().AndReturn(ctxt_elevated)
    self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(
        ['host1', 'host2', 'host3', 'host4'])
    random.random().AndReturn(.2)
    driver.instance_update_db(ctxt, instance2['uuid'],
            'host1').WithSideEffects(inc_launch_index).AndReturn(instance2)
    compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host1',
            instance=instance2, requested_networks=None,
            injected_files=None, admin_password=None, is_first_time=None,
            request_spec=request_spec, filter_properties={})
    driver.encode_instance(instance2).AndReturn(instance2_encoded)
    self.mox.ReplayAll()
    # The scheduler should return the encoded instances, in order.
    result = self.driver.schedule_run_instance(ctxt, request_spec, None,
            None, None, None, {})
    expected = [instance1_encoded, instance2_encoded]
    self.assertEqual(result, expected)
def test_basic_schedule_run_instance(self):
    """Chance scheduler schedules two instances via random.random.

    Records the mox sequence per instance: elevate context, list up
    hosts, stubbed random.random selects the host, instance_update_db
    bumps launch_index, then run_instance is cast to the chosen host.

    Removed unused leftovers from an earlier revision of this test
    (fake_args, reservations, instance*_encoded): this variant no longer
    stubs or asserts on driver.encode_instance, so they were dead locals.
    """
    ctxt = context.RequestContext('fake', 'fake', False)
    ctxt_elevated = 'fake-context-elevated'
    # launch_index starts at -1 so the first side-effect bump yields 0.
    instance_opts = {'fake_opt1': 'meow', 'launch_index': -1}
    instance1 = {'uuid': 'fake-uuid1'}
    instance2 = {'uuid': 'fake-uuid2'}
    request_spec = {'instance_uuids': ['fake-uuid1', 'fake-uuid2'],
                    'instance_properties': instance_opts}

    def inc_launch_index(*args):
        # Side effect on instance_update_db: advances launch_index once
        # per scheduled instance.
        request_spec['instance_properties']['launch_index'] = (
            request_spec['instance_properties']['launch_index'] + 1)

    self.mox.StubOutWithMock(ctxt, 'elevated')
    self.mox.StubOutWithMock(self.driver, 'hosts_up')
    self.mox.StubOutWithMock(random, 'random')
    self.mox.StubOutWithMock(driver, 'instance_update_db')
    self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, 'run_instance')
    ctxt.elevated().AndReturn(ctxt_elevated)
    # instance 1
    self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(
        ['host1', 'host2', 'host3', 'host4'])
    random.random().AndReturn(.5)
    driver.instance_update_db(ctxt, instance1['uuid']).WithSideEffects(
        inc_launch_index).AndReturn(instance1)
    compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host3',
            instance=instance1, requested_networks=None,
            injected_files=None, admin_password=None, is_first_time=None,
            request_spec=request_spec, filter_properties={})
    # instance 2
    ctxt.elevated().AndReturn(ctxt_elevated)
    self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(
        ['host1', 'host2', 'host3', 'host4'])
    random.random().AndReturn(.2)
    driver.instance_update_db(ctxt, instance2['uuid']).WithSideEffects(
        inc_launch_index).AndReturn(instance2)
    compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host1',
            instance=instance2, requested_networks=None,
            injected_files=None, admin_password=None, is_first_time=None,
            request_spec=request_spec, filter_properties={})
    self.mox.ReplayAll()
    # Drive the scheduler; mox verifies the recorded sequence on teardown.
    self.driver.schedule_run_instance(ctxt, request_spec, None, None,
            None, None, {})
def _provision_resource(self, context, weighed_host, request_spec,
                        filter_properties, requested_networks,
                        injected_files, admin_password, is_first_time,
                        instance_uuid=None):
    """Create the requested resource in this Zone.

    Emits a 'scheduled' notification, records the instance in the DB,
    folds the selected host back into filter_properties, and casts
    run_instance to the chosen compute host.
    """
    # NOTE(vish): add our current instance back into the request spec
    request_spec['instance_uuids'] = [instance_uuid]
    host_state = weighed_host.obj
    notification_payload = {
        'request_spec': request_spec,
        'weighted_host': weighed_host.to_dict(),
        'instance_id': instance_uuid,
    }
    notifier.notify(context, notifier.publisher_id("scheduler"),
                    'scheduler.run_instance.scheduled', notifier.INFO,
                    notification_payload)
    instance_ref = driver.instance_update_db(context, instance_uuid)
    # Record the chosen host in filter_properties (e.g. for retries).
    self._post_select_populate_filter_properties(filter_properties,
                                                 host_state)
    self.compute_rpcapi.run_instance(
        context,
        instance=instance_ref,
        host=host_state.host,
        request_spec=request_spec,
        filter_properties=filter_properties,
        requested_networks=requested_networks,
        injected_files=injected_files,
        admin_password=admin_password,
        is_first_time=is_first_time,
        node=host_state.nodename)
def schedule_run_instance(self, context, request_spec, admin_password,
                          injected_files, requested_networks,
                          is_first_time, filter_properties,
                          legacy_bdm_in_spec):
    """Create and run an instance or instances.

    Schedules each uuid in request_spec['instance_uuids'] independently:
    picks a host, updates the instance DB row, and casts run_instance.
    Per-instance failures are recorded (not re-raised) so every instance
    in the request is handled.

    Removed commented-out debug LOG lines (dead code).
    """
    instance_uuids = request_spec.get('instance_uuids')
    for num, instance_uuid in enumerate(instance_uuids):
        # launch_index tells the guest its ordinal within the request.
        request_spec['instance_properties']['launch_index'] = num
        try:
            host = self._schedule(context, CONF.compute_topic,
                                  request_spec, filter_properties)
            updated_instance = driver.instance_update_db(context,
                                                         instance_uuid)
            self.compute_rpcapi.run_instance(context,
                    instance=updated_instance, host=host,
                    requested_networks=requested_networks,
                    injected_files=injected_files,
                    admin_password=admin_password,
                    is_first_time=is_first_time,
                    request_spec=request_spec,
                    filter_properties=filter_properties,
                    legacy_bdm_in_spec=legacy_bdm_in_spec)
        except Exception as ex:
            # NOTE(vish): we don't reraise the exception here to make sure
            #             that all instances in the request get set to
            #             error properly
            driver.handle_schedule_error(context, ex, instance_uuid,
                                         request_spec)
def _provision_resource(self, context, weighted_host, request_spec,
                        filter_properties, requested_networks,
                        injected_files, admin_password, is_first_time,
                        instance_uuid=None):
    """Create the requested resource in this Zone.

    Adds a retry entry for the chosen host, emits a 'scheduled'
    notification, updates the instance DB row with the host, then casts
    run_instance to that host.
    """
    target_host = weighted_host.host_state.host
    # Add a retry entry for the selected compute host:
    self._add_retry_host(filter_properties, target_host)
    event_payload = {
        'request_spec': request_spec,
        'weighted_host': weighted_host.to_dict(),
        'instance_id': instance_uuid,
    }
    notifier.notify(context, notifier.publisher_id("scheduler"),
                    'scheduler.run_instance.scheduled', notifier.INFO,
                    event_payload)
    instance_ref = driver.instance_update_db(context, instance_uuid,
                                             target_host)
    self.compute_rpcapi.run_instance(
        context,
        instance=instance_ref,
        host=target_host,
        request_spec=request_spec,
        filter_properties=filter_properties,
        requested_networks=requested_networks,
        injected_files=injected_files,
        admin_password=admin_password,
        is_first_time=is_first_time)
def _provision_resource(self, context, weighed_host, request_spec,
                        filter_properties, requested_networks,
                        injected_files, admin_password, is_first_time,
                        instance_uuid=None, legacy_bdm_in_spec=True):
    """Create the requested resource in this Zone.

    Emits a 'scheduled' notification, then updates the instance DB row;
    if the instance vanished mid-scheduling it only logs a warning,
    otherwise it populates filter_properties with the chosen host and
    casts run_instance.
    """
    # NOTE(vish): add our current instance back into the request spec
    request_spec['instance_uuids'] = [instance_uuid]
    event_payload = {
        'request_spec': request_spec,
        'weighted_host': weighed_host.to_dict(),
        'instance_id': instance_uuid,
    }
    self.notifier.info(context, 'scheduler.run_instance.scheduled',
                       event_payload)
    # Update the metadata if necessary
    try:
        instance_ref = driver.instance_update_db(context, instance_uuid)
    except exception.InstanceNotFound:
        # Deleted while we were scheduling -- nothing left to launch.
        LOG.warning(_("Instance disappeared during scheduling"),
                    context=context, instance_uuid=instance_uuid)
    else:
        host_state = weighed_host.obj
        scheduler_utils.populate_filter_properties(filter_properties,
                                                   host_state)
        self.compute_rpcapi.run_instance(
            context,
            instance=instance_ref,
            host=host_state.host,
            request_spec=request_spec,
            filter_properties=filter_properties,
            requested_networks=requested_networks,
            injected_files=injected_files,
            admin_password=admin_password,
            is_first_time=is_first_time,
            node=host_state.nodename,
            legacy_bdm_in_spec=legacy_bdm_in_spec)
def _provision_resource(self, context, weighed_host, request_spec,
                        filter_properties, requested_networks,
                        injected_files, admin_password, is_first_time,
                        instance_uuid=None):
    """Create the requested resource in this Zone.

    Emits a 'scheduled' notification, records the target node and host
    in the DB, folds the selection into filter_properties, and casts
    run_instance to the chosen compute host.
    """
    host_state = weighed_host.obj
    event_payload = {
        'request_spec': request_spec,
        'weighted_host': weighed_host.to_dict(),
        'instance_id': instance_uuid,
    }
    notifier.notify(context, notifier.publisher_id("scheduler"),
                    'scheduler.run_instance.scheduled', notifier.INFO,
                    event_payload)
    # TODO(NTTdocomo): Combine the next two updates into one
    driver.db_instance_node_set(context, instance_uuid,
                                host_state.nodename)
    instance_ref = driver.instance_update_db(context, instance_uuid)
    self._post_select_populate_filter_properties(filter_properties,
                                                 host_state)
    self.compute_rpcapi.run_instance(
        context,
        instance=instance_ref,
        host=host_state.host,
        request_spec=request_spec,
        filter_properties=filter_properties,
        requested_networks=requested_networks,
        injected_files=injected_files,
        admin_password=admin_password,
        is_first_time=is_first_time)
def _provision_resource(self, context, weighed_host, request_spec,
                        filter_properties, requested_networks,
                        injected_files, admin_password, is_first_time,
                        instance_uuid=None, legacy_bdm_in_spec=True):
    """Create the requested resource in this Zone.

    Notifies that the instance was scheduled, updates its DB row (or
    logs a warning if it disappeared mid-scheduling), then populates
    filter_properties and casts run_instance to the selected host.
    """
    # NOTE(vish): add our current instance back into the request spec
    request_spec['instance_uuids'] = [instance_uuid]
    event_payload = {
        'request_spec': request_spec,
        'weighted_host': weighed_host.to_dict(),
        'instance_id': instance_uuid,
    }
    self.notifier.info(context, 'scheduler.run_instance.scheduled',
                       event_payload)
    # Update the metadata if necessary
    try:
        instance_ref = driver.instance_update_db(context, instance_uuid)
    except exception.InstanceNotFound:
        # Deleted while we were scheduling -- nothing left to launch.
        LOG.warning(_LW("Instance disappeared during scheduling"),
                    context=context, instance_uuid=instance_uuid)
    else:
        host_state = weighed_host.obj
        scheduler_utils.populate_filter_properties(filter_properties,
                                                   host_state)
        self.compute_rpcapi.run_instance(
            context,
            instance=instance_ref,
            host=host_state.host,
            request_spec=request_spec,
            filter_properties=filter_properties,
            requested_networks=requested_networks,
            injected_files=injected_files,
            admin_password=admin_password,
            is_first_time=is_first_time,
            node=host_state.nodename,
            legacy_bdm_in_spec=legacy_bdm_in_spec)
def schedule_run_instance(self, context, request_spec, admin_password,
                          injected_files, requested_networks,
                          is_first_time, filter_properties, reservations):
    """Create and run an instance or instances"""
    scheduled = []
    num_instances = request_spec.get('num_instances', 1)
    for idx in xrange(num_instances):
        # Pick a host, then create the DB entry for this instance.
        chosen_host = self._schedule(context, 'compute', request_spec,
                                     filter_properties)
        request_spec['instance_properties']['launch_index'] = idx
        new_instance = self.create_instance_db_entry(context,
                                                     request_spec,
                                                     reservations)
        instance_ref = driver.instance_update_db(context,
                                                 new_instance['uuid'],
                                                 chosen_host)
        self.compute_rpcapi.run_instance(
            context,
            instance=instance_ref,
            host=chosen_host,
            requested_networks=requested_networks,
            injected_files=injected_files,
            admin_password=admin_password,
            is_first_time=is_first_time,
            request_spec=request_spec,
            filter_properties=filter_properties)
        scheduled.append(driver.encode_instance(instance_ref))
        # So if we loop around, create_instance_db_entry will actually
        # create a new entry, instead of assume it's been created
        # already
        del request_spec['instance_properties']['uuid']
    return scheduled
def schedule_run_instance(self, context, request_spec, admin_password,
                          injected_files, requested_networks,
                          is_first_time, filter_properties):
    """Create and run an instance or instances"""
    for num, instance_uuid in enumerate(
            request_spec.get('instance_uuids')):
        # launch_index tells the guest its ordinal within the request.
        request_spec['instance_properties']['launch_index'] = num
        try:
            chosen_host = self._schedule(context, FLAGS.compute_topic,
                                         request_spec, filter_properties)
            instance_ref = driver.instance_update_db(context,
                                                     instance_uuid)
            self.compute_rpcapi.run_instance(
                context,
                instance=instance_ref,
                host=chosen_host,
                requested_networks=requested_networks,
                injected_files=injected_files,
                admin_password=admin_password,
                is_first_time=is_first_time,
                request_spec=request_spec,
                filter_properties=filter_properties)
        except Exception as ex:
            # NOTE(vish): we don't reraise the exception here to make sure
            #             that all instances in the request get set to
            #             error properly
            driver.handle_schedule_error(context, ex, instance_uuid,
                                         request_spec)
def _provision_resource(self, context, weighted_host, request_spec,
                        filter_properties, requested_networks,
                        injected_files, admin_password, is_first_time,
                        instance_uuid=None):
    """Create the requested resource in this Zone.

    Adds retry and oversubscription info to filter_properties, emits a
    'scheduled' notification, updates the instance DB row, and casts
    run_instance to the selected host.
    """
    chosen_state = weighted_host.host_state
    # Add a retry entry for the selected compute host:
    self._add_retry_host(filter_properties, chosen_state.host)
    self._add_oversubscription_policy(filter_properties, chosen_state)
    event_payload = {
        'request_spec': request_spec,
        'weighted_host': weighted_host.to_dict(),
        'instance_id': instance_uuid,
    }
    notifier.notify(context, notifier.publisher_id("scheduler"),
                    'scheduler.run_instance.scheduled', notifier.INFO,
                    event_payload)
    instance_ref = driver.instance_update_db(context, instance_uuid)
    self.compute_rpcapi.run_instance(
        context,
        instance=instance_ref,
        host=chosen_state.host,
        request_spec=request_spec,
        filter_properties=filter_properties,
        requested_networks=requested_networks,
        injected_files=injected_files,
        admin_password=admin_password,
        is_first_time=is_first_time)
def schedule_run_instance(self, context, request_spec, admin_password,
                          injected_files, requested_networks,
                          is_first_time, filter_properties):
    """Create and run an instance or instances."""
    for num, instance_uuid in enumerate(
            request_spec.get('instance_uuids')):
        # launch_index tells the guest its ordinal within the request.
        request_spec['instance_properties']['launch_index'] = num
        try:
            chosen_host = self._schedule(context, CONF.compute_topic,
                                         request_spec, filter_properties)
            instance_ref = driver.instance_update_db(context,
                                                     instance_uuid)
            self.compute_rpcapi.run_instance(
                context,
                instance=instance_ref,
                host=chosen_host,
                requested_networks=requested_networks,
                injected_files=injected_files,
                admin_password=admin_password,
                is_first_time=is_first_time,
                request_spec=request_spec,
                filter_properties=filter_properties)
        except Exception as ex:
            # NOTE(vish): we don't reraise the exception here to make sure
            #             that all instances in the request get set to
            #             error properly
            driver.handle_schedule_error(context, ex, instance_uuid,
                                         request_spec)
def _provision_resource(self, context, weighted_host, request_spec,
                        reservations, filter_properties,
                        requested_networks, injected_files,
                        admin_password, is_first_time):
    """Create the requested resource in this Zone.

    Creates the instance DB entry, records a retry host, emits a
    'scheduled' notification, casts run_instance to the selected host,
    and returns the locally-encoded instance.
    """
    new_instance = self.create_instance_db_entry(context, request_spec,
                                                 reservations)
    target_host = weighted_host.host_state.host
    # Add a retry entry for the selected compute host:
    self._add_retry_host(filter_properties, target_host)
    event_payload = {
        'request_spec': request_spec,
        'weighted_host': weighted_host.to_dict(),
        'instance_id': new_instance['uuid'],
    }
    notifier.notify(context, notifier.publisher_id("scheduler"),
                    'scheduler.run_instance.scheduled', notifier.INFO,
                    event_payload)
    instance_ref = driver.instance_update_db(context,
                                             new_instance['uuid'],
                                             target_host)
    self.compute_rpcapi.run_instance(
        context,
        instance=instance_ref,
        host=target_host,
        request_spec=request_spec,
        filter_properties=filter_properties,
        requested_networks=requested_networks,
        injected_files=injected_files,
        admin_password=admin_password,
        is_first_time=is_first_time)
    encoded = driver.encode_instance(instance_ref, local=True)
    # So if another instance is created, create_instance_db_entry will
    # actually create a new entry, instead of assume it's been created
    # already
    del request_spec['instance_properties']['uuid']
    return encoded
def _provision_resource(self, context, weighed_host, request_spec,
                        filter_properties, requested_networks,
                        injected_files, admin_password, is_first_time,
                        instance_uuid=None):
    """Create the requested resource in this Zone.

    Emits a 'scheduled' notification, merges any scheduler-hint group
    into the instance's system_metadata, updates the DB row, populates
    filter_properties with the chosen host, and casts run_instance.
    """
    # NOTE(vish): add our current instance back into the request spec
    request_spec['instance_uuids'] = [instance_uuid]
    event_payload = {
        'request_spec': request_spec,
        'weighted_host': weighed_host.to_dict(),
        'instance_id': instance_uuid,
    }
    notifier.notify(context, notifier.publisher_id("scheduler"),
                    'scheduler.run_instance.scheduled', notifier.INFO,
                    event_payload)
    # Update the metadata if necessary
    hints = filter_properties.get('scheduler_hints') or {}
    group = hints.get('group', None)
    extra = None
    if group:
        # Fold the group hint into the instance's system metadata so the
        # anti-affinity filter can see it on later requests.
        extra = request_spec['instance_properties']['system_metadata']
        extra.update({'group': group})
        extra = {'system_metadata': extra}
    instance_ref = driver.instance_update_db(context, instance_uuid,
                                             extra_values=extra)
    host_state = weighed_host.obj
    self._post_select_populate_filter_properties(filter_properties,
                                                 host_state)
    self.compute_rpcapi.run_instance(
        context,
        instance=instance_ref,
        host=host_state.host,
        request_spec=request_spec,
        filter_properties=filter_properties,
        requested_networks=requested_networks,
        injected_files=injected_files,
        admin_password=admin_password,
        is_first_time=is_first_time,
        node=host_state.nodename)
def schedule_run_instance(self, context, request_spec, admin_password,
                          injected_files, requested_networks,
                          is_first_time, filter_properties, reservations):
    """Create and run an instance or instances"""
    # Requests created before instance_uuids existed take the legacy path.
    if 'instance_uuids' not in request_spec:
        return self._legacy_schedule_run_instance(
            context, request_spec, admin_password, injected_files,
            requested_networks, is_first_time, filter_properties,
            reservations)
    scheduled = []
    for num, instance_uuid in enumerate(
            request_spec.get('instance_uuids')):
        chosen_host = self._schedule(context, 'compute', request_spec,
                                     filter_properties)
        request_spec['instance_properties']['launch_index'] = num
        instance_ref = driver.instance_update_db(context, instance_uuid,
                                                 chosen_host)
        self.compute_rpcapi.run_instance(
            context,
            instance=instance_ref,
            host=chosen_host,
            requested_networks=requested_networks,
            injected_files=injected_files,
            admin_password=admin_password,
            is_first_time=is_first_time,
            request_spec=request_spec,
            filter_properties=filter_properties)
        scheduled.append(driver.encode_instance(instance_ref))
    return scheduled
def schedule_run_instance(self, context, request_spec, admin_password,
                          injected_files, requested_networks,
                          is_first_time, filter_properties, reservations):
    """Create and run an instance or instances"""
    scheduled = []
    for idx in xrange(request_spec.get('num_instances', 1)):
        chosen_host = self._schedule(context, 'compute', request_spec,
                                     filter_properties)
        request_spec['instance_properties']['launch_index'] = idx
        new_instance = self.create_instance_db_entry(context,
                                                     request_spec,
                                                     reservations)
        instance_ref = driver.instance_update_db(context,
                                                 new_instance['uuid'],
                                                 chosen_host)
        self.compute_rpcapi.run_instance(
            context,
            instance=instance_ref,
            host=chosen_host,
            requested_networks=requested_networks,
            injected_files=injected_files,
            admin_password=admin_password,
            is_first_time=is_first_time,
            request_spec=request_spec,
            filter_properties=filter_properties)
        scheduled.append(driver.encode_instance(instance_ref))
        # So if we loop around, create_instance_db_entry will actually
        # create a new entry, instead of assume it's been created
        # already
        del request_spec['instance_properties']['uuid']
    return scheduled
def test_basic_schedule_run_instance(self):
    """Chance scheduler picks a random host per instance and casts run_instance.

    Records the expected mox call sequence for two instances: elevate the
    context, list hosts that are up, pick one via random.choice, update the
    instance DB row, then RPC-cast run_instance to the chosen host.
    """
    ctxt = context.RequestContext('fake', 'fake', False)
    ctxt_elevated = 'fake-context-elevated'
    # launch_index starts at -1 so the first WithSideEffects bump makes it 0.
    instance_opts = {'fake_opt1': 'meow', 'launch_index': -1}
    instance1 = {'uuid': 'fake-uuid1'}
    instance2 = {'uuid': 'fake-uuid2'}
    request_spec = {'instance_uuids': ['fake-uuid1', 'fake-uuid2'],
                    'instance_properties': instance_opts}

    def inc_launch_index(*args):
        # Side effect attached to instance_update_db: mimics the scheduler
        # advancing launch_index once per scheduled instance.
        request_spec['instance_properties']['launch_index'] = (
            request_spec['instance_properties']['launch_index'] + 1)

    self.mox.StubOutWithMock(ctxt, 'elevated')
    self.mox.StubOutWithMock(self.driver, 'hosts_up')
    self.mox.StubOutWithMock(random, 'choice')
    self.mox.StubOutWithMock(driver, 'instance_update_db')
    self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, 'run_instance')
    ctxt.elevated().AndReturn(ctxt_elevated)
    # instance 1
    hosts_full = ['host1', 'host2', 'host3', 'host4']
    self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
    random.choice(hosts_full).AndReturn('host3')
    driver.instance_update_db(ctxt, instance1['uuid']).WithSideEffects(
        inc_launch_index).AndReturn(instance1)
    compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host3',
            instance=instance1, requested_networks=None,
            injected_files=None, admin_password=None, is_first_time=None,
            request_spec=request_spec, filter_properties={},
            legacy_bdm_in_spec=False)
    # instance 2
    ctxt.elevated().AndReturn(ctxt_elevated)
    self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
    random.choice(hosts_full).AndReturn('host1')
    driver.instance_update_db(ctxt, instance2['uuid']).WithSideEffects(
        inc_launch_index).AndReturn(instance2)
    compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host1',
            instance=instance2, requested_networks=None,
            injected_files=None, admin_password=None, is_first_time=None,
            request_spec=request_spec, filter_properties={},
            legacy_bdm_in_spec=False)
    self.mox.ReplayAll()
    # Drive the scheduler; mox verifies the recorded sequence on teardown.
    self.driver.schedule_run_instance(ctxt, request_spec, None, None,
            None, None, {}, False)
def schedule_prep_resize(self, context, image, update_db, request_spec,
                         filter_properties, instance, instance_type):
    """Select a target for resize.

    Picks a host, records it on the instance DB row, and casts
    prep_resize to the compute service.
    """
    selected = self._schedule(context, 'compute', request_spec,
                              filter_properties)
    instance_ref = driver.instance_update_db(context, instance['uuid'],
                                             selected.host_state.host)
    self.compute_rpcapi.prep_resize(context, image, instance_ref,
                                    instance_type, selected)
def _provision_resource(
    self,
    context,
    weighed_host,
    request_spec,
    filter_properties,
    requested_networks,
    injected_files,
    admin_password,
    is_first_time,
    instance_uuid=None,
    legacy_bdm_in_spec=True,
):
    """Create the requested resource in this Zone.

    Emits a 'scheduled' notification, merges any scheduler-hint group
    into system_metadata, updates the instance DB row (warning only if
    the instance vanished), then populates filter_properties and casts
    run_instance to the selected host.
    """
    # NOTE(vish): add our current instance back into the request spec
    request_spec["instance_uuids"] = [instance_uuid]
    event_payload = {
        "request_spec": request_spec,
        "weighted_host": weighed_host.to_dict(),
        "instance_id": instance_uuid,
    }
    self.notifier.info(context, "scheduler.run_instance.scheduled", event_payload)
    # Update the metadata if necessary
    hints = filter_properties.get("scheduler_hints") or {}
    group = hints.get("group", None)
    extra = None
    if group:
        # Fold the group hint into system_metadata so affinity filters
        # can see it on subsequent requests.
        extra = request_spec["instance_properties"]["system_metadata"]
        extra.update({"group": group})
        extra = {"system_metadata": extra}
    try:
        instance_ref = driver.instance_update_db(context, instance_uuid, extra_values=extra)
    except exception.InstanceNotFound:
        # Deleted while we were scheduling -- nothing left to launch.
        LOG.warning(_("Instance disappeared during scheduling"), context=context, instance_uuid=instance_uuid)
    else:
        host_state = weighed_host.obj
        scheduler_utils.populate_filter_properties(filter_properties, host_state)
        self.compute_rpcapi.run_instance(
            context,
            instance=instance_ref,
            host=host_state.host,
            request_spec=request_spec,
            filter_properties=filter_properties,
            requested_networks=requested_networks,
            injected_files=injected_files,
            admin_password=admin_password,
            is_first_time=is_first_time,
            node=host_state.nodename,
            legacy_bdm_in_spec=legacy_bdm_in_spec,
        )
def _provision_resource(self, context, selected_host, request_spec,
                        filter_properties, requested_networks,
                        injected_files, admin_password, is_first_time,
                        instance_uuid=None):
    """Create the requested resource in this Zone.

    Solver-scheduler variant: no weights are computed, so the payload
    carries a fixed weight of 1 for the chosen host. Emits a 'scheduled'
    notification, merges any group hint into system_metadata, updates
    the DB row (warning only if the instance vanished), and casts
    run_instance.
    """
    # NOTE(vish): add our current instance back into the request spec
    request_spec['instance_uuids'] = [instance_uuid]
    # NOTE(Yathi): not using weights in the solver scheduler, but keep a
    # weighted_host entry in the payload for notification compatibility.
    event_payload = {
        'request_spec': request_spec,
        'weighted_host': dict(host=selected_host.host, weight=1),
        'instance_id': instance_uuid,
    }
    notifier.notify(context, notifier.publisher_id("scheduler"),
                    'scheduler.run_instance.scheduled', notifier.INFO,
                    event_payload)
    # Update the metadata if necessary
    hints = filter_properties.get('scheduler_hints') or {}
    group = hints.get('group', None)
    extra = None
    if group:
        extra = request_spec['instance_properties']['system_metadata']
        extra.update({'group': group})
        extra = {'system_metadata': extra}
    try:
        instance_ref = driver.instance_update_db(context, instance_uuid,
                                                 extra_values=extra)
    except exception.InstanceNotFound:
        # Deleted while we were scheduling -- nothing left to launch.
        LOG.warning(_("Instance disappeared during scheduling"),
                    context=context, instance_uuid=instance_uuid)
    else:
        scheduler_utils.populate_filter_properties(filter_properties,
                                                   selected_host)
        self.compute_rpcapi.run_instance(
            context,
            instance=instance_ref,
            host=selected_host.host,
            request_spec=request_spec,
            filter_properties=filter_properties,
            requested_networks=requested_networks,
            injected_files=injected_files,
            admin_password=admin_password,
            is_first_time=is_first_time,
            node=selected_host.nodename)
def schedule_prep_resize(self, context, image, update_db, request_spec,
                         filter_properties, instance, instance_type):
    """Select a target for resize.

    Selects a target host for the instance, post-resize, and casts the
    prep_resize operation to it.

    :raises: exception.NoValidHost if scheduling produced no candidates.
    """
    candidates = self._schedule(context, 'compute', request_spec,
                                filter_properties)
    if not candidates:
        raise exception.NoValidHost(reason="")
    best = candidates.pop(0)
    # Forward off to the host
    instance_ref = driver.instance_update_db(context, instance['uuid'],
                                             best.host_state.host)
    self.compute_rpcapi.prep_resize(context, image, instance_ref,
                                    instance_type, best.host_state.host)
def _provision_resource(self, context, weighted_host, request_spec,
                        reservations, filter_properties,
                        requested_networks, injected_files,
                        admin_password, is_first_time):
    """Create the requested resource in this Zone.

    Creates the instance DB entry, adds a retry host, notifies, casts
    run_instance to the selected host, and returns the locally-encoded
    instance.
    """
    new_instance = self.create_instance_db_entry(context, request_spec,
                                                 reservations)
    target_host = weighted_host.host_state.host
    # Add a retry entry for the selected compute host:
    self._add_retry_host(filter_properties, target_host)
    event_payload = {
        'request_spec': request_spec,
        'weighted_host': weighted_host.to_dict(),
        'instance_id': new_instance['uuid'],
    }
    notifier.notify(context, notifier.publisher_id("scheduler"),
                    'scheduler.run_instance.scheduled', notifier.INFO,
                    event_payload)
    instance_ref = driver.instance_update_db(context,
                                             new_instance['uuid'],
                                             target_host)
    self.compute_rpcapi.run_instance(
        context,
        instance=instance_ref,
        host=target_host,
        request_spec=request_spec,
        filter_properties=filter_properties,
        requested_networks=requested_networks,
        injected_files=injected_files,
        admin_password=admin_password,
        is_first_time=is_first_time)
    encoded = driver.encode_instance(instance_ref, local=True)
    # So if another instance is created, create_instance_db_entry will
    # actually create a new entry, instead of assume it's been created
    # already
    del request_spec['instance_properties']['uuid']
    return encoded
def schedule_run_instance(self, context, request_spec, admin_password,
                          injected_files, requested_networks,
                          is_first_time, filter_properties):
    """Create and run an instance or instances"""
    scheduled = []
    for num, instance_uuid in enumerate(
            request_spec.get('instance_uuids')):
        chosen_host = self._schedule(context, 'compute', request_spec,
                                     filter_properties)
        # launch_index tells the guest its ordinal within the request.
        request_spec['instance_properties']['launch_index'] = num
        instance_ref = driver.instance_update_db(context, instance_uuid,
                                                 chosen_host)
        self.compute_rpcapi.run_instance(
            context,
            instance=instance_ref,
            host=chosen_host,
            requested_networks=requested_networks,
            injected_files=injected_files,
            admin_password=admin_password,
            is_first_time=is_first_time,
            request_spec=request_spec,
            filter_properties=filter_properties)
        scheduled.append(driver.encode_instance(instance_ref))
    return scheduled
def _provision_resource(self, context, weighted_host, request_spec, filter_properties, requested_networks, injected_files, admin_password, is_first_time, instance_uuid=None): """Create the requested resource in this Zone.""" # Add a retry entry for the selected compute host: self._add_retry_host(filter_properties, weighted_host.host_state.host) self._add_oversubscription_policy(filter_properties, weighted_host.host_state) payload = dict(request_spec=request_spec, weighted_host=weighted_host.to_dict(), instance_id=instance_uuid) notifier.notify(context, notifier.publisher_id("scheduler"), 'scheduler.run_instance.scheduled', notifier.INFO, payload) if weighted_host.host_state.nodename is not None: smd_dic = db.instance_system_metadata_get(context, instance_uuid) smd_dic['node'] = weighted_host.host_state.nodename else: # update is not needed smd_dic = None updated_instance = driver.instance_update_db(context, instance_uuid, weighted_host.host_state.host, system_metadata=smd_dic) # Ensure system_metadata is loaded and included in rpc payload updated_instance.get('system_metadata') self.compute_rpcapi.run_instance(context, instance=updated_instance, host=weighted_host.host_state.host, request_spec=request_spec, filter_properties=filter_properties, requested_networks=requested_networks, injected_files=injected_files, admin_password=admin_password, is_first_time=is_first_time)
def test_basic_schedule_run_instances_anti_affinity(self):
    """Two instances in scheduler-hint group "cats" must be placed on
    two different hosts (host3/node3, then host4/node4), each tagged
    with the group name in system_metadata.
    """
    filter_properties = {"scheduler_hints": {"group": "cats"}}
    # Request spec 1
    instance_opts1 = {
        "project_id": 1,
        "os_type": "Linux",
        "memory_mb": 512,
        "root_gb": 512,
        "ephemeral_gb": 0,
        "vcpus": 1,
        "system_metadata": {"system": "metadata"},
    }
    request_spec1 = {
        "instance_uuids": ["fake-uuid1-1", "fake-uuid1-2"],
        "instance_properties": instance_opts1,
        "instance_type": {"memory_mb": 512, "root_gb": 512,
                          "ephemeral_gb": 0, "vcpus": 1},
    }
    self.next_weight = 1.0

    def _fake_weigh_objects(_self, functions, hosts, options):
        # Strictly increasing weights make host selection deterministic.
        self.next_weight += 2.0
        host_state = hosts[0]
        return [weights.WeighedHost(host_state, self.next_weight)]

    sched = fakes.FakeFilterScheduler()
    fake_context = context.RequestContext("user", "project", is_admin=True)
    self.stubs.Set(sched.host_manager, "get_filtered_hosts",
                   fake_get_group_filtered_hosts)
    self.stubs.Set(weights.HostWeightHandler, "get_weighed_objects",
                   _fake_weigh_objects)
    fakes.mox_host_manager_db_calls(self.mox, fake_context)

    self.mox.StubOutWithMock(driver, "instance_update_db")
    self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, "run_instance")
    self.mox.StubOutWithMock(sched, "group_hosts")

    instance1_1 = {"uuid": "fake-uuid1-1"}
    instance1_2 = {"uuid": "fake-uuid1-2"}

    # The group occupies no hosts yet, so nothing is excluded up front.
    sched.group_hosts(mox.IgnoreArg(), "cats").AndReturn([])

    def inc_launch_index1(*args, **kwargs):
        # Mirrors the scheduler bumping launch_index once per instance.
        request_spec1["instance_properties"]["launch_index"] = (
            request_spec1["instance_properties"]["launch_index"] + 1
        )

    # The scheduler is expected to tag system_metadata with the group.
    expected_metadata = {"system_metadata":
                         {"system": "metadata", "group": "cats"}}
    driver.instance_update_db(
        fake_context, instance1_1["uuid"],
        extra_values=expected_metadata).WithSideEffects(
            inc_launch_index1).AndReturn(instance1_1)
    compute_rpcapi.ComputeAPI.run_instance(
        fake_context,
        host="host3",
        instance=instance1_1,
        requested_networks=None,
        injected_files=None,
        admin_password=None,
        is_first_time=None,
        request_spec=request_spec1,
        filter_properties=mox.IgnoreArg(),
        node="node3",
        legacy_bdm_in_spec=False,
    )
    # Second group member must land on a different host/node.
    driver.instance_update_db(
        fake_context, instance1_2["uuid"],
        extra_values=expected_metadata).WithSideEffects(
            inc_launch_index1).AndReturn(instance1_2)
    compute_rpcapi.ComputeAPI.run_instance(
        fake_context,
        host="host4",
        instance=instance1_2,
        requested_networks=None,
        injected_files=None,
        admin_password=None,
        is_first_time=None,
        request_spec=request_spec1,
        filter_properties=mox.IgnoreArg(),
        node="node4",
        legacy_bdm_in_spec=False,
    )
    self.mox.ReplayAll()

    sched.schedule_run_instance(fake_context, request_spec1, None, None,
                                None, None, filter_properties, False)
def test_scheduler_includes_launch_index(self):
    """Each scheduled instance must carry an incrementing launch_index.

    The spec handed to create_instance_db_entry is matched against the
    expected index (0 for the first instance, 1 for the second).
    """
    ctxt = context.RequestContext('fake', 'fake', False)
    request_spec = {
        'num_instances': 2,
        'instance_properties': {'fake_opt1': 'meow'},
    }
    instance1 = {'uuid': 'fake-uuid1'}
    instance2 = {'uuid': 'fake-uuid2'}

    # create_instance_db_entry() usually injects the uuid; since it is
    # stubbed out below, emulate that side effect ourselves.
    def _uuid_setter(num):
        """Return a side-effect callable that injects 'fake-uuid<num>'."""
        def _setter(_, spec, reservations):
            spec['instance_properties']['uuid'] = 'fake-uuid%d' % num
        return _setter

    def _launch_index_matcher(expected_index):
        """Return a mox.Func predicate checking the launch_index value."""
        def _matcher(value):
            props = value.get('instance_properties')
            if not props or 'launch_index' not in props:
                return False
            return props['launch_index'] == expected_index
        return _matcher

    self.mox.StubOutWithMock(self.driver, '_schedule')
    self.mox.StubOutWithMock(self.driver, 'create_instance_db_entry')
    self.mox.StubOutWithMock(driver, 'encode_instance')
    self.mox.StubOutWithMock(driver, 'instance_update_db')
    self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, 'run_instance')

    # instance 1: expect launch_index 0
    self.driver._schedule(ctxt, 'compute', request_spec,
                          {}).AndReturn('host')
    self.driver.create_instance_db_entry(
        ctxt, mox.Func(_launch_index_matcher(0)),
        None).WithSideEffects(_uuid_setter(1)).AndReturn(instance1)
    driver.instance_update_db(ctxt, instance1['uuid'],
                              'host').AndReturn(instance1)
    compute_rpcapi.ComputeAPI.run_instance(
        ctxt, host='host', instance=instance1,
        requested_networks=None, injected_files=None,
        admin_password=None, is_first_time=None,
        request_spec=request_spec, filter_properties={})
    driver.encode_instance(instance1).AndReturn(instance1)

    # instance 2: expect launch_index 1
    self.driver._schedule(ctxt, 'compute', request_spec,
                          {}).AndReturn('host')
    self.driver.create_instance_db_entry(
        ctxt, mox.Func(_launch_index_matcher(1)),
        None).WithSideEffects(_uuid_setter(2)).AndReturn(instance2)
    driver.instance_update_db(ctxt, instance2['uuid'],
                              'host').AndReturn(instance2)
    compute_rpcapi.ComputeAPI.run_instance(
        ctxt, host='host', instance=instance2,
        requested_networks=None, injected_files=None,
        admin_password=None, is_first_time=None,
        request_spec=request_spec, filter_properties={})
    driver.encode_instance(instance2).AndReturn(instance2)

    self.mox.ReplayAll()
    self.driver.schedule_run_instance(ctxt, request_spec, None, None,
                                      None, None, {}, None)
def test_basic_schedule_run_instance(self):
    """Chance scheduler picks a host via hosts_up() + random.random()
    for each of two instances and returns their encoded forms in order.
    """
    ctxt = context.RequestContext('fake', 'fake', False)
    ctxt_elevated = 'fake-context-elevated'
    fake_args = (1, 2, 3)  # NOTE(review): unused in this test
    instance_opts = {'fake_opt1': 'meow'}
    request_spec = {'num_instances': 2,
                    'instance_properties': instance_opts}
    instance1 = {'uuid': 'fake-uuid1'}
    instance2 = {'uuid': 'fake-uuid2'}
    instance1_encoded = {'uuid': 'fake-uuid1', '_is_precooked': False}
    instance2_encoded = {'uuid': 'fake-uuid2', '_is_precooked': False}
    reservations = ['resv1', 'resv2']

    # create_instance_db_entry() usually does this, but we're
    # stubbing it.
    def _add_uuid1(ctxt, request_spec, reservations):
        request_spec['instance_properties']['uuid'] = 'fake-uuid1'

    def _add_uuid2(ctxt, request_spec, reservations):
        request_spec['instance_properties']['uuid'] = 'fake-uuid2'

    self.mox.StubOutWithMock(ctxt, 'elevated')
    self.mox.StubOutWithMock(self.driver, 'hosts_up')
    self.mox.StubOutWithMock(random, 'random')
    self.mox.StubOutWithMock(self.driver, 'create_instance_db_entry')
    self.mox.StubOutWithMock(driver, 'encode_instance')
    self.mox.StubOutWithMock(driver, 'instance_update_db')
    self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, 'run_instance')

    ctxt.elevated().AndReturn(ctxt_elevated)
    # instance 1: a random draw of 0.5 over four hosts is expected to
    # select 'host3'
    self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(
        ['host1', 'host2', 'host3', 'host4'])
    random.random().AndReturn(.5)
    self.driver.create_instance_db_entry(
        ctxt, request_spec,
        reservations).WithSideEffects(_add_uuid1).AndReturn(instance1)
    driver.instance_update_db(ctxt, instance1['uuid'],
                              'host3').AndReturn(instance1)
    compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host3',
            instance=instance1, requested_networks=None,
            injected_files=None, admin_password=None, is_first_time=None,
            request_spec=request_spec, filter_properties={})
    driver.encode_instance(instance1).AndReturn(instance1_encoded)

    # instance 2: a draw of 0.2 is expected to select 'host1'
    ctxt.elevated().AndReturn(ctxt_elevated)
    self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(
        ['host1', 'host2', 'host3', 'host4'])
    random.random().AndReturn(.2)
    self.driver.create_instance_db_entry(
        ctxt, request_spec,
        reservations).WithSideEffects(_add_uuid2).AndReturn(instance2)
    driver.instance_update_db(ctxt, instance2['uuid'],
                              'host1').AndReturn(instance2)
    compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host1',
            instance=instance2, requested_networks=None,
            injected_files=None, admin_password=None, is_first_time=None,
            request_spec=request_spec, filter_properties={})
    driver.encode_instance(instance2).AndReturn(instance2_encoded)

    self.mox.ReplayAll()
    result = self.driver.schedule_run_instance(ctxt, request_spec, None,
                                               None, None, None, {},
                                               reservations)
    expected = [instance1_encoded, instance2_encoded]
    self.assertEqual(result, expected)
def test_basic_schedule_run_instances_anti_affinity(self):
    """Two instances in scheduler-hint group 'cats' must be placed on
    two different hosts (host3/node3, then host4/node4), each tagged
    with the group name in system_metadata.
    """
    filter_properties = {'scheduler_hints': {'group': 'cats'}}
    # Request spec 1
    instance_opts1 = {'project_id': 1,
                      'os_type': 'Linux',
                      'memory_mb': 512,
                      'root_gb': 512,
                      'ephemeral_gb': 0,
                      'vcpus': 1,
                      'system_metadata': {'system': 'metadata'}}
    request_spec1 = {'instance_uuids': ['fake-uuid1-1', 'fake-uuid1-2'],
                     'instance_properties': instance_opts1,
                     'instance_type': {'memory_mb': 512,
                                       'root_gb': 512,
                                       'ephemeral_gb': 0,
                                       'vcpus': 1}}
    self.next_weight = 1.0

    def _fake_weigh_objects(_self, functions, hosts, options):
        # Strictly increasing weights make host selection deterministic.
        self.next_weight += 2.0
        host_state = hosts[0]
        return [weights.WeighedHost(host_state, self.next_weight)]

    sched = fakes.FakeFilterScheduler()
    fake_context = context.RequestContext('user', 'project',
                                          is_admin=True)
    self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
                   fake_get_group_filtered_hosts)
    self.stubs.Set(weights.HostWeightHandler, 'get_weighed_objects',
                   _fake_weigh_objects)
    fakes.mox_host_manager_db_calls(self.mox, fake_context)

    self.mox.StubOutWithMock(driver, 'instance_update_db')
    self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, 'run_instance')
    self.mox.StubOutWithMock(sched, 'group_hosts')

    instance1_1 = {'uuid': 'fake-uuid1-1'}
    instance1_2 = {'uuid': 'fake-uuid1-2'}

    # The group occupies no hosts yet, so nothing is excluded up front.
    sched.group_hosts(mox.IgnoreArg(), 'cats').AndReturn([])

    def inc_launch_index1(*args, **kwargs):
        # Mirrors the scheduler bumping launch_index once per instance.
        request_spec1['instance_properties']['launch_index'] = (
            request_spec1['instance_properties']['launch_index'] + 1)

    # The scheduler is expected to tag system_metadata with the group.
    expected_metadata = {'system_metadata':
                         {'system': 'metadata', 'group': 'cats'}}
    driver.instance_update_db(fake_context, instance1_1['uuid'],
            extra_values=expected_metadata).WithSideEffects(
                    inc_launch_index1).AndReturn(instance1_1)
    compute_rpcapi.ComputeAPI.run_instance(fake_context, host='host3',
            instance=instance1_1, requested_networks=None,
            injected_files=None, admin_password=None, is_first_time=None,
            request_spec=request_spec1,
            filter_properties=mox.IgnoreArg(), node='node3')
    # Second group member must land on a different host/node.
    driver.instance_update_db(fake_context, instance1_2['uuid'],
            extra_values=expected_metadata).WithSideEffects(
                    inc_launch_index1).AndReturn(instance1_2)
    compute_rpcapi.ComputeAPI.run_instance(fake_context, host='host4',
            instance=instance1_2, requested_networks=None,
            injected_files=None, admin_password=None, is_first_time=None,
            request_spec=request_spec1,
            filter_properties=mox.IgnoreArg(), node='node4')
    self.mox.ReplayAll()

    sched.schedule_run_instance(fake_context, request_spec1, None, None,
                                None, None, filter_properties)
def test_scheduler_includes_launch_index(self):
    """Each scheduled instance must carry an incrementing launch_index
    (0 for the first, 1 for the second) in the spec handed to
    create_instance_db_entry.
    """
    ctxt = context.RequestContext('fake', 'fake', False)
    instance_opts = {'fake_opt1': 'meow'}
    request_spec = {'num_instances': 2,
                    'instance_properties': instance_opts}
    instance1 = {'uuid': 'fake-uuid1'}
    instance2 = {'uuid': 'fake-uuid2'}

    # create_instance_db_entry() usually does this, but we're
    # stubbing it.
    def _add_uuid(num):
        """Return a function that adds the provided uuid number."""
        def _add_uuid_num(_, spec, reservations):
            spec['instance_properties']['uuid'] = 'fake-uuid%d' % num
        return _add_uuid_num

    def _has_launch_index(expected_index):
        """Return a function that verifies the expected index."""
        def _check_launch_index(value):
            # mox.Func predicate: True only when the spec carries the
            # expected launch_index.
            if 'instance_properties' in value:
                if 'launch_index' in value['instance_properties']:
                    index = value['instance_properties']['launch_index']
                    if index == expected_index:
                        return True
            return False
        return _check_launch_index

    self.mox.StubOutWithMock(self.driver, '_schedule')
    self.mox.StubOutWithMock(self.driver, 'create_instance_db_entry')
    self.mox.StubOutWithMock(driver, 'encode_instance')
    self.mox.StubOutWithMock(driver, 'instance_update_db')
    self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, 'run_instance')

    # instance 1: launch_index 0 expected
    self.driver._schedule(ctxt, 'compute', request_spec,
                          {}).AndReturn('host')
    self.driver.create_instance_db_entry(
        ctxt, mox.Func(_has_launch_index(0)), None
    ).WithSideEffects(_add_uuid(1)).AndReturn(instance1)
    driver.instance_update_db(ctxt, instance1['uuid'],
                              'host').AndReturn(instance1)
    compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host',
            instance=instance1, requested_networks=None,
            injected_files=None, admin_password=None, is_first_time=None,
            request_spec=request_spec, filter_properties={})
    driver.encode_instance(instance1).AndReturn(instance1)

    # instance 2: launch_index 1 expected
    self.driver._schedule(ctxt, 'compute', request_spec,
                          {}).AndReturn('host')
    self.driver.create_instance_db_entry(
        ctxt, mox.Func(_has_launch_index(1)), None
    ).WithSideEffects(_add_uuid(2)).AndReturn(instance2)
    driver.instance_update_db(ctxt, instance2['uuid'],
                              'host').AndReturn(instance2)
    compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host',
            instance=instance2, requested_networks=None,
            injected_files=None, admin_password=None, is_first_time=None,
            request_spec=request_spec, filter_properties={})
    driver.encode_instance(instance2).AndReturn(instance2)

    self.mox.ReplayAll()
    self.driver.schedule_run_instance(ctxt, request_spec, None, None,
                                      None, None, {}, None)
def test_basic_schedule_run_instances_anti_affinity(self):
    """Two instances in scheduler-hint group 'cats' must be placed on
    two different hosts (host3/node3, then host4/node4), each tagged
    with the group name in system_metadata.
    """
    filter_properties = {'scheduler_hints': {'group': 'cats'}}
    # Request spec 1
    instance_opts1 = {
        'project_id': 1,
        'os_type': 'Linux',
        'memory_mb': 512,
        'root_gb': 512,
        'ephemeral_gb': 0,
        'vcpus': 1,
        'system_metadata': {
            'system': 'metadata'
        }
    }
    request_spec1 = {
        'instance_uuids': ['fake-uuid1-1', 'fake-uuid1-2'],
        'instance_properties': instance_opts1,
        'instance_type': {
            'memory_mb': 512,
            'root_gb': 512,
            'ephemeral_gb': 0,
            'vcpus': 1
        }
    }
    self.next_weight = 1.0

    def _fake_weigh_objects(_self, functions, hosts, options):
        # Strictly increasing weights make host selection deterministic.
        self.next_weight += 2.0
        host_state = hosts[0]
        return [weights.WeighedHost(host_state, self.next_weight)]

    sched = fakes.FakeFilterScheduler()
    fake_context = context.RequestContext('user', 'project',
                                          is_admin=True)
    self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
                   fake_get_group_filtered_hosts)
    self.stubs.Set(weights.HostWeightHandler, 'get_weighed_objects',
                   _fake_weigh_objects)
    fakes.mox_host_manager_db_calls(self.mox, fake_context)

    self.mox.StubOutWithMock(driver, 'instance_update_db')
    self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, 'run_instance')
    self.mox.StubOutWithMock(sched, 'group_hosts')

    instance1_1 = {'uuid': 'fake-uuid1-1'}
    instance1_2 = {'uuid': 'fake-uuid1-2'}

    # The group occupies no hosts yet, so nothing is excluded up front.
    sched.group_hosts(mox.IgnoreArg(), 'cats').AndReturn([])

    def inc_launch_index1(*args, **kwargs):
        # Mirrors the scheduler bumping launch_index once per instance.
        request_spec1['instance_properties']['launch_index'] = (
            request_spec1['instance_properties']['launch_index'] + 1)

    # The scheduler is expected to tag system_metadata with the group.
    expected_metadata = {
        'system_metadata': {
            'system': 'metadata',
            'group': 'cats'
        }
    }
    driver.instance_update_db(
        fake_context, instance1_1['uuid'],
        extra_values=expected_metadata).WithSideEffects(
            inc_launch_index1).AndReturn(instance1_1)
    compute_rpcapi.ComputeAPI.run_instance(
        fake_context, host='host3', instance=instance1_1,
        requested_networks=None, injected_files=None,
        admin_password=None, is_first_time=None,
        request_spec=request_spec1,
        filter_properties=mox.IgnoreArg(), node='node3',
        legacy_bdm_in_spec=False)
    # Second group member must land on a different host/node.
    driver.instance_update_db(
        fake_context, instance1_2['uuid'],
        extra_values=expected_metadata).WithSideEffects(
            inc_launch_index1).AndReturn(instance1_2)
    compute_rpcapi.ComputeAPI.run_instance(
        fake_context, host='host4', instance=instance1_2,
        requested_networks=None, injected_files=None,
        admin_password=None, is_first_time=None,
        request_spec=request_spec1,
        filter_properties=mox.IgnoreArg(), node='node4',
        legacy_bdm_in_spec=False)
    self.mox.ReplayAll()

    sched.schedule_run_instance(fake_context, request_spec1, None, None,
                                None, None, filter_properties, False)