def prep_resize(self, context, image, request_spec, filter_properties,
                instance, instance_type, reservations):
    """Try to call schedule_prep_resize on the driver.

    On NoValidHost the instance is returned to ACTIVE and any quota
    reservations are rolled back; any other exception puts the instance
    into ERROR (also rolling back reservations) and is re-raised.
    """
    instance_uuid = instance['uuid']
    with compute_utils.EventReporter(context, conductor_api.LocalAPI(),
                                     'schedule', instance_uuid):
        try:
            return self.driver.schedule_prep_resize(
                context=context,
                image=image,
                request_spec=request_spec,
                filter_properties=filter_properties,
                instance=instance,
                instance_type=instance_type,
                reservations=reservations)
        except exception.NoValidHost as ex:
            # No host found: leave the instance usable rather than ERROR.
            updates = {'vm_state': vm_states.ACTIVE, 'task_state': None}
            self._set_vm_state_and_notify('prep_resize', updates,
                                          context, ex, request_spec)
            if reservations:
                QUOTAS.rollback(context, reservations)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                updates = {'vm_state': vm_states.ERROR, 'task_state': None}
                self._set_vm_state_and_notify('prep_resize', updates,
                                              context, ex, request_spec)
                if reservations:
                    QUOTAS.rollback(context, reservations)
def run_instance(self, context, request_spec, admin_password,
                 injected_files, requested_networks, is_first_time,
                 filter_properties, legacy_bdm_in_spec=True):
    """Try to call schedule_run_instance on the driver.

    Any failure puts the instances into ERROR; NoValidHost is swallowed
    after notifying, every other exception is re-raised.
    """
    instance_uuids = request_spec['instance_uuids']
    with compute_utils.EventReporter(context, conductor_api.LocalAPI(),
                                     'schedule', *instance_uuids):
        try:
            return self.driver.schedule_run_instance(
                context, request_spec, admin_password, injected_files,
                requested_networks, is_first_time, filter_properties,
                legacy_bdm_in_spec)
        except exception.NoValidHost as ex:
            # don't re-raise
            updates = {'vm_state': vm_states.ERROR, 'task_state': None}
            self._set_vm_state_and_notify('run_instance', updates,
                                          context, ex, request_spec)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                updates = {'vm_state': vm_states.ERROR,
                           'task_state': None}
                self._set_vm_state_and_notify('run_instance', updates,
                                              context, ex, request_spec)
def handle_schedule_error(context, ex, instance_uuid, request_spec):
    """Set the instance to ERROR after a scheduling failure and notify."""
    if not isinstance(ex, exception.NoValidHost):
        LOG.exception(_("Exception during scheduler.run_instance"))
    state = vm_states.ERROR.upper()
    LOG.warning(_('Setting instance to %s state.'), state,
                instance_uuid=instance_uuid)

    # update instance state and notify on the transition
    updates = {'vm_state': vm_states.ERROR, 'task_state': None}
    old_ref, new_ref = db.instance_update_and_get_original(
        context, instance_uuid, updates)
    notifications.send_update(context, old_ref, new_ref,
                              service="scheduler")
    compute_utils.add_instance_fault_from_exc(
        context, conductor_api.LocalAPI(), new_ref, ex, sys.exc_info())

    properties = request_spec.get('instance_properties', {})
    payload = dict(request_spec=request_spec,
                   instance_properties=properties,
                   instance_id=instance_uuid,
                   state=vm_states.ERROR,
                   method='run_instance',
                   reason=ex)
    notifier.get_notifier('scheduler').error(
        context, 'scheduler.run_instance', payload)
def prep_resize(self, context, image, request_spec, filter_properties,
                instance, instance_type, reservations):
    """Try to schedule a resize via select_destinations.

    On NoValidHost the instance keeps its current vm_state (defaulting
    to ACTIVE) and reservations are rolled back; other exceptions put it
    into ERROR, roll back reservations and are re-raised.
    """
    instance_uuid = instance['uuid']
    with compute_utils.EventReporter(context, conductor_api.LocalAPI(),
                                     'schedule', instance_uuid):
        try:
            request_spec['num_instances'] = len(
                request_spec['instance_uuids'])
            hosts = self.driver.select_destinations(
                context, request_spec, filter_properties)
            host_state = hosts[0]
            scheduler_utils.populate_filter_properties(filter_properties,
                                                       host_state)
            # context is not serializable
            filter_properties.pop('context', None)

            host = host_state['host']
            node = host_state['nodename']
            expected = ['metadata', 'system_metadata', 'info_cache',
                        'security_groups']
            inst_obj = instance_obj.Instance._from_db_object(
                context, instance_obj.Instance(), instance,
                expected_attrs=expected)
            self.compute_rpcapi.prep_resize(
                context, image, inst_obj, instance_type, host,
                reservations, request_spec=request_spec,
                filter_properties=filter_properties, node=node)
        except exception.NoValidHost as ex:
            # Preserve whatever state the instance was in before.
            vm_state = instance.get('vm_state', vm_states.ACTIVE)
            updates = {'vm_state': vm_state, 'task_state': None}
            self._set_vm_state_and_notify('prep_resize', updates,
                                          context, ex, request_spec)
            if reservations:
                QUOTAS.rollback(context, reservations)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                updates = {'vm_state': vm_states.ERROR,
                           'task_state': None}
                self._set_vm_state_and_notify('prep_resize', updates,
                                              context, ex, request_spec)
                if reservations:
                    QUOTAS.rollback(context, reservations)
def test_format_instance_mapping(self):
    """Make sure that _format_instance_mappings works.

    An instance with no root device gets the default mappings; one with
    a root device gets a mapping derived from its block device mappings.
    """
    ctxt = None
    instance_ref0 = objects.Instance(**{'id': 0,
                      'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85',
                      'root_device_name': None,
                      'default_ephemeral_device': None,
                      'default_swap_device': None})
    instance_ref1 = objects.Instance(**{'id': 0,
                      'uuid': 'b65cee2f-8c69-4aeb-be2f-f79742548fc2',
                      'root_device_name': '/dev/sda1',
                      'default_ephemeral_device': None,
                      'default_swap_device': None})

    def fake_bdm_get(ctxt, uuid, use_slave=False):
        return [fake_block_device.FakeDbBlockDeviceDict(
                    {'volume_id': 87654321,
                     'snapshot_id': None,
                     'no_device': None,
                     'source_type': 'volume',
                     'destination_type': 'volume',
                     'delete_on_termination': True,
                     'device_name': '/dev/sdh'}),
                fake_block_device.FakeDbBlockDeviceDict(
                    {'volume_id': None,
                     'snapshot_id': None,
                     'no_device': None,
                     'source_type': 'blank',
                     'destination_type': 'local',
                     'guest_format': 'swap',
                     'delete_on_termination': None,
                     'device_name': '/dev/sdc'}),
                fake_block_device.FakeDbBlockDeviceDict(
                    {'volume_id': None,
                     'snapshot_id': None,
                     'no_device': None,
                     'source_type': 'blank',
                     'destination_type': 'local',
                     'guest_format': None,
                     'delete_on_termination': None,
                     'device_name': '/dev/sdb'})]

    self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                   fake_bdm_get)

    expected = {'ami': 'sda1',
                'root': '/dev/sda1',
                'ephemeral0': '/dev/sdb',
                'swap': '/dev/sdc',
                'ebs0': '/dev/sdh'}

    # NOTE: a stray `conductor_api.LocalAPI()` call whose result was
    # discarded has been removed here; _format_instance_mapping no longer
    # takes a conductor argument (compare the legacy variant that did).
    self.assertEqual(base._format_instance_mapping(ctxt, instance_ref0),
                     block_device._DEFAULT_MAPPINGS)
    self.assertEqual(base._format_instance_mapping(ctxt, instance_ref1),
                     expected)
def test_format_instance_mapping(self):
    # Make sure that _format_instance_mappings works.
    ctxt = None
    instance_ref0 = {'id': 0,
                     'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85',
                     'root_device_name': None}
    instance_ref1 = {'id': 0,
                     'uuid': 'b65cee2f-8c69-4aeb-be2f-f79742548fc2',
                     'root_device_name': '/dev/sda1'}

    # Fixed BDM fixture returned regardless of the instance uuid.
    fake_bdms = [
        {'volume_id': 87654321, 'snapshot_id': None, 'no_device': None,
         'virtual_name': None, 'delete_on_termination': True,
         'device_name': '/dev/sdh'},
        {'volume_id': None, 'snapshot_id': None, 'no_device': None,
         'virtual_name': 'swap', 'delete_on_termination': None,
         'device_name': '/dev/sdc'},
        {'volume_id': None, 'snapshot_id': None, 'no_device': None,
         'virtual_name': 'ephemeral0', 'delete_on_termination': None,
         'device_name': '/dev/sdb'},
    ]

    def fake_bdm_get(ctxt, uuid):
        return fake_bdms

    self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                   fake_bdm_get)

    expected = {'ami': 'sda1',
                'root': '/dev/sda1',
                'ephemeral0': '/dev/sdb',
                'swap': '/dev/sdc',
                'ebs0': '/dev/sdh'}

    capi = conductor_api.LocalAPI()
    self.assertEqual(
        base._format_instance_mapping(capi, ctxt, instance_ref0),
        block_device._DEFAULT_MAPPINGS)
    self.assertEqual(
        base._format_instance_mapping(capi, ctxt, instance_ref1),
        expected)
def _set_vm_state_and_notify(self, method, updates, context, ex,
                             request_spec):
    """changes VM state and notifies."""
    # FIXME(comstud): Re-factor this somehow. Not sure this belongs in the
    # scheduler manager like this. We should make this easier.
    # run_instance only sends a request_spec, and an instance may or may
    # not have been created in the API (or scheduler) already. If it was
    # created, there's a 'uuid' set in the instance_properties of the
    # request_spec.
    # (littleidea): I refactored this a bit, and I agree
    # it should be easier :)
    # The refactoring could go further but trying to minimize changes
    # for essex timeframe

    # Pass explicit mappings instead of locals(): locals() silently ties
    # the format string to every name in scope and breaks on renames.
    LOG.warning(_("Failed to schedule_%(method)s: %(ex)s"),
                {'method': method, 'ex': ex})

    vm_state = updates['vm_state']
    properties = request_spec.get('instance_properties', {})
    # NOTE(vish): We shouldn't get here unless we have a catastrophic
    #             failure, so just set all instances to error. if uuid
    #             is not set, instance_uuids will be set to [None], this
    #             is solely to preserve existing behavior and can
    #             be removed along with the 'if instance_uuid:' if we can
    #             verify that uuid is always set.
    uuids = [properties.get('uuid')]
    for instance_uuid in request_spec.get('instance_uuids') or uuids:
        if instance_uuid:
            state = vm_state.upper()
            LOG.warning(_('Setting instance to %(state)s state.'),
                        {'state': state}, instance_uuid=instance_uuid)

            # update instance state and notify on the transition
            (old_ref, new_ref) = self.db.instance_update_and_get_original(
                context, instance_uuid, updates)
            notifications.send_update(context, old_ref, new_ref,
                                      service="scheduler")
            compute_utils.add_instance_fault_from_exc(
                context, conductor_api.LocalAPI(), new_ref, ex,
                sys.exc_info())

        # Notify even when no uuid is known (instance_uuid of None).
        payload = dict(request_spec=request_spec,
                       instance_properties=properties,
                       instance_id=instance_uuid,
                       state=vm_state,
                       method=method,
                       reason=ex)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.' + method, notifier.ERROR, payload)
def set_vm_state_and_notify(context, service, method, updates, ex,
                            request_spec, db):
    """changes VM state and notifies."""
    LOG.warning(_("Failed to %(service)s_%(method)s: %(ex)s"),
                {'service': service, 'method': method, 'ex': ex})

    vm_state = updates['vm_state']
    properties = request_spec.get('instance_properties', {})
    # NOTE(vish): We shouldn't get here unless we have a catastrophic
    #             failure, so just set all instances to error. if uuid
    #             is not set, instance_uuids will be set to [None], this
    #             is solely to preserve existing behavior and can
    #             be removed along with the 'if instance_uuid:' if we can
    #             verify that uuid is always set.
    uuids = [properties.get('uuid')]

    from nova.conductor import api as conductor_api
    conductor = conductor_api.LocalAPI()
    notifier = rpc.get_notifier(service)
    event_type = '%s.%s' % (service, method)

    instance_uuids = request_spec.get('instance_uuids') or uuids
    for instance_uuid in instance_uuids:
        if instance_uuid:
            state = vm_state.upper()
            LOG.warning(_('Setting instance to %s state.'), state,
                        instance_uuid=instance_uuid)

            # update instance state and notify on the transition
            old_ref, new_ref = db.instance_update_and_get_original(
                context, instance_uuid, updates)
            notifications.send_update(context, old_ref, new_ref,
                                      service=service)
            compute_utils.add_instance_fault_from_exc(
                context, conductor, new_ref, ex, sys.exc_info())

        # Notify even when instance_uuid is None (see NOTE above).
        payload = dict(request_spec=request_spec,
                       instance_properties=properties,
                       instance_id=instance_uuid,
                       state=vm_state,
                       method=method,
                       reason=ex)
        notifier.error(context, event_type, payload)
def test_all_allowed_keys(self):
    # Every allowed update key should pass through instance_update;
    # datetime fields get a datetime value, everything else a string.
    def fake_db_instance_update(self, *args, **kwargs):
        return None, None

    self.stubs.Set(db, 'instance_update_and_get_original',
                   fake_db_instance_update)

    ctxt = context.RequestContext('fake-user', 'fake-project')
    conductor = conductor_api.LocalAPI()
    updates = {
        key: (timeutils.utcnow()
              if key in conductor_manager.datetime_fields
              else 'foo')
        for key in conductor_manager.allowed_updates
    }
    conductor.instance_update(ctxt, 'fake-instance', **updates)
def setUp(self):
    # Exercise the conductor API in local (in-process) mode with a
    # direct database handle.
    super(ConductorLocalAPITestCase, self).setUp()
    self.conductor = conductor_api.LocalAPI()
    self.db = db
    # NOTE(review): presumably makes LocalAPI surface exceptions the way
    # the RPC client would, so both modes share one exception surface --
    # confirm against the helper's definition.
    self.stub_out_client_exceptions()
def setUp(self):
    # Exercise the conductor API in local (in-process) mode with a
    # direct database handle; no exception stubbing is applied here.
    super(ConductorLocalAPITestCase, self).setUp()
    self.conductor = conductor_api.LocalAPI()
    self.db = db