def wrapped_func(*args, **kwarg):
    """Invoke the wrapped ``fn``, emitting a notification before the call
    and an ERROR notification if it raises.

    The notification body records the request context (when present), the
    remaining positional arguments, and all keyword arguments.

    :raises Error: re-raised after the ERROR notification is sent.
    """
    body = {}
    body['args'] = []
    body['kwarg'] = {}
    original_args = args
    if len(args) >= 2:
        # args[0] is self (deliberately not recorded), args[1] is the
        # request context.
        #body['self'] = args[0]
        body['context'] = args[1]
        # BUG FIX: the original assigned args = args[3:] and then iterated
        # args[3:] again, slicing twice and silently dropping up to six
        # arguments.  Positional arguments after (self, context) start at
        # index 2.
        args = args[2:]
    for arg in args:
        body['args'].append(arg)
    for key in kwarg:
        body['kwarg'][key] = kwarg[key]
    api.notify(FLAGS.default_publisher_id, name,
               FLAGS.default_notification_level, body)
    ret = None
    try:
        ret = fn(*original_args, **kwarg)
    except Error as e:
        body['error'] = "%s" % e
        api.notify(FLAGS.default_publisher_id, name, 'ERROR', body)
        raise e
    return ret
def notify_about_instance_usage(context, instance, event_suffix,
                                network_info=None, system_metadata=None,
                                extra_usage_info=None, host=None):
    """Emit a 'compute.instance.<event_suffix>' INFO notification.

    :param event_suffix: Event type like "delete.start" or "exists"
    :param network_info: Networking information, if provided.
    :param system_metadata: system_metadata DB entries for the instance,
        if provided.
    :param extra_usage_info: Dictionary containing extra values to add or
        override in the notification.
    :param host: Compute host for the instance, if specified.  Default is
        FLAGS.host
    """
    host = host or FLAGS.host
    extra_usage_info = extra_usage_info or {}
    usage_info = _usage_from_instance(context, instance, network_info,
                                      system_metadata, **extra_usage_info)
    notifier_api.notify(context, 'compute.%s' % host,
                        'compute.instance.%s' % event_suffix,
                        notifier_api.INFO, usage_info)
def _send_instance_update_notification(context, instance, old_vm_state,
                                       old_task_state, new_vm_state,
                                       new_task_state, service=None,
                                       host=None):
    """Send 'compute.instance.update' to inform observers about an
    instance state transition."""
    payload = usage_from_instance(context, instance, None, None)
    payload.update({
        "old_state": old_vm_state,
        "state": new_vm_state,
        "old_task_state": old_task_state,
        "new_task_state": new_task_state,
    })
    # Audit period fields for the current period.
    audit_start, audit_end = audit_period_bounds(current_period=True)
    payload["audit_period_beginning"] = audit_start
    payload["audit_period_ending"] = audit_end
    # Bandwidth usage since the start of the audit period.
    payload["bandwidth"] = bandwidth_usage(instance, audit_start)
    # Default the publishing service to "compute" when not specified
    # (e.g. api/scheduler/compute).
    service = service or "compute"
    publisher_id = notifier_api.publisher_id(service, host)
    notifier_api.notify(context, publisher_id, 'compute.instance.update',
                        notifier_api.INFO, payload)
def notify_usage_exists(instance_ref, current_period=False):
    """Emit an 'exists' notification for an instance for usage auditing.

    Reports the last completed audit period unless ``current_period`` is
    True, in which case it reports from the end of the last period to now.
    """
    admin_context = context.get_admin_context()
    begin, end = utils.current_audit_period()
    if current_period:
        audit_start = end
        audit_end = utils.utcnow()
    else:
        audit_start = begin
        audit_end = end
    # Collect per-network bandwidth usage for the audit window.
    bw = {}
    for usage in db.bw_usage_get_by_instance(admin_context,
                                             instance_ref['id'],
                                             audit_start):
        bw[usage.network_label] = dict(bw_in=usage.bw_in,
                                       bw_out=usage.bw_out)
    usage_info = utils.usage_from_instance(
        instance_ref,
        audit_period_beginning=str(audit_start),
        audit_period_ending=str(audit_end),
        bandwidth=bw)
    notifier_api.notify('compute.%s' % FLAGS.host,
                        'compute.instance.exists',
                        notifier_api.INFO, usage_info)
def notify_usage_exists(instance_ref, current_period=False):
    """Emit an 'exists' usage-audit notification for an instance.

    Usage covers the last completed audit period unless
    ``current_period`` is True (then it covers the in-progress period).
    """
    admin_context = context.get_admin_context()
    begin, end = utils.current_audit_period()
    if current_period:
        audit_start, audit_end = end, utils.utcnow()
    else:
        audit_start, audit_end = begin, end
    # Per-network-label bandwidth totals inside the audit window.
    bw = {}
    rows = db.bw_usage_get_by_instance(admin_context, instance_ref['id'],
                                       audit_start)
    for row in rows:
        bw[row.network_label] = dict(bw_in=row.bw_in, bw_out=row.bw_out)
    usage_info = utils.usage_from_instance(
        instance_ref,
        audit_period_beginning=str(audit_start),
        audit_period_ending=str(audit_end),
        bandwidth=bw)
    notifier_api.notify('compute.%s' % FLAGS.host,
                        'compute.instance.exists',
                        notifier_api.INFO, usage_info)
def _set_instance_error(self, method, context, ex, *args, **kwargs):
    """Set the failed instance to ERROR and emit a scheduler error
    notification.  Only handles the run_instance method."""
    LOG.warning(_("Failed to schedule_%(method)s: %(ex)s") % locals())
    # FIXME(comstud): Re-factor this somehow.  Not sure this belongs in
    # the scheduler manager like this.  Needs to support more than
    # run_instance.
    if method != "run_instance":
        return
    # FIXME(comstud): We should make this easier.  run_instance only
    # sends a request_spec, and an instance may or may not have been
    # created in the API (or scheduler) already.  If it was created,
    # instance_properties carries its 'uuid'.
    request_spec = kwargs.get('request_spec', {})
    properties = request_spec.get('instance_properties', {})
    instance_uuid = properties.get('uuid', {})
    if instance_uuid:
        LOG.warning(_("Setting instance %(instance_uuid)s to "
                      "ERROR state.") % locals())
        db.instance_update(context, instance_uuid,
                           {'vm_state': vm_states.ERROR})
    payload = {'request_spec': request_spec,
               'instance_properties': properties,
               'instance_id': instance_uuid,
               'state': vm_states.ERROR,
               'method': method,
               'reason': ex}
    notifier.notify(notifier.publisher_id("scheduler"),
                    'scheduler.run_instance', notifier.ERROR, payload)
def _set_vm_state_and_notify(self, method, updates, context, ex, *args,
                             **kwargs):
    """Apply ``updates`` to the failed instance and emit a
    'scheduler.<method>' ERROR notification."""
    # FIXME(comstud): Re-factor this somehow.  Not sure this belongs in
    # the scheduler manager like this.  We should make this easier:
    # run_instance only sends a request_spec, and an instance may or may
    # not have been created in the API (or scheduler) already.  If it
    # was, instance_properties carries its 'uuid'.
    # (littleidea): I refactored this a bit, and I agree it should be
    # easier :)  The refactoring could go further but trying to minimize
    # changes for essex timeframe.
    LOG.warning(_("Failed to schedule_%(method)s: %(ex)s") % locals())
    vm_state = updates["vm_state"]
    request_spec = kwargs.get("request_spec", {})
    properties = request_spec.get("instance_properties", {})
    instance_uuid = properties.get("uuid", {})
    if instance_uuid:
        state = vm_state.upper()
        LOG.warning(_("Setting instance to %(state)s state."), locals(),
                    instance_uuid=instance_uuid)
        db.instance_update(context, instance_uuid, updates)
    payload = {"request_spec": request_spec,
               "instance_properties": properties,
               "instance_id": instance_uuid,
               "state": vm_state,
               "method": method,
               "reason": ex}
    notifier.notify(context, notifier.publisher_id("scheduler"),
                    "scheduler." + method, notifier.ERROR, payload)
def _set_instance_error(self, method, context, ex, *args, **kwargs):
    """Mark the instance ERROR after a failed run_instance schedule and
    notify observers."""
    LOG.warning(_("Failed to schedule_%(method)s: %(ex)s") % locals())
    # FIXME(comstud): Re-factor this somehow.  Not sure this belongs in
    # the scheduler manager like this.  Needs to support more than
    # run_instance.
    if method != "run_instance":
        return
    # FIXME(comstud): We should make this easier.  run_instance only
    # sends a request_spec; an instance may or may not already exist.
    # When it does, instance_properties carries its 'uuid'.
    request_spec = kwargs.get('request_spec', {})
    properties = request_spec.get('instance_properties', {})
    instance_uuid = properties.get('uuid', {})
    if instance_uuid:
        LOG.warning(
            _("Setting instance %(instance_uuid)s to "
              "ERROR state.") % locals())
        db.instance_update(context, instance_uuid,
                           {'vm_state': vm_states.ERROR})
    payload = dict(request_spec=request_spec,
                   instance_properties=properties,
                   instance_id=instance_uuid,
                   state=vm_states.ERROR,
                   method=method,
                   reason=ex)
    notifier.notify(notifier.publisher_id("scheduler"),
                    'scheduler.run_instance', notifier.ERROR, payload)
def notify_about_instance_usage(context, instance, event_suffix,
                                network_info=None, system_metadata=None,
                                extra_usage_info=None, host=None):
    """Publish a 'compute.instance.<event_suffix>' INFO notification.

    :param event_suffix: Event type like "delete.start" or "exists"
    :param network_info: Networking information, if provided.
    :param system_metadata: system_metadata DB entries for the instance,
        if provided.
    :param extra_usage_info: Dictionary containing extra values to add or
        override in the notification.
    :param host: Compute host for the instance, if specified.  Default is
        FLAGS.host
    """
    host = host or FLAGS.host
    extra_usage_info = extra_usage_info or {}
    usage_info = notifications.usage_from_instance(
        context, instance, network_info, system_metadata,
        **extra_usage_info)
    notifier_api.notify(context, 'compute.%s' % host,
                        'compute.instance.%s' % event_suffix,
                        notifier_api.INFO, usage_info)
def test_send_notifications_with_errors(self):
    """One working driver and one raising driver: each path counted once."""
    drivers = ['nova.notifier.no_op_notifier',
               'nova.notifier.log_notifier']
    self.flags(notification_driver='nova.notifier.list_notifier',
               list_notifier_drivers=drivers)
    notify('publisher_id', 'event_type',
           nova.notifier.api.WARN, dict(a=3))
    self.assertEqual(self.notify_count, 1)
    self.assertEqual(self.exception_count, 1)
def test_when_driver_fails_to_import(self):
    """Two unimportable drivers count as exceptions; the good one notifies."""
    drivers = ['nova.notifier.no_op_notifier',
               'nova.notifier.logo_notifier',
               'fdsjgsdfhjkhgsfkj']
    self.flags(notification_driver='nova.notifier.list_notifier',
               list_notifier_drivers=drivers)
    notify('publisher_id', 'event_type',
           nova.notifier.api.WARN, dict(a=3))
    self.assertEqual(self.exception_count, 2)
    self.assertEqual(self.notify_count, 1)
def test_when_driver_fails_to_import(self):
    """Broken driver names raise on import; the valid driver still fires."""
    self.flags(
        notification_driver='nova.notifier.list_notifier',
        list_notifier_drivers=['nova.notifier.no_op_notifier',
                               'nova.notifier.logo_notifier',
                               'fdsjgsdfhjkhgsfkj'])
    notify('publisher_id', 'event_type',
           nova.notifier.api.WARN, dict(a=3))
    self.assertEqual(self.exception_count, 2)
    self.assertEqual(self.notify_count, 1)
def test_send_notifications_successfully(self):
    """The same driver listed twice delivers the message twice."""
    driver = 'nova.notifier.no_op_notifier'
    self.flags(notification_driver='nova.notifier.list_notifier',
               list_notifier_drivers=[driver, driver])
    notify('publisher_id', 'event_type',
           nova.notifier.api.WARN, dict(a=3))
    self.assertEqual(self.notify_count, 2)
    self.assertEqual(self.exception_count, 0)
def test_send_notification(self):
    """notify() must route through the (stubbed) no_op_notifier."""
    self.notify_called = False

    def fake_notify(cls, *args):
        self.notify_called = True

    self.stubs.Set(nova.notifier.no_op_notifier, 'notify', fake_notify)
    notify('publisher_id', 'event_type',
           nova.notifier.api.WARN, dict(a=3))
    self.assertEqual(self.notify_called, True)
def alert_once(self): # TODO: dispose timeout worker here print "*" * 400 print "[WARNING]worker", self.worker_id, "is dead. email sendto admin" print "*" * 400 payload = dict() payload["host"] = FLAGS.my_ip payload["message"] = "kanyun-worker is dead" notifier.notify(notifier.publisher_id("compute"), "kanyun.worker", notifier.WARN, payload) self.alerted = True
def alert_once(self): # TODO: dispose timeout worker here print '*' * 400 print '[WARNING]worker', self.worker_id, "is dead. email sendto admin" print '*' * 400 payload = dict() payload['host'] = FLAGS.my_ip payload['message'] = 'kanyun-worker is dead' notifier.notify(notifier.publisher_id('compute'), 'kanyun.worker', notifier.WARN, payload) self.alerted = True
def test_rabbit_priority_queue(self):
    """Priority should be folded into the topic as '<topic>.<priority>'."""
    self.stubs.Set(nova.flags.FLAGS, "notification_driver",
                   "nova.notifier.rabbit_notifier")
    self.stubs.Set(nova.flags.FLAGS, "notification_topic", "testnotify")
    self.test_topic = None

    def capture_cast(context, topic, msg):
        self.test_topic = topic

    self.stubs.Set(nova.rpc, "cast", capture_cast)
    notify("publisher_id", "event_type", "DEBUG", dict(a=3))
    self.assertEqual(self.test_topic, "testnotify.debug")
def test_send_rabbit_notification(self):
    """The rabbit driver must publish via nova.rpc.notify."""
    self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
                   'nova.notifier.rabbit_notifier')
    self.mock_notify = False

    def fake_notify(cls, *args):
        self.mock_notify = True

    self.stubs.Set(nova.rpc, 'notify', fake_notify)
    notify('publisher_id', 'event_type',
           nova.notifier.api.WARN, dict(a=3))
    self.assertEqual(self.mock_notify, True)
def test_rabbit_priority_queue(self):
    """DEBUG priority maps to the 'testnotify.debug' topic."""
    self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
                   'nova.notifier.rabbit_notifier')
    self.stubs.Set(nova.flags.FLAGS, 'notification_topic', 'testnotify')
    self.test_topic = None

    def record_topic(context, topic, msg):
        self.test_topic = topic

    self.stubs.Set(nova.rpc, 'cast', record_topic)
    notify('publisher_id', 'event_type', 'DEBUG', dict(a=3))
    self.assertEqual(self.test_topic, 'testnotify.debug')
def test_send_notification(self):
    """notify() must invoke the stubbed no_op_notifier's notify."""
    self.notify_called = False

    def mock_notify(cls, *args):
        self.notify_called = True

    self.stubs.Set(nova.notifier.no_op_notifier, "notify", mock_notify)
    # Removed a dead local `class Mock(object): pass` that was never
    # referenced anywhere in the test.
    notify("publisher_id", "event_type",
           nova.notifier.api.WARN, dict(a=3))
    self.assertEqual(self.notify_called, True)
def _send_instance_update_notification(context, instance, old_vm_state,
                                       old_task_state, new_vm_state,
                                       new_task_state, service=None,
                                       host=None):
    """Send 'compute.instance.update' describing an instance state
    transition, including audit-period, bandwidth, and image metadata."""
    payload = usage_from_instance(context, instance, None, None)
    payload.update({
        "old_state": old_vm_state,
        "state": new_vm_state,
        "old_task_state": old_task_state,
        "new_task_state": new_task_state,
    })
    # Audit period fields for the current (in-progress) period.
    audit_start, audit_end = audit_period_bounds(current_period=True)
    payload["audit_period_beginning"] = audit_start
    payload["audit_period_ending"] = audit_end
    # Bandwidth usage since the audit period started.
    payload["bandwidth"] = bandwidth_usage(instance, audit_start)
    # Image metadata comes from system_metadata; a missing row is treated
    # as empty metadata rather than an error.
    try:
        system_metadata = db.instance_system_metadata_get(
            context, instance.uuid)
    except exception.NotFound:
        system_metadata = {}
    payload["image_meta"] = image_meta(system_metadata)
    # Default the publishing service (e.g. api/scheduler/compute) to
    # "compute" when not specified.
    service = service or "compute"
    publisher_id = notifier_api.publisher_id(service, host)
    notifier_api.notify(context, publisher_id, 'compute.instance.update',
                        notifier_api.INFO, payload)
def test_verify_message_format(self):
    """A test to ensure changing the message format is prohibitively
    annoying."""
    def message_assert(message):
        expected = [('publisher_id', 'publisher_id'),
                    ('event_type', 'event_type'),
                    ('priority', 'WARN'),
                    ('payload', dict(a=3))]
        for key, value in expected:
            self.assertEqual(message[key], value)
        self.assertTrue(len(message['message_id']) > 0)
        self.assertTrue(len(message['timestamp']) > 0)

    self.stubs.Set(nova.notifier.no_op_notifier, 'notify',
                   message_assert)
    notify('publisher_id', 'event_type',
           nova.notifier.api.WARN, dict(a=3))
def test_send_rabbit_notification(self):
    """The rabbit driver must publish via nova.rpc.cast."""
    self.stubs.Set(nova.flags.FLAGS, "notification_driver",
                   "nova.notifier.rabbit_notifier")
    self.mock_cast = False

    def mock_cast(cls, *args):
        self.mock_cast = True

    # Removed a dead local `class Mock(object): pass` that was never
    # referenced anywhere in the test.
    self.stubs.Set(nova.rpc, "cast", mock_cast)
    notify("publisher_id", "event_type",
           nova.notifier.api.WARN, dict(a=3))
    self.assertEqual(self.mock_cast, True)
def notify_about_volume_usage(context, volume, event_suffix,
                              extra_usage_info=None, host=None):
    """Emit a 'volume.<event_suffix>' INFO notification for a volume.

    ``host`` defaults to FLAGS.host; ``extra_usage_info`` values are
    merged into the usage payload.
    """
    host = host or FLAGS.host
    extra_usage_info = extra_usage_info or {}
    usage_info = _usage_from_volume(context, volume, **extra_usage_info)
    notifier_api.notify(context, 'volume.%s' % host,
                        'volume.%s' % event_suffix,
                        notifier_api.INFO, usage_info)
def test_rabbit_priority_queue(self):
    """DEBUG notifications land on '<first topic>.debug'."""
    self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
                   'nova.notifier.rabbit_notifier')
    self.stubs.Set(nova.flags.FLAGS, 'notification_topics',
                   ['testnotify', ])
    self.test_topic = None

    def capture_topic(context, topic, msg):
        self.test_topic = topic

    self.stubs.Set(nova.rpc, 'notify', capture_topic)
    notify('publisher_id', 'event_type', 'DEBUG', dict(a=3))
    self.assertEqual(self.test_topic, 'testnotify.debug')
def notify_about_instance_usage(context, instance, event_suffix,
                                network_info=None, extra_usage_info=None,
                                host=None):
    """Emit a 'compute.instance.<event_suffix>' INFO notification.

    ``host`` defaults to FLAGS.host; ``extra_usage_info`` values are
    merged into the usage payload.
    """
    host = host or FLAGS.host
    extra_usage_info = extra_usage_info or {}
    usage_info = _usage_from_instance(context, instance,
                                      network_info=network_info,
                                      **extra_usage_info)
    notifier_api.notify(context, 'compute.%s' % host,
                        'compute.instance.%s' % event_suffix,
                        notifier_api.INFO, usage_info)
def test_send_rabbit_notification(self):
    """The rabbit driver must publish via nova.rpc.cast."""
    self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
                   'nova.notifier.rabbit_notifier')
    self.mock_cast = False

    def mock_cast(cls, *args):
        self.mock_cast = True

    # Removed a dead local `class Mock(object): pass` that was never
    # referenced anywhere in the test.
    self.stubs.Set(nova.rpc, 'cast', mock_cast)
    notify('publisher_id', 'event_type',
           nova.notifier.api.WARN, dict(a=3))
    self.assertEqual(self.mock_cast, True)
def notify_about_volume_usage(context, volume, event_suffix,
                              extra_usage_info=None, host=None):
    """Publish a 'volume.<event_suffix>' INFO notification."""
    if not host:
        # Fall back to this node's configured hostname.
        host = FLAGS.host
    if not extra_usage_info:
        extra_usage_info = {}
    payload = _usage_from_volume(context, volume, **extra_usage_info)
    publisher = 'volume.%s' % host
    event_type = 'volume.%s' % event_suffix
    notifier_api.notify(context, publisher, event_type,
                        notifier_api.INFO, payload)
def test_rabbit_priority_queue(self):
    """DEBUG notifications are published on '<first topic>.debug'."""
    flags.DECLARE('notification_topics', 'nova.notifier.rabbit_notifier')
    self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
                   'nova.notifier.rabbit_notifier')
    self.stubs.Set(nova.flags.FLAGS, 'notification_topics',
                   ['testnotify', ])
    self.test_topic = None

    def capture_topic(context, topic, msg):
        self.test_topic = topic

    self.stubs.Set(nova.openstack.common.rpc, 'notify', capture_topic)
    notifier_api.notify(ctxt, 'publisher_id', 'event_type', 'DEBUG',
                        dict(a=3))
    self.assertEqual(self.test_topic, 'testnotify.debug')
def schedule_run_instance(self, context, request_spec, reservations,
                          *args, **kwargs):
    """Provision instances for nova.compute.api.

    Builds a plan (a list of WeightedHosts), provisions on the popped
    hosts in order, and returns the list of created instances.
    """
    elevated = context.elevated()
    num_instances = request_spec.get('num_instances', 1)
    LOG.debug(
        _("Attempting to build %(num_instances)d instance(s)") % locals())
    payload = dict(request_spec=request_spec)
    notifier.notify(context, notifier.publisher_id("scheduler"),
                    'scheduler.run_instance.start', notifier.INFO,
                    payload)
    weighted_hosts = self._schedule(context, "compute", request_spec,
                                    *args, **kwargs)
    if not weighted_hosts:
        raise exception.NoValidHost(reason="")
    # NOTE(comstud): filter_properties holds an RpcContext that cannot be
    # serialized, so it must not be passed through to the compute call.
    kwargs.pop('filter_properties', None)
    instances = []
    for index in xrange(num_instances):
        if not weighted_hosts:
            break
        target = weighted_hosts.pop(0)
        request_spec['instance_properties']['launch_index'] = index
        created = self._provision_resource(elevated, target, request_spec,
                                           reservations, kwargs)
        if created:
            instances.append(created)
    notifier.notify(context, notifier.publisher_id("scheduler"),
                    'scheduler.run_instance.end', notifier.INFO, payload)
    return instances
def schedule_run_instance(self, context, request_spec, reservations,
                          *args, **kwargs):
    """Provision instances for nova.compute.api.

    First builds a plan (a list of WeightedHosts), then provisions one
    instance per host in order.  Returns the created instances.
    """
    elevated = context.elevated()
    num_instances = request_spec.get('num_instances', 1)
    LOG.debug(_("Attempting to build %(num_instances)d instance(s)")
              % locals())
    payload = dict(request_spec=request_spec)
    publisher = notifier.publisher_id("scheduler")
    notifier.notify(context, publisher,
                    'scheduler.run_instance.start', notifier.INFO,
                    payload)
    weighted_hosts = self._schedule(context, "compute", request_spec,
                                    *args, **kwargs)
    if not weighted_hosts:
        raise exception.NoValidHost(reason="")
    # NOTE(comstud): Make sure we do not pass filter_properties through:
    # it contains an RpcContext that cannot be serialized.
    kwargs.pop('filter_properties', None)
    instances = []
    for launch_index in xrange(num_instances):
        if not weighted_hosts:
            break
        chosen = weighted_hosts.pop(0)
        request_spec['instance_properties']['launch_index'] = launch_index
        result = self._provision_resource(elevated, chosen, request_spec,
                                          reservations, kwargs)
        if result:
            instances.append(result)
    notifier.notify(context, publisher,
                    'scheduler.run_instance.end', notifier.INFO, payload)
    return instances
def test_verify_message_format(self):
    """A test to ensure changing the message format is prohibitively
    annoying."""
    def message_assert(message):
        checks = [("publisher_id", "publisher_id"),
                  ("event_type", "event_type"),
                  ("priority", "WARN"),
                  ("payload", dict(a=3))]
        for field, expected in checks:
            self.assertEqual(message[field], expected)
        self.assertTrue(len(message["message_id"]) > 0)
        self.assertTrue(len(message["timestamp"]) > 0)

    self.stubs.Set(nova.notifier.no_op_notifier, "notify",
                   message_assert)
    notify("publisher_id", "event_type",
           nova.notifier.api.WARN, dict(a=3))
def notify_usage_exists(instance_ref, current_period=False):
    """Emit an 'exists' usage-audit notification for an instance.

    Usage covers the last completed audit period unless
    ``current_period`` is True.  Bandwidth rows are looked up by the
    instance's VIF MAC addresses and keyed by network label.
    """
    admin_context = context.get_admin_context(read_deleted='yes')
    begin, end = utils.current_audit_period()
    if current_period:
        audit_start = end
        audit_end = utils.utcnow()
    else:
        audit_start = begin
        audit_end = end
    # Prefer cached network info; fall back to the network API.
    info_cache = instance_ref.get('info_cache')
    if info_cache and info_cache.get('network_info'):
        nw_info = network_model.NetworkInfo.hydrate(
            info_cache['network_info'])
    else:
        nw_info = network.API().get_instance_nw_info(admin_context,
                                                     instance_ref)
    macs = [vif['address'] for vif in nw_info]
    bw = {}
    for row in db.bw_usage_get_by_macs(admin_context, macs, audit_start):
        # Resolve the usage row's MAC back to its network label; keep a
        # recognizable placeholder when no VIF matches.
        label = 'net-name-not-found-%s' % row['mac']
        for vif in nw_info:
            if vif['address'] == row['mac']:
                label = vif['network']['label']
                break
        bw[label] = dict(bw_in=row.bw_in, bw_out=row.bw_out)
    usage_info = utils.usage_from_instance(
        instance_ref,
        audit_period_beginning=str(audit_start),
        audit_period_ending=str(audit_end),
        bandwidth=bw)
    notifier_api.notify('compute.%s' % FLAGS.host,
                        'compute.instance.exists',
                        notifier_api.INFO, usage_info)
def bless_instance(self, context, instance_uuid, migration_url=None):
    """ Construct the blessed instance, with the uuid instance_uuid. If
    migration_url is specified then bless will ensure a memory server is
    available at the given migration url. """
    LOG.debug(_("bless instance called: instance_uuid=%s, migration_url=%s"),
              instance_uuid, migration_url)
    instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
    if migration_url:
        # Migration path: bless this instance directly; no start/end
        # notifications are emitted for migrations.
        # Tweak only this instance directly.
        source_instance_ref = instance_ref
        migration = True
    else:
        # Normal bless: notify observers and resolve the source instance.
        usage_info = utils.usage_from_instance(instance_ref)
        notifier.notify('gridcentric.%s' % self.host,
                        'gridcentric.instance.bless.start',
                        notifier.INFO, usage_info)
        source_instance_ref = self._get_source_instance(context,
                                                        instance_uuid)
        migration = False
    # Mark the instance as building while the bless is in progress.
    self._instance_update(context, instance_ref.id,
                          vm_state=vm_states.BUILDING)
    try:
        # Create a new 'blessed' VM with the given name.
        name, migration_url, blessed_files = self.vms_conn.bless(context,
            source_instance_ref.name,
            instance_ref,
            migration_url=migration_url,
            use_image_service=FLAGS.gridcentric_use_image_service)
        if not(migration):
            # End-of-bless notification pairs with the .start above.
            usage_info = utils.usage_from_instance(instance_ref)
            notifier.notify('gridcentric.%s' % self.host,
                            'gridcentric.instance.bless.end',
                            notifier.INFO, usage_info)
        # NOTE(review): reconstructed from a collapsed source line — this
        # state update appears to run for both the normal and migration
        # paths; confirm placement against the original file.
        self._instance_update(context, instance_ref.id,
                              vm_state="blessed", task_state=None,
                              launched_at=utils.utcnow())
    except Exception, e:
        # On any failure, record the error state and bail out quietly;
        # the exception is logged, not re-raised.
        LOG.debug(_("Error during bless %s: %s"),
                  str(e), traceback.format_exc())
        self._instance_update(context, instance_ref.id,
                              vm_state=vm_states.ERROR, task_state=None)
        # Short-circuit, nothing to be done.
        return
def notify_usage_exists(instance_ref, current_period=False):
    """Emit an 'exists' usage-audit notification for an instance.

    Usage covers the last completed audit period unless
    ``current_period`` is True.  Bandwidth rows are fetched by instance
    id and keyed by network label via the instance's network info.
    """
    admin_context = context.get_admin_context(read_deleted='yes')
    begin, end = utils.current_audit_period()
    if current_period:
        audit_start, audit_end = end, utils.utcnow()
    else:
        audit_start, audit_end = begin, end
    # Prefer cached network info; fall back to the network API.
    cache = instance_ref.get('info_cache')
    if cache and cache.get('network_info'):
        nw_info = network_model.NetworkInfo.hydrate(cache['network_info'])
    else:
        nw_info = network.API().get_instance_nw_info(admin_context,
                                                     instance_ref)
    bw = {}
    rows = db.bw_usage_get_by_instance(admin_context, instance_ref['id'],
                                       audit_start)
    for row in rows:
        # Map the row's MAC to a network label, keeping a recognizable
        # placeholder when no VIF matches.
        label = 'net-name-not-found-%s' % row['mac']
        for vif in nw_info:
            if vif['address'] == row['mac']:
                label = vif['network']['label']
                break
        bw[label] = dict(bw_in=row.bw_in, bw_out=row.bw_out)
    usage_info = utils.usage_from_instance(
        instance_ref,
        audit_period_beginning=str(audit_start),
        audit_period_ending=str(audit_end),
        bandwidth=bw)
    notifier_api.notify('compute.%s' % FLAGS.host,
                        'compute.instance.exists',
                        notifier_api.INFO, usage_info)
def _provision_resource(self, context, weighted_host, request_spec,
                        kwargs):
    """Create the requested resource in this Zone."""
    instance = self.create_instance_db_entry(context, request_spec)
    payload = {'request_spec': request_spec,
               'weighted_host': weighted_host.to_dict(),
               'instance_id': instance['uuid']}
    notifier.notify(notifier.publisher_id("scheduler"),
                    'scheduler.run_instance.scheduled', notifier.INFO,
                    payload)
    driver.cast_to_compute_host(context, weighted_host.host_state.host,
                                'run_instance',
                                instance_uuid=instance['uuid'], **kwargs)
    inst = driver.encode_instance(instance, local=True)
    # Drop the uuid so a subsequent create_instance_db_entry call makes a
    # fresh row instead of assuming this one already exists.
    del request_spec['instance_properties']['uuid']
    return inst
def _set_vm_state_and_notify(self, method, updates, context, ex, *args,
                             **kwargs):
    """Apply ``updates`` to the failed instance, publish the transition,
    and emit a 'scheduler.<method>' ERROR notification."""
    # FIXME(comstud): Re-factor this somehow.  Not sure this belongs in
    # the scheduler manager like this.  We should make this easier:
    # run_instance only sends a request_spec, and an instance may or may
    # not have been created in the API (or scheduler) already.  If it
    # was, instance_properties carries its 'uuid'.
    # (littleidea): I refactored this a bit, and I agree it should be
    # easier :)  The refactoring could go further but trying to minimize
    # changes for essex timeframe.
    LOG.warning(_("Failed to schedule_%(method)s: %(ex)s") % locals())
    vm_state = updates['vm_state']
    request_spec = kwargs.get('request_spec', {})
    properties = request_spec.get('instance_properties', {})
    instance_uuid = properties.get('uuid', {})
    if instance_uuid:
        state = vm_state.upper()
        LOG.warning(_('Setting instance to %(state)s state.'), locals(),
                    instance_uuid=instance_uuid)
        # Update instance state and notify on the transition.
        (old_ref, new_ref) = db.instance_update_and_get_original(
            context, instance_uuid, updates)
        notifications.send_update(context, old_ref, new_ref,
                                  service="scheduler")
    payload = {'request_spec': request_spec,
               'instance_properties': properties,
               'instance_id': instance_uuid,
               'state': vm_state,
               'method': method,
               'reason': ex}
    notifier.notify(context, notifier.publisher_id("scheduler"),
                    'scheduler.' + method, notifier.ERROR, payload)
def _provision_resource(self, context, weighted_host, request_spec,
                        reservations, kwargs):
    """Create the requested resource in this Zone."""
    instance = self.create_instance_db_entry(context, request_spec,
                                             reservations)
    payload = {'request_spec': request_spec,
               'weighted_host': weighted_host.to_dict(),
               'instance_id': instance['uuid']}
    notifier.notify(context, notifier.publisher_id("scheduler"),
                    'scheduler.run_instance.scheduled', notifier.INFO,
                    payload)
    driver.cast_to_compute_host(context, weighted_host.host_state.host,
                                'run_instance',
                                instance_uuid=instance['uuid'], **kwargs)
    inst = driver.encode_instance(instance, local=True)
    # Drop the uuid so a subsequent create_instance_db_entry call makes a
    # fresh row instead of assuming this one already exists.
    del request_spec['instance_properties']['uuid']
    return inst
def _send_instance_update_notification(context, instance, old_vm_state,
                                       old_task_state, new_vm_state,
                                       new_task_state, host=None):
    """Send 'compute.instance.update' describing an instance state
    transition, including audit-period, bandwidth, and image metadata."""
    payload = usage_from_instance(context, instance, None, None)
    payload.update({
        "old_state": old_vm_state,
        "state": new_vm_state,
        "old_task_state": old_task_state,
        "new_task_state": new_task_state,
    })
    # Audit period fields for the current (in-progress) period.
    audit_start, audit_end = audit_period_bounds(current_period=True)
    payload["audit_period_beginning"] = audit_start
    payload["audit_period_ending"] = audit_end
    # Bandwidth usage since the audit period started.
    payload["bandwidth"] = bandwidth_usage(instance, audit_start)
    # Image metadata comes from system_metadata; a missing row is treated
    # as empty metadata rather than an error.
    try:
        system_metadata = db.instance_system_metadata_get(
            context, instance.uuid)
    except exception.NotFound:
        system_metadata = {}
    payload["image_meta"] = image_meta(system_metadata)
    # Publish from this node unless a host was given.
    host = host or FLAGS.host
    notifier_api.notify(context, host, 'compute.instance.update',
                        notifier_api.INFO, payload)
def discard_instance(self, context, instance_uuid):
    """ Discards an instance so that and no further instances maybe be
    launched from it. """
    LOG.debug(_("discard instance called: instance_uuid=%s"),
              instance_uuid)
    context.elevated()
    # Grab the DB representation for the VM.
    instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
    start_usage = utils.usage_from_instance(instance_ref)
    notifier.notify('gridcentric.%s' % self.host,
                    'gridcentric.instance.discard.start', notifier.INFO,
                    start_usage)
    metadata = self._instance_metadata(context, instance_uuid)
    image_refs = self._extract_image_refs(metadata)
    # Call discard in the backend.
    self.vms_conn.discard(
        context, instance_ref.name,
        use_image_service=FLAGS.gridcentric_use_image_service,
        image_refs=image_refs)
    # Update the instance metadata (for completeness).
    metadata['blessed'] = False
    self._instance_metadata_update(context, instance_uuid, metadata)
    # Remove the instance.
    self._instance_update(context, instance_uuid,
                          vm_state=vm_states.DELETED, task_state=None,
                          terminated_at=timeutils.utcnow())
    self.db.instance_destroy(context, instance_uuid)
    end_usage = utils.usage_from_instance(instance_ref)
    notifier.notify('gridcentric.%s' % self.host,
                    'gridcentric.instance.discard.end', notifier.INFO,
                    end_usage)
'disk': None}}, timeout=FLAGS.gridcentric_compute_timeout) self.vms_conn.launch(context, source_instance_ref.name, str(target), instance_ref, network_info, migration_url=migration_url, use_image_service=FLAGS.gridcentric_use_image_service, image_refs=image_refs, params=params) # Perform our database update. if migration_url == None: usage_info = utils.usage_from_instance(instance_ref, network_info=network_info) notifier.notify('gridcentric.%s' % self.host, 'gridcentric.instance.launch.end', notifier.INFO, usage_info) self._instance_update(context, instance_ref['uuid'], vm_state=vm_states.ACTIVE, host=self.host, launched_at=utils.utcnow(), task_state=None) except Exception, e: LOG.debug(_("Error during launch %s: %s"), str(e), traceback.format_exc()) self._instance_update(context, instance_ref['uuid'], vm_state=vm_states.ERROR, task_state=None) # Raise the error up. raise e
def emit(self, record):
    """Publish the formatted log record as an ERROR notification, with
    the message split into a list of lines."""
    error_lines = self.format(record).split('\n')
    api.notify('nova.error.publisher', 'error_notification', api.ERROR,
               dict(error=error_lines))
def launch_instance(self, context, instance_uuid, params={}, migration_url=None): """ Construct the launched instance, with uuid instance_uuid. If migration_url is not none then the instance will be launched using the memory server at the migration_url """ LOG.debug(_("Launching new instance: instance_uuid=%s, migration_url=%s"), instance_uuid, migration_url) # Grab the DB representation for the VM. instance_ref = self.db.instance_get_by_uuid(context, instance_uuid) if migration_url: # Just launch the given blessed instance. source_instance_ref = instance_ref # Load the old network info. network_info = self.network_api.get_instance_nw_info(context, instance_ref) # Update the instance state to be migrating. This will be set to # active again once it is completed in do_launch() as per all # normal launched instances. self._instance_update(context, instance_ref['uuid'], vm_state=vm_states.MIGRATING, task_state=task_states.SPAWNING, host=self.host) instance_ref['host'] = self.host else: usage_info = utils.usage_from_instance(instance_ref) notifier.notify('gridcentric.%s' % self.host, 'gridcentric.instance.launch.start', notifier.INFO, usage_info) # Create a new launched instance. source_instance_ref = self._get_source_instance(context, instance_uuid) if not FLAGS.stub_network: # TODO(dscannell): We need to set the is_vpn parameter correctly. # This information might come from the instance, or the user might # have to specify it. Also, we might be able to convert this to a # cast because we are not waiting on any return value. 
LOG.debug(_("Making call to network for launching instance=%s"), \ instance_ref.name) self._instance_update(context, instance_ref.id, vm_state=vm_states.BUILDING, task_state=task_states.NETWORKING, host=self.host) instance_ref['host'] = self.host is_vpn = False requested_networks = None try: network_info = self.network_api.allocate_for_instance(context, instance_ref, vpn=is_vpn, requested_networks=requested_networks) except Exception, e: LOG.debug(_("Error during network allocation: %s"), str(e)) self._instance_update(context, instance_ref['uuid'], vm_state=vm_states.ERROR, task_state=None) # Short-circuit, can't proceed. return LOG.debug(_("Made call to network for launching instance=%s, network_info=%s"), instance_ref.name, network_info) else: