Example #1
def notify_about_instance_usage(
    context, instance, event_suffix, network_info=None, system_metadata=None, extra_usage_info=None, host=None
):
    """
    Send a notification about an instance.

    :param event_suffix: Event type like "delete.start" or "exists"
    :param network_info: Networking information, if provided.
    :param system_metadata: system_metadata DB entries for the instance,
        if provided.
    :param extra_usage_info: Dictionary containing extra values to add or
        override in the notification.
    :param host: Compute host for the instance, if specified.  Default is
        FLAGS.host
    """

    if not host:
        host = FLAGS.host

    if not extra_usage_info:
        extra_usage_info = {}

    usage_info = notifications.usage_from_instance(context, instance, network_info, system_metadata, **extra_usage_info)

    notifier_api.notify(
        context, "compute.%s" % host, "compute.instance.%s" % event_suffix, notifier_api.INFO, usage_info
    )
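A minimal usage sketch for the helper above; `ctxt`, `instance`, the event suffixes, the extra payload key and the host name are placeholders rather than values taken from the original code:

# Hypothetical call sites around a delete / resize path.
notify_about_instance_usage(ctxt, instance, "delete.start")

# Add or override payload fields and target an explicit compute host.
notify_about_instance_usage(
    ctxt, instance, "resize.end",
    extra_usage_info={"new_instance_type": "m1.large"},
    host="compute-02")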
Example #2
def handle_schedule_error(context, ex, instance_uuid, request_spec):
    if not isinstance(ex, exception.NoValidHost):
        LOG.exception(_("Exception during scheduler.run_instance"))
    compute_utils.add_instance_fault_from_exc(context,
            instance_uuid, ex, sys.exc_info())
    state = vm_states.ERROR.upper()
    LOG.warning(_('Setting instance to %(state)s state.'),
                locals(), instance_uuid=instance_uuid)

    # update instance state and notify on the transition
    (old_ref, new_ref) = db.instance_update_and_get_original(context,
            instance_uuid, {'vm_state': vm_states.ERROR,
                            'task_state': None})
    notifications.send_update(context, old_ref, new_ref,
            service="scheduler")

    properties = request_spec.get('instance_properties', {})
    payload = dict(request_spec=request_spec,
                   instance_properties=properties,
                   instance_id=instance_uuid,
                   state=vm_states.ERROR,
                   method='run_instance',
                   reason=ex)

    notifier.notify(context, notifier.publisher_id("scheduler"),
                    'scheduler.run_instance', notifier.ERROR, payload)
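As a rough sketch of where a helper like this is typically invoked (the schedule_run_instance example further down does essentially this), with the scheduler object and its arguments assumed:

# Hypothetical per-instance scheduling loop.
try:
    self._provision_resource(context, weighed_host, request_spec,
                             filter_properties, requested_networks,
                             injected_files, admin_password, is_first_time,
                             instance_uuid=instance_uuid)
except Exception as ex:
    # Set the instance to ERROR and emit scheduler.run_instance with
    # ERROR priority, as implemented above.
    handle_schedule_error(context, ex, instance_uuid, request_spec)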
Example #3
    def _provision_resource(self, context, weighted_host, request_spec,
            reservations, filter_properties, requested_networks,
            injected_files, admin_password, is_first_time):
        """Create the requested resource in this Zone."""
        instance = self.create_instance_db_entry(context, request_spec,
                                                 reservations)

        # Add a retry entry for the selected compute host:
        self._add_retry_host(filter_properties, weighted_host.host_state.host)

        payload = dict(request_spec=request_spec,
                       weighted_host=weighted_host.to_dict(),
                       instance_id=instance['uuid'])
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.scheduled', notifier.INFO,
                        payload)

        updated_instance = driver.instance_update_db(context, instance['uuid'],
                weighted_host.host_state.host)

        self.compute_rpcapi.run_instance(context, instance=updated_instance,
                host=weighted_host.host_state.host,
                request_spec=request_spec, filter_properties=filter_properties,
                requested_networks=requested_networks,
                injected_files=injected_files,
                admin_password=admin_password, is_first_time=is_first_time)

        inst = driver.encode_instance(updated_instance, local=True)

        # So if another instance is created, create_instance_db_entry will
        # actually create a new entry, instead of assuming it's been created
        # already
        del request_spec['instance_properties']['uuid']

        return inst
Example #4
        def do_associate():
            # associate floating ip
            fixed = self.db.floating_ip_fixed_ip_associate(context,
                                                           floating_address,
                                                           fixed_address,
                                                           self.host)
            if not fixed:
                # NOTE(vish): ip was already associated
                return
            try:
                # gogo driver time
                self.l3driver.add_floating_ip(floating_address, fixed_address,
                        interface, fixed['network'])
            except exception.ProcessExecutionError as e:
                self.db.floating_ip_disassociate(context, floating_address)
                if "Cannot find device" in str(e):
                    LOG.error(_('Interface %(interface)s not found'), locals())
                    raise exception.NoFloatingIpInterface(interface=interface)

            payload = dict(project_id=context.project_id,
                           instance_id=instance_uuid,
                           floating_ip=floating_address)
            notifier.notify(context,
                            notifier.publisher_id("network"),
                            'network.floating_ip.associate',
                        notifier.INFO, payload=payload)
Example #5
    def _notify_ha_instance_failure(self, ctxt, instance, level):
        """
        notify ha module that some ha instance is failure.

        :param instance: instance info got from db.
        """
        uuid = instance['uuid']
        try:
            if level == 0:
                method = 'reboot'
            elif level == 1:
                method = 'rebuild'
            elif level < 5:
                method = 'move'
            else:
                return
            LOG.info(_('notify ha(level:%(level)s) %(method)s'
                       ' instance %(uuid)s'), locals())
            payload = dict(instance, method=method)
            notifier.notify(ctxt,
                            notifier.publisher_id("monitor"),
                            'monitor.vm.down',
                            notifier.ERROR, payload)
        except Exception as ex:
            LOG.exception(_('failed to notify ha module that instance'
                            ' %(uuid)s is abnormal, ex: %(ex)s'),
                          locals())
Example #6
def _send_instance_update_notification(context, instance, old_vm_state=None,
            old_task_state=None, new_vm_state=None, new_task_state=None,
            service="compute", host=None):
    """Send 'compute.instance.update' notification to inform observers
    about instance state changes"""

    payload = info_from_instance(context, instance, None, None)

    if not new_vm_state:
        new_vm_state = instance["vm_state"]
    if not new_task_state:
        new_task_state = instance["task_state"]

    states_payload = {
        "old_state": old_vm_state,
        "state": new_vm_state,
        "old_task_state": old_task_state,
        "new_task_state": new_task_state,
    }

    payload.update(states_payload)

    # add audit fields:
    (audit_start, audit_end) = audit_period_bounds(current_period=True)
    payload["audit_period_beginning"] = audit_start
    payload["audit_period_ending"] = audit_end

    # add bw usage info:
    bw = bandwidth_usage(instance, audit_start)
    payload["bandwidth"] = bw

    publisher_id = notifier_api.publisher_id(service, host)

    notifier_api.notify(context, publisher_id, 'compute.instance.update',
            notifier_api.INFO, payload)
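A hedged sketch of reporting a vm_state/task_state transition through this helper; the state values, service and host are illustrative only:

# Hypothetical transition while an instance starts rebuilding.
_send_instance_update_notification(
    context, instance,
    old_vm_state="active", old_task_state=None,
    new_vm_state="active", new_task_state="rebuilding",
    service="compute", host="compute-01")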
Example #7
    def _provision_resource(self, context, weighed_host, request_spec,
            filter_properties, requested_networks, injected_files,
            admin_password, is_first_time, instance_uuid=None):
        """Create the requested resource in this Zone."""
        payload = dict(request_spec=request_spec,
                       weighted_host=weighed_host.to_dict(),
                       instance_id=instance_uuid)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.scheduled', notifier.INFO,
                        payload)

        # TODO(NTTdocomo): Combine the next two updates into one
        driver.db_instance_node_set(context,
                instance_uuid, weighed_host.obj.nodename)
        updated_instance = driver.instance_update_db(context,
                instance_uuid)

        self._post_select_populate_filter_properties(filter_properties,
                weighed_host.obj)

        self.compute_rpcapi.run_instance(context, instance=updated_instance,
                host=weighed_host.obj.host,
                request_spec=request_spec, filter_properties=filter_properties,
                requested_networks=requested_networks,
                injected_files=injected_files,
                admin_password=admin_password, is_first_time=is_first_time)
Example #8
    def _provision_resource(self, context, weighted_host, request_spec,
            filter_properties, requested_networks, injected_files,
            admin_password, is_first_time, instance_uuid=None):
        """Create the requested resource in this Zone."""
        # Add a retry entry for the selected compute host:
        self._add_retry_host(filter_properties, weighted_host.host_state.host)

        self._add_oversubscription_policy(filter_properties,
                weighted_host.host_state)

        payload = dict(request_spec=request_spec,
                       weighted_host=weighted_host.to_dict(),
                       instance_id=instance_uuid)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.scheduled', notifier.INFO,
                        payload)

        updated_instance = driver.instance_update_db(context, instance_uuid)

        self.compute_rpcapi.run_instance(context, instance=updated_instance,
                host=weighted_host.host_state.host,
                request_spec=request_spec, filter_properties=filter_properties,
                requested_networks=requested_networks,
                injected_files=injected_files,
                admin_password=admin_password, is_first_time=is_first_time)
Example #9
def _send_instance_update_notification(context, instance, old_vm_state,
        old_task_state, new_vm_state, new_task_state, service=None, host=None):
    """Send 'compute.instance.exists' notification to inform observers
    about instance state changes"""

    payload = usage_from_instance(context, instance, None, None)

    states_payload = {
        "old_state": old_vm_state,
        "state": new_vm_state,
        "old_task_state": old_task_state,
        "new_task_state": new_task_state,
    }

    payload.update(states_payload)

    # add audit fields:
    (audit_start, audit_end) = audit_period_bounds(current_period=True)
    payload["audit_period_beginning"] = audit_start
    payload["audit_period_ending"] = audit_end

    # add bw usage info:
    bw = bandwidth_usage(instance, audit_start)
    payload["bandwidth"] = bw

    # if the service name (e.g. api/scheduler/compute) is not provided, default
    # to "compute"
    if not service:
        service = "compute"

    publisher_id = notifier_api.publisher_id(service, host)

    notifier_api.notify(context, publisher_id, 'compute.instance.update',
            notifier_api.INFO, payload)
Example #10
    def _provision_resource(self, context, weighed_host, request_spec,
            filter_properties, requested_networks, injected_files,
            admin_password, is_first_time, instance_uuid=None):
        """Create the requested resource in this Zone."""
        # NOTE(vish): add our current instance back into the request spec
        request_spec['instance_uuids'] = [instance_uuid]
        payload = dict(request_spec=request_spec,
                       weighted_host=weighed_host.to_dict(),
                       instance_id=instance_uuid)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.scheduled', notifier.INFO,
                        payload)

        updated_instance = driver.instance_update_db(context,
                instance_uuid)

        self._post_select_populate_filter_properties(filter_properties,
                weighed_host.obj)

        self.compute_rpcapi.run_instance(context, instance=updated_instance,
                host=weighed_host.obj.host,
                request_spec=request_spec, filter_properties=filter_properties,
                requested_networks=requested_networks,
                injected_files=injected_files,
                admin_password=admin_password, is_first_time=is_first_time,
                node=weighed_host.obj.nodename)
Example #11
def notify(context, message):
    if message["event_type"] != "compute.instance.delete.start":
        LOG.debug("ignoring %s", message["event_type"])
        return
    LOG.info("processing %s", message["event_type"])
    gatherer = initialize_gatherer()

    instance_id = message["payload"]["instance_id"]
    LOG.debug("polling final stats for %r", instance_id)

    # Ask for the instance details
    instance_ref = instance_info_source.instance_get_by_uuid(context, instance_id)

    # Get the default notification payload
    payload = notifications.info_from_instance(context, instance_ref, None, None)

    # Extend the payload with samples from our plugins.  We only need
    # to send some of the data from the counter objects, since a lot
    # of the fields are the same.
    instance = Instance(instance_ref)
    counters = gatherer(instance)
    payload["samples"] = [{"name": c.name, "type": c.type, "unit": c.unit, "volume": c.volume} for c in counters]

    publisher_id = notifier_api.publisher_id("compute", None)

    # We could simply modify the incoming message payload, but we
    # can't be sure that this notifier will be called before the RPC
    # notifier. Modifying the content may also break the message
    # signature. So, we start a new message publishing. We will be
    # called again recursively as a result, but we ignore the event we
    # generate so it doesn't matter.
    notifier_api.notify(context, publisher_id, "compute.instance.delete.samples", notifier_api.INFO, payload)
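The comment above explains why re-publishing cannot loop forever: the plugin emits a different event_type than the one it reacts to. A tiny sketch, with `ctxt` assumed to be an admin context:

# The event emitted by this plugin is ignored on the way back in,
# so the guard at the top of notify() returns immediately.
notify(ctxt, {"event_type": "compute.instance.delete.samples", "payload": {}})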
Example #12
    def vol_usage_update(
        self, context, vol_id, rd_req, rd_bytes, wr_req, wr_bytes, instance, last_refreshed=None, update_totals=False
    ):
        vol_usage = self.db.vol_usage_update(
            context,
            vol_id,
            rd_req,
            rd_bytes,
            wr_req,
            wr_bytes,
            instance["uuid"],
            instance["project_id"],
            instance["user_id"],
            instance["availability_zone"],
            update_totals,
        )

        # We have just updated the database, so send the notification now
        notifier.notify(
            context,
            "conductor.%s" % self.host,
            "volume.usage",
            notifier.INFO,
            compute_utils.usage_volume_info(vol_usage),
        )
Example #13
    def _provision_resource(self, context, weighted_host, request_spec,
            reservations, filter_properties, kwargs):
        """Create the requested resource in this Zone."""
        instance = self.create_instance_db_entry(context, request_spec,
                                                 reservations)

        # Add a retry entry for the selected compute host:
        self._add_retry_host(filter_properties, weighted_host.host_state.host)

        payload = dict(request_spec=request_spec,
                       weighted_host=weighted_host.to_dict(),
                       instance_id=instance['uuid'])
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.scheduled', notifier.INFO,
                        payload)

        driver.cast_to_compute_host(context, weighted_host.host_state.host,
                'run_instance', instance_uuid=instance['uuid'],
                request_spec=request_spec, filter_properties=filter_properties,
                **kwargs)
        inst = driver.encode_instance(instance, local=True)

        # So if another instance is created, create_instance_db_entry will
        # actually create a new entry, instead of assuming it's been created
        # already
        del request_spec['instance_properties']['uuid']

        return inst
Example #14
    def _failover(self, req, id, body):
        """ failover a server """
        context = req.environ['nova.context']

        try:
            method = body["failover"]
        except (TypeError, KeyError):
            method = None

        if method is not None and method not in ('reboot', 'rebuild', 'move'):
            msg = _("Method must be in (reboot, rebuild, move)")
            raise exc.HTTPBadRequest(explanation=msg)

        try:
            server = dict(self.compute_api.get(context, id))
            if method:
                server['method'] = method
            publisher_id = 'nova-api'
            event_type = 'user.vm.down'
            priority = 'INFO'
            notifier_api.notify(context, publisher_id, event_type, priority,
                                server)
        except Exception:
            readable = traceback.format_exc()
            LOG.exception(_("Compute.api::failover %s"), readable)
            raise exc.HTTPUnprocessableEntity()
        return webob.Response(status_int=202)
Example #15
    def schedule_run_instance(self, context, request_spec,
                              admin_password, injected_files,
                              requested_networks, is_first_time,
                              filter_properties):
        """This method is called from nova.compute.api to provision
        an instance.  We first create a build plan (a list of WeightedHosts)
        and then provision.

        Returns a list of the instances created.
        """
        payload = dict(request_spec=request_spec)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.start', notifier.INFO, payload)

        instance_uuids = request_spec.get('instance_uuids')
        num_instances = len(instance_uuids)
        LOG.debug(_("Attempting to build %(num_instances)d instance(s)") %
                locals())

        weighed_hosts = self._schedule(context, request_spec,
                filter_properties, instance_uuids)

        # NOTE: Pop instance_uuids as individual creates do not need the
        # set of uuids. Do not pop before here as the upper exception
        # handler for NoValidHost needs the uuid to set error state
        instance_uuids = request_spec.pop('instance_uuids')

        # NOTE(comstud): Make sure we do not pass this through.  It
        # contains an instance of RpcContext that cannot be serialized.
        filter_properties.pop('context', None)

        for num, instance_uuid in enumerate(instance_uuids):
            request_spec['instance_properties']['launch_index'] = num

            try:
                try:
                    weighed_host = weighed_hosts.pop(0)
                except IndexError:
                    raise exception.NoValidHost(reason="")

                self._provision_resource(context, weighed_host,
                                         request_spec,
                                         filter_properties,
                                         requested_networks,
                                         injected_files, admin_password,
                                         is_first_time,
                                         instance_uuid=instance_uuid)
            except Exception as ex:
                # NOTE(vish): we don't reraise the exception here to make sure
                #             that all instances in the request get set to
                #             error properly
                driver.handle_schedule_error(context, ex, instance_uuid,
                                             request_spec)
            # scrub retry host list in case we're scheduling multiple
            # instances:
            retry = filter_properties.get('retry', {})
            retry['hosts'] = []

        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.end', notifier.INFO, payload)
Example #16
    def vol_usage_update(
        self, context, vol_id, rd_req, rd_bytes, wr_req, wr_bytes, instance, last_refreshed=None, update_totals=False
    ):
        # The session object is needed here, as the vol_usage object returned
        # needs to be bound to it in order to refresh its data
        session = db_session.get_session()
        vol_usage = self.db.vol_usage_update(
            context,
            vol_id,
            rd_req,
            rd_bytes,
            wr_req,
            wr_bytes,
            instance["uuid"],
            instance["project_id"],
            instance["user_id"],
            instance["availability_zone"],
            update_totals,
            session,
        )

        # We have just updated the database, so send the notification now
        notifier.notify(
            context,
            "conductor.%s" % self.host,
            "volume.usage",
            notifier.INFO,
            compute_utils.usage_volume_info(vol_usage),
        )
Example #17
    def _provision_resource(self, context, weighed_host, request_spec,
            filter_properties, requested_networks, injected_files,
            admin_password, is_first_time, instance_uuid=None):
        """Create the requested resource in this Zone."""
        # NOTE(vish): add our current instance back into the request spec
        request_spec['instance_uuids'] = [instance_uuid]
        payload = dict(request_spec=request_spec,
                       weighted_host=weighed_host.to_dict(),
                       instance_id=instance_uuid)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.scheduled', notifier.INFO,
                        payload)

        # Update the metadata if necessary
        scheduler_hints = filter_properties.get('scheduler_hints') or {}
        group = scheduler_hints.get('group', None)
        values = None
        if group:
            values = request_spec['instance_properties']['system_metadata']
            values.update({'group': group})
            values = {'system_metadata': values}

        updated_instance = driver.instance_update_db(context,
                instance_uuid, extra_values=values)

        self._post_select_populate_filter_properties(filter_properties,
                weighed_host.obj)

        self.compute_rpcapi.run_instance(context, instance=updated_instance,
                host=weighed_host.obj.host,
                request_spec=request_spec, filter_properties=filter_properties,
                requested_networks=requested_networks,
                injected_files=injected_files,
                admin_password=admin_password, is_first_time=is_first_time,
                node=weighed_host.obj.nodename)
Example #18
    def allocate_floating_ip(self, context, project_id, auto_assigned=False,
                             pool=None):
        """Gets a floating ip from the pool."""
        # NOTE(tr3buchet): all network hosts in zone now use the same pool
        pool = pool or CONF.default_floating_pool
        use_quota = not auto_assigned

        # Check the quota; can't put this in the API because we get
        # called into from other places
        try:
            if use_quota:
                reservations = QUOTAS.reserve(context, floating_ips=1)
        except exception.OverQuota:
            LOG.warn(_("Quota exceeded for %s, tried to allocate "
                       "floating IP"), context.project_id)
            raise exception.FloatingIpLimitExceeded()

        try:
            floating_ip = self.db.floating_ip_allocate_address(
                context, project_id, pool, auto_assigned=auto_assigned)
            payload = dict(project_id=project_id, floating_ip=floating_ip)
            notifier.notify(context,
                            notifier.publisher_id("network"),
                            'network.floating_ip.allocate',
                            notifier.INFO, payload)

            # Commit the reservations
            if use_quota:
                QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                if use_quota:
                    QUOTAS.rollback(context, reservations)

        return floating_ip
Example #19
    def schedule_run_instance(self, context, request_spec,
                              admin_password, injected_files,
                              requested_networks, is_first_time,
                              filter_properties, reservations):
        """This method is called from nova.compute.api to provision
        an instance.  We first create a build plan (a list of WeightedHosts)
        and then provision.

        Returns a list of the instances created.
        """
        elevated = context.elevated()
        num_instances = request_spec.get('num_instances', 1)
        LOG.debug(_("Attempting to build %(num_instances)d instance(s)") %
                locals())

        payload = dict(request_spec=request_spec)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.start', notifier.INFO, payload)

        weighted_hosts = self._schedule(context, "compute", request_spec,
                                        filter_properties)

        if not weighted_hosts:
            raise exception.NoValidHost(reason="")

        # NOTE(comstud): Make sure we do not pass this through.  It
        # contains an instance of RpcContext that cannot be serialized.
        filter_properties.pop('context', None)

        instances = []
        for num in xrange(num_instances):
            if not weighted_hosts:
                break
            weighted_host = weighted_hosts.pop(0)

            request_spec['instance_properties']['launch_index'] = num

            instance = self._provision_resource(elevated, weighted_host,
                                                request_spec, reservations,
                                                filter_properties,
                                                requested_networks,
                                                injected_files, admin_password,
                                                is_first_time)
            # scrub retry host list in case we're scheduling multiple
            # instances:
            retry = filter_properties.get('retry', {})
            retry['hosts'] = []

            if instance:
                instances.append(instance)

        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.end', notifier.INFO, payload)

        return instances
Example #20
def send_api_fault(url, status, exception):
    """Send an api.fault notification."""

    if not CONF.notify_api_faults:
        return

    payload = {"url": url, "exception": str(exception), "status": status}

    publisher_id = notifier_api.publisher_id("api")

    notifier_api.notify(None, publisher_id, "api.fault", notifier_api.ERROR, payload)
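For context, a sketch of the kind of call site that could feed this helper, e.g. a WSGI fault wrapper; `handle_request` and `request` are placeholders:

# Hypothetical API fault path.
try:
    response = handle_request(request)
except Exception as exc:
    # Emits api.fault with ERROR priority unless CONF.notify_api_faults
    # is turned off.
    send_api_fault(request.url, 500, exc)
    raise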
Example #21
    def notify(self, context, status, state_name, chain, error=None,
               result=None):
        event_type = 'orc_%s' % _get_chain_state_name(chain, state_name,
                                                      sep=".")
        payload = dict(status=status, result=result, error=error)
        if status == states.ERRORED:
            notifier.notify(context, notifier.publisher_id("orc"), event_type,
                            notifier.ERROR, payload)
        else:
            notifier.notify(context, notifier.publisher_id("orc"), event_type,
                            notifier.INFO, payload)
Example #22
    def test_handle_schedule_error_adds_instance_fault(self):
        instance = {"uuid": "fake-uuid"}
        self.mox.StubOutWithMock(db, "instance_update_and_get_original")
        self.mox.StubOutWithMock(db, "instance_fault_create")
        self.mox.StubOutWithMock(notifier, "notify")
        db.instance_update_and_get_original(self.context, instance["uuid"], mox.IgnoreArg()).AndReturn((None, instance))
        db.instance_fault_create(self.context, mox.IgnoreArg())
        notifier.notify(self.context, mox.IgnoreArg(), "scheduler.run_instance", notifier.ERROR, mox.IgnoreArg())
        self.mox.ReplayAll()

        driver.handle_schedule_error(self.context, exception.NoValidHost("test"), instance["uuid"], {})
Example #23
    def process_request(self, request):
        request.environ['HTTP_X_SERVICE_NAME'] = \
            self.service_name or request.host
        payload = {
            'request': self.environ_to_dict(request.environ),
        }

        api.notify(context.get_admin_context(),
                   api.publisher_id(os.path.basename(sys.argv[0])),
                   'http.request',
                   api.INFO,
                   payload)
Example #24
def send_api_fault(url, status, exception):
    """Send an api.fault notification."""

    if not FLAGS.notify_api_faults:
        return

    payload = {'url': url, 'exception': str(exception), 'status': status}

    publisher_id = notifier_api.publisher_id("api")

    notifier_api.notify(None, publisher_id, 'api.fault', notifier_api.ERROR,
                        payload)
Example #25
    def _notify_NBS_connection_failure(self, context, url):
        """Send a notification about an NBS connection failure."""
        try:
            LOG.info(_('notify NBS connection failure'))
            payload = dict({'url': url})
            notifier_api.notify(context,
                                notifier_api.publisher_id('api_nbs'),
                                'api_nbs.nvs_connect_nbs_failure',
                                notifier_api.ERROR, payload)
        except Exception:
            LOG.exception(_('notification module error while notifying '
                            'that NVS failed to connect to NBS.'))
Example #26
    def test_set_vm_state_and_notify_adds_instance_fault(self):
        request = {"instance_properties": {"uuid": "fake-uuid"}}
        updates = {"vm_state": "foo"}
        fake_inst = {"uuid": "fake-uuid"}

        self.mox.StubOutWithMock(db, "instance_update_and_get_original")
        self.mox.StubOutWithMock(db, "instance_fault_create")
        self.mox.StubOutWithMock(notifier, "notify")
        db.instance_update_and_get_original(self.context, "fake-uuid", updates).AndReturn((None, fake_inst))
        db.instance_fault_create(self.context, mox.IgnoreArg())
        notifier.notify(self.context, mox.IgnoreArg(), "scheduler.foo", notifier.ERROR, mox.IgnoreArg())
        self.mox.ReplayAll()

        self.manager._set_vm_state_and_notify("foo", {"vm_state": "foo"}, self.context, None, request)
Example #27
    def _notify_NOS_connection_failure(self, context, keypair):
        """Send a notification about an NOS connection failure."""
        try:
            LOG.info(_('notify keypairs NOS connection failure'))
            payload = dict(keypair)
            notifier_api.notify(context,
                                notifier_api.publisher_id('api_keypairs'),
                                'api_keypairs.nos_connection_failure',
                                notifier_api.ERROR, payload)
        except Exception:
            LOG.exception(_('notification module error while notifying '
                            'that keypairs failed to connect to NOS.'))
Example #28
    def _set_vm_state_and_notify(self, method, updates, context, ex,
                                 request_spec):
        """changes VM state and notifies."""
        # FIXME(comstud): Re-factor this somehow. Not sure this belongs in the
        # scheduler manager like this. We should make this easier.
        # run_instance only sends a request_spec, and an instance may or may
        # not have been created in the API (or scheduler) already. If it was
        # created, there's a 'uuid' set in the instance_properties of the
        # request_spec.
        # (littleidea): I refactored this a bit, and I agree
        # it should be easier :)
        # The refactoring could go further but trying to minimize changes
        # for essex timeframe

        LOG.warning(_("Failed to schedule_%(method)s: %(ex)s"),
                    {'method': method, 'ex': ex})

        vm_state = updates['vm_state']
        properties = request_spec.get('instance_properties', {})
        # NOTE(vish): We shouldn't get here unless we have a catastrophic
        #             failure, so just set all instances to error. if uuid
        #             is not set, instance_uuids will be set to [None], this
        #             is solely to preserve existing behavior and can
        #             be removed along with the 'if instance_uuid:' if we can
        #             verify that uuid is always set.
        uuids = [properties.get('uuid')]
        for instance_uuid in request_spec.get('instance_uuids') or uuids:
            if instance_uuid:
                state = vm_state.upper()
                LOG.warning(_('Setting instance to %s state.'), state,
                            instance_uuid=instance_uuid)

                # update instance state and notify on the transition
                (old_ref, new_ref) = self.db.instance_update_and_get_original(
                        context, instance_uuid, updates)
                notifications.send_update(context, old_ref, new_ref,
                        service="scheduler")
                compute_utils.add_instance_fault_from_exc(context,
                        conductor_api.LocalAPI(),
                        new_ref, ex, sys.exc_info())

            payload = dict(request_spec=request_spec,
                           instance_properties=properties,
                           instance_id=instance_uuid,
                           state=vm_state,
                           method=method,
                           reason=ex)

            notifier.notify(context, notifier.publisher_id("scheduler"),
                            'scheduler.' + method, notifier.ERROR, payload)
Example #29
def notify_about_volume_usage(context, volume, event_suffix,
                                extra_usage_info=None, host=None):
    if not host:
        host = FLAGS.host

    if not extra_usage_info:
        extra_usage_info = {}

    usage_info = _usage_from_volume(
            context, volume, **extra_usage_info)

    notifier_api.notify(context, 'volume.%s' % host,
                        'volume.%s' % event_suffix,
                        notifier_api.INFO, usage_info)
Example #30
    def _test_set_vm_state_and_notify(self, request_spec,
                                      expected_uuids):
        updates = dict(vm_state='fake-vm-state')
        service = 'fake-service'
        method = 'fake-method'
        exc_info = 'exc_info'
        publisher_id = 'fake-publisher-id'

        self.mox.StubOutWithMock(compute_utils,
                                 'add_instance_fault_from_exc')
        self.mox.StubOutWithMock(notifications, 'send_update')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(notifier, 'publisher_id')

        old_ref = 'old_ref'
        new_ref = 'new_ref'

        for uuid in expected_uuids:
            db.instance_update_and_get_original(
                    self.context, uuid, updates).AndReturn((old_ref, new_ref))
            notifications.send_update(self.context, old_ref, new_ref,
                                      service=service)
            compute_utils.add_instance_fault_from_exc(
                    self.context,
                    mox.IsA(conductor_api.LocalAPI),
                    new_ref, exc_info, mox.IsA(tuple))

            payload = dict(request_spec=request_spec,
                           instance_properties=request_spec.get(
                               'instance_properties'),
                           instance_id=uuid,
                           state='fake-vm-state',
                           method=method,
                           reason=exc_info)
            event_type = '%s.%s' % (service, method)
            notifier.publisher_id(service).AndReturn(publisher_id)
            notifier.notify(self.context, publisher_id,
                            event_type, notifier.ERROR, payload)

        self.mox.ReplayAll()

        scheduler_utils.set_vm_state_and_notify(self.context,
                                                service,
                                                method,
                                                updates,
                                                exc_info,
                                                request_spec,
                                                db)
Example #31
def set_vm_state_and_notify(context, service, method, updates, ex,
                            request_spec, db):
    """changes VM state and notifies."""
    LOG.warning(_("Failed to %(service)s_%(method)s: %(ex)s"), {
        'service': service,
        'method': method,
        'ex': ex
    })

    vm_state = updates['vm_state']
    properties = request_spec.get('instance_properties', {})
    # NOTE(vish): We shouldn't get here unless we have a catastrophic
    #             failure, so just set all instances to error. if uuid
    #             is not set, instance_uuids will be set to [None], this
    #             is solely to preserve existing behavior and can
    #             be removed along with the 'if instance_uuid:' if we can
    #             verify that uuid is always set.
    uuids = [properties.get('uuid')]
    from nova.conductor import api as conductor_api
    for instance_uuid in request_spec.get('instance_uuids') or uuids:
        if instance_uuid:
            state = vm_state.upper()
            LOG.warning(_('Setting instance to %s state.'),
                        state,
                        instance_uuid=instance_uuid)

            # update instance state and notify on the transition
            (old_ref, new_ref) = db.instance_update_and_get_original(
                context, instance_uuid, updates)
            notifications.send_update(context,
                                      old_ref,
                                      new_ref,
                                      service=service)
            compute_utils.add_instance_fault_from_exc(context,
                                                      conductor_api.LocalAPI(),
                                                      new_ref, ex,
                                                      sys.exc_info())

        payload = dict(request_spec=request_spec,
                       instance_properties=properties,
                       instance_id=instance_uuid,
                       state=vm_state,
                       method=method,
                       reason=ex)

        event_type = '%s.%s' % (service, method)
        notifier.notify(context, notifier.publisher_id(service), event_type,
                        notifier.ERROR, payload)
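A sketch of the call shape used by the test above; the updates dict, exception and request_spec are placeholders:

# Hypothetical use from a scheduler error path.
set_vm_state_and_notify(context, 'scheduler', 'run_instance',
                        {'vm_state': vm_states.ERROR, 'task_state': None},
                        ex, request_spec, db)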
Example #32
def notify_about_volume_usage(context,
                              volume,
                              event_suffix,
                              extra_usage_info=None,
                              host=None):
    if not host:
        host = FLAGS.host

    if not extra_usage_info:
        extra_usage_info = {}

    usage_info = _usage_from_volume(context, volume, **extra_usage_info)

    notifier_api.notify(context, 'volume.%s' % host,
                        'volume.%s' % event_suffix, notifier_api.INFO,
                        usage_info)
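A minimal sketch of calling the volume-usage helper; the volume dict, event suffixes and the extra payload key are assumptions, not part of the snippet:

# Hypothetical call sites around an attach operation.
notify_about_volume_usage(context, volume, "attach.start")
notify_about_volume_usage(context, volume, "attach.end",
                          extra_usage_info={"instance_uuid": instance["uuid"]})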
Example #33
    def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
                         wr_bytes, instance, last_refreshed=None,
                         update_totals=False):
        vol_usage = self.db.vol_usage_update(context, vol_id,
                                             rd_req, rd_bytes,
                                             wr_req, wr_bytes,
                                             instance['uuid'],
                                             instance['project_id'],
                                             instance['user_id'],
                                             instance['availability_zone'],
                                             update_totals)

        # We have just updated the database, so send the notification now
        notifier.notify(context, 'conductor.%s' % self.host, 'volume.usage',
                        notifier.INFO,
                        compute_utils.usage_volume_info(vol_usage))
Example #34
    def deallocate_floating_ip(self,
                               context,
                               address,
                               affect_auto_assigned=False):
        """Returns a floating ip to the pool."""
        floating_ip = self.db.floating_ip_get_by_address(context, address)

        # handle auto_assigned
        if not affect_auto_assigned and floating_ip.get('auto_assigned'):
            return
        use_quota = not floating_ip.get('auto_assigned')

        # make sure project owns this floating ip (allocated)
        self._floating_ip_owned_by_project(context, floating_ip)

        # make sure floating ip is not associated
        if floating_ip['fixed_ip_id']:
            floating_address = floating_ip['address']
            raise exception.FloatingIpAssociated(address=floating_address)

        # clean up any associated DNS entries
        self._delete_all_entries_for_ip(context, floating_ip['address'])
        payload = dict(project_id=floating_ip['project_id'],
                       floating_ip=floating_ip['address'])
        notifier.notify(context,
                        notifier.publisher_id("network"),
                        'network.floating_ip.deallocate',
                        notifier.INFO,
                        payload=payload)

        # Get reservations...
        try:
            if use_quota:
                reservations = QUOTAS.reserve(context, floating_ips=-1)
            else:
                reservations = None
        except Exception:
            reservations = None
            LOG.exception(
                _("Failed to update usages deallocating "
                  "floating IP"))

        self.db.floating_ip_deallocate(context, address)

        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations)
Example #35
    def _provision_resource(self,
                            context,
                            weighed_host,
                            request_spec,
                            filter_properties,
                            requested_networks,
                            injected_files,
                            admin_password,
                            is_first_time,
                            instance_uuid=None):
        """Create the requested resource in this Zone."""
        # NOTE(vish): add our current instance back into the request spec
        request_spec['instance_uuids'] = [instance_uuid]
        payload = dict(request_spec=request_spec,
                       weighted_host=weighed_host.to_dict(),
                       instance_id=instance_uuid)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.scheduled', notifier.INFO,
                        payload)

        # Update the metadata if necessary
        scheduler_hints = filter_properties.get('scheduler_hints') or {}
        group = scheduler_hints.get('group', None)
        values = None
        if group:
            values = request_spec['instance_properties']['system_metadata']
            values.update({'group': group})
            values = {'system_metadata': values}

        updated_instance = driver.instance_update_db(context,
                                                     instance_uuid,
                                                     extra_values=values)

        self._post_select_populate_filter_properties(filter_properties,
                                                     weighed_host.obj)

        self.compute_rpcapi.run_instance(context,
                                         instance=updated_instance,
                                         host=weighed_host.obj.host,
                                         request_spec=request_spec,
                                         filter_properties=filter_properties,
                                         requested_networks=requested_networks,
                                         injected_files=injected_files,
                                         admin_password=admin_password,
                                         is_first_time=is_first_time,
                                         node=weighed_host.obj.nodename)
Example #36
def _send_instance_update_notification(context,
                                       instance,
                                       old_vm_state=None,
                                       old_task_state=None,
                                       new_vm_state=None,
                                       new_task_state=None,
                                       service="compute",
                                       host=None,
                                       old_display_name=None):
    """Send 'compute.instance.update' notification to inform observers
    about instance state changes.
    """

    payload = info_from_instance(context, instance, None, None)

    if not new_vm_state:
        new_vm_state = instance["vm_state"]
    if not new_task_state:
        new_task_state = instance["task_state"]

    states_payload = {
        "old_state": old_vm_state,
        "state": new_vm_state,
        "old_task_state": old_task_state,
        "new_task_state": new_task_state,
    }

    payload.update(states_payload)

    # add audit fields:
    (audit_start, audit_end) = audit_period_bounds(current_period=True)
    payload["audit_period_beginning"] = audit_start
    payload["audit_period_ending"] = audit_end

    # add bw usage info:
    bw = bandwidth_usage(instance, audit_start)
    payload["bandwidth"] = bw

    # add the old display name if it has changed
    if old_display_name:
        payload["old_display_name"] = old_display_name

    publisher_id = notifier_api.publisher_id(service, host)

    notifier_api.notify(context, publisher_id, 'compute.instance.update',
                        notifier_api.INFO, payload)
Example #37
    def test_handle_schedule_error_adds_instance_fault(self):
        instance = {'uuid': 'fake-uuid'}
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(db, 'instance_fault_create')
        self.mox.StubOutWithMock(notifier, 'notify')
        db.instance_update_and_get_original(self.context, instance['uuid'],
                                            mox.IgnoreArg()).AndReturn(
                                                (None, instance))
        db.instance_fault_create(self.context, mox.IgnoreArg())
        notifier.notify(self.context, mox.IgnoreArg(),
                        'scheduler.run_instance', notifier.ERROR,
                        mox.IgnoreArg())
        self.mox.ReplayAll()

        driver.handle_schedule_error(self.context,
                                     exception.NoValidHost('test'),
                                     instance['uuid'], {})
Example #38
    def _notify(self, context, instance_ref, operation, network_info=None):
        try:
            usage_info = notifications.info_from_instance(
                context,
                instance_ref,
                network_info=network_info,
                system_metadata=None)
            notifier.notify(context, 'gridcentric.%s' % self.host,
                            'gridcentric.instance.%s' % operation,
                            notifier.INFO, usage_info)
        except:
            # (amscanne): We do not put the instance into an error state
            # during a notify exception. It doesn't seem reasonable to do
            # this, as the instance may still be up and running, using
            # resources, etc., and the ACTIVE state more accurately reflects
            # this than the ERROR state. So if there are real systems
            # scanning instances in addition to using notification events,
            # they will eventually pick up the instance and correct for
            # their missing notification.
            _log_error("notify %s" % operation)
Example #39
    def test_set_vm_state_and_notify_adds_instance_fault(self):
        request = {'instance_properties': {'uuid': 'fake-uuid'}}
        updates = {'vm_state': 'foo'}
        fake_inst = {'uuid': 'fake-uuid'}

        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(db, 'instance_fault_create')
        self.mox.StubOutWithMock(notifier, 'notify')
        db.instance_update_and_get_original(self.context, 'fake-uuid',
                                            updates).AndReturn(
                                                (None, fake_inst))
        db.instance_fault_create(self.context, mox.IgnoreArg())
        notifier.notify(self.context, mox.IgnoreArg(), 'scheduler.foo',
                        notifier.ERROR, mox.IgnoreArg())
        self.mox.ReplayAll()

        self.manager._set_vm_state_and_notify('foo', {'vm_state': 'foo'},
                                              self.context, None, request)
Example #40
    def _test_set_vm_state_and_notify(self, request_spec, expected_uuids):
        updates = dict(vm_state='fake-vm-state')
        service = 'fake-service'
        method = 'fake-method'
        exc_info = 'exc_info'
        publisher_id = 'fake-publisher-id'

        self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
        self.mox.StubOutWithMock(notifications, 'send_update')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(notifier, 'publisher_id')

        old_ref = 'old_ref'
        new_ref = 'new_ref'

        for uuid in expected_uuids:
            db.instance_update_and_get_original(self.context, uuid,
                                                updates).AndReturn(
                                                    (old_ref, new_ref))
            notifications.send_update(self.context,
                                      old_ref,
                                      new_ref,
                                      service=service)
            compute_utils.add_instance_fault_from_exc(
                self.context, mox.IsA(conductor_api.LocalAPI), new_ref,
                exc_info, mox.IsA(tuple))

            payload = dict(
                request_spec=request_spec,
                instance_properties=request_spec.get('instance_properties'),
                instance_id=uuid,
                state='fake-vm-state',
                method=method,
                reason=exc_info)
            event_type = '%s.%s' % (service, method)
            notifier.publisher_id(service).AndReturn(publisher_id)
            notifier.notify(self.context, publisher_id, event_type,
                            notifier.ERROR, payload)

        self.mox.ReplayAll()

        scheduler_utils.set_vm_state_and_notify(self.context, service, method,
                                                updates, exc_info,
                                                request_spec, db)
Example #41
    def _set_vm_state_and_notify(self, method, updates, context, ex, *args,
                                 **kwargs):
        """changes VM state and notifies"""
        # FIXME(comstud): Re-factor this somehow. Not sure this belongs in the
        # scheduler manager like this. We should make this easier.
        # run_instance only sends a request_spec, and an instance may or may
        # not have been created in the API (or scheduler) already. If it was
        # created, there's a 'uuid' set in the instance_properties of the
        # request_spec.
        # (littleidea): I refactored this a bit, and I agree
        # it should be easier :)
        # The refactoring could go further but trying to minimize changes
        # for essex timeframe

        LOG.warning(_("Failed to schedule_%(method)s: %(ex)s") % locals())

        vm_state = updates['vm_state']
        request_spec = kwargs.get('request_spec', {})
        properties = request_spec.get('instance_properties', {})
        instance_uuid = properties.get('uuid', {})

        if instance_uuid:
            state = vm_state.upper()
            LOG.warning(_('Setting instance to %(state)s state.'),
                        locals(),
                        instance_uuid=instance_uuid)

            # update instance state and notify on the transition
            (old_ref, new_ref) = db.instance_update_and_get_original(
                context, instance_uuid, updates)
            notifications.send_update(context,
                                      old_ref,
                                      new_ref,
                                      service="scheduler")

        payload = dict(request_spec=request_spec,
                       instance_properties=properties,
                       instance_id=instance_uuid,
                       state=vm_state,
                       method=method,
                       reason=ex)

        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.' + method, notifier.ERROR, payload)
Example #42
def notify_about_aggregate_update(context, event_suffix, aggregate_payload):
    """
    Send a notification about aggregate update.

    :param event_suffix: Event type like "create.start" or "create.end"
    :param aggregate_payload: payload for aggregate update
    """
    aggregate_identifier = aggregate_payload.get('aggregate_id', None)
    if not aggregate_identifier:
        aggregate_identifier = aggregate_payload.get('name', None)
        if not aggregate_identifier:
            LOG.debug(
                _("No aggregate id or name specified for this "
                  "notification and it will be ignored"))
            return

    notifier_api.notify(context, 'aggregate.%s' % aggregate_identifier,
                        'aggregate.%s' % event_suffix, notifier_api.INFO,
                        aggregate_payload)
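A short usage sketch for the aggregate helper above; the payload contents are illustrative and assume the caller already knows the aggregate's id and name:

# Hypothetical payload sent before and after creating an aggregate.
aggregate_payload = {"aggregate_id": 42, "name": "rack-1-hosts"}
notify_about_aggregate_update(context, "create.start", aggregate_payload)
# (aggregate creation happens here)
notify_about_aggregate_update(context, "create.end", aggregate_payload)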
Example #43
def notify(context, message):
    if message['event_type'] != 'compute.instance.delete.start':
        LOG.debug(_('ignoring %s'), message['event_type'])
        return
    LOG.info(_('processing %s'), message['event_type'])
    gatherer = initialize_gatherer()

    instance_id = message['payload']['instance_id']
    LOG.debug(_('polling final stats for %r'), instance_id)

    # Ask for the instance details
    instance_ref = conductor_api.instance_get_by_uuid(
        context,
        instance_id,
    )

    # Get the default notification payload
    payload = notifications.info_from_instance(context, instance_ref, None,
                                               None)

    # Extend the payload with samples from our plugins.  We only need
    # to send some of the data from the sample objects, since a lot
    # of the fields are the same.
    instance = Instance(context, instance_ref)
    samples = gatherer(instance)
    payload['samples'] = [{
        'name': s.name,
        'type': s.type,
        'unit': s.unit,
        'volume': s.volume
    } for s in samples]

    publisher_id = notifier_api.publisher_id('compute', None)

    # We could simply modify the incoming message payload, but we
    # can't be sure that this notifier will be called before the RPC
    # notifier. Modifying the content may also break the message
    # signature. So, we start a new message publishing. We will be
    # called again recursively as a result, but we ignore the event we
    # generate so it doesn't matter.
    notifier_api.notify(context, publisher_id,
                        'compute.instance.delete.samples', notifier_api.INFO,
                        payload)
Example #44
    def _get_host_metrics(self, context, nodename):
        """Get the metrics from monitors and
        notify information to message bus.
        """
        metrics = []
        metrics_info = {}
        for monitor in self.monitors:
            try:
                metrics += monitor.get_metrics(nodename=nodename)
            except Exception:
                LOG.warn(_("Cannot get the metrics from %s."), monitor)
        if metrics:
            metrics_info['nodename'] = nodename
            metrics_info['metrics'] = metrics
            metrics_info['host'] = self.host
            metrics_info['host_ip'] = CONF.my_ip
            notifier.notify(context, 'compute.%s' % nodename,
                            'compute.metrics.update', notifier.INFO,
                            metrics_info)
        return metrics
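Each entry in self.monitors only needs a get_metrics(nodename=...) method returning a list of metric dicts; a hypothetical monitor satisfying that contract (the metric name and fields are assumptions):

import datetime

# Hypothetical monitor matching the interface used above (sketch only).
class FakeCPUMonitor(object):
    def get_metrics(self, nodename=None):
        return [{'name': 'cpu.percent',
                 'value': 12,
                 'timestamp': datetime.datetime.utcnow(),
                 'source': 'fake_cpu_monitor'}]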
Beispiel #45
0
    def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
                         wr_bytes, instance, last_refreshed=None,
                         update_totals=False):
        # The session object is needed here, as the vol_usage object returned
        # needs to be bound to it in order to refresh its data
        session = db_session.get_session()
        vol_usage = self.db.vol_usage_update(context, vol_id,
                                             rd_req, rd_bytes,
                                             wr_req, wr_bytes,
                                             instance['uuid'],
                                             instance['project_id'],
                                             instance['user_id'],
                                             instance['availability_zone'],
                                             last_refreshed, update_totals,
                                             session)

        # We have just updated the database, so send the notification now
        notifier.notify(context, 'conductor.%s' % self.host, 'volume.usage',
                        notifier.INFO,
                        compute_utils.usage_volume_info(vol_usage))
Beispiel #46
0
def _send_instance_update_notification(context,
                                       instance,
                                       old_vm_state,
                                       old_task_state,
                                       new_vm_state,
                                       new_task_state,
                                       service=None,
                                       host=None):
    """Send 'compute.instance.update' notification to inform observers
    about instance state changes"""

    payload = usage_from_instance(context, instance, None, None)

    states_payload = {
        "old_state": old_vm_state,
        "state": new_vm_state,
        "old_task_state": old_task_state,
        "new_task_state": new_task_state,
    }

    payload.update(states_payload)

    # add audit fields:
    (audit_start, audit_end) = audit_period_bounds(current_period=True)
    payload["audit_period_beginning"] = audit_start
    payload["audit_period_ending"] = audit_end

    # add bw usage info:
    bw = bandwidth_usage(instance, audit_start)
    payload["bandwidth"] = bw

    # if the service name (e.g. api/scheduler/compute) is not provided, default
    # to "compute"
    if not service:
        service = "compute"

    publisher_id = notifier_api.publisher_id(service, host)

    notifier_api.notify(context, publisher_id, 'compute.instance.update',
                        notifier_api.INFO, payload)
Beispiel #47
0
def notify_about_instance_usage(context,
                                instance,
                                event_suffix,
                                network_info=None,
                                system_metadata=None,
                                extra_usage_info=None,
                                host=None):
    """
    Send a notification about an instance.

    :param event_suffix: Event type like "delete.start" or "exists"
    :param network_info: Networking information, if provided.
    :param system_metadata: system_metadata DB entries for the instance,
        if provided.
    :param extra_usage_info: Dictionary containing extra values to add or
        override in the notification.
    :param host: Compute host for the instance, if specified.  Default is
        CONF.host
    """

    if not host:
        host = CONF.host

    if not extra_usage_info:
        extra_usage_info = {}

    usage_info = notifications.info_from_instance(context, instance,
                                                  network_info,
                                                  system_metadata,
                                                  **extra_usage_info)

    if event_suffix.endswith("error"):
        level = notifier_api.ERROR
    else:
        level = notifier_api.INFO

    notifier_api.notify(context, 'compute.%s' % host,
                        'compute.instance.%s' % event_suffix, level,
                        usage_info)
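Two hypothetical call sites illustrating the level selection above; ctxt, instance and the extra_usage_info keys are assumptions borrowed from typical compute-manager code, not from this example.

# Sketch only: assumes a request context and instance dict are in scope.
def _example_instance_notifications(ctxt, instance):
    # Plain lifecycle event, emitted at INFO:
    notify_about_instance_usage(ctxt, instance, "create.start")
    # Suffixes ending in "error" are emitted at ERROR instead:
    notify_about_instance_usage(ctxt, instance, "create.error",
                                extra_usage_info={'message': 'build failed'})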
Beispiel #48
0
    def allocate_floating_ip(self,
                             context,
                             project_id,
                             auto_assigned=False,
                             pool=None):
        """Gets a floating ip from the pool."""
        # NOTE(tr3buchet): all network hosts in zone now use the same pool
        pool = pool or CONF.default_floating_pool
        use_quota = not auto_assigned

        # Check the quota; can't put this in the API because we get
        # called into from other places
        try:
            if use_quota:
                reservations = QUOTAS.reserve(context, floating_ips=1)
        except exception.OverQuota:
            pid = context.project_id
            LOG.warn(
                _("Quota exceeded for %(pid)s, tried to allocate "
                  "floating IP") % locals())
            raise exception.FloatingIpLimitExceeded()

        try:
            floating_ip = self.db.floating_ip_allocate_address(
                context, project_id, pool)
            payload = dict(project_id=project_id, floating_ip=floating_ip)
            notifier.notify(context, notifier.publisher_id("network"),
                            'network.floating_ip.allocate', notifier.INFO,
                            payload)

            # Commit the reservations
            if use_quota:
                QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                if use_quota:
                    QUOTAS.rollback(context, reservations)

        return floating_ip
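The quota handling above is a reserve, then commit-on-success / rollback-on-failure pattern; a self-contained sketch of that pattern with a hypothetical quota stand-in (not the real nova QUOTAS driver):

# Sketch of the reserve/commit/rollback pattern, with hypothetical names.
class FakeQuotas(object):
    def reserve(self, context, **deltas):
        return ['reservation-uuid']       # opaque reservation handle

    def commit(self, context, reservations):
        pass                              # make the reserved usage permanent

    def rollback(self, context, reservations):
        pass                              # release the reservation on failure

def allocate_with_quota(quotas, context, do_allocate):
    reservations = quotas.reserve(context, floating_ips=1)
    try:
        result = do_allocate()
    except Exception:
        quotas.rollback(context, reservations)
        raise
    quotas.commit(context, reservations)
    return result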
Beispiel #49
0
    def _provision_resource(self, context, weighted_host, request_spec,
                            reservations, filter_properties,
                            requested_networks, injected_files, admin_password,
                            is_first_time):
        """Create the requested resource in this Zone."""
        instance = self.create_instance_db_entry(context, request_spec,
                                                 reservations)

        # Add a retry entry for the selected compute host:
        self._add_retry_host(filter_properties, weighted_host.host_state.host)

        payload = dict(request_spec=request_spec,
                       weighted_host=weighted_host.to_dict(),
                       instance_id=instance['uuid'])
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.scheduled', notifier.INFO,
                        payload)

        updated_instance = driver.instance_update_db(
            context, instance['uuid'], weighted_host.host_state.host)

        self.compute_rpcapi.run_instance(context,
                                         instance=updated_instance,
                                         host=weighted_host.host_state.host,
                                         request_spec=request_spec,
                                         filter_properties=filter_properties,
                                         requested_networks=requested_networks,
                                         injected_files=injected_files,
                                         admin_password=admin_password,
                                         is_first_time=is_first_time)

        inst = driver.encode_instance(updated_instance, local=True)

        # So if another instance is created, create_instance_db_entry will
        # actually create a new entry, instead of assuming it's been created
        # already
        del request_spec['instance_properties']['uuid']

        return inst
Beispiel #50
0
        def do_disassociate():
            # NOTE(vish): Note that we are disassociating in the db before we
            #             actually remove the ip address on the host. We are
            #             safe from races on this host due to the decorator,
            #             but another host might grab the ip right away. We
            #             don't worry about this case because the minuscule
            #             window where the ip is on both hosts shouldn't cause
            #             any problems.
            fixed = self.db.floating_ip_disassociate(context, address)

            if not fixed:
                # NOTE(vish): ip was already disassociated
                return
            if interface:
                # go go driver time
                self.l3driver.remove_floating_ip(address, fixed['address'],
                                                 interface, fixed['network'])
            payload = dict(project_id=context.project_id,
                           instance_id=instance_uuid,
                           floating_ip=address)
            notifier.notify(context,
                            notifier.publisher_id("network"),
                            'network.floating_ip.disassociate',
                            notifier.INFO, payload=payload)
Beispiel #51
0
    def _provision_resource(self,
                            context,
                            weighted_host,
                            request_spec,
                            filter_properties,
                            requested_networks,
                            injected_files,
                            admin_password,
                            is_first_time,
                            instance_uuid=None):
        """Create the requested resource in this Zone."""
        # Add a retry entry for the selected compute host:
        self._add_retry_host(filter_properties, weighted_host.host_state.host)

        self._add_oversubscription_policy(filter_properties,
                                          weighted_host.host_state)

        payload = dict(request_spec=request_spec,
                       weighted_host=weighted_host.to_dict(),
                       instance_id=instance_uuid)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.scheduled', notifier.INFO,
                        payload)

        updated_instance = driver.instance_update_db(
            context, instance_uuid, weighted_host.host_state.host)

        self.compute_rpcapi.run_instance(context,
                                         instance=updated_instance,
                                         host=weighted_host.host_state.host,
                                         request_spec=request_spec,
                                         filter_properties=filter_properties,
                                         requested_networks=requested_networks,
                                         injected_files=injected_files,
                                         admin_password=admin_password,
                                         is_first_time=is_first_time)
Beispiel #52
0
    def process_response(self,
                         request,
                         response,
                         exception=None,
                         traceback=None):
        payload = {
            'request': self.environ_to_dict(request.environ),
        }

        if response:
            payload['response'] = {
                'status': response.status,
                'headers': response.headers,
            }

        if exception:
            payload['exception'] = {
                'value': repr(exception),
                'traceback': tb.format_tb(traceback)
            }

        api.notify(context.get_admin_context(),
                   api.publisher_id(os.path.basename(sys.argv[0])),
                   'http.response', api.INFO, payload)
Beispiel #53
0
    def _provision_resource(self, context, weighed_host, request_spec,
            filter_properties, requested_networks, injected_files,
            admin_password, is_first_time, instance_uuid=None):
        """Create the requested resource in this Zone."""
        payload = dict(request_spec=request_spec,
                       weighted_host=weighed_host.to_dict(),
                       instance_id=instance_uuid)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.scheduled', notifier.INFO,
                        payload)

        updated_instance = driver.instance_update_db(context,
                instance_uuid)

        self._post_select_populate_filter_properties(filter_properties,
                weighed_host.obj)

        self.compute_rpcapi.run_instance(context, instance=updated_instance,
                host=weighed_host.obj.host,
                request_spec=request_spec, filter_properties=filter_properties,
                requested_networks=requested_networks,
                injected_files=injected_files,
                admin_password=admin_password, is_first_time=is_first_time,
                node=weighed_host.obj.nodename)
Beispiel #54
0
    def _provision_resource(self, context, weighted_host, request_spec,
            filter_properties, requested_networks, injected_files,
            admin_password, is_first_time, instance_uuid=None):
        """Create the requested resource in this Zone."""
        # Add a retry entry for the selected compute host:
        self._add_retry_host(filter_properties, weighted_host.host_state.host)

        self._add_oversubscription_policy(filter_properties,
                weighted_host.host_state)

        payload = dict(request_spec=request_spec,
                       weighted_host=weighted_host.to_dict(),
                       instance_id=instance_uuid)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.scheduled', notifier.INFO,
                        payload)

        if weighted_host.host_state.nodename is not None:
            smd_dic = db.instance_system_metadata_get(context, instance_uuid)
            smd_dic['node'] = weighted_host.host_state.nodename
        else:
            # update is not needed
            smd_dic = None

        updated_instance = driver.instance_update_db(context,
                instance_uuid, weighted_host.host_state.host,
                system_metadata=smd_dic)
        # Ensure system_metadata is loaded and included in rpc payload
        updated_instance.get('system_metadata')

        self.compute_rpcapi.run_instance(context, instance=updated_instance,
                host=weighted_host.host_state.host,
                request_spec=request_spec, filter_properties=filter_properties,
                requested_networks=requested_networks,
                injected_files=injected_files,
                admin_password=admin_password, is_first_time=is_first_time)
Beispiel #55
0
    def schedule_run_instance(self, context, request_spec, admin_password,
                              injected_files, requested_networks,
                              is_first_time, filter_properties):
        """This method is called from nova.compute.api to provision
        an instance.  We first create a build plan (a list of WeightedHosts)
        and then provision.

        Returns a list of the instances created.
        """
        payload = dict(request_spec=request_spec)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.start', notifier.INFO, payload)

        instance_uuids = request_spec.get('instance_uuids')
        LOG.info(
            _("Attempting to build %(num_instances)d instance(s) "
              "uuids: %(instance_uuids)s"), {
                  'num_instances': len(instance_uuids),
                  'instance_uuids': instance_uuids
              })
        LOG.debug(_("Request Spec: %s") % request_spec)

        weighed_hosts = self._schedule(context, request_spec,
                                       filter_properties, instance_uuids)

        # NOTE: Pop instance_uuids as individual creates do not need the
        # set of uuids. Do not pop before here as the upper exception
        # handler for NoValidHost needs the uuid to set error state
        instance_uuids = request_spec.pop('instance_uuids')

        # NOTE(comstud): Make sure we do not pass this through.  It
        # contains an instance of RpcContext that cannot be serialized.
        filter_properties.pop('context', None)

        for num, instance_uuid in enumerate(instance_uuids):
            request_spec['instance_properties']['launch_index'] = num

            try:
                try:
                    weighed_host = weighed_hosts.pop(0)
                    LOG.info(
                        _("Choosing host %(weighed_host)s "
                          "for instance %(instance_uuid)s"), {
                              'weighed_host': weighed_host,
                              'instance_uuid': instance_uuid
                          })
                except IndexError:
                    raise exception.NoValidHost(reason="")

                self._provision_resource(context,
                                         weighed_host,
                                         request_spec,
                                         filter_properties,
                                         requested_networks,
                                         injected_files,
                                         admin_password,
                                         is_first_time,
                                         instance_uuid=instance_uuid)
            except Exception as ex:
                # NOTE(vish): we don't reraise the exception here to make sure
                #             that all instances in the request get set to
                #             error properly
                driver.handle_schedule_error(context, ex, instance_uuid,
                                             request_spec)
            # scrub retry host list in case we're scheduling multiple
            # instances:
            retry = filter_properties.get('retry', {})
            retry['hosts'] = []

        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.end', notifier.INFO, payload)
Beispiel #56
0
    def _notify(self, ctxt, event_type, payload, priority):
        notifier_api.notify(ctxt,
                            self.publisher_id,
                            event_type,
                            priority,
                            payload)