    def schedule_run_instance(self, context, request_spec,
                              admin_password, injected_files,
                              requested_networks, is_first_time,
                              filter_properties):
        """This method is called from nova.compute.api to provision
        an instance.  We first create a build plan (a list of WeightedHosts)
        and then provision.

        Returns a list of the instances created.
        """
        payload = dict(request_spec=request_spec)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.start', notifier.INFO, payload)

        instance_uuids = request_spec.get('instance_uuids')
        num_instances = len(instance_uuids)
        LOG.debug(_("Attempting to build %(num_instances)d instance(s)") %
                locals())

        weighed_hosts = self._schedule(context, request_spec,
                filter_properties, instance_uuids)

        # NOTE: Pop instance_uuids as individual creates do not need the
        # set of uuids. Do not pop before here as the upper exception
        # handler for NoValidHost needs the uuid to set error state
        instance_uuids = request_spec.pop('instance_uuids')

        # NOTE(comstud): Make sure we do not pass this through.  It
        # contains an instance of RpcContext that cannot be serialized.
        filter_properties.pop('context', None)

        for num, instance_uuid in enumerate(instance_uuids):
            request_spec['instance_properties']['launch_index'] = num

            try:
                try:
                    weighed_host = weighed_hosts.pop(0)
                except IndexError:
                    raise exception.NoValidHost(reason="")

                self._provision_resource(context, weighed_host,
                                         request_spec,
                                         filter_properties,
                                         requested_networks,
                                         injected_files, admin_password,
                                         is_first_time,
                                         instance_uuid=instance_uuid)
            except Exception as ex:
                # NOTE(vish): we don't reraise the exception here to make sure
                #             that all instances in the request get set to
                #             error properly
                driver.handle_schedule_error(context, ex, instance_uuid,
                                             request_spec)
            # scrub retry host list in case we're scheduling multiple
            # instances:
            retry = filter_properties.get('retry', {})
            retry['hosts'] = []

        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.end', notifier.INFO, payload)
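
All of the examples on this page reduce to the same call shape: notifier.notify(context, publisher_id, event_type, priority, payload), where publisher_id comes from notifier.publisher_id(<service>) and priority is one of the module constants such as notifier.INFO or notifier.ERROR. The snippet below is a minimal sketch of that pattern on its own, assuming a Nova tree where nova.openstack.common.notifier.api and nova.context are importable; the event name and payload contents simply mirror the scheduler examples and are illustrative.

# Minimal sketch (assumptions noted above): build a payload dict and emit a
# single INFO-level notification, the same call shape used throughout
# the examples on this page.
from nova import context as nova_context
from nova.openstack.common.notifier import api as notifier


def emit_run_instance_start(request_spec):
    # An admin context is used here only for illustration; the examples
    # above pass in whatever request context the caller already holds.
    ctxt = nova_context.get_admin_context()
    payload = dict(request_spec=request_spec)
    notifier.notify(ctxt, notifier.publisher_id("scheduler"),
                    'scheduler.run_instance.start', notifier.INFO, payload)
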
Example #2
    def schedule_run_instance(self, context, request_spec, admin_password,
                              injected_files, requested_networks,
                              is_first_time, filter_properties):
        """This method is called from nova.compute.api to provision
        an instance.  We first create a build plan (a list of WeightedHosts)
        and then provision.

        Returns a list of the instances created.
        """
        elevated = context.elevated()
        instance_uuids = request_spec.get('instance_uuids')
        num_instances = len(instance_uuids)
        LOG.debug(
            _("Attempting to build %(num_instances)d instance(s)") % locals())

        payload = dict(request_spec=request_spec)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.start', notifier.INFO, payload)

        weighted_hosts = self._schedule(context, "compute", request_spec,
                                        filter_properties, instance_uuids)

        # NOTE(comstud): Make sure we do not pass this through.  It
        # contains an instance of RpcContext that cannot be serialized.
        filter_properties.pop('context', None)

        for num, instance_uuid in enumerate(instance_uuids):
            request_spec['instance_properties']['launch_index'] = num

            try:
                try:
                    weighted_host = weighted_hosts.pop(0)
                except IndexError:
                    raise exception.NoValidHost(reason="")

                self._provision_resource(elevated,
                                         weighted_host,
                                         request_spec,
                                         filter_properties,
                                         requested_networks,
                                         injected_files,
                                         admin_password,
                                         is_first_time,
                                         instance_uuid=instance_uuid)
            except Exception as ex:
                # NOTE(vish): we don't reraise the exception here to make sure
                #             that all instances in the request get set to
                #             error properly
                driver.handle_schedule_error(context, ex, instance_uuid,
                                             request_spec)
            # scrub retry host list in case we're scheduling multiple
            # instances:
            retry = filter_properties.get('retry', {})
            retry['hosts'] = []

        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.end', notifier.INFO, payload)
Example #3
    def schedule_run_instance(self, context, request_spec,
                              admin_password, injected_files,
                              requested_networks, is_first_time,
                              filter_properties, reservations):
        """This method is called from nova.compute.api to provision
        an instance.  We first create a build plan (a list of WeightedHosts)
        and then provision.

        Returns a list of the instances created.
        """
        elevated = context.elevated()
        num_instances = request_spec.get('num_instances', 1)
        LOG.debug(_("Attempting to build %(num_instances)d instance(s)") %
                locals())

        payload = dict(request_spec=request_spec)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.start', notifier.INFO, payload)

        weighted_hosts = self._schedule(context, "compute", request_spec,
                                        filter_properties)

        if not weighted_hosts:
            raise exception.NoValidHost(reason="")

        # NOTE(comstud): Make sure we do not pass this through.  It
        # contains an instance of RpcContext that cannot be serialized.
        filter_properties.pop('context', None)

        instances = []
        for num in xrange(num_instances):
            if not weighted_hosts:
                break
            weighted_host = weighted_hosts.pop(0)

            request_spec['instance_properties']['launch_index'] = num

            instance = self._provision_resource(elevated, weighted_host,
                                                request_spec, reservations,
                                                filter_properties,
                                                requested_networks,
                                                injected_files, admin_password,
                                                is_first_time)
            # scrub retry host list in case we're scheduling multiple
            # instances:
            retry = filter_properties.get('retry', {})
            retry['hosts'] = []

            if instance:
                instances.append(instance)

        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.end', notifier.INFO, payload)

        return instances
Example #4
    def notify(self, context, status, state_name, chain, error=None,
               result=None):
        event_type = 'orc_%s' % _get_chain_state_name(chain, state_name,
                                                      sep=".")
        payload = dict(status=status, result=result, error=error)
        if status == states.ERRORED:
            notifier.notify(context, notifier.publisher_id("orc"), event_type,
                            notifier.ERROR, payload)
        else:
            notifier.notify(context, notifier.publisher_id("orc"), event_type,
                            notifier.INFO, payload)
Example #5
    def schedule_run_instance(self, context, request_spec, admin_password,
                              injected_files, requested_networks,
                              is_first_time, filter_properties, reservations):
        """This method is called from nova.compute.api to provision
        an instance.  We first create a build plan (a list of WeightedHosts)
        and then provision.

        Returns a list of the instances created.
        """
        elevated = context.elevated()
        num_instances = request_spec.get('num_instances', 1)
        LOG.debug(
            _("Attempting to build %(num_instances)d instance(s)") % locals())

        payload = dict(request_spec=request_spec)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.start', notifier.INFO, payload)

        weighted_hosts = self._schedule(context, "compute", request_spec,
                                        filter_properties)

        if not weighted_hosts:
            raise exception.NoValidHost(reason="")

        # NOTE(comstud): Make sure we do not pass this through.  It
        # contains an instance of RpcContext that cannot be serialized.
        filter_properties.pop('context', None)

        instances = []
        for num in xrange(num_instances):
            if not weighted_hosts:
                break
            weighted_host = weighted_hosts.pop(0)

            request_spec['instance_properties']['launch_index'] = num

            instance = self._provision_resource(elevated, weighted_host,
                                                request_spec, reservations,
                                                filter_properties,
                                                requested_networks,
                                                injected_files, admin_password,
                                                is_first_time)
            # scrub retry host list in case we're scheduling multiple
            # instances:
            retry = filter_properties.get('retry', {})
            retry['hosts'] = []

            if instance:
                instances.append(instance)

        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.end', notifier.INFO, payload)

        return instances
Example #6
    def _test_set_vm_state_and_notify(self, request_spec,
                                      expected_uuids):
        updates = dict(vm_state='fake-vm-state')
        service = 'fake-service'
        method = 'fake-method'
        exc_info = 'exc_info'
        publisher_id = 'fake-publisher-id'

        self.mox.StubOutWithMock(compute_utils,
                                 'add_instance_fault_from_exc')
        self.mox.StubOutWithMock(notifications, 'send_update')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(notifier, 'publisher_id')

        old_ref = 'old_ref'
        new_ref = 'new_ref'

        for uuid in expected_uuids:
            db.instance_update_and_get_original(
                    self.context, uuid, updates).AndReturn((old_ref, new_ref))
            notifications.send_update(self.context, old_ref, new_ref,
                                      service=service)
            compute_utils.add_instance_fault_from_exc(
                    self.context,
                    mox.IsA(conductor_api.LocalAPI),
                    new_ref, exc_info, mox.IsA(tuple))

            payload = dict(request_spec=request_spec,
                           instance_properties=request_spec.get(
                               'instance_properties'),
                           instance_id=uuid,
                           state='fake-vm-state',
                           method=method,
                           reason=exc_info)
            event_type = '%s.%s' % (service, method)
            notifier.publisher_id(service).AndReturn(publisher_id)
            notifier.notify(self.context, publisher_id,
                            event_type, notifier.ERROR, payload)

        self.mox.ReplayAll()

        scheduler_utils.set_vm_state_and_notify(self.context,
                                                service,
                                                method,
                                                updates,
                                                exc_info,
                                                request_spec,
                                                db)
Example #7
    def _provision_resource(self, context, weighted_host, request_spec,
                            reservations, filter_properties, kwargs):
        """Create the requested resource in this Zone."""
        instance = self.create_instance_db_entry(context, request_spec,
                                                 reservations)

        # Add a retry entry for the selected compute host:
        self._add_retry_host(filter_properties, weighted_host.host_state.host)

        payload = dict(request_spec=request_spec,
                       weighted_host=weighted_host.to_dict(),
                       instance_id=instance['uuid'])
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.scheduled', notifier.INFO,
                        payload)

        driver.cast_to_compute_host(context,
                                    weighted_host.host_state.host,
                                    'run_instance',
                                    instance_uuid=instance['uuid'],
                                    request_spec=request_spec,
                                    filter_properties=filter_properties,
                                    **kwargs)
        inst = driver.encode_instance(instance, local=True)

        # So if another instance is created, create_instance_db_entry will
        # actually create a new entry, instead of assuming it's been created
        # already
        del request_spec['instance_properties']['uuid']

        return inst
Example #8
    def _provision_resource(self, context, weighed_host, request_spec,
            filter_properties, requested_networks, injected_files,
            admin_password, is_first_time, instance_uuid=None):
        """Create the requested resource in this Zone."""
        payload = dict(request_spec=request_spec,
                       weighted_host=weighed_host.to_dict(),
                       instance_id=instance_uuid)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.scheduled', notifier.INFO,
                        payload)

        # TODO(NTTdocomo): Combine the next two updates into one
        driver.db_instance_node_set(context,
                instance_uuid, weighed_host.obj.nodename)
        updated_instance = driver.instance_update_db(context,
                instance_uuid)

        self._post_select_populate_filter_properties(filter_properties,
                weighed_host.obj)

        self.compute_rpcapi.run_instance(context, instance=updated_instance,
                host=weighed_host.obj.host,
                request_spec=request_spec, filter_properties=filter_properties,
                requested_networks=requested_networks,
                injected_files=injected_files,
                admin_password=admin_password, is_first_time=is_first_time)
Example #9
    def allocate_floating_ip(self, context, project_id, auto_assigned=False,
                             pool=None):
        """Gets a floating ip from the pool."""
        # NOTE(tr3buchet): all network hosts in zone now use the same pool
        pool = pool or CONF.default_floating_pool
        use_quota = not auto_assigned

        # Check the quota; can't put this in the API because we get
        # called into from other places
        try:
            if use_quota:
                reservations = QUOTAS.reserve(context, floating_ips=1)
        except exception.OverQuota:
            LOG.warn(_("Quota exceeded for %s, tried to allocate "
                       "floating IP"), context.project_id)
            raise exception.FloatingIpLimitExceeded()

        try:
            floating_ip = self.db.floating_ip_allocate_address(
                context, project_id, pool, auto_assigned=auto_assigned)
            payload = dict(project_id=project_id, floating_ip=floating_ip)
            notifier.notify(context,
                            notifier.publisher_id("network"),
                            'network.floating_ip.allocate',
                            notifier.INFO, payload)

            # Commit the reservations
            if use_quota:
                QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                if use_quota:
                    QUOTAS.rollback(context, reservations)

        return floating_ip
Example #10
        def do_associate():
            # associate floating ip
            fixed = self.db.floating_ip_fixed_ip_associate(context,
                                                           floating_address,
                                                           fixed_address,
                                                           self.host)
            if not fixed:
                # NOTE(vish): ip was already associated
                return
            try:
                # gogo driver time
                self.l3driver.add_floating_ip(floating_address, fixed_address,
                        interface, fixed['network'])
            except exception.ProcessExecutionError as e:
                self.db.floating_ip_disassociate(context, floating_address)
                if "Cannot find device" in str(e):
                    LOG.error(_('Interface %(interface)s not found'), locals())
                    raise exception.NoFloatingIpInterface(interface=interface)

            payload = dict(project_id=context.project_id,
                           instance_id=instance_uuid,
                           floating_ip=floating_address)
            notifier.notify(context,
                            notifier.publisher_id("network"),
                            'network.floating_ip.associate',
                        notifier.INFO, payload=payload)
Example #11
    def _provision_resource(self, context, weighted_host, request_spec,
            filter_properties, requested_networks, injected_files,
            admin_password, is_first_time, instance_uuid=None):
        """Create the requested resource in this Zone."""
        # Add a retry entry for the selected compute host:
        self._add_retry_host(filter_properties, weighted_host.host_state.host)

        self._add_oversubscription_policy(filter_properties,
                weighted_host.host_state)

        payload = dict(request_spec=request_spec,
                       weighted_host=weighted_host.to_dict(),
                       instance_id=instance_uuid)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.scheduled', notifier.INFO,
                        payload)

        updated_instance = driver.instance_update_db(context, instance_uuid)

        self.compute_rpcapi.run_instance(context, instance=updated_instance,
                host=weighted_host.host_state.host,
                request_spec=request_spec, filter_properties=filter_properties,
                requested_networks=requested_networks,
                injected_files=injected_files,
                admin_password=admin_password, is_first_time=is_first_time)
Example #12
    def _provision_resource(self, context, weighed_host, request_spec,
            filter_properties, requested_networks, injected_files,
            admin_password, is_first_time, instance_uuid=None):
        """Create the requested resource in this Zone."""
        # NOTE(vish): add our current instance back into the request spec
        request_spec['instance_uuids'] = [instance_uuid]
        payload = dict(request_spec=request_spec,
                       weighted_host=weighed_host.to_dict(),
                       instance_id=instance_uuid)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.scheduled', notifier.INFO,
                        payload)

        # Update the metadata if necessary
        scheduler_hints = filter_properties.get('scheduler_hints') or {}
        group = scheduler_hints.get('group', None)
        values = None
        if group:
            values = request_spec['instance_properties']['system_metadata']
            values.update({'group': group})
            values = {'system_metadata': values}

        updated_instance = driver.instance_update_db(context,
                instance_uuid, extra_values=values)

        self._post_select_populate_filter_properties(filter_properties,
                weighed_host.obj)

        self.compute_rpcapi.run_instance(context, instance=updated_instance,
                host=weighed_host.obj.host,
                request_spec=request_spec, filter_properties=filter_properties,
                requested_networks=requested_networks,
                injected_files=injected_files,
                admin_password=admin_password, is_first_time=is_first_time,
                node=weighed_host.obj.nodename)
Example #13
    def _provision_resource(self, context, weighted_host, request_spec,
            reservations, filter_properties, kwargs):
        """Create the requested resource in this Zone."""
        instance = self.create_instance_db_entry(context, request_spec,
                                                 reservations)

        # Add a retry entry for the selected compute host:
        self._add_retry_host(filter_properties, weighted_host.host_state.host)

        payload = dict(request_spec=request_spec,
                       weighted_host=weighted_host.to_dict(),
                       instance_id=instance['uuid'])
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.scheduled', notifier.INFO,
                        payload)

        driver.cast_to_compute_host(context, weighted_host.host_state.host,
                'run_instance', instance_uuid=instance['uuid'],
                request_spec=request_spec, filter_properties=filter_properties,
                **kwargs)
        inst = driver.encode_instance(instance, local=True)

        # So if another instance is created, create_instance_db_entry will
        # actually create a new entry, instead of assuming it's been created
        # already
        del request_spec['instance_properties']['uuid']

        return inst
Example #14
        def do_associate():
            # associate floating ip
            fixed = self.db.floating_ip_fixed_ip_associate(
                context, floating_address, fixed_address, self.host)
            if not fixed:
                # NOTE(vish): ip was already associated
                return
            try:
                # gogo driver time
                self.l3driver.add_floating_ip(floating_address, fixed_address,
                                              interface, fixed['network'])
            except processutils.ProcessExecutionError as e:
                self.db.floating_ip_disassociate(context, floating_address)
                if "Cannot find device" in str(e):
                    LOG.error(_('Interface %(interface)s not found'), locals())
                    raise exception.NoFloatingIpInterface(interface=interface)
                raise

            payload = dict(project_id=context.project_id,
                           instance_id=instance_uuid,
                           floating_ip=floating_address)
            notifier.notify(context,
                            notifier.publisher_id("network"),
                            'network.floating_ip.associate',
                            notifier.INFO,
                            payload=payload)
Example #15
    def _provision_resource(self, context, weighted_host, request_spec,
            reservations, filter_properties, requested_networks,
            injected_files, admin_password, is_first_time):
        """Create the requested resource in this Zone."""
        instance = self.create_instance_db_entry(context, request_spec,
                                                 reservations)

        # Add a retry entry for the selected compute host:
        self._add_retry_host(filter_properties, weighted_host.host_state.host)

        payload = dict(request_spec=request_spec,
                       weighted_host=weighted_host.to_dict(),
                       instance_id=instance['uuid'])
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.scheduled', notifier.INFO,
                        payload)

        updated_instance = driver.instance_update_db(context, instance['uuid'],
                weighted_host.host_state.host)

        self.compute_rpcapi.run_instance(context, instance=updated_instance,
                host=weighted_host.host_state.host,
                request_spec=request_spec, filter_properties=filter_properties,
                requested_networks=requested_networks,
                injected_files=injected_files,
                admin_password=admin_password, is_first_time=is_first_time)

        inst = driver.encode_instance(updated_instance, local=True)

        # So if another instance is created, create_instance_db_entry will
        # actually create a new entry, instead of assuming it's been created
        # already
        del request_spec['instance_properties']['uuid']

        return inst
Example #16
        def do_disassociate():
            # NOTE(vish): Note that we are disassociating in the db before we
            #             actually remove the ip address on the host. We are
            #             safe from races on this host due to the decorator,
            #             but another host might grab the ip right away. We
            #             don't worry about this case because the minuscule
            #             window where the ip is on both hosts shouldn't cause
            #             any problems.
            fixed = self.db.floating_ip_disassociate(context, address)

            if not fixed:
                # NOTE(vish): ip was already disassociated
                return
            if interface:
                # go go driver time
                self.l3driver.remove_floating_ip(address, fixed['address'],
                                                 interface, fixed['network'])
            payload = dict(project_id=context.project_id,
                           instance_id=instance_uuid,
                           floating_ip=address)
            notifier.notify(context,
                            notifier.publisher_id("network"),
                            'network.floating_ip.disassociate',
                            notifier.INFO,
                            payload=payload)
Example #17
    def _provision_resource(self,
                            context,
                            weighed_host,
                            request_spec,
                            filter_properties,
                            requested_networks,
                            injected_files,
                            admin_password,
                            is_first_time,
                            instance_uuid=None):
        """Create the requested resource in this Zone."""
        # NOTE(vish): add our current instance back into the request spec
        request_spec['instance_uuids'] = [instance_uuid]
        payload = dict(request_spec=request_spec,
                       weighted_host=weighed_host.to_dict(),
                       instance_id=instance_uuid)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.scheduled', notifier.INFO,
                        payload)

        updated_instance = driver.instance_update_db(context, instance_uuid)

        self._post_select_populate_filter_properties(filter_properties,
                                                     weighed_host.obj)

        self.compute_rpcapi.run_instance(context,
                                         instance=updated_instance,
                                         host=weighed_host.obj.host,
                                         request_spec=request_spec,
                                         filter_properties=filter_properties,
                                         requested_networks=requested_networks,
                                         injected_files=injected_files,
                                         admin_password=admin_password,
                                         is_first_time=is_first_time,
                                         node=weighed_host.obj.nodename)
Example #18
def handle_schedule_error(context, ex, instance_uuid, request_spec):
    if not isinstance(ex, exception.NoValidHost):
        LOG.exception(_("Exception during scheduler.run_instance"))
    compute_utils.add_instance_fault_from_exc(context,
            instance_uuid, ex, sys.exc_info())
    state = vm_states.ERROR.upper()
    LOG.warning(_('Setting instance to %(state)s state.'),
                locals(), instance_uuid=instance_uuid)

    # update instance state and notify on the transition
    (old_ref, new_ref) = db.instance_update_and_get_original(context,
            instance_uuid, {'vm_state': vm_states.ERROR,
                            'task_state': None})
    notifications.send_update(context, old_ref, new_ref,
            service="scheduler")

    properties = request_spec.get('instance_properties', {})
    payload = dict(request_spec=request_spec,
                   instance_properties=properties,
                   instance_id=instance_uuid,
                   state=vm_states.ERROR,
                   method='run_instance',
                   reason=ex)

    notifier.notify(context, notifier.publisher_id("scheduler"),
                    'scheduler.run_instance', notifier.ERROR, payload)
Example #21
def notify(context, message):
    if message["event_type"] != "compute.instance.delete.start":
        LOG.debug("ignoring %s", message["event_type"])
        return
    LOG.info("processing %s", message["event_type"])
    gatherer = initialize_gatherer()

    instance_id = message["payload"]["instance_id"]
    LOG.debug("polling final stats for %r", instance_id)

    # Ask for the instance details
    instance_ref = instance_info_source.instance_get_by_uuid(context, instance_id)

    # Get the default notification payload
    payload = notifications.info_from_instance(context, instance_ref, None, None)

    # Extend the payload with samples from our plugins.  We only need
    # to send some of the data from the counter objects, since a lot
    # of the fields are the same.
    instance = Instance(instance_ref)
    counters = gatherer(instance)
    payload["samples"] = [{"name": c.name, "type": c.type, "unit": c.unit, "volume": c.volume} for c in counters]

    publisher_id = notifier_api.publisher_id("compute", None)

    # We could simply modify the incoming message payload, but we
    # can't be sure that this notifier will be called before the RPC
    # notifier. Modifying the content may also break the message
    # signature. So, we start a new message publishing. We will be
    # called again recursively as a result, but we ignore the event we
    # generate so it doesn't matter.
    notifier_api.notify(context, publisher_id, "compute.instance.delete.samples", notifier_api.INFO, payload)
Example #22
    def _provision_resource(self,
                            context,
                            weighted_host,
                            request_spec,
                            filter_properties,
                            requested_networks,
                            injected_files,
                            admin_password,
                            is_first_time,
                            instance_uuid=None):
        """Create the requested resource in this Zone."""
        # Add a retry entry for the selected compute host:
        self._add_retry_host(filter_properties, weighted_host.host_state.host)

        payload = dict(request_spec=request_spec,
                       weighted_host=weighted_host.to_dict(),
                       instance_id=instance_uuid)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.scheduled', notifier.INFO,
                        payload)

        updated_instance = driver.instance_update_db(
            context, instance_uuid, weighted_host.host_state.host)

        self.compute_rpcapi.run_instance(context,
                                         instance=updated_instance,
                                         host=weighted_host.host_state.host,
                                         request_spec=request_spec,
                                         filter_properties=filter_properties,
                                         requested_networks=requested_networks,
                                         injected_files=injected_files,
                                         admin_password=admin_password,
                                         is_first_time=is_first_time)
Example #23
def _send_instance_update_notification(context, instance, old_vm_state=None,
            old_task_state=None, new_vm_state=None, new_task_state=None,
            service="compute", host=None):
    """Send 'compute.instance.update' notification to inform observers
    about instance state changes"""

    payload = info_from_instance(context, instance, None, None)

    if not new_vm_state:
        new_vm_state = instance["vm_state"]
    if not new_task_state:
        new_task_state = instance["task_state"]

    states_payload = {
        "old_state": old_vm_state,
        "state": new_vm_state,
        "old_task_state": old_task_state,
        "new_task_state": new_task_state,
    }

    payload.update(states_payload)

    # add audit fields:
    (audit_start, audit_end) = audit_period_bounds(current_period=True)
    payload["audit_period_beginning"] = audit_start
    payload["audit_period_ending"] = audit_end

    # add bw usage info:
    bw = bandwidth_usage(instance, audit_start)
    payload["bandwidth"] = bw

    publisher_id = notifier_api.publisher_id(service, host)

    notifier_api.notify(context, publisher_id, 'compute.instance.update',
            notifier_api.INFO, payload)
Example #24
    def _test_set_vm_state_and_notify(self, request_spec, expected_uuids):
        updates = dict(vm_state='fake-vm-state')
        service = 'fake-service'
        method = 'fake-method'
        exc_info = 'exc_info'
        publisher_id = 'fake-publisher-id'

        self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
        self.mox.StubOutWithMock(notifications, 'send_update')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(notifier, 'publisher_id')

        old_ref = 'old_ref'
        new_ref = 'new_ref'

        for uuid in expected_uuids:
            db.instance_update_and_get_original(self.context, uuid,
                                                updates).AndReturn(
                                                    (old_ref, new_ref))
            notifications.send_update(self.context,
                                      old_ref,
                                      new_ref,
                                      service=service)
            compute_utils.add_instance_fault_from_exc(
                self.context, mox.IsA(conductor_api.LocalAPI), new_ref,
                exc_info, mox.IsA(tuple))

            payload = dict(
                request_spec=request_spec,
                instance_properties=request_spec.get('instance_properties'),
                instance_id=uuid,
                state='fake-vm-state',
                method=method,
                reason=exc_info)
            event_type = '%s.%s' % (service, method)
            notifier.publisher_id(service).AndReturn(publisher_id)
            notifier.notify(self.context, publisher_id, event_type,
                            notifier.ERROR, payload)

        self.mox.ReplayAll()

        scheduler_utils.set_vm_state_and_notify(self.context, service, method,
                                                updates, exc_info,
                                                request_spec, db)
Example #25
    def process_request(self, request):
        request.environ['HTTP_X_SERVICE_NAME'] = \
            self.service_name or request.host
        payload = {
            'request': self.environ_to_dict(request.environ),
        }

        api.notify(context.get_admin_context(),
                   api.publisher_id(os.path.basename(sys.argv[0])),
                   'http.request', api.INFO, payload)
Example #26
def send_api_fault(url, status, exception):
    """Send an api.fault notification."""

    if not CONF.notify_api_faults:
        return

    payload = {'url': url, 'exception': str(exception), 'status': status}

    publisher_id = notifier_api.publisher_id("api")

    notifier_api.notify(None, publisher_id, 'api.fault', notifier_api.ERROR,
                        payload)
Example #27
    def _set_vm_state_and_notify(self, method, updates, context, ex,
                                 request_spec):
        """changes VM state and notifies."""
        # FIXME(comstud): Re-factor this somehow. Not sure this belongs in the
        # scheduler manager like this. We should make this easier.
        # run_instance only sends a request_spec, and an instance may or may
        # not have been created in the API (or scheduler) already. If it was
        # created, there's a 'uuid' set in the instance_properties of the
        # request_spec.
        # (littleidea): I refactored this a bit, and I agree
        # it should be easier :)
        # The refactoring could go further but trying to minimize changes
        # for essex timeframe

        LOG.warning(_("Failed to schedule_%(method)s: %(ex)s") % locals())

        vm_state = updates['vm_state']
        properties = request_spec.get('instance_properties', {})
        # NOTE(vish): We shouldn't get here unless we have a catastrophic
        #             failure, so just set all instances to error. if uuid
        #             is not set, instance_uuids will be set to [None], this
        #             is solely to preserve existing behavior and can
        #             be removed along with the 'if instance_uuid:' if we can
        #             verify that uuid is always set.
        uuids = [properties.get('uuid')]
        for instance_uuid in request_spec.get('instance_uuids') or uuids:
            if instance_uuid:
                state = vm_state.upper()
                LOG.warning(_('Setting instance to %(state)s state.'),
                            locals(),
                            instance_uuid=instance_uuid)

                # update instance state and notify on the transition
                (old_ref, new_ref) = self.db.instance_update_and_get_original(
                    context, instance_uuid, updates)
                notifications.send_update(context,
                                          old_ref,
                                          new_ref,
                                          service="scheduler")
                compute_utils.add_instance_fault_from_exc(
                    context, conductor_api.LocalAPI(), new_ref, ex,
                    sys.exc_info())

            payload = dict(request_spec=request_spec,
                           instance_properties=properties,
                           instance_id=instance_uuid,
                           state=vm_state,
                           method=method,
                           reason=ex)

            notifier.notify(context, notifier.publisher_id("scheduler"),
                            'scheduler.' + method, notifier.ERROR, payload)
Example #28
    def _notify_NOS_connection_failure(self, context, keypair):
        """Send a notification about a NOS connection failure."""
        try:
            LOG.info(_('notify keypairs NOS connection failure'))
            payload = dict(keypair)
            notifier_api.notify(context,
                                notifier_api.publisher_id('api_keypairs'),
                                'api_keypairs.nos_connection_failure',
                                notifier_api.ERROR, payload)
        except Exception:
            LOG.exception(_('Notification module error while notifying '
                            'that keypairs failed to connect to NOS.'))
Example #29
    def _set_vm_state_and_notify(self, method, updates, context, ex,
                                 request_spec):
        """changes VM state and notifies"""
        # FIXME(comstud): Re-factor this somehow. Not sure this belongs in the
        # scheduler manager like this. We should make this easier.
        # run_instance only sends a request_spec, and an instance may or may
        # not have been created in the API (or scheduler) already. If it was
        # created, there's a 'uuid' set in the instance_properties of the
        # request_spec.
        # (littleidea): I refactored this a bit, and I agree
        # it should be easier :)
        # The refactoring could go further but trying to minimize changes
        # for essex timeframe

        LOG.warning(_("Failed to schedule_%(method)s: %(ex)s") % locals())

        vm_state = updates['vm_state']
        properties = request_spec.get('instance_properties', {})
        # FIXME(comstud): We really need to move error handling closer
        # to where the errors occur so we can deal with errors on
        # individual instances when scheduling multiple.
        if 'instance_uuids' in request_spec:
            instance_uuid = request_spec['instance_uuids'][0]
        else:
            instance_uuid = properties.get('uuid', {})

        if instance_uuid:
            state = vm_state.upper()
            LOG.warning(_('Setting instance to %(state)s state.'),
                        locals(),
                        instance_uuid=instance_uuid)

            # update instance state and notify on the transition
            (old_ref, new_ref) = db.instance_update_and_get_original(
                context, instance_uuid, updates)
            notifications.send_update(context,
                                      old_ref,
                                      new_ref,
                                      service="scheduler")

        payload = dict(request_spec=request_spec,
                       instance_properties=properties,
                       instance_id=instance_uuid,
                       state=vm_state,
                       method=method,
                       reason=ex)

        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.' + method, notifier.ERROR, payload)
Example #30
def set_vm_state_and_notify(context, service, method, updates, ex,
                            request_spec, db):
    """changes VM state and notifies."""
    LOG.warning(_("Failed to %(service)s_%(method)s: %(ex)s"), {
        'service': service,
        'method': method,
        'ex': ex
    })

    vm_state = updates['vm_state']
    properties = request_spec.get('instance_properties', {})
    # NOTE(vish): We shouldn't get here unless we have a catastrophic
    #             failure, so just set all instances to error. if uuid
    #             is not set, instance_uuids will be set to [None], this
    #             is solely to preserve existing behavior and can
    #             be removed along with the 'if instance_uuid:' if we can
    #             verify that uuid is always set.
    uuids = [properties.get('uuid')]
    from nova.conductor import api as conductor_api
    for instance_uuid in request_spec.get('instance_uuids') or uuids:
        if instance_uuid:
            state = vm_state.upper()
            LOG.warning(_('Setting instance to %s state.'),
                        state,
                        instance_uuid=instance_uuid)

            # update instance state and notify on the transition
            (old_ref, new_ref) = db.instance_update_and_get_original(
                context, instance_uuid, updates)
            notifications.send_update(context,
                                      old_ref,
                                      new_ref,
                                      service=service)
            compute_utils.add_instance_fault_from_exc(context,
                                                      conductor_api.LocalAPI(),
                                                      new_ref, ex,
                                                      sys.exc_info())

        payload = dict(request_spec=request_spec,
                       instance_properties=properties,
                       instance_id=instance_uuid,
                       state=vm_state,
                       method=method,
                       reason=ex)

        event_type = '%s.%s' % (service, method)
        notifier.notify(context, notifier.publisher_id(service), event_type,
                        notifier.ERROR, payload)
Example #31
    def deallocate_floating_ip(self,
                               context,
                               address,
                               affect_auto_assigned=False):
        """Returns a floating ip to the pool."""
        floating_ip = self.db.floating_ip_get_by_address(context, address)

        # handle auto_assigned
        if not affect_auto_assigned and floating_ip.get('auto_assigned'):
            return
        use_quota = not floating_ip.get('auto_assigned')

        # make sure project owns this floating ip (allocated)
        self._floating_ip_owned_by_project(context, floating_ip)

        # make sure floating ip is not associated
        if floating_ip['fixed_ip_id']:
            floating_address = floating_ip['address']
            raise exception.FloatingIpAssociated(address=floating_address)

        # clean up any associated DNS entries
        self._delete_all_entries_for_ip(context, floating_ip['address'])
        payload = dict(project_id=floating_ip['project_id'],
                       floating_ip=floating_ip['address'])
        notifier.notify(context,
                        notifier.publisher_id("network"),
                        'network.floating_ip.deallocate',
                        notifier.INFO,
                        payload=payload)

        # Get reservations...
        try:
            if use_quota:
                reservations = QUOTAS.reserve(context, floating_ips=-1)
            else:
                reservations = None
        except Exception:
            reservations = None
            LOG.exception(
                _("Failed to update usages deallocating "
                  "floating IP"))

        self.db.floating_ip_deallocate(context, address)

        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations)
Example #32
def notify(context, message):
    if message['event_type'] != 'compute.instance.delete.start':
        LOG.debug(_('ignoring %s'), message['event_type'])
        return
    LOG.info(_('processing %s'), message['event_type'])
    gatherer = initialize_gatherer()

    instance_id = message['payload']['instance_id']
    LOG.debug(_('polling final stats for %r'), instance_id)

    # Ask for the instance details
    instance_ref = conductor_api.instance_get_by_uuid(
        context,
        instance_id,
    )

    # Get the default notification payload
    payload = notifications.info_from_instance(context, instance_ref, None,
                                               None)

    # Extend the payload with samples from our plugins.  We only need
    # to send some of the data from the sample objects, since a lot
    # of the fields are the same.
    instance = Instance(context, instance_ref)
    samples = gatherer(instance)
    payload['samples'] = [{
        'name': s.name,
        'type': s.type,
        'unit': s.unit,
        'volume': s.volume
    } for s in samples]

    publisher_id = notifier_api.publisher_id('compute', None)

    # We could simply modify the incoming message payload, but we
    # can't be sure that this notifier will be called before the RPC
    # notifier. Modifying the content may also break the message
    # signature. So, we start a new message publishing. We will be
    # called again recursively as a result, but we ignore the event we
    # generate so it doesn't matter.
    notifier_api.notify(context, publisher_id,
                        'compute.instance.delete.samples', notifier_api.INFO,
                        payload)
Example #33
def _send_instance_update_notification(context,
                                       instance,
                                       old_vm_state,
                                       old_task_state,
                                       new_vm_state,
                                       new_task_state,
                                       service=None,
                                       host=None):
    """Send 'compute.instance.update' notification to inform observers
    about instance state changes"""

    payload = usage_from_instance(context, instance, None, None)

    states_payload = {
        "old_state": old_vm_state,
        "state": new_vm_state,
        "old_task_state": old_task_state,
        "new_task_state": new_task_state,
    }

    payload.update(states_payload)

    # add audit fields:
    (audit_start, audit_end) = audit_period_bounds(current_period=True)
    payload["audit_period_beginning"] = audit_start
    payload["audit_period_ending"] = audit_end

    # add bw usage info:
    bw = bandwidth_usage(instance, audit_start)
    payload["bandwidth"] = bw

    # if the service name (e.g. api/scheduler/compute) is not provided, default
    # to "compute"
    if not service:
        service = "compute"

    publisher_id = notifier_api.publisher_id(service, host)

    notifier_api.notify(context, publisher_id, 'compute.instance.update',
                        notifier_api.INFO, payload)
Example #34
    def _provision_resource(self, context, weighted_host, request_spec,
                            reservations, filter_properties,
                            requested_networks, injected_files, admin_password,
                            is_first_time):
        """Create the requested resource in this Zone."""
        instance = self.create_instance_db_entry(context, request_spec,
                                                 reservations)

        # Add a retry entry for the selected compute host:
        self._add_retry_host(filter_properties, weighted_host.host_state.host)

        payload = dict(request_spec=request_spec,
                       weighted_host=weighted_host.to_dict(),
                       instance_id=instance['uuid'])
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.scheduled', notifier.INFO,
                        payload)

        updated_instance = driver.instance_update_db(
            context, instance['uuid'], weighted_host.host_state.host)

        self.compute_rpcapi.run_instance(context,
                                         instance=updated_instance,
                                         host=weighted_host.host_state.host,
                                         request_spec=request_spec,
                                         filter_properties=filter_properties,
                                         requested_networks=requested_networks,
                                         injected_files=injected_files,
                                         admin_password=admin_password,
                                         is_first_time=is_first_time)

        inst = driver.encode_instance(updated_instance, local=True)

        # So if another instance is created, create_instance_db_entry will
        # actually create a new entry, instead of assume it's been created
        # already
        del request_spec['instance_properties']['uuid']

        return inst
Example #35
    def process_response(self,
                         request,
                         response,
                         exception=None,
                         traceback=None):
        payload = {
            'request': self.environ_to_dict(request.environ),
        }

        if response:
            payload['response'] = {
                'status': response.status,
                'headers': response.headers,
            }

        if exception:
            payload['exception'] = {
                'value': repr(exception),
                'traceback': tb.format_tb(traceback)
            }

        api.notify(context.get_admin_context(),
                   api.publisher_id(os.path.basename(sys.argv[0])),
                   'http.response', api.INFO, payload)
Example #36
    def _provision_resource(self, context, weighted_host, request_spec,
            filter_properties, requested_networks, injected_files,
            admin_password, is_first_time, instance_uuid=None):
        """Create the requested resource in this Zone."""
        # Add a retry entry for the selected compute host:
        self._add_retry_host(filter_properties, weighted_host.host_state.host)

        self._add_oversubscription_policy(filter_properties,
                weighted_host.host_state)

        payload = dict(request_spec=request_spec,
                       weighted_host=weighted_host.to_dict(),
                       instance_id=instance_uuid)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.scheduled', notifier.INFO,
                        payload)

        if weighted_host.host_state.nodename is not None:
            smd_dic = db.instance_system_metadata_get(context, instance_uuid)
            smd_dic['node'] = weighted_host.host_state.nodename
        else:
            # update is not needed
            smd_dic = None

        updated_instance = driver.instance_update_db(context,
                instance_uuid, weighted_host.host_state.host,
                system_metadata=smd_dic)
        # Ensure system_metadata is loaded and included in rpc payload
        updated_instance.get('system_metadata')

        self.compute_rpcapi.run_instance(context, instance=updated_instance,
                host=weighted_host.host_state.host,
                request_spec=request_spec, filter_properties=filter_properties,
                requested_networks=requested_networks,
                injected_files=injected_files,
                admin_password=admin_password, is_first_time=is_first_time)
Example #37
    def schedule_run_instance(self, context, request_spec, admin_password,
                              injected_files, requested_networks,
                              is_first_time, filter_properties):
        """This method is called from nova.compute.api to provision
        an instance.  We first create a build plan (a list of WeightedHosts)
        and then provision.

        Returns a list of the instances created.
        """
        payload = dict(request_spec=request_spec)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.start', notifier.INFO, payload)

        instance_uuids = request_spec.get('instance_uuids')
        LOG.info(
            _("Attempting to build %(num_instances)d instance(s) "
              "uuids: %(instance_uuids)s"), {
                  'num_instances': len(instance_uuids),
                  'instance_uuids': instance_uuids
              })
        LOG.debug(_("Request Spec: %s") % request_spec)

        weighed_hosts = self._schedule(context, request_spec,
                                       filter_properties, instance_uuids)

        # NOTE: Pop instance_uuids as individual creates do not need the
        # set of uuids. Do not pop before here as the upper exception
        # handler for NoValidHost needs the uuid to set error state
        instance_uuids = request_spec.pop('instance_uuids')

        # NOTE(comstud): Make sure we do not pass this through.  It
        # contains an instance of RpcContext that cannot be serialized.
        filter_properties.pop('context', None)

        for num, instance_uuid in enumerate(instance_uuids):
            request_spec['instance_properties']['launch_index'] = num

            try:
                try:
                    weighed_host = weighed_hosts.pop(0)
                    LOG.info(
                        _("Choosing host %(weighed_host)s "
                          "for instance %(instance_uuid)s"), {
                              'weighed_host': weighed_host,
                              'instance_uuid': instance_uuid
                          })
                except IndexError:
                    raise exception.NoValidHost(reason="")

                self._provision_resource(context,
                                         weighed_host,
                                         request_spec,
                                         filter_properties,
                                         requested_networks,
                                         injected_files,
                                         admin_password,
                                         is_first_time,
                                         instance_uuid=instance_uuid)
            except Exception as ex:
                # NOTE(vish): we don't reraise the exception here to make sure
                #             that all instances in the request get set to
                #             error properly
                driver.handle_schedule_error(context, ex, instance_uuid,
                                             request_spec)
            # scrub retry host list in case we're scheduling multiple
            # instances:
            retry = filter_properties.get('retry', {})
            retry['hosts'] = []

        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.end', notifier.INFO, payload)