Example #1
0
    def object_action(self, context, objinst, objmethod, args, kwargs):
        """Perform an action on an object and report the resulting changes.

        Dispatches *objmethod* on *objinst*, then diffs the object against
        a pre-call snapshot so the caller can be told which fields changed.
        Returns a tuple of (updates dict, method result).
        """
        LOG.info(
            trace_event.event('conductor.object_action.start', None, context,
                              ""))
        # Snapshot before dispatch so we can diff afterwards.
        snapshot = objinst.obj_clone()
        result = self._object_dispatch(objinst, objmethod, args, kwargs)
        # NOTE(danms): Diff the object with the one passed to us and
        # generate a list of changes to forward back
        updates = {}
        for name, field in objinst.fields.items():
            if not objinst.obj_attr_is_set(name):
                # Avoid demand-loading anything
                continue
            new_value = getattr(objinst, name)
            changed = (not snapshot.obj_attr_is_set(name) or
                       getattr(snapshot, name) != new_value)
            if changed:
                updates[name] = field.to_primitive(objinst, name, new_value)
        # This is safe since a field named this would conflict with the
        # method anyway
        updates['obj_what_changed'] = objinst.obj_what_changed()
        LOG.info(
            trace_event.event('conductor.object_action.end', None, context,
                              ""))
        return updates, result
    def provider_fw_rule_get_all(self, context):
        """Return all provider-level firewall rules as primitives.

        Trace events bracket the DB lookup so its duration is observable.
        """
        # BUG FIX: the commented-out lines here began with a literal TAB
        # while the rest of the file uses spaces — a TabError under
        # Python 3; the superseded hand-built "lttng_trace" messages are
        # removed outright rather than kept as dead code.
        LOG.info(
            trace_event.event('conductor.provider_fw_rule.start', None,
                              context, ""))
        rules = self.db.provider_fw_rule_get_all(context)
        LOG.info(
            trace_event.event('conductor.provider_fw_rule.end', None, context,
                              ""))
        return jsonutils.to_primitive(rules)
    def select_destinations(self, context, request_spec, filter_properties):
        """Returns destinations(s) best suited for this request_spec and
        filter_properties.

        The result should be a list of dicts with 'host', 'nodename' and
        'limits' as keys.
        """
        start_evt = trace_event.event('scheduler.select_destinations.start',
                                      None, context, '')
        LOG.info(start_evt)
        # Delegate the actual placement decision to the configured driver.
        destinations = self.driver.select_destinations(
            context, request_spec, filter_properties)
        end_evt = trace_event.event('scheduler.select_destinations.end',
                                    None, context, '')
        LOG.info(end_evt)
        return jsonutils.to_primitive(destinations)
Example #4
0
    def provider_fw_rule_get_all(self, context):
        """Fetch every provider firewall rule, serialized to primitives."""
        start_evt = trace_event.event('conductor.provider_fw_rule.start',
                                      None, context, "")
        LOG.info(start_evt)
        # Trace events bracket the DB call so its duration is observable.
        rules = self.db.provider_fw_rule_get_all(context)
        end_evt = trace_event.event('conductor.provider_fw_rule.end',
                                    None, context, "")
        LOG.info(end_evt)
        return jsonutils.to_primitive(rules)
Example #5
0
    def select_destinations(self, context, request_spec, filter_properties):
        """Returns destinations(s) best suited for this request_spec and
        filter_properties.

        The result should be a list of dicts with 'host', 'nodename' and
        'limits' as keys.
        """
        start_evt = trace_event.event('scheduler.select_destinations.start',
                                      None, context, '')
        LOG.info(start_evt)
        dests = self.driver.select_destinations(context, request_spec,
                                                filter_properties)
        end_evt = trace_event.event('scheduler.select_destinations.end',
                                    None, context, '')
        LOG.info(end_evt)
        return jsonutils.to_primitive(dests)
    def object_action(self, context, objinst, objmethod, args, kwargs):
        """Perform an action on an object.

        Calls *objmethod* on *objinst* via ``_object_dispatch`` and then
        diffs the object against a pre-call clone, so the (remote) caller
        can be told which fields changed and apply the same updates to its
        local copy.

        :param context: request context (used only for the trace events)
        :param objinst: the object to act on
        :param objmethod: name of the method to invoke on the object
        :param args: positional arguments for the method
        :param kwargs: keyword arguments for the method
        :returns: an ``(updates, result)`` tuple where ``updates`` maps
                  changed field names to primitive values (plus the special
                  ``'obj_what_changed'`` key) and ``result`` is the
                  dispatched method's return value
        """

        LOG.info(trace_event.event('conductor.object_action.start',None,context,""))
        # Snapshot the object before dispatch so we can diff afterwards.
        oldobj = objinst.obj_clone()
        result = self._object_dispatch(objinst, objmethod, args, kwargs)
        updates = dict()
        # NOTE(danms): Diff the object with the one passed to us and
        # generate a list of changes to forward back
        for name, field in objinst.fields.items():
            if not objinst.obj_attr_is_set(name):
                # Avoid demand-loading anything
                continue
            # A field counts as changed if it was newly set, or its value
            # differs from the pre-dispatch snapshot.
            if (not oldobj.obj_attr_is_set(name) or
                    getattr(oldobj, name) != getattr(objinst, name)):
                updates[name] = field.to_primitive(objinst, name,
                                                   getattr(objinst, name))
        # This is safe since a field named this would conflict with the
        # method anyway
        updates['obj_what_changed'] = objinst.obj_what_changed()
        LOG.info(trace_event.event('conductor.object_action.end',None,context,""))
        return updates, result
Example #7
0
    def rebuild_instance(self,
                         context,
                         instance,
                         orig_image_ref,
                         image_ref,
                         injected_files,
                         new_pass,
                         orig_sys_metadata,
                         bdms,
                         recreate,
                         on_shared_storage,
                         preserve_ephemeral=False,
                         host=None):
        """Rebuild (or evacuate) an instance, scheduling a host if needed.

        When *host* is None a destination is selected via the scheduler,
        excluding the instance's current host; scheduling failures
        (NoValidHost, UnsupportedPolicyException) record an error state on
        the instance and re-raise. The rebuild is then cast to the compute
        service on *host*. Trace events bracket the whole operation.
        """
        LOG.info(
            trace_event.event('conductor.rebuild_instance.start', instance,
                              context, ""))
        with compute_utils.EventReporter(context, 'rebuild_server',
                                         instance.uuid):
            node = limits = None
            if not host:
                # NOTE(lcostantino): Retrieve scheduler filters for the
                # instance when the feature is available
                filter_properties = {'ignore_hosts': [instance.host]}
                request_spec = scheduler_utils.build_request_spec(
                    context, image_ref, [instance])
                try:
                    scheduler_utils.setup_instance_group(
                        context, request_spec, filter_properties)
                    hosts = self.scheduler_client.select_destinations(
                        context, request_spec, filter_properties)
                    # Only the first (best-ranked) destination is used.
                    host_dict = hosts.pop(0)
                    host, node, limits = (host_dict['host'],
                                          host_dict['nodename'],
                                          host_dict['limits'])
                except exception.NoValidHost as ex:
                    with excutils.save_and_reraise_exception():
                        self._set_vm_state_and_notify(
                            context, instance.uuid, 'rebuild_server', {
                                'vm_state': instance.vm_state,
                                'task_state': None
                            }, ex, request_spec)
                        LOG.warning(_LW("No valid host found for rebuild"),
                                    instance=instance)
                except exception.UnsupportedPolicyException as ex:
                    with excutils.save_and_reraise_exception():
                        self._set_vm_state_and_notify(
                            context, instance.uuid, 'rebuild_server', {
                                'vm_state': instance.vm_state,
                                'task_state': None
                            }, ex, request_spec)
                        LOG.warning(_LW("Server with unsupported policy "
                                        "cannot be rebuilt"),
                                    instance=instance)

            # NOTE(review): a migration record with status 'accepted' may
            # or may not exist for this request; a missing record is
            # tolerated and simply passed through as migration=None.
            try:
                migration = objects.Migration.get_by_instance_and_status(
                    context, instance.uuid, 'accepted')
            except exception.MigrationNotFoundByStatus:
                LOG.debug(
                    "No migration record for the rebuild/evacuate "
                    "request.",
                    instance=instance)
                migration = None

            compute_utils.notify_about_instance_usage(self.notifier, context,
                                                      instance,
                                                      "rebuild.scheduled")

            # Cast the actual rebuild to the (possibly just-scheduled)
            # destination compute service.
            self.compute_rpcapi.rebuild_instance(
                context,
                instance=instance,
                new_pass=new_pass,
                injected_files=injected_files,
                image_ref=image_ref,
                orig_image_ref=orig_image_ref,
                orig_sys_metadata=orig_sys_metadata,
                bdms=bdms,
                recreate=recreate,
                on_shared_storage=on_shared_storage,
                preserve_ephemeral=preserve_ephemeral,
                migration=migration,
                host=host,
                node=node,
                limits=limits)
        LOG.info(
            trace_event.event('conductor.rebuild_instance.end', instance,
                              context, ""))
Example #8
0
    def build_instances(self,
                        context,
                        instances,
                        image,
                        filter_properties,
                        admin_password,
                        injected_files,
                        requested_networks,
                        security_groups,
                        block_device_mapping=None,
                        legacy_bdm=True):
        """Schedule and build a set of instances.

        Selects a destination host for each instance via the scheduler and
        casts ``build_and_run_instance`` to each chosen compute service.
        On a scheduling failure, every instance is put into ERROR state and
        the request is abandoned. Trace events bracket the operation.
        """
        # TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version
        #                 2.0 of the RPC API.
        LOG.info(
            trace_event.event('conductor.build_instances.start', None, context,
                              ""))
        request_spec = scheduler_utils.build_request_spec(
            context, image, instances)
        # TODO(danms): Remove this in version 2.0 of the RPC API
        if (requested_networks and not isinstance(requested_networks,
                                                  objects.NetworkRequestList)):
            requested_networks = objects.NetworkRequestList(objects=[
                objects.NetworkRequest.from_tuple(t)
                for t in requested_networks
            ])
        # TODO(melwitt): Remove this in version 2.0 of the RPC API
        flavor = filter_properties.get('instance_type')
        if flavor and not isinstance(flavor, objects.Flavor):
            # Code downstream may expect extra_specs to be populated since it
            # is receiving an object, so lookup the flavor to ensure this.
            flavor = objects.Flavor.get_by_id(context, flavor['id'])
            filter_properties = dict(filter_properties, instance_type=flavor)

        try:
            scheduler_utils.setup_instance_group(context, request_spec,
                                                 filter_properties)
            # check retry policy. Rather ugly use of instances[0]...
            # but if we've exceeded max retries... then we really only
            # have a single instance.
            scheduler_utils.populate_retry(filter_properties,
                                           instances[0].uuid)
            hosts = self.scheduler_client.select_destinations(
                context, request_spec, filter_properties)
        except Exception as exc:
            # Scheduling failed: flag every instance as errored and give up.
            updates = {'vm_state': vm_states.ERROR, 'task_state': None}
            for instance in instances:
                self._set_vm_state_and_notify(context, instance.uuid,
                                              'build_instances', updates, exc,
                                              request_spec)
            return

        for (instance, host) in itertools.izip(instances, hosts):
            try:
                instance.refresh()
            except (exception.InstanceNotFound,
                    exception.InstanceInfoCacheNotFound):
                LOG.debug('Instance deleted during build', instance=instance)
                continue
            local_filter_props = copy.deepcopy(filter_properties)
            scheduler_utils.populate_filter_properties(local_filter_props,
                                                       host)
            # The block_device_mapping passed from the api doesn't contain
            # instance specific information
            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance.uuid)

            self.compute_rpcapi.build_and_run_instance(
                context,
                instance=instance,
                host=host['host'],
                image=image,
                request_spec=request_spec,
                filter_properties=local_filter_props,
                admin_password=admin_password,
                injected_files=injected_files,
                requested_networks=requested_networks,
                security_groups=security_groups,
                block_device_mapping=bdms,
                node=host['nodename'],
                limits=host['limits'])
        # BUG FIX: the end trace event was previously emitted inside the
        # per-instance loop (so it fired once per instance, and never on an
        # empty list). Emit it once, after all builds have been cast, to
        # pair with the single start event above.
        LOG.info(
            trace_event.event('conductor.build_instances.end', None,
                              context, ""))
    def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
                         injected_files, new_pass, orig_sys_metadata,
                         bdms, recreate, on_shared_storage,
                         preserve_ephemeral=False, host=None):
        """Rebuild (or evacuate) an instance, scheduling a host if needed.

        When *host* is None a destination is selected via the scheduler,
        excluding the instance's current host; scheduling failures
        (NoValidHost, UnsupportedPolicyException) record an error state on
        the instance and re-raise. The rebuild is then cast to the compute
        service on *host*. Trace events bracket the whole operation.
        """
        LOG.info(trace_event.event('conductor.rebuild_instance.start',instance,context,""))
        with compute_utils.EventReporter(context, 'rebuild_server',
                                          instance.uuid):
            node = limits = None
            if not host:
                # NOTE(lcostantino): Retrieve scheduler filters for the
                # instance when the feature is available
                filter_properties = {'ignore_hosts': [instance.host]}
                request_spec = scheduler_utils.build_request_spec(context,
                                                                  image_ref,
                                                                  [instance])
                try:
                    scheduler_utils.setup_instance_group(context, request_spec,
                                                         filter_properties)
                    hosts = self.scheduler_client.select_destinations(context,
                                                            request_spec,
                                                            filter_properties)
                    # Only the first (best-ranked) destination is used.
                    host_dict = hosts.pop(0)
                    host, node, limits = (host_dict['host'],
                                          host_dict['nodename'],
                                          host_dict['limits'])
                except exception.NoValidHost as ex:
                    with excutils.save_and_reraise_exception():
                        self._set_vm_state_and_notify(context, instance.uuid,
                                'rebuild_server',
                                {'vm_state': instance.vm_state,
                                 'task_state': None}, ex, request_spec)
                        LOG.warning(_LW("No valid host found for rebuild"),
                                    instance=instance)
                except exception.UnsupportedPolicyException as ex:
                    with excutils.save_and_reraise_exception():
                        self._set_vm_state_and_notify(context, instance.uuid,
                                'rebuild_server',
                                {'vm_state': instance.vm_state,
                                 'task_state': None}, ex, request_spec)
                        LOG.warning(_LW("Server with unsupported policy "
                                        "cannot be rebuilt"),
                                    instance=instance)

            # NOTE(review): a migration record with status 'accepted' may
            # or may not exist for this request; a missing record is
            # tolerated and simply passed through as migration=None.
            try:
                migration = objects.Migration.get_by_instance_and_status(
                    context, instance.uuid, 'accepted')
            except exception.MigrationNotFoundByStatus:
                LOG.debug("No migration record for the rebuild/evacuate "
                          "request.", instance=instance)
                migration = None

            compute_utils.notify_about_instance_usage(
                self.notifier, context, instance, "rebuild.scheduled")

            # Cast the actual rebuild to the (possibly just-scheduled)
            # destination compute service.
            self.compute_rpcapi.rebuild_instance(context,
                    instance=instance,
                    new_pass=new_pass,
                    injected_files=injected_files,
                    image_ref=image_ref,
                    orig_image_ref=orig_image_ref,
                    orig_sys_metadata=orig_sys_metadata,
                    bdms=bdms,
                    recreate=recreate,
                    on_shared_storage=on_shared_storage,
                    preserve_ephemeral=preserve_ephemeral,
                    migration=migration,
                    host=host, node=node, limits=limits)
        LOG.info(trace_event.event('conductor.rebuild_instance.end',instance,context,""))
    def build_instances(self, context, instances, image, filter_properties,
            admin_password, injected_files, requested_networks,
            security_groups, block_device_mapping=None, legacy_bdm=True):
        """Schedule and build a set of instances.

        Selects a destination host for each instance via the scheduler and
        casts ``build_and_run_instance`` to each chosen compute service.
        On a scheduling failure, every instance is put into ERROR state and
        the request is abandoned. Trace events bracket the operation.
        """
        # TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version
        #                 2.0 of the RPC API.
        LOG.info(trace_event.event('conductor.build_instances.start',None,context,""))
        request_spec = scheduler_utils.build_request_spec(context, image,
                                                          instances)
        # TODO(danms): Remove this in version 2.0 of the RPC API
        if (requested_networks and
                not isinstance(requested_networks,
                               objects.NetworkRequestList)):
            requested_networks = objects.NetworkRequestList(
                objects=[objects.NetworkRequest.from_tuple(t)
                         for t in requested_networks])
        # TODO(melwitt): Remove this in version 2.0 of the RPC API
        flavor = filter_properties.get('instance_type')
        if flavor and not isinstance(flavor, objects.Flavor):
            # Code downstream may expect extra_specs to be populated since it
            # is receiving an object, so lookup the flavor to ensure this.
            flavor = objects.Flavor.get_by_id(context, flavor['id'])
            filter_properties = dict(filter_properties, instance_type=flavor)

        try:
            scheduler_utils.setup_instance_group(context, request_spec,
                                                 filter_properties)
            # check retry policy. Rather ugly use of instances[0]...
            # but if we've exceeded max retries... then we really only
            # have a single instance.
            scheduler_utils.populate_retry(filter_properties,
                instances[0].uuid)
            hosts = self.scheduler_client.select_destinations(context,
                    request_spec, filter_properties)
        except Exception as exc:
            # Scheduling failed: flag every instance as errored and give up.
            updates = {'vm_state': vm_states.ERROR, 'task_state': None}
            for instance in instances:
                self._set_vm_state_and_notify(
                    context, instance.uuid, 'build_instances', updates,
                    exc, request_spec)
            return

        for (instance, host) in itertools.izip(instances, hosts):
            try:
                instance.refresh()
            except (exception.InstanceNotFound,
                    exception.InstanceInfoCacheNotFound):
                LOG.debug('Instance deleted during build', instance=instance)
                continue
            local_filter_props = copy.deepcopy(filter_properties)
            scheduler_utils.populate_filter_properties(local_filter_props,
                host)
            # The block_device_mapping passed from the api doesn't contain
            # instance specific information
            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                    context, instance.uuid)

            self.compute_rpcapi.build_and_run_instance(context,
                    instance=instance, host=host['host'], image=image,
                    request_spec=request_spec,
                    filter_properties=local_filter_props,
                    admin_password=admin_password,
                    injected_files=injected_files,
                    requested_networks=requested_networks,
                    security_groups=security_groups,
                    block_device_mapping=bdms, node=host['nodename'],
                    limits=host['limits'])
        # BUG FIX: this statement previously began with a literal TAB,
        # which raises TabError under Python 3 and left its loop membership
        # ambiguous. Emit the end trace once, after all builds are cast,
        # to pair with the single start event above.
        LOG.info(trace_event.event('conductor.build_instances.end',None,context,""))