Example 1
    def _parse_node_properties(self, node):
        """Helper method to parse the node's properties."""
        properties = {}

        for prop in ('cpus', 'memory_mb', 'local_gb'):
            try:
                properties[prop] = int(node.properties.get(prop, 0))
            except (TypeError, ValueError):
                LOG.warning(_LW('Node %(uuid)s has a malformed "%(prop)s". '
                                'It should be an integer.'),
                            {'uuid': node.uuid, 'prop': prop})
                properties[prop] = 0

        raw_cpu_arch = node.properties.get('cpu_arch', None)
        try:
            cpu_arch = arch.canonicalize(raw_cpu_arch)
        except exception.InvalidArchitectureName:
            cpu_arch = None
        if not cpu_arch:
            LOG.warning(_LW("cpu_arch not defined for node '%s'"), node.uuid)

        properties['cpu_arch'] = cpu_arch
        properties['raw_cpu_arch'] = raw_cpu_arch
        properties['capabilities'] = node.properties.get('capabilities')
        return properties
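For illustration, a minimal standalone sketch of the coercion pattern used above; the node_properties dict and its values are hypothetical, not taken from a real Ironic node:

node_properties = {'cpus': '4', 'memory_mb': None, 'local_gb': 'ten'}

parsed = {}
for prop in ('cpus', 'memory_mb', 'local_gb'):
    try:
        parsed[prop] = int(node_properties.get(prop, 0))
    except (TypeError, ValueError):
        # The driver would log a warning here; the sketch just falls back to 0.
        parsed[prop] = 0

print(parsed)  # {'cpus': 4, 'memory_mb': 0, 'local_gb': 0}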
Example 2
    def detach(self,
               context,
               volume_id,
               instance_uuid=None,
               attachment_id=None):
        if attachment_id is None:
            volume = self.get(context, volume_id)
            if volume['multiattach']:
                attachments = volume.get('attachments', {})
                if instance_uuid:
                    attachment_id = attachments.get(instance_uuid, {}).\
                            get('attachment_id')
                    if not attachment_id:
                        LOG.warning(
                            _LW("attachment_id couldn't be retrieved "
                                "for volume %(volume_id)s with "
                                "instance_uuid %(instance_id)s. The "
                                "volume has the 'multiattach' flag "
                                "enabled, without the attachment_id "
                                "Cinder most probably cannot perform "
                                "the detach."), {
                                    'volume_id': volume_id,
                                    'instance_id': instance_uuid
                                })
                else:
                    LOG.warning(
                        _LW("attachment_id couldn't be retrieved for "
                            "volume %(volume_id)s. The volume has the "
                            "'multiattach' flag enabled, without the "
                            "attachment_id Cinder most probably "
                            "cannot perform the detach."),
                        {'volume_id': volume_id})

        cinderclient(context).volumes.detach(volume_id, attachment_id)
Example 3
    def get_info(self, instance):
        """Get the current state and resource usage for this instance.

        If the instance is not found, this method returns an InstanceInfo
        with NOSTATE and all resources == 0.

        :param instance: the instance object.
        :returns: an InstanceInfo object
        """
        try:
            node = self._validate_instance_and_node(instance)
        except exception.InstanceNotFound:
            return hardware.InstanceInfo(
                state=map_power_state(ironic_states.NOSTATE))

        properties = self._parse_node_properties(node)
        memory_kib = properties['memory_mb'] * 1024
        if memory_kib == 0:
            LOG.warning(_LW("Warning, memory usage is 0 for "
                            "%(instance)s on baremetal node %(node)s."),
                        {'instance': instance.uuid,
                         'node': instance.node})

        num_cpu = properties['cpus']
        if num_cpu == 0:
            LOG.warning(_LW("Warning, number of cpus is 0 for "
                            "%(instance)s on baremetal node %(node)s."),
                        {'instance': instance.uuid,
                         'node': instance.node})

        return hardware.InstanceInfo(state=map_power_state(node.power_state),
                                     max_mem_kb=memory_kib,
                                     mem_kb=memory_kib,
                                     num_cpu=num_cpu)
Example 4
    def fetch_server(self, server_id):
        """Fetch fresh server object from Nova.

        Log warnings and return None for non-critical API errors.
        Use this method in various ``check_*_complete`` resource methods,
        where intermittent errors can be tolerated.
        """
        server = None
        try:
            server = self.client().servers.get(server_id)
        except exceptions.OverLimit as exc:
            LOG.warning(_LW("Received an OverLimit response when "
                            "fetching server (%(id)s) : %(exception)s"),
                        {'id': server_id,
                         'exception': exc})
        except exceptions.ClientException as exc:
            if (getattr(exc, 'http_status', getattr(exc, 'code', None)) in
                    (500, 503)):
                LOG.warning(_LW("Received the following exception when "
                                "fetching server (%(id)s) : %(exception)s"),
                            {'id': server_id,
                             'exception': exc})
            else:
                raise
        return server
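A small standalone sketch of the tolerated-error check used above; FakeClientException is a hypothetical stand-in, not a novaclient class:

class FakeClientException(Exception):
    http_status = 503

exc = FakeClientException()
status = getattr(exc, 'http_status', getattr(exc, 'code', None))
print(status in (500, 503))  # True -> the method logs a warning and returns None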
Example 5
    def detach(self, context, volume_id, instance_uuid=None,
               attachment_id=None):
        if attachment_id is None:
            volume = self.get(context, volume_id)
            if volume['multiattach']:
                attachments = volume.get('attachments', {})
                if instance_uuid:
                    attachment_id = attachments.get(instance_uuid, {}).\
                            get('attachment_id')
                    if not attachment_id:
                        LOG.warning(_LW("attachment_id couldn't be retrieved "
                                        "for volume %(volume_id)s with "
                                        "instance_uuid %(instance_id)s. The "
                                        "volume has the 'multiattach' flag "
                                        "enabled, without the attachment_id "
                                        "Cinder most probably cannot perform "
                                        "the detach."),
                                    {'volume_id': volume_id,
                                     'instance_id': instance_uuid})
                else:
                    LOG.warning(_LW("attachment_id couldn't be retrieved for "
                                    "volume %(volume_id)s. The volume has the "
                                    "'multiattach' flag enabled, without the "
                                    "attachment_id Cinder most probably "
                                    "cannot perform the detach."),
                                {'volume_id': volume_id})

        cinderclient(context).volumes.detach(volume_id, attachment_id)
Example 6
def set_vm_state_and_notify(context, instance_uuid, service, method, updates,
                            ex, request_spec):
    """changes VM state and notifies."""
    LOG.warning(_LW("Failed to %(service)s_%(method)s: %(ex)s"),
                {'service': service, 'method': method, 'ex': ex})

    vm_state = updates['vm_state']
    properties = request_spec.get('instance_properties', {})
    # NOTE(vish): We shouldn't get here unless we have a catastrophic
    #             failure, so just set the instance to its internal state
    notifier = rpc.get_notifier(service)
    state = vm_state.upper()
    LOG.warning(_LW('Setting instance to %s state.'), state,
                instance_uuid=instance_uuid)

    instance = objects.Instance(context=context, uuid=instance_uuid,
                                **updates)
    instance.obj_reset_changes(['uuid'])
    instance.save()
    compute_utils.add_instance_fault_from_exc(context,
            instance, ex, sys.exc_info())

    payload = dict(request_spec=request_spec,
                    instance_properties=properties,
                    instance_id=instance_uuid,
                    state=vm_state,
                    method=method,
                    reason=ex)

    event_type = '%s.%s' % (service, method)
    notifier.error(context, event_type, payload)
Example 7
    def authorize_console(self,
                          context,
                          token,
                          console_type,
                          host,
                          port,
                          internal_access_path,
                          instance_uuid,
                          access_url=None):

        token_dict = {
            'token': token,
            'instance_uuid': instance_uuid,
            'console_type': console_type,
            'host': host,
            'port': port,
            'internal_access_path': internal_access_path,
            'access_url': access_url,
            'last_activity_at': time.time()
        }
        data = jsonutils.dumps(token_dict)

        # Log a warning if the token is not cached successfully, because
        # that failure will leave the console for this instance unusable.
        if not self.mc.set(token.encode('UTF-8'), data):
            LOG.warning(_LW("Token: %(token)s failed to save into memcached."),
                        {'token': token})
        tokens = self._get_tokens_for_instance(instance_uuid)

        # Remove the expired tokens from cache.
        token_values = self.mc.get_multi(
            [tok.encode('UTF-8') for tok in tokens])
        tokens = [
            name for name, value in zip(tokens, token_values)
            if value is not None
        ]
        tokens.append(token)

        if not self.mc_instance.set(instance_uuid.encode('UTF-8'),
                                    jsonutils.dumps(tokens)):
            LOG.warning(
                _LW("Instance: %(instance_uuid)s failed to save "
                    "into memcached"), {'instance_uuid': instance_uuid})

        LOG.info(_LI("Received Token: %(token)s, %(token_dict)s"), {
            'token': token,
            'token_dict': token_dict
        })
Example 8
        def do_associate():
            # associate floating ip
            floating = objects.FloatingIP.associate(context, floating_address,
                                                    fixed_address, self.host)
            fixed = floating.fixed_ip
            if not fixed:
                # NOTE(vish): ip was already associated
                return
            try:
                # gogo driver time
                self.l3driver.add_floating_ip(floating_address, fixed_address,
                                              interface, fixed['network'])
            except processutils.ProcessExecutionError as e:
                with excutils.save_and_reraise_exception():
                    try:
                        objects.FloatingIP.disassociate(
                            context, floating_address)
                    except Exception:
                        LOG.warning(
                            _LW('Failed to disassociate floating '
                                'address: %s'), floating_address)
                        pass
                    if "Cannot find device" in six.text_type(e):
                        try:
                            LOG.error(_LE('Interface %s not found'), interface)
                        except Exception:
                            pass
                        raise exception.NoFloatingIpInterface(
                            interface=interface)

            payload = dict(project_id=context.project_id,
                           instance_id=instance_uuid,
                           floating_ip=floating_address)
            self.notifier.info(context, 'network.floating_ip.associate',
                               payload)
Example 9
    def _report_state(self, service):
        """Update the state of this service in the datastore."""

        try:
            service.service_ref.report_count += 1
            service.service_ref.save()

            # TODO(termie): make this pattern be more elegant.
            if getattr(service, 'model_disconnected', False):
                service.model_disconnected = False
                LOG.info(
                    _LI('Recovered from being unable to report status.'))
        except messaging.MessagingTimeout:
            # NOTE(johngarbutt) during upgrade we will see messaging timeouts
            # as compute-conductor is restarted, so only log this error once.
            if not getattr(service, 'model_disconnected', False):
                service.model_disconnected = True
                LOG.warn(_LW('Lost connection to compute-conductor '
                             'for reporting service status.'))
        except Exception:
            # NOTE(rpodolyaka): we'd like to avoid catching of all possible
            # exceptions here, but otherwise it would become possible for
            # the state reporting thread to stop abruptly, and thus leave
            # the service unusable until it's restarted.
            LOG.exception(
                _LE('Unexpected error while reporting service status'))
            # trigger the recovery log message, if this error goes away
            service.model_disconnected = True
Example 10
 def __init__(self, scheduler_driver=None, *args, **kwargs):
     if not scheduler_driver:
         scheduler_driver = CONF.compute_scheduler_driver
     try:
         self.driver = driver.DriverManager(
             "jacket.compute.scheduler.driver",
             scheduler_driver,
             invoke_on_load=True).driver
     # TODO(Yingxin): Change to catch stevedore.exceptions.NoMatches after
     # stevedore v1.9.0
     except RuntimeError:
         # NOTE(Yingxin): Loading full class path is deprecated and should
         # be removed in the N release.
         try:
             self.driver = importutils.import_object(scheduler_driver)
             LOG.warning(
                 _LW("DEPRECATED: scheduler_driver uses "
                     "classloader to load %(path)s. This legacy "
                     "loading style will be removed in the "
                     "N release."), {'path': scheduler_driver})
         except (ImportError, ValueError):
             raise RuntimeError(
                 _("Cannot load scheduler driver from configuration "
                   "%(conf)s."), {'conf': scheduler_driver})
     super(SchedulerManager, self).__init__(service_name='scheduler',
                                            *args,
                                            **kwargs)
Example 11
    def migrate_instance_finish(self, context, instance_uuid,
                                floating_addresses, host=None,
                                rxtx_factor=None, project_id=None,
                                source=None, dest=None):
        # We only care if floating_addresses are provided and we're
        # switching hosts
        if host and not dest:
            dest = host
        if not floating_addresses or (source and source == dest):
            return

        LOG.info(_LI("Finishing migration network for instance %s"),
                 instance_uuid)

        for address in floating_addresses:
            floating_ip = objects.FloatingIP.get_by_address(context, address)

            if self._is_stale_floating_ip_address(context, floating_ip):
                LOG.warning(_LW("Floating IP address |%(address)s| no longer "
                                "belongs to instance %(instance_uuid)s. "
                                "Will not setup it."),
                            {'address': address,
                             'instance_uuid': instance_uuid})
                continue

            floating_ip.host = dest
            floating_ip.save()

            interface = CONF.public_interface or floating_ip.interface
            fixed_ip = floating_ip.fixed_ip
            self.l3driver.add_floating_ip(floating_ip.address,
                                          fixed_ip.address,
                                          interface,
                                          fixed_ip.network)
Example 12
def init():
    from oslo_config import cfg
    CONF = cfg.CONF

    # NOTE(markmc): gracefully handle the CLI options not being registered
    if 'remote_debug' not in CONF:
        return

    if not (CONF.remote_debug.host and CONF.remote_debug.port):
        return

    import logging
    from jacket.i18n import _LW
    LOG = logging.getLogger(__name__)

    LOG.debug('Listening on %(host)s:%(port)s for debug connection', {
        'host': CONF.remote_debug.host,
        'port': CONF.remote_debug.port
    })

    try:
        from pydev import pydevd
    except ImportError:
        import pydevd
    pydevd.settrace(host=CONF.remote_debug.host,
                    port=CONF.remote_debug.port,
                    stdoutToServer=False,
                    stderrToServer=False)

    LOG.warning(
        _LW('WARNING: Using the remote debug option changes how '
            'Nova uses the eventlet library to support async IO. This '
            'could result in failures that do not occur under normal '
            'operation. Use at your own risk.'))
Example 13
    def host_passes(self, host_state, spec_obj):
        """Checks a host in an aggregate that metadata key/value match
        with image properties.
        """
        cfg_namespace = CONF.aggregate_image_properties_isolation_namespace
        cfg_separator = CONF.aggregate_image_properties_isolation_separator

        image_props = spec_obj.image.properties if spec_obj.image else {}
        metadata = utils.aggregate_metadata_get_by_host(host_state)

        for key, options in six.iteritems(metadata):
            if (cfg_namespace and
                    not key.startswith(cfg_namespace + cfg_separator)):
                continue
            prop = None
            try:
                prop = image_props.get(key)
            except AttributeError:
                LOG.warning(_LW("Host '%(host)s' has a metadata key '%(key)s' "
                                "that is not present in the image metadata.") %
                                {"host": host_state.host, "key": key})
                continue

            # NOTE(sbauza): Aggregate metadata is only strings, we need to
            # stringify the property to match with the option
            # TODO(sbauza): Fix that very ugly pattern matching
            if prop and str(prop) not in options:
                LOG.debug("%(host_state)s fails image aggregate properties "
                            "requirements. Property %(prop)s does not "
                            "match %(options)s.",
                          {'host_state': host_state,
                           'prop': prop,
                           'options': options})
                return False
        return True
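As a rough illustration of the matching rule above, a standalone sketch with hypothetical values; aggregate metadata maps each key to a set of allowed strings, and the stringified image property must appear in that set:

metadata = {'os_type': {'linux', 'windows'}}
image_props = {'os_type': 'linux'}

for key, options in metadata.items():
    prop = image_props.get(key)
    if prop and str(prop) not in options:
        print('host fails on key %s' % key)
        break
else:
    print('host passes')  # printed here, since 'linux' is an allowed value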
Example 14
    def _reclaim_queued_deletes(self, context):
        """Reclaim instances that are queued for deletion."""
        interval = CONF.reclaim_instance_interval
        if interval <= 0:
            LOG.debug("CONF.reclaim_instance_interval <= 0, skipping...")
            return

        # TODO(comstud, jichenjc): Dummy quota object for now. See bug 1296414.
        # The only case where the quota might be inconsistent is if the
        # cloud node died between setting the instance state to SOFT_DELETED
        # and committing the quota to the DB. When the cloud node starts
        # again it has no way to know whether the reservation was committed
        # or has expired. Since this is a rare case, it is marked as a todo.
        quotas = objects.Quotas.from_reservations(context, None)

        filters = {'vm_state': vm_states.SOFT_DELETED,
                   'task_state': None,
                   'host': self.host}
        instances = objects.InstanceList.get_by_filters(
            context, filters,
            expected_attrs=objects.instance.INSTANCE_DEFAULT_FIELDS,
            use_slave=True)
        for instance in instances:
            if self._deleted_old_enough(instance, interval):
                bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                    context, instance.uuid)
                LOG.info(_LI('Reclaiming deleted instance'), instance=instance)
                try:
                    self._delete_instance(context, instance, bdms, quotas)
                except Exception as e:
                    LOG.warning(_LW("Periodic reclaim failed to delete "
                                    "instance: %s"),
                                e, instance=instance)
Example 15
    def _run(self, name, method_type, args, kwargs, func=None):
        if method_type not in ('pre', 'post'):
            msg = _("Wrong type of hook method. "
                    "Only 'pre' and 'post' type allowed")
            raise ValueError(msg)

        for e in self.extensions:
            obj = e.obj
            hook_method = getattr(obj, method_type, None)
            if hook_method:
                LOG.warning(_LW("Hooks are deprecated as of Nova 13.0 and "
                                "will be removed in a future release"))
                LOG.debug("Running %(name)s %(type)s-hook: %(obj)s",
                          {'name': name, 'type': method_type, 'obj': obj})
                try:
                    if func:
                        hook_method(func, *args, **kwargs)
                    else:
                        hook_method(*args, **kwargs)
                except FatalHookException:
                    msg = _LE("Fatal Exception running %(name)s "
                              "%(type)s-hook: %(obj)s")
                    LOG.exception(msg, {'name': name, 'type': method_type,
                                        'obj': obj})
                    raise
                except Exception:
                    msg = _LE("Exception running %(name)s "
                              "%(type)s-hook: %(obj)s")
                    LOG.exception(msg, {'name': name, 'type': method_type,
                                        'obj': obj})
Example 16
    def load_extension(self, ext_factory):
        """Execute an extension factory.

        Loads an extension.  The 'ext_factory' is the name of a
        callable that will be imported and called with one
        argument--the extension manager.  The factory callable is
        expected to call the register() method at least once.
        """

        LOG.debug("Loading extension %s", ext_factory)

        if isinstance(ext_factory, six.string_types):
            if ext_factory.startswith('jacket.compute.cloud.contrib'):
                LOG.warn(_LW("The legacy v2 API module already moved into"
                             "'jacket.compute.cloud.legacy_v2.contrib'. "
                             "Use new path instead of old path %s"),
                         ext_factory)
                ext_factory = ext_factory.replace('contrib',
                                                  'legacy_v2.contrib')
            # Load the factory
            factory = importutils.import_class(ext_factory)
        else:
            factory = ext_factory

        # Call it
        LOG.debug("Calling extension factory %s", ext_factory)
        factory(self)
Example 17
    def _get_meta_by_instance_id(self, instance_id, tenant_id, remote_address):
        try:
            meta_data = self.get_metadata_by_instance_id(
                instance_id, remote_address)
        except Exception:
            LOG.exception(_LE('Failed to get metadata for instance id: %s'),
                          instance_id)
            msg = _('An unknown error has occurred. '
                    'Please try your request again.')
            raise webob.exc.HTTPInternalServerError(
                explanation=six.text_type(msg))

        if meta_data is None:
            LOG.error(_LE('Failed to get metadata for instance id: %s'),
                      instance_id)
        elif meta_data.instance.project_id != tenant_id:
            LOG.warning(
                _LW("Tenant_id %(tenant_id)s does not match tenant_id "
                    "of instance %(instance_id)s."), {
                        'tenant_id': tenant_id,
                        'instance_id': instance_id
                    })
            # causes a 404 to be raised
            meta_data = None

        return meta_data
Example 18
    def __init__(self, names, propagate_map_exceptions=False):
        """Initialise the resource handler by loading the plugins.

        The ResourceHandler uses stevedore to load the resource plugins.
        The handler can handle and report exceptions raised in the plugins
        depending on the value of the propagate_map_exceptions parameter.
        It is useful in testing to propagate exceptions so they are exposed
        as part of the test. If exceptions are not propagated they are
        logged at error level.

        Any named plugins that are not located are logged.

        :param names: the list of plugins to load by name
        :param propagate_map_exceptions: True indicates exceptions in the
        plugins should be raised, False indicates they should be handled and
        logged.
        """
        self._mgr = stevedore.NamedExtensionManager(
            namespace=RESOURCE_NAMESPACE,
            names=names,
            propagate_map_exceptions=propagate_map_exceptions,
            invoke_on_load=True)
        if self._mgr.names():
            LOG.warning(_LW(
                'The Extensible Resource Tracker is deprecated and will '
                'be removed in the 14.0.0 release. If you '
                'use this functionality and have custom resources that '
                'are managed by the Extensible Resource Tracker, please '
                'contact the Nova development team by posting to the '
                'openstack-dev mailing list. There is no future planned '
                'support for the tracking of custom resources.'))
        self._log_missing_plugins(names)
Example 19
def main():
    config.parse_args(sys.argv)
    logging.setup(CONF, 'jacket')
    utils.monkey_patch()
    objects.register_all()

    gmr.TextGuruMeditation.setup_autorun(version)

    # TODO(nkapotoxin) remove this config, db call is supposed to be local
    if not CONF.conductor.use_local:
        block_db_access()
        objects_base.NovaObject.indirection_api = \
            conductor_rpcapi.ConductorAPI()
    else:
        LOG.warning(
            _LW('Conductor local mode is deprecated and will '
                'be removed in a subsequent release'))

    # server = service.Service.create(binary='jacket-worker',
    server = service.Service.create(binary='nova-compute',
                                    topic=CONF.jacket_topic,
                                    db_allowed=CONF.conductor.use_local)
    workers = CONF.worker.workers or processutils.get_worker_count()
    service.serve(server, workers=workers)
    service.wait()
Example 20
def is_neutron():
    """Does this configuration mean we're neutron.

    This logic exists as a separate config option
    """
    legacy_class = oslo_config.cfg.CONF.network_api_class
    use_neutron = oslo_config.cfg.CONF.use_neutron

    if legacy_class not in (NEUTRON_NET_API, NOVA_NET_API):
        # Someone actually used this option, this gets a pass for now,
        # but will just go away once deleted.
        return None
    elif legacy_class == NEUTRON_NET_API and not use_neutron:
        # If they specified neutron via class, we should respect that
        LOG.warn(
            _LW("Config mismatch. The network_api_class specifies %s, "
                "however use_neutron is not set to True. Using Neutron "
                "networking for now, however please set use_neutron to "
                "True in your configuration as network_api_class is "
                "deprecated and will be removed."), legacy_class)
        return True
    elif use_neutron:
        return True
    else:
        return False
Example 21
    def _floating_ip_owned_by_project(self, context, floating_ip):
        """Raises if floating IP does not belong to project."""
        if context.is_admin:
            return

        if floating_ip.project_id != context.project_id:
            if floating_ip.project_id is None:
                LOG.warning(_LW('Address |%(address)s| is not allocated'),
                            {'address': floating_ip.address})
                raise exception.Forbidden()
            else:
                LOG.warning(_LW('Address |%(address)s| is not allocated '
                                'to your project |%(project)s|'),
                            {'address': floating_ip.address,
                             'project': context.project_id})
                raise exception.Forbidden()
Example 22
    def add_to_instance(self, context, instance, security_group_name):
        """Add security group to the instance."""

        neutron = neutronapi.get_client(context)
        try:
            security_group_id = neutronv20.find_resourceid_by_name_or_id(
                neutron, 'security_group', security_group_name,
                context.project_id)
        except n_exc.NeutronClientNoUniqueMatch as e:
            raise exception.NoUniqueMatch(six.text_type(e))
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                msg = (_("Security group %(name)s is not found for "
                         "project %(project)s") % {
                             'name': security_group_name,
                             'project': context.project_id
                         })
                self.raise_not_found(msg)
            else:
                LOG.exception(_LE("Neutron Error:"))
                six.reraise(*exc_info)
        params = {'device_id': instance.uuid}
        try:
            ports = neutron.list_ports(**params).get('ports')
        except n_exc.NeutronClientException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Neutron Error:"))

        if not ports:
            msg = (_("instance_id %s could not be found as device id on"
                     " any ports") % instance.uuid)
            self.raise_not_found(msg)

        for port in ports:
            if not self._has_security_group_requirements(port):
                LOG.warning(
                    _LW("Cannot add security group %(name)s to "
                        "%(instance)s since the port %(port_id)s "
                        "does not meet security requirements"), {
                            'name': security_group_name,
                            'instance': instance.uuid,
                            'port_id': port['id']
                        })
                raise exception.SecurityGroupCannotBeApplied()
            if 'security_groups' not in port:
                port['security_groups'] = []
            port['security_groups'].append(security_group_id)
            updated_port = {'security_groups': port['security_groups']}
            try:
                LOG.info(
                    _LI("Adding security group %(security_group_id)s to "
                        "port %(port_id)s"), {
                            'security_group_id': security_group_id,
                            'port_id': port['id']
                        })
                neutron.update_port(port['id'], {'port': updated_port})
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Neutron Error:"))
Example 23
    def migrate_instance_start(self, context, instance_uuid,
                               floating_addresses,
                               rxtx_factor=None, project_id=None,
                               source=None, dest=None):
        # We only care if floating_addresses are provided and we're
        # switching hosts
        if not floating_addresses or (source and source == dest):
            return

        LOG.info(_LI("Starting migration network for instance %s"),
                 instance_uuid)
        for address in floating_addresses:
            floating_ip = objects.FloatingIP.get_by_address(context, address)

            if self._is_stale_floating_ip_address(context, floating_ip):
                LOG.warning(_LW("Floating IP address |%(address)s| no longer "
                                "belongs to instance %(instance_uuid)s. "
                                "Will not migrate it "),
                            {'address': address,
                             'instance_uuid': instance_uuid})
                continue

            interface = CONF.public_interface or floating_ip.interface
            fixed_ip = floating_ip.fixed_ip
            self.l3driver.remove_floating_ip(floating_ip.address,
                                             fixed_ip.address,
                                             interface,
                                             fixed_ip.network)

            # NOTE(wenjianhn): Make sure this address will not be bound to the
            # public interface when compute-network restarts on the dest node
            floating_ip.host = None
            floating_ip.save()
Example 24
def main():
    config.parse_args(sys.argv)
    logging.setup(CONF, "jacket")
    utils.monkey_patch()
    objects.register_all()
    log = logging.getLogger(__name__)

    gmr.TextGuruMeditation.setup_autorun(version)

    launcher = service.process_launcher()
    started = 0
    for api in CONF.enabled_apis:
        should_use_ssl = api in CONF.enabled_ssl_apis
        try:
            server = service.WSGIService(api, use_ssl=should_use_ssl)
            launcher.launch_service(server, workers=server.workers or 1)
            started += 1
        except exception.PasteAppNotFound as ex:
            log.warning(
                _LW("%s. ``enabled_apis`` includes bad values. "
                    "Fix to remove this warning."), six.text_type(ex))

    if started == 0:
        log.error(_LE('No APIs were started. '
                      'Check the enabled_apis config option.'))
        sys.exit(1)

    launcher.wait()
Example 25
def main():
    config.parse_args(sys.argv)
    logging.setup(CONF, "jacket")
    utils.monkey_patch()
    objects.register_all()
    log = logging.getLogger(__name__)

    gmr.TextGuruMeditation.setup_autorun(version)

    launcher = service.process_launcher()
    started = 0
    for api in CONF.enabled_apis:
        should_use_ssl = api in CONF.enabled_ssl_apis
        try:
            server = service.WSGIService(api, use_ssl=should_use_ssl)
            launcher.launch_service(server, workers=server.workers or 1)
            started += 1
        except exception.PasteAppNotFound as ex:
            log.warning(
                _LW("%s. ``enabled_apis`` includes bad values. "
                    "Fix to remove this warning."), six.text_type(ex))

    if started == 0:
        log.error(
            _LE('No APIs were started. '
                'Check the enabled_apis config option.'))
        sys.exit(1)

    launcher.wait()
Example 26
 def __init__(self):
     versionutils.report_deprecated_feature(
         LOG,
         _LW('The in tree EC2 API has been removed in Mitaka. '
             'Please remove entries from api-paste.ini and use '
             'the OpenStack ec2-api project '
             'http://git.openstack.org/cgit/openstack/ec2-api/'))
Example 27
def parse_options(opts, sep='=', converter=str, name=""):
    """Parse a list of options, each in the format of <key><sep><value>. Also
    use the converter to convert the value into desired type.

    :param opts: list of options, e.g. from oslo_config.cfg.ListOpt
    :param sep: the separator
    :param converter: callable object to convert the value; should raise
                      ValueError on conversion failure
    :param name: name of the option

    :returns: a list of (key, converted_value) tuples
    """
    good = []
    bad = []
    for opt in opts:
        try:
            key, seen_sep, value = opt.partition(sep)
            value = converter(value)
        except ValueError:
            key = None
            value = None
        if key and seen_sep and value is not None:
            good.append((key, value))
        else:
            bad.append(opt)
    if bad:
        LOG.warning(_LW("Ignoring the invalid elements of the option "
                        "%(name)s: %(options)s"),
                    {'name': name,
                     'options': ", ".join(bad)})
    return good
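A possible usage sketch, assuming it runs in the module above where parse_options, LOG and _LW are defined (the option values are hypothetical):

good = parse_options(['ratio=1.5', 'bogus'], converter=float, name='weights')
assert good == [('ratio', 1.5)]  # 'bogus' is dropped and reported in a warning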
Example 28
def setup_profiler(binary, host):
    if (osprofiler_notifier is None or
                profiler is None or
                osprofiler_web is None or
                profiler_opts is None):
        LOG.debug('osprofiler is not present')
        return

    if CONF.profiler.enabled:
        _notifier = osprofiler_notifier.create(
            "Messaging", messaging, context.get_admin_context().to_dict(),
            rpc.TRANSPORT, "cinder", binary, host)
        osprofiler_notifier.set(_notifier)
        osprofiler_web.enable(CONF.profiler.hmac_keys)
        LOG.warning(
            _LW("OSProfiler is enabled.\nIt means that person who knows "
                "any of hmac_keys that are specified in "
                "/etc/cinder/cinder.conf can trace his requests. \n"
                "In real life only operator can read this file so there "
                "is no security issue. Note that even if person can "
                "trigger profiler, only admin user can retrieve trace "
                "information.\n"
                "To disable OSprofiler set in cinder.conf:\n"
                "[profiler]\nenabled=false"))
    else:
        osprofiler_web.disable()
Example 29
    def _report_state(self, service):
        """Update the state of this service in the datastore."""

        try:
            service.service_ref.report_count += 1
            service.service_ref.save()

            # TODO(termie): make this pattern be more elegant.
            if getattr(service, 'model_disconnected', False):
                service.model_disconnected = False
                LOG.info(_LI('Recovered from being unable to report status.'))
        except messaging.MessagingTimeout:
            # NOTE(johngarbutt) during upgrade we will see messaging timeouts
            # as compute-conductor is restarted, so only log this error once.
            if not getattr(service, 'model_disconnected', False):
                service.model_disconnected = True
                LOG.warn(
                    _LW('Lost connection to compute-conductor '
                        'for reporting service status.'))
        except Exception:
            # NOTE(rpodolyaka): we'd like to avoid catching of all possible
            # exceptions here, but otherwise it would become possible for
            # the state reporting thread to stop abruptly, and thus leave
            # the service unusable until it's restarted.
            LOG.exception(
                _LE('Unexpected error while reporting service status'))
            # trigger the recovery log message, if this error goes away
            service.model_disconnected = True
Example 30
 def _get_host_metrics(self, context, nodename):
     """Get the metrics from monitors and
     notify information to message bus.
     """
     metrics = objects.MonitorMetricList()
     metrics_info = {}
     for monitor in self.monitors:
         try:
             monitor.add_metrics_to_list(metrics)
         except Exception as exc:
             LOG.warning(
                 _LW("Cannot get the metrics from %(mon)s; "
                     "error: %(exc)s"), {
                         'mon': monitor,
                         'exc': exc
                     })
     # TODO(jaypipes): Remove this when compute_node.metrics doesn't need
     # to be populated as a JSON-ified string.
     metrics = metrics.to_list()
     if len(metrics):
         metrics_info['nodename'] = nodename
         metrics_info['metrics'] = metrics
         metrics_info['host'] = self.host
         metrics_info['host_ip'] = CONF.my_ip
         notifier = rpc.get_notifier(service='compute', host=nodename)
         notifier.info(context, 'compute.metrics.update', metrics_info)
     return metrics
Example 31
        def do_associate():
            # associate floating ip
            floating = objects.FloatingIP.associate(context, floating_address,
                                                    fixed_address, self.host)
            fixed = floating.fixed_ip
            if not fixed:
                # NOTE(vish): ip was already associated
                return
            try:
                # gogo driver time
                self.l3driver.add_floating_ip(floating_address, fixed_address,
                        interface, fixed['network'])
            except processutils.ProcessExecutionError as e:
                with excutils.save_and_reraise_exception():
                    try:
                        objects.FloatingIP.disassociate(context,
                                                        floating_address)
                    except Exception:
                        LOG.warning(_LW('Failed to disassociate floating '
                                        'address: %s'), floating_address)
                        pass
                    if "Cannot find device" in six.text_type(e):
                        try:
                            LOG.error(_LE('Interface %s not found'), interface)
                        except Exception:
                            pass
                        raise exception.NoFloatingIpInterface(
                                interface=interface)

            payload = dict(project_id=context.project_id,
                           instance_id=instance_uuid,
                           floating_ip=floating_address)
            self.notifier.info(context,
                               'network.floating_ip.associate', payload)
Example 32
 def __init__(self):
     try:
         self.host_manager = driver.DriverManager(
                 "jacket.compute.scheduler.host_manager",
                 CONF.compute_scheduler_host_manager,
                 invoke_on_load=True).driver
     # TODO(Yingxin): Change to catch stevedore.exceptions.NoMatches
     # after stevedore v1.9.0
     except RuntimeError:
         # NOTE(Yingxin): Loading full class path is deprecated and
         # should be removed in the N release.
         try:
             self.host_manager = importutils.import_object(
                 CONF.compute_scheduler_host_manager)
             LOG.warning(_LW("DEPRECATED: compute_scheduler_host_manager uses "
                             "classloader to load %(path)s. This legacy "
                             "loading style will be removed in the "
                             "N release."),
                         {'path': CONF.compute_scheduler_host_manager})
         except (ImportError, ValueError):
             raise RuntimeError(
                     _("Cannot load host manager from configuration "
                       "compute_scheduler_host_manager = %(conf)s."),
                     {'conf': CONF.compute_scheduler_host_manager})
     self.servicegroup_api = servicegroup.API()
Example 33
 def inner(*args, **kwargs):
     if not CONF.allow_instance_snapshots:
         LOG.warning(_LW('Rejecting snapshot request, snapshots currently'
                         ' disabled'))
         msg = _("Instance snapshots are not permitted at this time.")
         raise webob.exc.HTTPBadRequest(explanation=msg)
     return f(*args, **kwargs)
Example 34
    def __init__(self, names, propagate_map_exceptions=False):
        """Initialise the resource handler by loading the plugins.

        The ResourceHandler uses stevedore to load the resource plugins.
        The handler can handle and report exceptions raised in the plugins
        depending on the value of the propagate_map_exceptions parameter.
        It is useful in testing to propagate exceptions so they are exposed
        as part of the test. If exceptions are not propagated they are
        logged at error level.

        Any named plugins that are not located are logged.

        :param names: the list of plugins to load by name
        :param propagate_map_exceptions: True indicates exceptions in the
        plugins should be raised, False indicates they should be handled and
        logged.
        """
        self._mgr = stevedore.NamedExtensionManager(
            namespace=RESOURCE_NAMESPACE,
            names=names,
            propagate_map_exceptions=propagate_map_exceptions,
            invoke_on_load=True)
        if self._mgr.names():
            LOG.warning(
                _LW('The Extensible Resource Tracker is deprecated and will '
                    'be removed in the 14.0.0 release. If you '
                    'use this functionality and have custom resources that '
                    'are managed by the Extensible Resource Tracker, please '
                    'contact the Nova development team by posting to the '
                    'openstack-dev mailing list. There is no future planned '
                    'support for the tracking of custom resources.'))
        self._log_missing_plugins(names)
Example 35
 def __init__(self):
     try:
         self.host_manager = driver.DriverManager(
             "jacket.compute.scheduler.host_manager",
             CONF.compute_scheduler_host_manager,
             invoke_on_load=True).driver
     # TODO(Yingxin): Change to catch stevedore.exceptions.NoMatches
     # after stevedore v1.9.0
     except RuntimeError:
         # NOTE(Yingxin): Loading full class path is deprecated and
         # should be removed in the N release.
         try:
             self.host_manager = importutils.import_object(
                 CONF.compute_scheduler_host_manager)
             LOG.warning(
                 _LW("DEPRECATED: compute_scheduler_host_manager uses "
                     "classloader to load %(path)s. This legacy "
                     "loading style will be removed in the "
                     "N release."),
                 {'path': CONF.compute_scheduler_host_manager})
         except (ImportError, ValueError):
             raise RuntimeError(
                 _("Cannot load host manager from configuration "
                   "compute_scheduler_host_manager = %(conf)s."),
                 {'conf': CONF.compute_scheduler_host_manager})
     self.servicegroup_api = servicegroup.API()
Example 36
    def _register_controllers(self, ext):
        """Register controllers defined by the extensions

        Extensions define what resources they want to add through
        a get_controller_extensions function
        """

        handler = ext.obj
        LOG.debug("Running _register_controllers on %s", ext.obj)

        for extension in handler.get_controller_extensions():
            ext_name = extension.extension.name
            collection = extension.collection
            controller = extension.controller

            if collection not in self.resources:
                LOG.warning(
                    _LW('Extension %(ext_name)s: Cannot extend '
                        'resource %(collection)s: No such resource'), {
                            'ext_name': ext_name,
                            'collection': collection
                        })
                continue

            LOG.debug(
                'Extension %(ext_name)s extending resource: '
                '%(collection)s', {
                    'ext_name': ext_name,
                    'collection': collection
                })

            resource = self.resources[collection]
            resource.register_actions(controller)
            resource.register_extensions(controller)
Example 37
    def _claim_instance(self, context, instance, prefix=''):
        pci_requests = objects.InstancePCIRequests.get_by_instance(
            context, instance)
        if not pci_requests.requests:
            return None
        instance_numa_topology = hardware.instance_topology_from_instance(
            instance)
        instance_cells = None
        if instance_numa_topology:
            instance_cells = instance_numa_topology.cells

        devs = self.stats.consume_requests(pci_requests.requests,
                                           instance_cells)
        if not devs:
            return None

        for dev in devs:
            dev.claim(instance)
        if instance_numa_topology and any(dev.numa_node is None
                                          for dev in devs):
            LOG.warning(
                _LW("Assigning a pci device without numa affinity to"
                    "instance %(instance)s which has numa topology"),
                {'instance': instance['uuid']})
        return devs
Example 38
    def host_passes(self, host_state, spec_obj):
        """Return True if host has the exact number of CPU cores."""
        if not host_state.vcpus_total:
            # Fail safe
            LOG.warning(_LW("VCPUs not set; assuming CPU collection broken"))
            return False

        required_vcpus = spec_obj.vcpus
        usable_vcpus = host_state.vcpus_total - host_state.vcpus_used

        if required_vcpus != usable_vcpus:
            LOG.debug(
                "%(host_state)s does not have exactly "
                "%(requested_vcpus)s cores of usable vcpu, it has "
                "%(usable_vcpus)s.", {
                    'host_state': host_state,
                    'requested_vcpus': required_vcpus,
                    'usable_vcpus': usable_vcpus
                })
            return False

        # NOTE(mgoddard): Setting the limit ensures that it is enforced in
        # compute. This ensures that if multiple instances are scheduled to a
        # single host, then all after the first will fail in the claim.
        host_state.limits['vcpu'] = host_state.vcpus_total
        return True
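A minimal sketch of the exact-match check in isolation; FakeHostState and FakeSpec are hypothetical stand-ins, not real Nova objects:

import collections

FakeHostState = collections.namedtuple('FakeHostState', 'vcpus_total vcpus_used')
FakeSpec = collections.namedtuple('FakeSpec', 'vcpus')

host = FakeHostState(vcpus_total=8, vcpus_used=4)
spec = FakeSpec(vcpus=4)

usable = host.vcpus_total - host.vcpus_used
print(spec.vcpus == usable)  # True: exactly 4 usable cores for a 4-vCPU request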
Example 39
    def destroy(self, context, instance, network_info,
                block_device_info=None, destroy_disks=True, migrate_data=None):
        """Destroy the specified instance, if it can be found.

        :param context: The security context.
        :param instance: The instance object.
        :param network_info: Instance network information.
        :param block_device_info: Instance block device
            information. Ignored by this driver.
        :param destroy_disks: Indicates if disks should be
            destroyed. Ignored by this driver.
        :param migrate_data: implementation specific params.
            Ignored by this driver.
        """
        LOG.debug('Destroy called for instance', instance=instance)
        try:
            node = self._validate_instance_and_node(instance)
        except exception.InstanceNotFound:
            LOG.warning(_LW("Destroy called on non-existing instance %s."),
                        instance.uuid)
            # NOTE(deva): if compute.compute.ComputeManager._delete_instance()
            #             is called on a non-existing instance, the only way
            #             to delete it is to return from this method
            #             without raising any exceptions.
            return

        if node.provision_state in _UNPROVISION_STATES:
            self._unprovision(instance, node)

        self._cleanup_deploy(node, instance, network_info)
        LOG.info(_LI('Successfully unprovisioned Ironic node %s'),
                 node.uuid, instance=instance)
Example 40
    def _parse_node_instance_info(self, node, props):
        """Helper method to parse the node's instance info.

        If a property cannot be looked up via instance_info, use the original
        value from the properties dict. This is most likely to be correct;
        it should only be incorrect if the properties were changed directly
        in Ironic while an instance was deployed.
        """
        instance_info = {}

        # add this key because it's different in instance_info for some reason
        props['vcpus'] = props['cpus']
        for prop in ('vcpus', 'memory_mb', 'local_gb'):
            original = props[prop]
            try:
                instance_info[prop] = int(node.instance_info.get(prop,
                                                                 original))
            except (TypeError, ValueError):
                LOG.warning(_LW('Node %(uuid)s has a malformed "%(prop)s". '
                                'It should be an integer but its value '
                                'is "%(value)s".'),
                            {'uuid': node.uuid, 'prop': prop,
                             'value': node.instance_info.get(prop)})
                instance_info[prop] = original

        return instance_info
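For illustration, a standalone sketch of the fallback behaviour described in the docstring; the two dicts are hypothetical stand-ins for the Ironic node's fields:

properties = {'vcpus': 4, 'memory_mb': 4096, 'local_gb': 40}
instance_info = {'vcpus': '2', 'memory_mb': 'oops'}  # malformed memory_mb

parsed = {}
for prop in ('vcpus', 'memory_mb', 'local_gb'):
    original = properties[prop]
    try:
        parsed[prop] = int(instance_info.get(prop, original))
    except (TypeError, ValueError):
        parsed[prop] = original  # fall back to the value from properties

print(parsed)  # {'vcpus': 2, 'memory_mb': 4096, 'local_gb': 40}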
Example 41
 def __init__(self):
     versionutils.report_deprecated_feature(
             LOG,
             _LW('The in tree EC2 API has been removed in Mitaka. '
                 'Please remove entries from api-paste.ini and use '
                 'the OpenStack ec2-api project '
                 'http://git.openstack.org/cgit/openstack/ec2-api/')
     )
Example 42
 def _set_migration_to_error(migration, reason, **kwargs):
     LOG.warning(_LW("Setting migration %(migration_id)s to error: "
                     "%(reason)s"),
                 {'migration_id': migration['id'], 'reason': reason},
                 **kwargs)
     migration.status = 'error'
     with migration.obj_as_admin():
         migration.save()
Example 43
 def _get_compute_node(self, context):
     """Returns compute node for the host and nodename."""
     try:
         return objects.ComputeNode.get_by_host_and_nodename(
             context, self.host, self.nodename)
     except exception.NotFound:
         LOG.warning(_LW("No compute node record for %(host)s:%(node)s"),
                     {'host': self.host, 'node': self.nodename})
Example 44
 def create_private_dns_domain(self, context, domain, av_zone):
     objects.DNSDomain.register_for_zone(context, domain, av_zone)
     try:
         self.instance_dns_manager.create_domain(domain)
     except exception.FloatingIpDNSExists:
         LOG.warning(_LW('Domain |%(domain)s| already exists, '
                         'changing zone to |%(av_zone)s|.'),
                     {'domain': domain, 'av_zone': av_zone})
Example 45
 def create_public_dns_domain(self, context, domain, project):
     objects.DNSDomain.register_for_project(context, domain, project)
     try:
         self.floating_dns_manager.create_domain(domain)
     except exception.FloatingIpDNSExists:
         LOG.warning(_LW('Domain |%(domain)s| already exists, '
                         'changing project to |%(project)s|.'),
                     {'domain': domain, 'project': project})
Example 46
 def __init__(self, ext_mgr=None, init_only=None):
     LOG.warning(
         _LW("Deprecated: Starting with the Liberty release, the v2 API was "
             "already deprecated and the v2.1 API is set as the default. Nova "
             "also supports v2.1 API legacy v2 compatible mode for switching "
             "to v2.1 API smoothly. For more information on how to configure "
             "v2.1 API and legacy v2 compatible mode, please refer Nova "
             "api-paste.ini sample file."))
     super(APIRouter, self).__init__(ext_mgr=ext_mgr, init_only=init_only)
Example 47
    def _sync_power_states(self, context):
        """Align power states between the database and the hypervisor.

        To sync power state data we make a DB call to get the number of
        virtual machines known by the hypervisor and if the number matches the
        number of virtual machines known by the database, we proceed in a lazy
        loop, one database record at a time, checking if the hypervisor has the
        same power state as is in the database.
        """
        db_instances = objects.InstanceList.get_by_host(context, self.host,
                                                        expected_attrs=[],
                                                        use_slave=True)

        #num_vm_instances = self.driver.get_num_instances()
        vm_instances_stats = self.driver.list_instances_stats()
        num_vm_instances = len(vm_instances_stats)
        num_db_instances = len(db_instances)

        if num_vm_instances != num_db_instances:
            LOG.warning(_LW("While synchronizing instance power states, found "
                            "%(num_db_instances)s instances in the database "
                            "and %(num_vm_instances)s instances on the "
                            "hypervisor."),
                        {'num_db_instances': num_db_instances,
                         'num_vm_instances': num_vm_instances})

        def _sync(db_instance, state):
            # NOTE(melwitt): This must be synchronized as we query state from
            #                two separate sources, the driver and the database.
            #                They are set (in stop_instance) and read, in sync.
            @utils.synchronized(db_instance.uuid)
            def query_driver_power_state_and_sync():
                self._query_driver_power_state_and_sync(
                    context, db_instance, state)

            try:
                query_driver_power_state_and_sync()
            except Exception:
                LOG.exception(_LE("Periodic sync_power_state task had an "
                                  "error while processing an instance."),
                              instance=db_instance)

            self._syncs_in_progress.pop(db_instance.uuid)

        for db_instance in db_instances:
            # process syncs asynchronously - don't want instance locking to
            # block entire periodic task thread
            uuid = db_instance.uuid
            if uuid in self._syncs_in_progress:
                LOG.debug('Sync already in progress for %s', uuid)
            else:
                LOG.debug('Triggering sync for uuid %s', uuid)
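                # Map the Nova uuid to the provider-side instance id and look
                # up its reported power state, defaulting to NOSTATE when the
                # provider does not know about this instance.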
                provider_instance_id = self._get_provider_instance_id(uuid)
                provider_instance_state = vm_instances_stats.get(
                    provider_instance_id, power_state.NOSTATE)

                self._syncs_in_progress[uuid] = True
                self._sync_power_pool.spawn_n(_sync, db_instance,
                                              provider_instance_state)
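
The fan-out above runs each per-instance sync in a green thread so that the per-uuid lock never blocks the whole periodic task. A tiny standalone sketch of that idea using eventlet directly; the pool size and the worker function are illustrative, not Nova's actual configuration:

import eventlet
eventlet.monkey_patch()

pool = eventlet.GreenPool(size=10)

def _sync_one(uuid, state):
    # Stand-in for _query_driver_power_state_and_sync in the example above.
    print('syncing %s -> power state %s' % (uuid, state))

for uuid, state in {'uuid-1': 1, 'uuid-2': 4}.items():
    # spawn_n is fire-and-forget: exceptions must be handled in the worker.
    pool.spawn_n(_sync_one, uuid, state)

pool.waitall()
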
Example no. 48
    def _floating_ip_owned_by_project(self, context, floating_ip):
        """Raises if floating IP does not belong to project."""
        if context.is_admin:
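            # Admin contexts may operate on floating IPs in any project.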
            return

        if floating_ip.project_id != context.project_id:
            if floating_ip.project_id is None:
                LOG.warning(_LW('Address |%(address)s| is not allocated'),
                            {'address': floating_ip.address})
                raise exception.Forbidden()
            else:
                LOG.warning(
                    _LW('Address |%(address)s| is not allocated '
                        'to your project |%(project)s|'), {
                            'address': floating_ip.address,
                            'project': context.project_id
                        })
                raise exception.Forbidden()
Example no. 49
    def __init__(self):
        self.cls_list = CONF.osapi_compute_extension
        if (len(self.cls_list) > 0 and
                self.cls_list[0] != STANDARD_EXTENSIONS):
            LOG.warning(_LW('The extension configure options are deprecated. '
                            'In the near future you must run all of the API.'))
        self.extensions = {}
        self.sorted_ext_list = []
        self._load_extensions()
Example no. 50
    def __init__(self):
        self.compute_attestation = ComputeAttestation()
        LOG.warning(
            _LW('The TrustedFilter is considered experimental '
                'by the OpenStack project because it receives much '
                'less testing than the rest of Nova. This may change '
                'in the future, but current deployers should be aware '
                'that the use of it in production right now may be '
                'risky.'))
Example no. 51
    def delete_key(self, ctxt, key_id, **kwargs):
        if ctxt is None:
            raise exception.Forbidden()

        if key_id != self.key_id:
            raise exception.KeyManagerError(
                        reason=_("cannot delete non-existent key"))

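        # This manager only ever holds the single configured key, so it is
        # never actually deleted; just record that the request was ignored.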
        LOG.warning(_LW("Not deleting key %s"), key_id)
Example no. 52
    def add_to_instance(self, context, instance, security_group_name):
        """Add security group to the instance."""

        neutron = neutronapi.get_client(context)
        try:
            security_group_id = neutronv20.find_resourceid_by_name_or_id(
                neutron, 'security_group',
                security_group_name,
                context.project_id)
        except n_exc.NeutronClientNoUniqueMatch as e:
            raise exception.NoUniqueMatch(six.text_type(e))
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                msg = (_("Security group %(name)s is not found for "
                         "project %(project)s") %
                       {'name': security_group_name,
                        'project': context.project_id})
                self.raise_not_found(msg)
            else:
                LOG.exception(_LE("Neutron Error:"))
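                # Re-raise the original error with its traceback preserved
                # (works on both Python 2 and 3).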
                six.reraise(*exc_info)
        params = {'device_id': instance.uuid}
        try:
            ports = neutron.list_ports(**params).get('ports')
        except n_exc.NeutronClientException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Neutron Error:"))

        if not ports:
            msg = (_("instance_id %s could not be found as device id on"
                   " any ports") % instance.uuid)
            self.raise_not_found(msg)

        for port in ports:
            if not self._has_security_group_requirements(port):
                LOG.warning(_LW("Cannot add security group %(name)s to "
                                "%(instance)s since the port %(port_id)s "
                                "does not meet security requirements"),
                            {'name': security_group_name,
                             'instance': instance.uuid,
                             'port_id': port['id']})
                raise exception.SecurityGroupCannotBeApplied()
            if 'security_groups' not in port:
                port['security_groups'] = []
            port['security_groups'].append(security_group_id)
            updated_port = {'security_groups': port['security_groups']}
            try:
                LOG.info(_LI("Adding security group %(security_group_id)s to "
                             "port %(port_id)s"),
                         {'security_group_id': security_group_id,
                          'port_id': port['id']})
                neutron.update_port(port['id'], {'port': updated_port})
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Neutron Error:"))
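
The excutils.save_and_reraise_exception() context manager used above lets a handler log a failure and still propagate the original exception. A minimal sketch of the pattern, assuming oslo_utils is available; the update_port call stands in for whatever operation might fail:

import logging

from oslo_utils import excutils

LOG = logging.getLogger(__name__)

def update_port_logged(neutron, port_id, body):
    try:
        neutron.update_port(port_id, body)
    except Exception:
        with excutils.save_and_reraise_exception():
            # Log here; the original exception and traceback are re-raised
            # automatically when the with-block exits.
            LOG.exception("Neutron Error:")
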
Example no. 53
    def __init__(self):
        self.cls_list = CONF.osapi_compute_extension
        if (len(self.cls_list) > 0
                and self.cls_list[0] != STANDARD_EXTENSIONS):
            LOG.warning(
                _LW('The extension configure options are deprecated. '
                    'In the near future you must run all of the API.'))
        self.extensions = {}
        self.sorted_ext_list = []
        self._load_extensions()