Example #1
    def authorize_console(self, context, token, console_type, host, port,
                          internal_access_path, instance_uuid,
                          access_url=None):

        token_dict = {'token': token,
                      'instance_uuid': instance_uuid,
                      'console_type': console_type,
                      'host': host,
                      'port': port,
                      'internal_access_path': internal_access_path,
                      'access_url': access_url,
                      'last_activity_at': time.time()}
        data = jsonutils.dumps(token_dict)

        # Log a warning if the token is not cached successfully, because
        # the failure will make the console for the instance unusable.
        if not self.mc.set(token.encode('UTF-8'),
                           data, CONF.console_token_ttl):
            LOG.warning(_LW("Token: %(token)s failed to save into memcached."),
                        {'token': token})
        tokens = self._get_tokens_for_instance(instance_uuid)

        # Remove the expired tokens from cache.
        tokens = [tok for tok in tokens if self.mc.get(tok.encode('UTF-8'))]
        tokens.append(token)

        if not self.mc.set(instance_uuid.encode('UTF-8'),
                           jsonutils.dumps(tokens)):
            LOG.warning(_LW("Instance: %(instance_uuid)s failed to save "
                            "into memcached"),
                        {'instance_uuid': instance_uuid})

        LOG.info(_LI("Received Token: %(token)s, %(token_dict)s"),
                  {'token': token, 'token_dict': token_dict})
Example #2
    def get_info(self, instance):
        """Get the current state and resource usage for this instance.

        If the instance is not found, this method returns an InstanceInfo
        with state NOSTATE and all resources set to 0.

        :param instance: the instance object.
        :returns: an InstanceInfo object
        """
        try:
            node = _validate_instance_and_node(self.ironicclient, instance)
        except exception.InstanceNotFound:
            return hardware.InstanceInfo(
                state=map_power_state(ironic_states.NOSTATE))

        memory_kib = int(node.properties.get('memory_mb', 0)) * 1024
        if memory_kib == 0:
            LOG.warning(_LW("Warning, memory usage is 0 for "
                            "%(instance)s on baremetal node %(node)s."),
                        {'instance': instance.uuid,
                         'node': instance.node})

        num_cpu = node.properties.get('cpus', 0)
        if num_cpu == 0:
            LOG.warning(_LW("Warning, number of cpus is 0 for "
                            "%(instance)s on baremetal node %(node)s."),
                        {'instance': instance.uuid,
                         'node': instance.node})

        return hardware.InstanceInfo(state=map_power_state(node.power_state),
                                     max_mem_kb=memory_kib,
                                     mem_kb=memory_kib,
                                     num_cpu=num_cpu)
Example #3
    def teardown(self):
        LOG.debug("Tearing down appliance")

        try:
            try:
                if self.mount:
                    self.handle.aug_close()
            except RuntimeError as e:
                LOG.warning(_LW("Failed to close augeas %s"), e)

            try:
                self.handle.shutdown()
            except AttributeError:
                # Older libguestfs versions don't have an explicit shutdown
                pass
            except RuntimeError as e:
                LOG.warning(_LW("Failed to shutdown appliance %s"), e)

            try:
                self.handle.close()
            except AttributeError:
                # Older libguestfs versions don't have an explicit close
                pass
            except RuntimeError as e:
                LOG.warning(_LW("Failed to close guest handle %s"), e)
        finally:
            # dereference object and implicitly close()
            self.handle = None
Example #4
def _unplug_pbds(session, sr_ref):
    try:
        pbds = session.call_xenapi("SR.get_PBDs", sr_ref)
    except session.XenAPI.Failure as exc:
        LOG.warning(_LW('Ignoring exception %(exc)s when getting PBDs'
                        ' for %(sr_ref)s'), {'exc': exc, 'sr_ref': sr_ref})
        return

    for pbd in pbds:
        try:
            session.call_xenapi("PBD.unplug", pbd)
        except session.XenAPI.Failure as exc:
            LOG.warning(_LW('Ignoring exception %(exc)s when unplugging'
                            ' PBD %(pbd)s'), {'exc': exc, 'pbd': pbd})
Example #5
    def choose_monitors(self, manager):
        """This function checks the monitor names and metrics names against a
        predefined set of acceptable monitors.
        """
        monitor_classes = self.get_matching_classes(
            CONF.compute_available_monitors)
        monitor_class_map = {cls.__name__: cls for cls in monitor_classes}
        monitor_cls_names = CONF.compute_monitors
        good_monitors = []
        bad_monitors = []
        metric_names = set()
        for monitor_name in monitor_cls_names:
            if monitor_name not in monitor_class_map:
                bad_monitors.append(monitor_name)
                continue

            try:
                # make sure different monitors do not have the same
                # metric name
                monitor = monitor_class_map[monitor_name](manager)
                metric_names_tmp = set(monitor.get_metric_names())
                overlap = metric_names & metric_names_tmp
                if not overlap:
                    metric_names = metric_names | metric_names_tmp
                    good_monitors.append(monitor)
                else:
                    msg = (_LW("Excluding monitor %(monitor_name)s due to "
                               "metric name overlap; overlapping "
                               "metrics: %(overlap)s") % {
                                   'monitor_name': monitor_name,
                                   'overlap': ', '.join(overlap)
                               })
                    LOG.warn(msg)
                    bad_monitors.append(monitor_name)
            except Exception as ex:
                msg = (_LW("Monitor %(monitor_name)s cannot be used: %(ex)s") %
                       {
                           'monitor_name': monitor_name,
                           'ex': ex
                       })
                LOG.warn(msg)
                bad_monitors.append(monitor_name)

        if bad_monitors:
            LOG.warning(_LW("The following monitors have been disabled: %s"),
                        ', '.join(bad_monitors))

        return good_monitors
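
A minimal, self-contained sketch of the overlap check used above: every monitor claims a set of metric names, and a candidate is rejected when its names intersect the ones already claimed. The monitor classes and names below are invented for illustration and are not part of the project.

class CPUMonitor(object):
    def get_metric_names(self):
        return {'cpu.user.time', 'cpu.kernel.time'}


class DuplicateMonitor(object):
    def get_metric_names(self):
        return {'cpu.user.time'}  # collides with CPUMonitor


def pick_monitors(candidates):
    claimed, good, bad = set(), [], []
    for name, cls in candidates:
        metrics = set(cls().get_metric_names())
        overlap = claimed & metrics
        if overlap:
            # same exclusion rule as choose_monitors() above
            bad.append((name, sorted(overlap)))
            continue
        claimed |= metrics
        good.append(name)
    return good, bad


good, bad = pick_monitors([('CPUMonitor', CPUMonitor),
                           ('DuplicateMonitor', DuplicateMonitor)])
assert good == ['CPUMonitor']
assert bad == [('DuplicateMonitor', ['cpu.user.time'])]
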
Example #6
    def _get_new_connection(self):
        # call with _wrapped_conn_lock held
        LOG.debug('Connecting to libvirt: %s', self._uri)
        wrapped_conn = None

        try:
            wrapped_conn = self._connect(self._uri, self._read_only)
        finally:
            # Enable the compute service (in case it was disabled) when the
            # connection succeeds; otherwise report a disable reason.
            disable_reason = None
            if not wrapped_conn:
                disable_reason = 'Failed to connect to libvirt'

            if self._conn_event_handler is not None:
                self._conn_event_handler(bool(wrapped_conn), disable_reason)

        self._wrapped_conn = wrapped_conn

        try:
            LOG.debug("Registering for lifecycle events %s", self)
            wrapped_conn.domainEventRegisterAny(
                None,
                libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
                self._event_lifecycle_callback,
                self)
        except Exception as e:
            LOG.warn(_LW("URI %(uri)s does not support events: %(error)s"),
                     {'uri': self._uri, 'error': e})

        try:
            LOG.debug("Registering for connection events: %s", str(self))
            wrapped_conn.registerCloseCallback(self._close_callback, None)
        except (TypeError, AttributeError) as e:
            # NOTE: registerCloseCallback in python-libvirt 1.0.1+ takes
            # 3 arguments, so the call above succeeds. In python-libvirt
            # 1.0.0 it takes 4 arguments, which raises TypeError here, and
            # python-libvirt 0.9 does not define registerCloseCallback at
            # all.
            LOG.debug("The version of python-libvirt does not support "
                      "registerCloseCallback or is too old: %s", e)
        except libvirt.libvirtError as e:
            LOG.warn(_LW("URI %(uri)s does not support connection"
                         " events: %(error)s"),
                     {'uri': self._uri, 'error': e})

        return wrapped_conn
Example #7
    def _soft_shutdown(self, instance,
                       timeout=CONF.hyperv.wait_soft_reboot_seconds,
                       retry_interval=SHUTDOWN_TIME_INCREMENT):
        """Perform a soft shutdown on the VM.

           :return: True if the instance was shut down within the time
                    limit, False otherwise.
        """
        LOG.debug("Performing Soft shutdown on instance", instance=instance)

        while timeout > 0:
            # Perform a soft shutdown on the instance and wait up to
            # wait_time for it to power off. If it does not shut down,
            # retry until it succeeds or the total time waited reaches
            # the timeout.
            wait_time = min(retry_interval, timeout)
            try:
                LOG.debug("Soft shutdown instance, timeout remaining: %d",
                          timeout, instance=instance)
                self._vmutils.soft_shutdown_vm(instance.name)
                if self._wait_for_power_off(instance.name, wait_time):
                    LOG.info(_LI("Soft shutdown succeeded."),
                             instance=instance)
                    return True
            except vmutils.HyperVException as e:
                # Exception is raised when trying to shutdown the instance
                # while it is still booting.
                LOG.debug("Soft shutdown failed: %s", e, instance=instance)
                time.sleep(wait_time)

            timeout -= retry_interval

        LOG.warning(_LW("Timed out while waiting for soft shutdown."),
                    instance=instance)
        return False
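
The loop above is a general retry-until-timeout pattern: attempt the operation, wait up to retry_interval for the desired state, and give up once the accumulated wait reaches the timeout. Below is a rough, self-contained sketch of that pattern; shutdown and powered_off are caller-supplied callables invented for the example, not Hyper-V or Nova APIs.

import time


def shutdown_with_timeout(shutdown, powered_off, timeout=60,
                          retry_interval=10):
    """Return True if powered_off() becomes true within timeout seconds."""
    while timeout > 0:
        wait_time = min(retry_interval, timeout)
        try:
            shutdown()
            deadline = time.time() + wait_time
            while time.time() < deadline:
                if powered_off():
                    return True
                time.sleep(1)
        except RuntimeError:
            # e.g. the guest is still booting; wait and try again
            time.sleep(wait_time)
        timeout -= retry_interval
    return False
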
Example #8
def init():
    from oslo_config import cfg
    CONF = cfg.CONF

    # NOTE(markmc): gracefully handle the CLI options not being registered
    if 'remote_debug' not in CONF:
        return

    if not (CONF.remote_debug.host and CONF.remote_debug.port):
        return

    import logging
    from patron.i18n import _LW
    LOG = logging.getLogger(__name__)

    LOG.debug('Listening on %(host)s:%(port)s for debug connection',
              {'host': CONF.remote_debug.host,
               'port': CONF.remote_debug.port})

    try:
        from pydev import pydevd
    except ImportError:
        import pydevd
    pydevd.settrace(host=CONF.remote_debug.host,
                    port=CONF.remote_debug.port,
                    stdoutToServer=False,
                    stderrToServer=False)

    LOG.warning(_LW('WARNING: Using the remote debug option changes how '
                    'Nova uses the eventlet library to support async IO. This '
                    'could result in failures that do not occur under normal '
                    'operation. Use at your own risk.'))
Example #9
    def __call__(self, req):
        access_key = str(req.params['AWSAccessKeyId'])
        failures_key = "authfailures-%s" % access_key
        failures = int(self.mc.get(failures_key) or 0)
        if failures >= CONF.lockout_attempts:
            detail = _("Too many failed authentications.")
            raise webob.exc.HTTPForbidden(explanation=detail)
        res = req.get_response(self.application)
        if res.status_int == 403:
            failures = self.mc.incr(failures_key)
            if failures is None:
                # NOTE(vish): To use incr, failures has to be a string.
                self.mc.set(failures_key, '1', time=CONF.lockout_window * 60)
            elif failures >= CONF.lockout_attempts:
                LOG.warning(_LW('Access key %(access_key)s has had '
                                '%(failures)d failed authentications and '
                                'will be locked out for %(lock_mins)d '
                                'minutes.'),
                            {'access_key': access_key,
                             'failures': failures,
                             'lock_mins': CONF.lockout_minutes})
                self.mc.set(failures_key,
                            str(failures),
                            time=CONF.lockout_minutes * 60)
        return res
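
The lockout bookkeeping above relies on the cache client returning None from incr() when the key is missing, which is why the first failure is seeded as a string and later failures are incremented. Here is a rough sketch of the same counter logic against an in-memory stand-in; MiniCache and record_failure are invented for illustration and only mimic the small set/get/incr subset used above.

class MiniCache(object):
    """In-memory stand-in for the set/get/incr subset of the cache client."""

    def __init__(self):
        self._data = {}

    def set(self, key, value, time=0):
        self._data[key] = str(value)

    def get(self, key):
        return self._data.get(key)

    def incr(self, key):
        if key not in self._data:
            return None
        self._data[key] = str(int(self._data[key]) + 1)
        return int(self._data[key])


def record_failure(mc, access_key, lockout_window=15):
    """Record one failed authentication and return the running count."""
    failures_key = "authfailures-%s" % access_key
    failures = mc.incr(failures_key)
    if failures is None:
        # First failure: incr needs an existing string value, so seed it.
        mc.set(failures_key, '1', time=lockout_window * 60)
        failures = 1
    return failures


mc = MiniCache()
assert record_failure(mc, 'AKIA-EXAMPLE') == 1
assert record_failure(mc, 'AKIA-EXAMPLE') == 2
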
Example #10
    def destroy(self, context, instance, network_info,
                block_device_info=None, destroy_disks=True, migrate_data=None):
        """Destroy the specified instance, if it can be found.

        :param context: The security context.
        :param instance: The instance object.
        :param network_info: Instance network information.
        :param block_device_info: Instance block device
            information. Ignored by this driver.
        :param destroy_disks: Indicates if disks should be
            destroyed. Ignored by this driver.
        :param migrate_data: implementation specific params.
            Ignored by this driver.
        """
        try:
            node = _validate_instance_and_node(self.ironicclient, instance)
        except exception.InstanceNotFound:
            LOG.warning(_LW("Destroy called on non-existing instance %s."),
                        instance.uuid)
            # NOTE(deva): if patron.compute.ComputeManager._delete_instance()
            #             is called on a non-existing instance, the only way
            #             to delete it is to return from this method
            #             without raising any exceptions.
            return

        if node.provision_state in (ironic_states.ACTIVE,
                                    ironic_states.DEPLOYFAIL,
                                    ironic_states.ERROR,
                                    ironic_states.DEPLOYWAIT):
            self._unprovision(self.ironicclient, instance, node)

        self._cleanup_deploy(context, node, instance, network_info)
Example #11
    def _init_session(self):
        """Initializes new session.

        Optionally creates required servicegroup prefix.

        :returns: ZKSession, the newly created session
        """
        null = open(os.devnull, "w")
        session = evzookeeper.ZKSession(
            CONF.zookeeper.address,
            recv_timeout=CONF.zookeeper.recv_timeout,
            zklog_fd=null)
        # Make sure the prefix exists
        try:
            session.create(CONF.zookeeper.sg_prefix, "",
                           acl=[evzookeeper.ZOO_OPEN_ACL_UNSAFE])
        except zookeeper.NodeExistsException:
            pass
        # Log a warning about quality for this driver.
        LOG.warning(_LW('The ZooKeeper service group driver in Nova is not '
                        'tested by the OpenStack project and thus its quality '
                        'can not be ensured. This may change in the future, '
                        'but current deployers should be aware that the use '
                        'of it in production right now may be risky.'))
        return session
Example #12
    def kill(self):
        """Destroy the service object in the datastore."""
        self.stop()
        try:
            self.conductor_api.service_destroy(context.get_admin_context(),
                                               self.service_id)
        except exception.NotFound:
            LOG.warning(_LW("Service killed that has no database entry"))
Example #13
    def host_passes(self, host_state, filter_properties):
        """Return True if host has sufficient CPU cores."""
        instance_type = filter_properties.get('instance_type')
        if not instance_type:
            return True

        if not host_state.vcpus_total:
            # Fail safe
            LOG.warning(_LW("VCPUs not set; assuming CPU collection broken"))
            return True

        instance_vcpus = instance_type['vcpus']
        cpu_allocation_ratio = self._get_cpu_allocation_ratio(
            host_state, filter_properties)
        vcpus_total = host_state.vcpus_total * cpu_allocation_ratio

        # Only provide a VCPU limit to compute if the virt driver is reporting
        # an accurate count of installed VCPUs. (XenServer driver does not)
        if vcpus_total > 0:
            host_state.limits['vcpu'] = vcpus_total

        free_vcpus = vcpus_total - host_state.vcpus_used
        if free_vcpus < instance_vcpus:
            LOG.debug("%(host_state)s does not have %(instance_vcpus)d "
                      "usable vcpus, it only has %(free_vcpus)d usable "
                      "vcpus",
                      {'host_state': host_state,
                       'instance_vcpus': instance_vcpus,
                       'free_vcpus': free_vcpus})
            return False

        return True
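
A quick worked example of the filter arithmetic above, with invented numbers: 8 physical VCPUs, an overcommit ratio of 16.0, and 100 VCPUs already used.

vcpus_total = 8 * 16.0           # 128.0 schedulable VCPUs
free_vcpus = vcpus_total - 100   # 28.0 still free
assert free_vcpus >= 4           # a 4-VCPU flavor passes
assert free_vcpus < 32           # a 32-VCPU flavor is filtered out
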
Example #14
def get_image_metadata(context, image_api, image_id_or_uri, instance):
    image_system_meta = {}
    # In case of boot from volume, image_id_or_uri may be None or ''
    if image_id_or_uri is not None and image_id_or_uri != '':
        # If the base image is still available, get its metadata
        try:
            image = image_api.get(context, image_id_or_uri)
        except (exception.ImageNotAuthorized,
                exception.ImageNotFound,
                exception.Invalid) as e:
            LOG.warning(_LW("Can't access image %(image_id)s: %(error)s"),
                        {"image_id": image_id_or_uri, "error": e},
                        instance=instance)
        else:
            flavor = instance.get_flavor()
            image_system_meta = utils.get_system_metadata_from_image(image,
                                                                     flavor)

    # Get the system metadata from the instance
    system_meta = utils.instance_sys_meta(instance)

    # Merge the metadata from the instance with the image's, if any
    system_meta.update(image_system_meta)

    # Convert the system metadata to image metadata
    return utils.get_image_from_system_metadata(system_meta)
Example #15
    def set_disk_host_resource(self, vm_name, controller_path, address,
                               mounted_disk_path):
        disk_found = False
        vm = self._lookup_vm_check(vm_name)
        (disk_resources, volume_resources) = self._get_vm_disks(vm)
        for disk_resource in disk_resources + volume_resources:
            if (disk_resource.Parent == controller_path and
                    self._get_disk_resource_address(disk_resource) ==
                    str(address)):
                if (disk_resource.HostResource and
                        disk_resource.HostResource[0] != mounted_disk_path):
                    LOG.debug('Updating disk host resource "%(old)s" to '
                              '"%(new)s"' %
                              {'old': disk_resource.HostResource[0],
                               'new': mounted_disk_path})
                    disk_resource.HostResource = [mounted_disk_path]
                    self._modify_virt_resource(disk_resource, vm.path_())
                disk_found = True
                break
        if not disk_found:
            LOG.warning(_LW('Disk not found on controller '
                            '"%(controller_path)s" with '
                            'address "%(address)s"'),
                        {'controller_path': controller_path,
                         'address': address})
Example #16
    def inner(*args, **kwargs):
        if not CONF.allow_instance_snapshots:
            LOG.warning(_LW('Rejecting snapshot request, snapshots currently'
                            ' disabled'))
            msg = _("Instance snapshots are not permitted at this time.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return f(*args, **kwargs)
Example #17
def init():
    from oslo_config import cfg
    CONF = cfg.CONF

    # NOTE(markmc): gracefully handle the CLI options not being registered
    if 'remote_debug' not in CONF:
        return

    if not (CONF.remote_debug.host and CONF.remote_debug.port):
        return

    import logging
    from patron.i18n import _LW
    LOG = logging.getLogger(__name__)

    LOG.debug('Listening on %(host)s:%(port)s for debug connection', {
        'host': CONF.remote_debug.host,
        'port': CONF.remote_debug.port
    })

    try:
        from pydev import pydevd
    except ImportError:
        import pydevd
    pydevd.settrace(host=CONF.remote_debug.host,
                    port=CONF.remote_debug.port,
                    stdoutToServer=False,
                    stderrToServer=False)

    LOG.warning(
        _LW('WARNING: Using the remote debug option changes how '
            'Nova uses the eventlet library to support async IO. This '
            'could result in failures that do not occur under normal '
            'operation. Use at your own risk.'))
Example #18
    def _register_controllers(self, ext):
        """Register controllers defined by the extensions

        Extensions define what resources they want to add through
        a get_controller_extensions function
        """

        handler = ext.obj
        LOG.debug("Running _register_controllers on %s", ext.obj)

        for extension in handler.get_controller_extensions():
            ext_name = extension.extension.name
            collection = extension.collection
            controller = extension.controller

            if collection not in self.resources:
                LOG.warning(_LW('Extension %(ext_name)s: Cannot extend '
                                'resource %(collection)s: No such resource'),
                            {'ext_name': ext_name, 'collection': collection})
                continue

            LOG.debug('Extension %(ext_name)s extending resource: '
                      '%(collection)s',
                      {'ext_name': ext_name, 'collection': collection})

            resource = self.resources[collection]
            resource.register_actions(controller)
            resource.register_extensions(controller)
Example #19
    def __init__(self, *args, **kwargs):
        '''Create an instance of the servicegroup API.

        args and kwargs are passed down to the servicegroup driver when it gets
        created.
        '''
        # Make sure report interval is less than service down time
        report_interval = CONF.report_interval
        if CONF.service_down_time <= report_interval:
            new_service_down_time = int(report_interval * 2.5)
            LOG.warning(_LW("Report interval must be less than service down "
                            "time. Current config: <service_down_time: "
                            "%(service_down_time)s, report_interval: "
                            "%(report_interval)s>. Setting service_down_time "
                            "to: %(new_service_down_time)s"),
                        {'service_down_time': CONF.service_down_time,
                         'report_interval': report_interval,
                         'new_service_down_time': new_service_down_time})
            CONF.set_override('service_down_time', new_service_down_time)
        LOG.debug('ServiceGroup driver defined as an instance of %s',
                  str(CONF.servicegroup_driver))
        driver_name = CONF.servicegroup_driver
        try:
            driver_class = _driver_name_class_mapping[driver_name]
        except KeyError:
            raise TypeError(_("unknown ServiceGroup driver name: %s")
                            % driver_name)
        self._driver = importutils.import_object(driver_class,
                                                 *args, **kwargs)
Example #20
def inject_data_into_fs(fs, key, net, metadata, admin_password, files,
                        mandatory=()):
    """Injects data into a filesystem already mounted by the caller.
    Virt connections can call this directly if they mount their fs
    in a different way to inject_data.

    If an item name is not specified in the MANDATORY iterable, then a warning
    is logged on failure to inject that item, rather than raising an exception.

    Returns True if all requested operations completed without issue.
    Raises an exception if a mandatory item can't be injected.
    """
    status = True
    for inject in ('key', 'net', 'metadata', 'admin_password', 'files'):
        inject_val = locals()[inject]
        inject_func = globals()['_inject_%s_into_fs' % inject]
        if inject_val:
            try:
                inject_func(inject_val, fs)
            except Exception as e:
                if inject in mandatory:
                    raise
                LOG.warning(_LW('Ignoring error injecting %(inject)s into '
                                'image (%(e)s)'), {'inject': inject, 'e': e})
                status = False
    return status
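
The loop above dispatches by name: the value to inject is looked up in locals() and the matching _inject_<name>_into_fs helper in globals(), and a failure only propagates when the item is listed as mandatory. Below is a self-contained sketch of that dispatch-by-name pattern; the item names and helpers are invented, not the real injection helpers.

import logging

LOG = logging.getLogger(__name__)


def _inject_key_into_fs(key, fs):
    fs['key'] = key


def _inject_files_into_fs(files, fs):
    raise IOError("disk full")  # simulate a failed injection


def inject_items(fs, key=None, files=None, mandatory=()):
    status = True
    for inject in ('key', 'files'):
        inject_val = locals()[inject]
        inject_func = globals()['_inject_%s_into_fs' % inject]
        if inject_val:
            try:
                inject_func(inject_val, fs)
            except Exception as e:
                if inject in mandatory:
                    raise
                LOG.warning('Ignoring error injecting %s (%s)', inject, e)
                status = False
    return status


fs = {}
# 'files' fails but is not mandatory, so the call returns False instead
# of raising; with mandatory=('files',) it would re-raise the IOError.
assert inject_items(fs, key='k1', files={'/tmp/x': 'y'}) is False
assert fs['key'] == 'k1'
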
Example #21
    def _add_floating_ip(self, req, id, body):
        """Associate floating_ip to an instance."""
        context = req.environ['patron.context']
        authorize(context)

        address = body['addFloatingIp']['address']

        instance = common.get_instance(self.compute_api, context, id)
        cached_nwinfo = compute_utils.get_nw_info_for_instance(instance)
        if not cached_nwinfo:
            msg = _('No nw_info cache associated with instance')
            raise webob.exc.HTTPBadRequest(explanation=msg)

        fixed_ips = cached_nwinfo.fixed_ips()
        if not fixed_ips:
            msg = _('No fixed ips associated to instance')
            raise webob.exc.HTTPBadRequest(explanation=msg)

        fixed_address = None
        if 'fixed_address' in body['addFloatingIp']:
            fixed_address = body['addFloatingIp']['fixed_address']
            for fixed in fixed_ips:
                if fixed['address'] == fixed_address:
                    break
            else:
                msg = _('Specified fixed address not assigned to instance')
                raise webob.exc.HTTPBadRequest(explanation=msg)

        if not fixed_address:
            fixed_address = fixed_ips[0]['address']
            if len(fixed_ips) > 1:
                LOG.warning(_LW('multiple fixed_ips exist, using the first: '
                                '%s'), fixed_address)

        try:
            self.network_api.associate_floating_ip(
                context, instance, floating_address=address,
                fixed_address=fixed_address)
        except exception.FloatingIpAssociated:
            msg = _('floating ip is already associated')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except exception.NoFloatingIpInterface:
            msg = _('l3driver call to add floating ip failed')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except exception.FloatingIpNotFoundForAddress:
            msg = _('floating ip not found')
            raise webob.exc.HTTPNotFound(explanation=msg)
        except exception.Forbidden as e:
            raise webob.exc.HTTPForbidden(explanation=e.format_message())
        except Exception as e:
            msg = _('Unable to associate floating ip %(address)s to '
                    'fixed ip %(fixed_address)s for instance %(id)s. '
                    'Error: %(error)s') % (
                    {'address': address, 'fixed_address': fixed_address,
                     'id': id, 'error': e})
            LOG.exception(msg)
            raise webob.exc.HTTPBadRequest(explanation=msg)

        return webob.Response(status_int=202)
Example #22
    def update_from_compute_node(self, compute):
        """Update information about a host from a ComputeNode object."""
        if (self.updated and compute.updated_at
                and self.updated > compute.updated_at):
            return
        all_ram_mb = compute.memory_mb

        # Assume the virtual size is all consumed by instances when qcow2
        # disks are used.
        free_gb = compute.free_disk_gb
        least_gb = compute.disk_available_least
        if least_gb is not None:
            if least_gb > free_gb:
                # can occur when an instance in the database is not on the host
                LOG.warning(_LW("Host %(hostname)s has more disk space than "
                                "database expected "
                                "(%(physical)sgb > %(database)sgb)"),
                            {'physical': least_gb, 'database': free_gb,
                             'hostname': compute.hypervisor_hostname})
            free_gb = min(least_gb, free_gb)
        free_disk_mb = free_gb * 1024

        self.disk_mb_used = compute.local_gb_used * 1024

        # NOTE(jogo) free_ram_mb can be negative
        self.free_ram_mb = compute.free_ram_mb
        self.total_usable_ram_mb = all_ram_mb
        self.total_usable_disk_gb = compute.local_gb
        self.free_disk_mb = free_disk_mb
        self.vcpus_total = compute.vcpus
        self.vcpus_used = compute.vcpus_used
        self.updated = compute.updated_at
        self.numa_topology = compute.numa_topology
        self.pci_stats = pci_stats.PciDeviceStats(
            compute.pci_device_pools)

        # All virt drivers report host_ip
        self.host_ip = compute.host_ip
        self.hypervisor_type = compute.hypervisor_type
        self.hypervisor_version = compute.hypervisor_version
        self.hypervisor_hostname = compute.hypervisor_hostname
        self.cpu_info = compute.cpu_info
        if compute.supported_hv_specs:
            self.supported_instances = [spec.to_list() for spec
                                        in compute.supported_hv_specs]
        else:
            self.supported_instances = []

        # Don't store stats directly in host_state to make sure these don't
        # overwrite any values, or get overwritten themselves. Store in self so
        # filters can schedule with them.
        self.stats = compute.stats or {}

        # Track number of instances on host
        self.num_instances = int(self.stats.get('num_instances', 0))

        self.num_io_ops = int(self.stats.get('io_workload', 0))

        # update metrics
        self._update_metrics_from_compute_node(compute)
Example #23
    def delete_key(self, ctxt, key_id, **kwargs):
        if ctxt is None:
            raise exception.Forbidden()

        if key_id != self.key_id:
            raise exception.KeyManagerError(
                reason=_("cannot delete non-existent key"))

        LOG.warning(_LW("Not deleting key %s"), key_id)
Example #24
File: zk.py Project: hsluoyz/patron
    def get_all(self, group_id):
        """Return all members in a list, or a ServiceGroupUnavailable
        exception.
        """
        monitor = self._monitors.get(group_id, None)
        if monitor is None:
            path = "%s/%s" % (CONF.zookeeper.sg_prefix, group_id)

            null = open(os.devnull, "w")
            local_session = evzookeeper.ZKSession(CONF.zookeeper.address,
                                                  recv_timeout=
                                                  CONF.zookeeper.recv_timeout,
                                                  zklog_fd=null)

            monitor = membership.MembershipMonitor(local_session, path)
            self._monitors[group_id] = monitor
            # Note(maoy): When initialized for the first time, it takes a
            # while to retrieve all members from zookeeper. To prevent None
            # from being returned, we wait up to 5 seconds for the data to
            # be ready.
            timeout = 5  # seconds
            interval = 0.1
            tries = int(timeout / interval)
            for _retry in range(tries):
                eventlet.sleep(interval)
                all_members = monitor.get_all()
                if all_members is not None:
                    # Stop the tries once the cache is populated
                    LOG.debug('got info about members in %r: %r',
                              path, ', '.join(all_members))
                    break
            else:
                # all_members wasn't populated within the timeout
                LOG.warning(_LW('Problem with acquiring the list of '
                                'children of %(path)r within a given '
                                'timeout=%(timeout)rs'),
                            {'path': path, 'timeout': timeout})
        else:
            all_members = monitor.get_all()

        if all_members is None:
            raise exception.ServiceGroupUnavailable(driver="ZooKeeperDriver")

        def have_processes(member):
            """Predicate that given member has processes (subnode exists)."""
            value, stat = monitor.get_member_details(member)
            # only check nodes that are created by Membership class
            if value == 'ZKMembers':
                num_children = stat['numChildren']
                return num_children > 0
            else:
                # unknown type of node found - ignoring
                return False

        # filter only those members that have processes running
        all_members = filter(have_processes, all_members)

        return all_members
Example #25
def get_fc_hbas():
    """Get the Fibre Channel HBA information."""
    out = None
    try:
        out, err = execute('systool', '-c', 'fc_host', '-v',
                           run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        # This handles the case where rootwrap is used
        # and systool is not installed
        # (96 = patron.cmd.rootwrap.RC_NOEXECFOUND)
        if exc.exit_code == 96:
            LOG.warn(_LW("systool is not installed"))
        return []
    except OSError as exc:
        # This handles the case where rootwrap is NOT used
        # and systool is not installed
        if exc.errno == errno.ENOENT:
            LOG.warn(_LW("systool is not installed"))
        return []

    if out is None:
        raise RuntimeError(_("Cannot find any Fibre Channel HBAs"))

    lines = out.split('\n')
    # ignore the first 2 lines
    lines = lines[2:]
    hbas = []
    hba = {}
    lastline = None
    for line in lines:
        line = line.strip()
        # 2 newlines denote a new hba port
        if line == '' and lastline == '':
            if len(hba) > 0:
                hbas.append(hba)
                hba = {}
        else:
            val = line.split('=')
            if len(val) == 2:
                key = val[0].strip().replace(" ", "")
                value = val[1].strip()
                hba[key] = value.replace('"', '')
        lastline = line

    return hbas
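
The parser above walks systool output line by line, collecting key = value pairs into the current HBA dict and starting a new one when it sees two consecutive blank lines. Here is a rough, standalone sketch of that parsing with a made-up two-port sample shaped the way the parser expects (real systool -c fc_host -v output carries many more fields).

SAMPLE = """Class = "fc_host"

  Class Device = "host0"
    node_name = "0x200000051e0ea3c8"
    port_name = "0x100000051e0ea3c8"


  Class Device = "host1"
    node_name = "0x200000051e0ea3c9"
    port_name = "0x100000051e0ea3c9"

"""


def parse_hbas(out):
    lines = out.split('\n')[2:]   # ignore the class header lines
    hbas, hba, lastline = [], {}, None
    for line in lines:
        line = line.strip()
        if line == '' and lastline == '':
            # two consecutive blank lines close the current HBA block
            if hba:
                hbas.append(hba)
                hba = {}
        else:
            val = line.split('=')
            if len(val) == 2:
                key = val[0].strip().replace(" ", "")
                hba[key] = val[1].strip().replace('"', '')
        lastline = line
    return hbas


hbas = parse_hbas(SAMPLE)
assert [h['port_name'] for h in hbas] == ['0x100000051e0ea3c8',
                                          '0x100000051e0ea3c9']
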
Example #26
    def __init__(self, instance_md=None):
        if CONF.force_config_drive == 'always':
            LOG.warning(_LW('The setting "always" will be deprecated in the '
                            'Liberty version. Please use "True" instead'))
        self.imagefile = None
        self.mdfiles = []

        if instance_md is not None:
            self.add_instance_metadata(instance_md)
Example #27
    def _perform_update(self, agent_build):
        args = {'url': agent_build['url'], 'md5sum': agent_build['md5hash']}
        try:
            self._call_agent('agentupdate', args)
        except exception.AgentError as exc:
            # Silently fail for agent upgrades
            LOG.warning(_LW("Unable to update the agent due "
                            "to: %(exc)s"), dict(exc=exc),
                        instance=self.instance)
Example #28
    def __init__(self):
        LOG.warning(_LW("This key manager is insecure and is not recommended " "for production deployments"))
        super(SingleKeyManager, self).__init__()

        self.key_id = "00000000-0000-0000-0000-000000000000"
        self.key = self._generate_key(key_length=256)

        # key should exist by default
        self.keys[self.key_id] = self.key
Example #29
            def check_whiteblack_lists(ext):
                # Check that the whitelist is either empty or, if not, that
                # the extension is in it
                if (not CONF.osapi_v3.extensions_whitelist or
                        ext.obj.alias in CONF.osapi_v3.extensions_whitelist):

                    # Check the extension is not in the blacklist
                    if ext.obj.alias not in CONF.osapi_v3.extensions_blacklist:
                        return True
                    else:
                        LOG.warning(_LW("Not loading %s because it is "
                                        "in the blacklist"), ext.obj.alias)
                        return False
                else:
                    LOG.warning(
                        _LW("Not loading %s because it is not in the "
                            "whitelist"), ext.obj.alias)
                    return False
Example #30
    def choose_monitors(self, manager):
        """This function checks the monitor names and metrics names against a
        predefined set of acceptable monitors.
        """
        monitor_classes = self.get_matching_classes(
             CONF.compute_available_monitors)
        monitor_class_map = {cls.__name__: cls for cls in monitor_classes}
        monitor_cls_names = CONF.compute_monitors
        good_monitors = []
        bad_monitors = []
        metric_names = set()
        for monitor_name in monitor_cls_names:
            if monitor_name not in monitor_class_map:
                bad_monitors.append(monitor_name)
                continue

            try:
                # make sure different monitors do not have the same
                # metric name
                monitor = monitor_class_map[monitor_name](manager)
                metric_names_tmp = set(monitor.get_metric_names())
                overlap = metric_names & metric_names_tmp
                if not overlap:
                    metric_names = metric_names | metric_names_tmp
                    good_monitors.append(monitor)
                else:
                    msg = (_LW("Excluding monitor %(monitor_name)s due to "
                               "metric name overlap; overlapping "
                               "metrics: %(overlap)s") %
                               {'monitor_name': monitor_name,
                                'overlap': ', '.join(overlap)})
                    LOG.warn(msg)
                    bad_monitors.append(monitor_name)
            except Exception as ex:
                msg = (_LW("Monitor %(monitor_name)s cannot be used: %(ex)s") %
                          {'monitor_name': monitor_name, 'ex': ex})
                LOG.warn(msg)
                bad_monitors.append(monitor_name)

        if bad_monitors:
            LOG.warning(_LW("The following monitors have been disabled: %s"),
                        ', '.join(bad_monitors))

        return good_monitors
Example #31
    def add_to_instance(self, context, instance, security_group_name):
        """Add security group to the instance."""

        neutron = neutronapi.get_client(context)
        try:
            security_group_id = neutronv20.find_resourceid_by_name_or_id(
                neutron, 'security_group',
                security_group_name,
                context.project_id)
        except n_exc.NeutronClientNoUniqueMatch as e:
            raise exception.NoUniqueMatch(six.text_type(e))
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                msg = (_("Security group %(name)s is not found for "
                         "project %(project)s") %
                       {'name': security_group_name,
                        'project': context.project_id})
                self.raise_not_found(msg)
            else:
                LOG.exception(_LE("Neutron Error:"))
                six.reraise(*exc_info)
        params = {'device_id': instance.uuid}
        try:
            ports = neutron.list_ports(**params).get('ports')
        except n_exc.NeutronClientException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Neutron Error:"))

        if not ports:
            msg = (_("instance_id %s could not be found as device id on"
                   " any ports") % instance.uuid)
            self.raise_not_found(msg)

        for port in ports:
            if not self._has_security_group_requirements(port):
                LOG.warning(_LW("Cannot add security group %(name)s to "
                                "%(instance)s since the port %(port_id)s "
                                "does not meet security requirements"),
                            {'name': security_group_name,
                             'instance': instance.uuid,
                             'port_id': port['id']})
                raise exception.SecurityGroupCannotBeApplied()
            if 'security_groups' not in port:
                port['security_groups'] = []
            port['security_groups'].append(security_group_id)
            updated_port = {'security_groups': port['security_groups']}
            try:
                LOG.info(_LI("Adding security group %(security_group_id)s to "
                             "port %(port_id)s"),
                         {'security_group_id': security_group_id,
                          'port_id': port['id']})
                neutron.update_port(port['id'], {'port': updated_port})
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Neutron Error:"))
Example #32
    def _add_instance_fault(self, error, exc_info):
        LOG.warning(_LW("Ignoring error while configuring instance with "
                        "agent: %s"), error,
                    instance=self.instance, exc_info=True)
        try:
            ctxt = context.get_admin_context()
            compute_utils.add_instance_fault_from_exc(
                ctxt, self.instance, error, exc_info=exc_info)
        except Exception:
            LOG.debug("Error setting instance fault.", exc_info=True)
Example #33
    def _find_unused(self, devices):
        for device in devices:
            if not os.path.exists(
                    os.path.join('/sys/block/', device, 'pid')):
                if not os.path.exists('/var/lock/qemu-nbd-%s' % device):
                    return device
                else:
                    LOG.error(_LE('NBD error - previous umount did not '
                                  'cleanup /var/lock/qemu-nbd-%s.'), device)
        LOG.warning(_LW('No free nbd devices'))
        return None
Example #34
    def _get_tuple_for_domain(cls, lobj, domain):
        entry = lobj.search_s(CONF.ldap_dns_base_dn, ldap.SCOPE_SUBTREE,
                              '(associatedDomain=%s)' % utils.utf8(domain))
        if not entry:
            return None
        if len(entry) > 1:
            LOG.warning(_LW("Found multiple matches for domain "
                            "%(domain)s.\n%(entry)s"),
                        {'domain': domain, 'entry': entry})
        return entry[0]