def _check_prerequisites(self): """Sanity checks before attempting to mount SOFS.""" # config is mandatory config = CONF.libvirt.scality_sofs_config if not config: msg = _LW("Value required for 'scality_sofs_config'") LOG.warn(msg) raise exception.NovaException(msg) # config can be a file path or a URL, check it if urlparse.urlparse(config).scheme == '': # turn local path into URL config = 'file://%s' % config try: urllib2.urlopen(config, timeout=5).close() except urllib2.URLError as e: msg = _LW("Cannot access 'scality_sofs_config': %s") % e LOG.warn(msg) raise exception.NovaException(msg) # mount.sofs must be installed if not os.access('/sbin/mount.sofs', os.X_OK): msg = _LW("Cannot execute /sbin/mount.sofs") LOG.warn(msg) raise exception.NovaException(msg)
def _set_session_sql_mode(dbapi_con, connection_rec,
                          connection_proxy, sql_mode=None):
    """Set the sql_mode session variable.

    MySQL supports several server modes. The default is None, but sessions
    may choose to enable server modes like TRADITIONAL, ANSI,
    several STRICT_* modes and others.

    Note: passing in '' (empty string) for sql_mode clears
    the SQL mode for the session, overriding a potentially set server
    default. Passing in None (the default) makes this a no-op, meaning
    if a server-side SQL mode is set, it still applies.
    """
    cursor = dbapi_con.cursor()
    if sql_mode is not None:
        cursor.execute("SET SESSION sql_mode = %s", [sql_mode])

    # Check against the real effective SQL mode. Even when unset by
    # our own config, the server may still be operating in a specific
    # SQL mode as set by the server configuration.
    cursor.execute("SHOW VARIABLES LIKE 'sql_mode'")
    row = cursor.fetchone()
    if row is None:
        LOG.warning(_LW("Unable to detect effective SQL mode"))
        return
    realmode = row[1]
    # Pass the value as a logging argument so interpolation stays lazy.
    LOG.info(_LI("MySQL server mode set to %s"), realmode)
    # 'TRADITIONAL' mode enables several other modes, so
    # we need a substring match here
    if not ("TRADITIONAL" in realmode.upper() or
            "STRICT_ALL_TABLES" in realmode.upper()):
        LOG.warning(_LW("MySQL SQL mode is '%s', "
                        "consider enabling TRADITIONAL or "
                        "STRICT_ALL_TABLES"),
                    realmode)
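# Usage sketch (an assumption, not from the source above): the three
# positional arguments of _set_session_sql_mode match SQLAlchemy's pool
# 'checkout' event signature, so the callback can be registered with the
# desired mode pre-bound. Engine and mode names here are illustrative.
import functools

import sqlalchemy.event


def add_sql_mode_listener(engine, sql_mode='TRADITIONAL'):
    # Runs on every connection checked out of the pool.
    sqlalchemy.event.listen(
        engine, 'checkout',
        functools.partial(_set_session_sql_mode, sql_mode=sql_mode))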
def authorize_console(self, context, token, console_type, host, port,
                      internal_access_path, instance_uuid):
    token_dict = {'token': token,
                  'instance_uuid': instance_uuid,
                  'console_type': console_type,
                  'host': host,
                  'port': port,
                  'internal_access_path': internal_access_path,
                  'last_activity_at': time.time()}
    data = jsonutils.dumps(token_dict)

    # We need to log a warning if the token cannot be cached, because
    # the failure will leave the console for the instance unusable.
    if not self.mc.set(token.encode('UTF-8'),
                       data, CONF.console_token_ttl):
        LOG.warning(_LW("Token: %(token)s failed to save into memcached."),
                    {'token': token})
    tokens = self._get_tokens_for_instance(instance_uuid)

    # Drop expired tokens from the cache. Iterate over a copy so that
    # removing items does not skip elements of the list being walked.
    for tok in tokens[:]:
        token_str = self.mc.get(tok.encode('UTF-8'))
        if not token_str:
            tokens.remove(tok)
    tokens.append(token)
    if not self.mc.set(instance_uuid.encode('UTF-8'),
                       jsonutils.dumps(tokens)):
        LOG.warning(_LW("Instance: %(instance_uuid)s failed to save "
                        "into memcached"),
                    {'instance_uuid': instance_uuid})

    LOG.audit(_("Received Token: %(token)s, %(token_dict)s"),
              {'token': token, 'token_dict': token_dict})
def set_tcp_keepalive(sock, tcp_keepalive=True,
                      tcp_keepidle=None,
                      tcp_keepalive_interval=None,
                      tcp_keepalive_count=None):
    """Set values for tcp keepalive parameters

    This function configures tcp keepalive parameters if users wish to do
    so.

    :param tcp_keepalive: Boolean, turn on or off tcp_keepalive. If users
        are not sure, this should be True, and default values will be used.

    :param tcp_keepidle: time to wait before starting to send keepalive
        probes

    :param tcp_keepalive_interval: time between successive probes, once the
        initial wait time is over

    :param tcp_keepalive_count: number of probes to send before the
        connection is killed
    """

    # NOTE(praneshp): Despite keepalive being a tcp concept, the level is
    # still SOL_SOCKET. This is a quirk.
    if isinstance(tcp_keepalive, bool):
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE,
                        tcp_keepalive)
    else:
        raise TypeError("tcp_keepalive must be a boolean")

    if not tcp_keepalive:
        return

    # These options aren't available in the OS X version of eventlet.
    # Idle + Count * Interval effectively gives you the total timeout.
    if tcp_keepidle is not None:
        if hasattr(socket, 'TCP_KEEPIDLE'):
            sock.setsockopt(socket.IPPROTO_TCP,
                            socket.TCP_KEEPIDLE,
                            tcp_keepidle)
        else:
            LOG.warning(_LW('tcp_keepidle not available on your system'))
    if tcp_keepalive_interval is not None:
        if hasattr(socket, 'TCP_KEEPINTVL'):
            sock.setsockopt(socket.IPPROTO_TCP,
                            socket.TCP_KEEPINTVL,
                            tcp_keepalive_interval)
        else:
            LOG.warning(_LW('tcp_keepintvl not available on your system'))
    if tcp_keepalive_count is not None:
        if hasattr(socket, 'TCP_KEEPCNT'):
            sock.setsockopt(socket.IPPROTO_TCP,
                            socket.TCP_KEEPCNT,
                            tcp_keepalive_count)
        else:
            LOG.warning(_LW('tcp_keepcnt not available on your system'))
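# Minimal usage sketch (values illustrative): enable keepalive with a
# 10-minute idle time, probes every 15 seconds, and up to 8 failed
# probes, for a dead-peer timeout of roughly 600 + 8 * 15 seconds per
# the comment in set_tcp_keepalive above.
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
set_tcp_keepalive(sock,
                  tcp_keepalive=True,
                  tcp_keepidle=600,
                  tcp_keepalive_interval=15,
                  tcp_keepalive_count=8)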
def _inner(): if initial_delay: greenthread.sleep(initial_delay) try: while self._running: start = _ts() self.f(*self.args, **self.kw) end = _ts() if not self._running: break delay = end - start - interval if delay > 0: LOG.warn( _LW("task %(func_name)s run outlasted " "interval by %(delay).2f sec"), {"func_name": repr(self.f), "delay": delay}, ) greenthread.sleep(-delay if delay < 0 else 0) except LoopingCallDone as e: self.stop() done.send(e.retvalue) except Exception: LOG.exception(_LE("in fixed duration looping call")) done.send_exception(*sys.exc_info()) return else: done.send(True)
def _mysql_check_effective_sql_mode(engine): """Logs a message based on the effective SQL mode for MySQL connections.""" realmode = _mysql_get_effective_sql_mode(engine) if realmode is None: LOG.warning(_LW('Unable to detect effective SQL mode')) return LOG.debug('MySQL server mode set to %s', realmode) # 'TRADITIONAL' mode enables several other modes, so # we need a substring match here if not ('TRADITIONAL' in realmode.upper() or 'STRICT_ALL_TABLES' in realmode.upper()): LOG.warning(_LW("MySQL SQL mode is '%s', " "consider enabling TRADITIONAL or STRICT_ALL_TABLES"), realmode)
def _inner():
    if initial_delay:
        greenthread.sleep(initial_delay)

    try:
        while self._running:
            start = timeutils.utcnow()
            self.f(*self.args, **self.kw)
            end = timeutils.utcnow()
            if not self._running:
                break
            delay = interval - timeutils.delta_seconds(start, end)
            if delay <= 0:
                # Pass the overrun as a logging argument so the string
                # is only interpolated when the record is emitted.
                LOG.warn(_LW('task run outlasted interval by %s sec'),
                         -delay)
            greenthread.sleep(delay if delay > 0 else 0)
    except LoopingCallDone as e:
        self.stop()
        done.send(e.retvalue)
    except Exception:
        LOG.exception(_LE('in fixed duration looping call'))
        done.send_exception(*sys.exc_info())
        return
    else:
        done.send(True)
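# Worked example of the timing math in _inner above (illustrative
# numbers): the loop aims for a fixed cadence, sleeping only for
# whatever part of the interval the task did not consume.
interval = 5.0

elapsed = 7.2                 # task overran the interval
delay = interval - elapsed    # -2.2 -> warn "outlasted by 2.2 sec"
assert delay <= 0             # sleep(0), re-run immediately

elapsed = 3.0                 # task finished early
delay = interval - elapsed    # 2.0 -> sleep 2.0s to hold the cadence
assert delay > 0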
def _wait_child(self): try: # Don't block if no child processes have exited pid, status = os.waitpid(0, os.WNOHANG) if not pid: return None except OSError as exc: if exc.errno not in (errno.EINTR, errno.ECHILD): raise return None if os.WIFSIGNALED(status): sig = os.WTERMSIG(status) LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'), dict(pid=pid, sig=sig)) else: code = os.WEXITSTATUS(status) LOG.info(_LI('Child %(pid)s exited with status %(code)d'), dict(pid=pid, code=code)) if pid not in self.children: LOG.warning(_LW('pid %d not in child list'), pid) return None wrap = self.children.pop(pid) wrap.children.remove(pid) return wrap
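# Hedged sketch of a reap-and-respawn loop driving _wait_child; the
# surrounding service code is assumed, and _start_child, wrap.workers
# and self.running are illustrative names, not taken from the source.
import time


def _respawn_children(self):
    while self.running:
        wrap = self._wait_child()
        if not wrap:
            # Nothing exited; yield briefly before polling again.
            time.sleep(0.01)
            continue
        while self.running and len(wrap.children) < wrap.workers:
            self._start_child(wrap)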
def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy): """Ensures that MySQL, PostgreSQL or DB2 connections are alive. Borrowed from: http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f """ cursor = dbapi_conn.cursor() try: ping_sql = 'select 1' if engine.name == 'ibm_db_sa': # DB2 requires a table expression ping_sql = 'select 1 from (values (1)) AS t1' cursor.execute(ping_sql) except Exception as ex: if engine.dialect.is_disconnect(ex, dbapi_conn, cursor): msg = _LW('Database server has gone away: %s') % ex LOG.warning(msg) # if the database server has gone away, all connections in the pool # have become invalid and we can safely close all of them here, # rather than waste time on checking of every single connection engine.dispose() # this will be handled by SQLAlchemy and will force it to create # a new connection and retry the original action raise sqla_exc.DisconnectionError(msg) else: raise
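# Usage sketch (an assumption): as with _set_session_sql_mode, the
# trailing three arguments match SQLAlchemy's pool 'checkout' event
# signature, so the listener is attached with the engine pre-bound.
import functools

import sqlalchemy.event


def add_ping_listener(engine):
    sqlalchemy.event.listen(
        engine, 'checkout', functools.partial(_ping_listener, engine))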
def destroy(self, context, instance, network_info, block_device_info=None, destroy_disks=True): """Destroy the specified instance, if it can be found. :param context: The security context. :param instance: The instance object. :param network_info: Instance network information. :param block_device_info: Instance block device information. Ignored by this driver. :param destroy_disks: Indicates if disks should be destroyed. Ignored by this driver. """ icli = client_wrapper.IronicClientWrapper() try: node = _validate_instance_and_node(icli, instance) except exception.InstanceNotFound: LOG.warning(_LW("Destroy called on non-existing instance %s."), instance['uuid']) # NOTE(deva): if nova.compute.ComputeManager._delete_instance() # is called on a non-existing instance, the only way # to delete it is to return from this method # without raising any exceptions. return if node.provision_state in (ironic_states.ACTIVE, ironic_states.DEPLOYFAIL, ironic_states.ERROR, ironic_states.DEPLOYWAIT): self._unprovision(icli, instance, node) self._cleanup_deploy(node, instance, network_info)
def disconnect_volume(self, connection_info, mount_device):
    """Detach the volume from instance_name."""
    super(LibvirtFibreChannelVolumeDriver,
          self).disconnect_volume(connection_info, mount_device)

    # If this is a multipath device, we need to search again
    # and make sure we remove all the devices. Some of them
    # might not have shown up at attach time.
    if 'multipath_id' in connection_info['data']:
        multipath_id = connection_info['data']['multipath_id']
        mdev_info = linuxscsi.find_multipath_device(multipath_id)
        devices = mdev_info['devices']
        LOG.debug(_("devices to remove = %s"), devices)
    else:
        # only needed when multipath-tools works improperly
        devices = connection_info['data'].get('devices', [])
        LOG.warn(_LW("multipath-tools is probably working improperly. "
                     "devices to remove = %s."), devices)

    # There may have been more than 1 device mounted
    # by the kernel for this volume. We have to remove
    # all of them
    for device in devices:
        linuxscsi.remove_device(device)
def _wait_for_device_discovery(host_devices, mount_device): tries = self.tries for device in host_devices: LOG.debug("Looking for Fibre Channel dev %(device)s", {'device': device}) if os.path.exists(device): self.host_device = device # get the /dev/sdX device. This is used # to find the multipath device. self.device_name = os.path.realpath(device) raise loopingcall.LoopingCallDone() if self.tries >= CONF.libvirt.num_iscsi_scan_tries: msg = _("Fibre Channel device not found.") raise exception.NovaException(msg) LOG.warn( _LW("Fibre volume not yet found at: %(mount_device)s. " "Will rescan & retry. Try number: %(tries)s"), { 'mount_device': mount_device, 'tries': tries }) linuxscsi.rescan_hosts(hbas) self.tries = self.tries + 1
def set_disk_host_resource(self, vm_name, controller_path, address,
                           mounted_disk_path):
    disk_found = False
    vm = self._lookup_vm_check(vm_name)
    (disk_resources, volume_resources) = self._get_vm_disks(vm)
    for disk_resource in disk_resources + volume_resources:
        if (disk_resource.Parent == controller_path and
                self._get_disk_resource_address(disk_resource) ==
                str(address)):
            if (disk_resource.HostResource and
                    disk_resource.HostResource[0] != mounted_disk_path):
                LOG.debug('Updating disk host resource "%(old)s" to '
                          '"%(new)s"',
                          {'old': disk_resource.HostResource[0],
                           'new': mounted_disk_path})
                disk_resource.HostResource = [mounted_disk_path]
                self._modify_virt_resource(disk_resource, vm.path_())
            disk_found = True
            break
    if not disk_found:
        LOG.warn(_LW('Disk not found on controller "%(controller_path)s" '
                     'with address "%(address)s"'),
                 {'controller_path': controller_path,
                  'address': address})
def get_info(self, instance):
    """Get the current state and resource usage for this instance.

    If the instance is not found, this method returns a dictionary
    with NOSTATE and all resources == 0.

    :param instance: the instance object.
    :returns: a dictionary containing:

        :state: the running state. One of :mod:`nova.compute.power_state`.
        :max_mem: (int) the maximum memory in KBytes allowed.
        :mem: (int) the memory in KBytes used by the domain.
        :num_cpu: (int) the number of CPUs.
        :cpu_time: (int) the CPU time used in nanoseconds. Always 0 for
            this driver.

    """
    icli = client_wrapper.IronicClientWrapper()
    try:
        node = _validate_instance_and_node(icli, instance)
    except exception.InstanceNotFound:
        return {'state': map_power_state(ironic_states.NOSTATE),
                'max_mem': 0,
                'mem': 0,
                'num_cpu': 0,
                'cpu_time': 0
                }

    memory_kib = int(node.properties.get('memory_mb', 0)) * 1024
    if memory_kib == 0:
        LOG.warn(_LW("Memory usage is 0 for "
                     "%(instance)s on baremetal node %(node)s."),
                 {'instance': instance['uuid'],
                  'node': instance['node']})

    num_cpu = node.properties.get('cpus', 0)
    if num_cpu == 0:
        LOG.warn(_LW("Number of cpus is 0 for "
                     "%(instance)s on baremetal node %(node)s."),
                 {'instance': instance['uuid'],
                  'node': instance['node']})

    return {'state': map_power_state(node.power_state),
            'max_mem': memory_kib,
            'mem': memory_kib,
            'num_cpu': num_cpu,
            'cpu_time': 0
            }
def get_fc_hbas():
    """Get the Fibre Channel HBA information."""
    out = None
    try:
        out, err = execute('systool', '-c', 'fc_host', '-v',
                           run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        # This handles the case where rootwrap is used
        # and systool is not installed
        # 96 = nova.cmd.rootwrap.RC_NOEXECFOUND
        if exc.exit_code == 96:
            LOG.warn(_LW("systool is not installed"))
        return []
    except OSError as exc:
        # This handles the case where rootwrap is NOT used
        # and systool is not installed
        if exc.errno == errno.ENOENT:
            LOG.warn(_LW("systool is not installed"))
        return []

    if out is None:
        raise RuntimeError(_("Cannot find any Fibre Channel HBAs"))

    lines = out.split('\n')
    # ignore the first 2 lines
    lines = lines[2:]
    hbas = []
    hba = {}
    lastline = None
    for line in lines:
        line = line.strip()
        # 2 consecutive newlines denote a new HBA port
        if line == '' and lastline == '':
            if len(hba) > 0:
                hbas.append(hba)
                hba = {}
        else:
            val = line.split('=')
            if len(val) == 2:
                key = val[0].strip().replace(" ", "")
                value = val[1].strip()
                hba[key] = value.replace('"', '')
        lastline = line

    return hbas
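# Illustrative follow-up built on get_fc_hbas (the 'port_state' and
# 'port_name' keys depend on systool's output format and are an
# assumption here): collect the WWPNs of the online HBA ports.
def get_fc_wwpns():
    wwpns = []
    for hba in get_fc_hbas():
        if hba.get('port_state') == 'Online':
            wwpn = hba['port_name'].replace('0x', '')
            wwpns.append(wwpn)
    return wwpns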
def _delete_device(self, device_path): device_name = os.path.basename(os.path.realpath(device_path)) delete_control = '/sys/block/' + device_name + '/device/delete' if os.path.exists(delete_control): # Copy '1' from stdin to the device delete control file utils.execute('cp', '/dev/stdin', delete_control, process_input='1', run_as_root=True) else: LOG.warn(_LW("Unable to delete volume device %s"), device_name)
def remove_rbd_volumes(pool, *names):
    """Remove one or more rbd volumes."""
    for name in names:
        rbd_remove = ['rbd', '-p', pool, 'rm', name]
        try:
            _run_rbd(*rbd_remove, attempts=3, run_as_root=True)
        except processutils.ProcessExecutionError:
            LOG.warn(_LW("rbd remove %(name)s in pool %(pool)s failed"),
                     {'name': name, 'pool': pool})
def __init__(self, virtapi, get_connection, **kwargs): super(NWFilterFirewall, self).__init__(virtapi) global libvirt if libvirt is None: try: libvirt = __import__('libvirt') except ImportError: LOG.warn(_LW("Libvirt module could not be loaded. " "NWFilterFirewall will not work correctly.")) self._libvirt_get_connection = get_connection self.static_filters_configured = False self.handle_security_groups = False
def connect_volume(self, connection_info, disk_info): """Connect the volume. Returns xml for libvirt.""" conf = vconfig.LibvirtConfigGuestDisk() conf.driver_name = virtutils.pick_disk_driver_name( self.connection.get_hypervisor_version(), self.is_block_dev ) conf.source_device = disk_info['type'] conf.driver_format = "raw" conf.driver_cache = "none" conf.target_dev = disk_info['dev'] conf.target_bus = disk_info['bus'] conf.serial = connection_info.get('serial') # Support for block size tuning data = {} if 'data' in connection_info: data = connection_info['data'] if 'logical_block_size' in data: conf.logical_block_size = data['logical_block_size'] if 'physical_block_size' in data: conf.physical_block_size = data['physical_block_size'] # Extract rate_limit control parameters if 'qos_specs' in data and data['qos_specs']: tune_opts = ['total_bytes_sec', 'read_bytes_sec', 'write_bytes_sec', 'total_iops_sec', 'read_iops_sec', 'write_iops_sec'] specs = data['qos_specs'] if isinstance(specs, dict): for k, v in specs.iteritems(): if k in tune_opts: new_key = 'disk_' + k setattr(conf, new_key, v) else: LOG.warn(_LW('Unknown content in connection_info/' 'qos_specs: %s'), specs) # Extract access_mode control parameters if 'access_mode' in data and data['access_mode']: access_mode = data['access_mode'] if access_mode in ('ro', 'rw'): conf.readonly = access_mode == 'ro' else: LOG.error(_LE('Unknown content in ' 'connection_info/access_mode: %s'), access_mode) raise exception.InvalidVolumeAccessMode( access_mode=access_mode) return conf
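# Illustrative input for connect_volume (all values hypothetical): a
# connection_info dict that exercises the block-size, qos_specs and
# access_mode branches above, producing a read-only, rate-limited disk.
connection_info = {
    'serial': 'volume-serial',
    'data': {
        'logical_block_size': '4096',
        'physical_block_size': '4096',
        'qos_specs': {
            'total_bytes_sec': '102400',
            'read_iops_sec': '200',
        },
        'access_mode': 'ro',
    },
}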
def _remove_multipath_device_descriptor(self, disk_descriptor):
    disk_descriptor = disk_descriptor.replace('/dev/mapper/', '')
    try:
        self._run_multipath(['-f', disk_descriptor],
                            check_exit_code=[0, 1])
    except processutils.ProcessExecutionError as exc:
        # Because not all cinder drivers need to remove the dev mapper,
        # we just log a warning here to avoid affecting those drivers in
        # exceptional cases.
        LOG.warn(_LW('Failed to remove multipath device descriptor '
                     '%(dev_mapper)s. Exception message: %(msg)s'),
                 {'dev_mapper': disk_descriptor,
                  'msg': exc.message})
def _wait_for_device_discovery(aoedevpath, mount_device): tries = self.tries if os.path.exists(aoedevpath): raise loopingcall.LoopingCallDone() if self.tries >= CONF.libvirt.num_aoe_discover_tries: raise exception.NovaException(_("AoE device not found at %s") % (aoedevpath)) LOG.warn(_LW("AoE volume not yet found at: %(aoedevpath)s. " "Try number: %(tries)s"), {'aoedevpath': aoedevpath, 'tries': tries}) self._aoe_discover() self.tries = self.tries + 1
def _mount_sofs(self): config = CONF.libvirt.scality_sofs_config mount_path = CONF.libvirt.scality_sofs_mount_point sysdir = os.path.join(mount_path, 'sys') if not os.path.isdir(mount_path): utils.execute('mkdir', '-p', mount_path) if not os.path.isdir(sysdir): utils.execute('mount', '-t', 'sofs', config, mount_path, run_as_root=True) if not os.path.isdir(sysdir): msg = _LW("Cannot mount Scality SOFS, check syslog for errors") LOG.warn(msg) raise exception.NovaException(msg)
def remove_rbd_volumes(pool, *names):
    """Remove one or more rbd volumes."""
    for name in names:
        # NOTE(nic): the rbd command supports two methods for
        # specifying a pool name: the "-p" flag, and using the volume
        # name notation "pool_name/volume_name"
        # The latter method supersedes the former, so to guard
        # against slashes in the volume name confusing things, always
        # use the path notation
        rbd_remove = ('rbd', 'rm', os.path.join(pool, name))
        try:
            _run_rbd(*rbd_remove, attempts=3, run_as_root=True)
        except processutils.ProcessExecutionError:
            LOG.warn(_LW("rbd remove %(name)s in pool %(pool)s failed"),
                     {'name': name, 'pool': pool})
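# Usage sketch (pool and volume names illustrative). With the path
# notation, each volume is removed via a command of the form:
#   rbd rm volumes/volume-0001
remove_rbd_volumes('volumes', 'volume-0001', 'volume-0002')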
def create_volume(vg, lv, size, sparse=False): """Create LVM image. Creates a LVM image with given size. :param vg: existing volume group which should hold this image :param lv: name for this image (logical volume) :size: size of image in bytes :sparse: create sparse logical volume """ vg_info = get_volume_group_info(vg) free_space = vg_info['free'] def check_size(vg, lv, size): if size > free_space: raise RuntimeError( _('Insufficient Space on Volume Group %(vg)s.' ' Only %(free_space)db available,' ' but %(size)db required' ' by volume %(lv)s.') % { 'vg': vg, 'free_space': free_space, 'size': size, 'lv': lv }) if sparse: preallocated_space = 64 * units.Mi check_size(vg, lv, preallocated_space) if free_space < size: LOG.warn( _LW('Volume group %(vg)s will not be able' ' to hold sparse volume %(lv)s.' ' Virtual volume size is %(size)db,' ' but free space on volume group is' ' only %(free_space)db.'), { 'vg': vg, 'free_space': free_space, 'size': size, 'lv': lv }) cmd = ('lvcreate', '-L', '%db' % preallocated_space, '--virtualsize', '%db' % size, '-n', lv, vg) else: check_size(vg, lv, size) cmd = ('lvcreate', '-L', '%db' % size, '-n', lv, vg) utils.execute(*cmd, run_as_root=True, attempts=3)
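# Usage sketch (names illustrative): a sparse 1 GiB volume only
# preallocates 64 MiB up front, so check_size runs against the
# preallocation while the warning still flags an oversubscribed VG.
create_volume('nova-vg', 'instance-0001-disk', 1 * units.Gi, sparse=True)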
def _get_cpu_allocation_ratio(self, host_state, filter_properties): # TODO(uni): DB query in filter is a performance hit, especially for # system with lots of hosts. Will need a general solution here to fix # all filters with aggregate DB call things. aggregate_vals = utils.aggregate_values_from_db( filter_properties['context'], host_state.host, 'cpu_allocation_ratio') try: ratio = utils.validate_num_values( aggregate_vals, CONF.cpu_allocation_ratio, cast_to=float) except ValueError as e: LOG.warning(_LW("Could not decode cpu_allocation_ratio: '%s'"), e) ratio = CONF.cpu_allocation_ratio return ratio
def _mount_glusterfs(self, mount_path, glusterfs_share, options=None, ensure=False): """Mount glusterfs export to mount path.""" utils.execute('mkdir', '-p', mount_path) gluster_cmd = ['mount', '-t', 'glusterfs'] if options is not None: gluster_cmd.extend(options.split(' ')) gluster_cmd.extend([glusterfs_share, mount_path]) try: utils.execute(*gluster_cmd, run_as_root=True) except processutils.ProcessExecutionError as exc: if ensure and 'already mounted' in exc.message: LOG.warn(_LW("%s is already mounted"), glusterfs_share) else: raise
def create_lvm_image(vg, lv, size, sparse=False): """Create LVM image. Creates a LVM image with given size. :param vg: existing volume group which should hold this image :param lv: name for this image (logical volume) :size: size of image in bytes :sparse: create sparse logical volume """ vg_info = get_volume_group_info(vg) free_space = vg_info['free'] def check_size(vg, lv, size): if size > free_space: raise RuntimeError(_('Insufficient Space on Volume Group %(vg)s.' ' Only %(free_space)db available,' ' but %(size)db required' ' by volume %(lv)s.') % {'vg': vg, 'free_space': free_space, 'size': size, 'lv': lv}) if sparse: preallocated_space = 64 * units.Mi check_size(vg, lv, preallocated_space) if free_space < size: LOG.warn(_LW('Volume group %(vg)s will not be able' ' to hold sparse volume %(lv)s.' ' Virtual volume size is %(size)db,' ' but free space on volume group is' ' only %(free_space)db.'), {'vg': vg, 'free_space': free_space, 'size': size, 'lv': lv}) cmd = ('lvcreate', '-L', '%db' % preallocated_space, '--virtualsize', '%db' % size, '-n', lv, vg) else: check_size(vg, lv, size) cmd = ('lvcreate', '-L', '%db' % size, '-n', lv, vg) execute(*cmd, run_as_root=True, attempts=3)
def _age_and_verify_cached_images(self, context, all_instances, base_dir): LOG.debug('Verify base images') # Determine what images are on disk because they're in use for img in self.used_images: fingerprint = hashlib.sha1(img).hexdigest() LOG.debug('Image id %(id)s yields fingerprint %(fingerprint)s', {'id': img, 'fingerprint': fingerprint}) for result in self._find_base_file(base_dir, fingerprint): base_file, image_small, image_resized = result self._handle_base_image(img, base_file) if not image_small and not image_resized: self.originals.append(base_file) # Elements remaining in unexplained_images might be in use inuse_backing_images = self._list_backing_images() for backing_path in inuse_backing_images: if backing_path not in self.active_base_files: self.active_base_files.append(backing_path) # Anything left is an unknown base image for img in self.unexplained_images: LOG.warn(_LW('Unknown base file: %s'), img) self.removable_base_files.append(img) # Dump these lists if self.active_base_files: LOG.info(_LI('Active base files: %s'), ' '.join(self.active_base_files)) if self.corrupt_base_files: LOG.info(_LI('Corrupt base files: %s'), ' '.join(self.corrupt_base_files)) if self.removable_base_files: LOG.info(_LI('Removable base files: %s'), ' '.join(self.removable_base_files)) if self.remove_unused_base_images: for base_file in self.removable_base_files: self._remove_base_file(base_file) # That's it LOG.debug('Verification complete')
def _mount_nfs(self, mount_path, nfs_share, options=None, ensure=False): """Mount nfs export to mount path.""" utils.execute('mkdir', '-p', mount_path) # Construct the NFS mount command. nfs_cmd = ['mount', '-t', 'nfs'] if CONF.libvirt.nfs_mount_options is not None: nfs_cmd.extend(['-o', CONF.libvirt.nfs_mount_options]) if options is not None: nfs_cmd.extend(options.split(' ')) nfs_cmd.extend([nfs_share, mount_path]) try: utils.execute(*nfs_cmd, run_as_root=True) except processutils.ProcessExecutionError as exc: if ensure and 'already mounted' in exc.message: LOG.warn(_LW("%s is already mounted"), nfs_share) else: raise
def _list_backing_images(self): """List the backing images currently in use.""" inuse_images = [] for ent in os.listdir(CONF.instances_path): if ent in self.instance_names: LOG.debug('%s is a valid instance name', ent) disk_path = os.path.join(CONF.instances_path, ent, 'disk') if os.path.exists(disk_path): LOG.debug('%s has a disk file', ent) try: backing_file = virtutils.get_disk_backing_file( disk_path) except processutils.ProcessExecutionError: # (for bug 1261442) if not os.path.exists(disk_path): LOG.debug('Failed to get disk backing file: %s', disk_path) continue else: raise LOG.debug('Instance %(instance)s is backed by ' '%(backing)s', {'instance': ent, 'backing': backing_file}) if backing_file: backing_path = os.path.join( CONF.instances_path, CONF.image_cache_subdirectory_name, backing_file) if backing_path not in inuse_images: inuse_images.append(backing_path) if backing_path in self.unexplained_images: LOG.warn(_LW('Instance %(instance)s is using a ' 'backing file %(backing)s which ' 'does not appear in the image ' 'service'), {'instance': ent, 'backing': backing_file}) self.unexplained_images.remove(backing_path) return inuse_images
def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy): """Ensures that MySQL and DB2 connections are alive. Borrowed from: http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f """ cursor = dbapi_conn.cursor() try: ping_sql = 'select 1' if engine.name == 'ibm_db_sa': # DB2 requires a table expression ping_sql = 'select 1 from (values (1)) AS t1' cursor.execute(ping_sql) except Exception as ex: if engine.dialect.is_disconnect(ex, dbapi_conn, cursor): msg = _LW('Database server has gone away: %s') % ex LOG.warning(msg) raise sqla_exc.DisconnectionError(msg) else: raise
def host_passes(self, host_state, filter_properties): """Return True if host has sufficient CPU cores.""" instance_type = filter_properties.get('instance_type') if not instance_type: return True if not host_state.vcpus_total: # Fail safe LOG.warning(_LW("VCPUs not set; assuming CPU collection broken")) return True instance_vcpus = instance_type['vcpus'] cpu_allocation_ratio = self._get_cpu_allocation_ratio( host_state, filter_properties) vcpus_total = host_state.vcpus_total * cpu_allocation_ratio # Only provide a VCPU limit to compute if the virt driver is reporting # an accurate count of installed VCPUs. (XenServer driver does not) if vcpus_total > 0: host_state.limits['vcpu'] = vcpus_total return (vcpus_total - host_state.vcpus_used) >= instance_vcpus
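# Worked example of the limit math in host_passes (illustrative
# numbers): 8 physical VCPUs with cpu_allocation_ratio=16.0 advertise
# a limit of 128 VCPUs.
vcpus_total = 8 * 16.0                          # 128.0
vcpus_used = 120
assert (vcpus_total - vcpus_used) >= 4          # a 4-VCPU instance fits
assert not ((vcpus_total - vcpus_used) >= 9)    # a 9-VCPU one does not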