Example #1
 def __init__(self, name=None):
     self._name = name or config.get("containers", "network_name")
     self._gw = config.get("containers", "network_gateway")
     self._nic = config.get("containers", "network_interface")
     self._subnet = config.get("containers", "network_subnet")
     self._mask = config.getint("containers", "network_mask")
     self._existing = False
Example #2
 def __init__(self, name=None):
     self._name = name or config.get(
         'containers', 'network_name')
     self._gw = config.get('containers', 'network_gateway')
     self._nic = config.get('containers', 'network_interface')
     self._subnet = config.get('containers', 'network_subnet')
     self._mask = config.getint('containers', 'network_mask')
     self._existing = False
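
All of these snippets read settings through vdsm's global config object, which exposes the standard ConfigParser accessors (get, getint, getboolean). A minimal self-contained sketch of the same access pattern, with an assumed [containers] section and invented values:

from configparser import ConfigParser

# Stand-in for the global config object used in the examples; the
# section and option names come from the snippets above, the values
# are invented for illustration.
config = ConfigParser()
config.read_string("""
[containers]
network_name = vdsm-net
network_gateway = 192.168.122.1
network_interface = eth0
network_subnet = 192.168.122.0
network_mask = 24
""")

name = config.get("containers", "network_name")     # 'vdsm-net' (str)
mask = config.getint("containers", "network_mask")  # 24 (int)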
Example #3
 def _loadBindingJsonRpc(self):
     from BindingJsonRpc import BindingJsonRpc
     from Bridge import DynamicBridge
     ip = config.get('addresses', 'management_ip')
     port = config.getint('addresses', 'json_port')
     truststore_path = None
     if config.getboolean('vars', 'ssl'):
         truststore_path = config.get('vars', 'trust_store_path')
     conf = [('tcp', {"ip": ip, "port": port})]
     self.bindings['json'] = BindingJsonRpc(DynamicBridge(), conf,
                                            truststore_path)
Example #4
 def _createSSLContext(self):
     sslctx = None
     if config.getboolean('vars', 'ssl'):
         truststore_path = config.get('vars', 'trust_store_path')
         key_file = os.path.join(truststore_path, 'keys', 'vdsmkey.pem')
         cert_file = os.path.join(truststore_path, 'certs', 'vdsmcert.pem')
         ca_cert = os.path.join(truststore_path, 'certs', 'cacert.pem')
         protocol = config.get('vars', 'ssl_protocol')
         sslctx = SSLContext(cert_file, key_file, ca_cert=ca_cert,
                             protocol=protocol)
     return sslctx
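
For comparison, a standard-library ssl sketch that builds an equivalent server-side context from the same truststore layout; the vdsm SSLContext wrapper above takes these paths directly, so this is only an illustration:

import os
import ssl

def create_ssl_context(truststore_path):
    # Same assumed layout as above: keys/vdsmkey.pem, certs/vdsmcert.pem
    # and certs/cacert.pem under the configured trust_store_path.
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    ctx.load_cert_chain(
        certfile=os.path.join(truststore_path, 'certs', 'vdsmcert.pem'),
        keyfile=os.path.join(truststore_path, 'keys', 'vdsmkey.pem'))
    ctx.load_verify_locations(
        os.path.join(truststore_path, 'certs', 'cacert.pem'))
    ctx.verify_mode = ssl.CERT_REQUIRED  # require client certificates
    return ctx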
Example #5
 def _prepareBindings(self):
     self.bindings = {}
     xmlrpc_params = {
         'ip': config.get('addresses', 'management_ip'),
         'port': config.get('addresses', 'management_port'),
         'ssl': config.getboolean('vars', 'ssl'),
         'vds_responsiveness_timeout':
             config.getint('vars', 'vds_responsiveness_timeout'),
         'trust_store_path': config.get('vars', 'trust_store_path'),
         'default_bridge': config.get("vars", "default_bridge"), }
     self.bindings['xmlrpc'] = BindingXMLRPC(self, self.log, xmlrpc_params)
Example #6
 def start(self):
     if _JSONRPC_ENABLED:
         requestQueues = config.get('addresses', 'request_queues')
         requestQueue = requestQueues.split(",")[0]
         self.vdscli = jsonrpcvdscli.connect(requestQueue, xml_compat=False)
     else:
         self.vdscli = vdscli.connect()
     self.netinfo = self._get_netinfo()
     if config.get('vars', 'net_persistence') == 'unified':
         self.config = RunningConfig()
     else:
         self.config = None
Example #7
 def _loadBindingXMLRPC(self):
     from BindingXMLRPC import BindingXMLRPC
     ip = config.get('addresses', 'management_ip')
     xmlrpc_port = config.get('addresses', 'management_port')
     use_ssl = config.getboolean('vars', 'ssl')
     resp_timeout = config.getint('vars', 'vds_responsiveness_timeout')
     truststore_path = config.get('vars', 'trust_store_path')
     default_bridge = config.get("vars", "default_bridge")
     self.bindings['xmlrpc'] = BindingXMLRPC(self, self.log, ip,
                                             xmlrpc_port, use_ssl,
                                             resp_timeout, truststore_path,
                                             default_bridge)
Example #8
File: cpu.py Project: EdDev/vdsm
def start():
    """ Starts application wide CPU profiling """
    global _profiler
    if is_enabled():
        with _lock:
            if _profiler:
                raise UsageError('CPU profiler is already running')
            _profiler = Profiler(
                config.get('devel', 'cpu_profile_filename'),
                format=config.get('devel', 'cpu_profile_format'),
                clock=config.get('devel', 'cpu_profile_clock'),
                builtins=config.getboolean('devel', 'cpu_profile_builtins'),
                threads=True)
            _profiler.start()
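
start() guards the module-level _profiler with a check under _lock; the teardown side follows the same pattern. A sketch of the matching stop() (vdsm ships one, but this body is illustrative rather than the project's exact code):

def stop():
    """ Stops application wide CPU profiling """
    global _profiler
    if is_enabled():
        with _lock:
            if _profiler is None:
                raise UsageError('CPU profiler is not running')
            _profiler.stop()
            _profiler = None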
Example #9
 def start(self):
     self.vdscli = vdscli.connect()
     self.netinfo = self._get_netinfo()
     if config.get('vars', 'net_persistence') == 'unified':
         self.config = RunningConfig()
     else:
         self.config = None
Example #10
    def wrapped(*args, **kwargs):
        if not config.get('vars', 'net_persistence') == 'unified':
            return func(*args, **kwargs)

        # Get args and kwargs in a single dictionary
        attrs = kwargs.copy()
        attrs.update(dict(zip(spec.args, args)))

        isolatedCommand = attrs.get('configurator') is None
        # Detect if we are running an isolated command, i.e., a command
        # that is not called as part of a composed API operation like
        # setupNetworks or editNetwork, but rather as its own API verb.
        # This is necessary in order to maintain the behavior of the
        # addNetwork and delNetwork API verbs.
        if isolatedCommand:
            attrs['configurator'] = configurator = ConfiguratorClass()
            configurator.begin()
        else:
            configurator = attrs['configurator']

        ret = func(**attrs)

        nics = attrs.pop('nics', None)
        # Bond config is handled in the configurator so that operations
        # touching only bonds don't need special casing and this logic
        # stays simpler.
        if not attrs.get('bonding'):
            if nics:
                attrs['nic'], = nics

        if func.__name__ == 'delNetwork':
            configurator.runningConfig.removeNetwork(attrs.pop('network'))
        else:
            configurator.runningConfig.setNetwork(attrs.pop('network'), attrs)
        if isolatedCommand:  # Commit the no-rollback transaction.
            configurator.commit()
        return ret
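
wrapped closes over func, spec and ConfiguratorClass from an enclosing decorator factory; spec.args implies spec was produced by inspect. A sketch of that assumed enclosing scope (the factory name is hypothetical; the original Python 2 code would have used inspect.getargspec):

import inspect
from functools import wraps

def persist_network_config(func):  # hypothetical name for the factory
    spec = inspect.getfullargspec(func)  # supplies spec.args used above

    @wraps(func)
    def wrapped(*args, **kwargs):
        ...  # body as shown in the example above
    return wrapped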
Example #11
    def __init__(self, vm, dst='', dstparams='',
                 mode=MODE_REMOTE, method=METHOD_ONLINE,
                 tunneled=False, dstqemu='', abortOnError=False,
                 consoleAddress=None, compressed=False,
                 autoConverge=False, **kwargs):
        self.log = vm.log
        self._vm = vm
        self._dst = dst
        self._mode = mode
        if method != METHOD_ONLINE:
            self.log.warning(
                'migration method %s is deprecated, forced to "online"',
                method)
        self._dstparams = dstparams
        self._machineParams = {}
        self._tunneled = utils.tobool(tunneled)
        self._abortOnError = utils.tobool(abortOnError)
        self._consoleAddress = consoleAddress
        self._dstqemu = dstqemu
        self._downtime = kwargs.get('downtime') or \
            config.get('vars', 'migration_downtime')
        self._maxBandwidth = int(
            kwargs.get('maxBandwidth') or
            config.getint('vars', 'migration_max_bandwidth')
        )
        self._autoConverge = autoConverge
        self._compressed = compressed
        self.status = {
            'status': {
                'code': 0,
                'message': 'Migration in progress'}}
        self._progress = 0
        threading.Thread.__init__(self)
        self._preparingMigrationEvt = True
        self._migrationCanceledEvt = False
        self._monitorThread = None
        self._destServer = None

        progress_timeout = config.getint('vars', 'migration_progress_timeout')

        self._convergence_schedule = {
            'init': [],
            'stalling': [
                {
                    'limit': progress_timeout,
                    'action': {
                        'name': CONVERGENCE_SCHEDULE_SET_ABORT,
                        'params': []
                    }
                }
            ]
        }

        self._use_convergence_schedule = False
        if 'convergenceSchedule' in kwargs:
            self._convergence_schedule = kwargs.get('convergenceSchedule')
            self._use_convergence_schedule = True

        self.log.debug('convergence schedule set to: %s',
                       str(self._convergence_schedule))
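
A caller-supplied convergenceSchedule has the same shape as the default built above: init actions are applied immediately, and each stalling entry fires once the stall counter passes its limit, with limit -1 acting as the final catch-all. An illustrative value (the downtime steps are invented; the action names appear in these examples):

convergence_schedule = {
    'init': [
        {'name': 'setDowntime', 'params': ['100']},
    ],
    'stalling': [
        {'limit': 1, 'action': {'name': 'setDowntime', 'params': ['200']}},
        {'limit': 2, 'action': {'name': 'setDowntime', 'params': ['500']}},
        {'limit': -1, 'action': {'name': 'abort', 'params': []}},
    ],
}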
Example #12
 def __init__(self, configApplier):
     self.configApplier = configApplier
     self._libvirtAdded = set()
     self.unifiedPersistence = \
         config.get('vars', 'persistence') == 'unified'
     if self.unifiedPersistence:
         self.runningConfig = RunningConfig()
Example #13
 def __init__(self, hostID, poolID, monitorInterval=2):
     self._hostID = str(hostID)
     self._poolID = str(poolID)
     self._monitorInterval = monitorInterval
     self._spmStorageDir = config.get('irs', 'repository')
     self._queue = Queue.Queue(-1)
     #  *** IMPORTANT NOTE: The SPM's inbox is the HSMs' outbox and vice
     #                      versa *** #
     self._inbox = os.path.join(self._spmStorageDir, self._poolID,
                                "mastersd", sd.DOMAIN_META_DATA, "outbox")
     if not os.path.exists(self._inbox):
         self.log.error("HSM_Mailbox create failed - inbox %s does not "
                        "exist" % repr(self._inbox))
         raise RuntimeError("HSM_Mailbox create failed - inbox %s does not "
                            "exist" % repr(self._inbox))
     self._outbox = os.path.join(self._spmStorageDir, self._poolID,
                                 "mastersd", sd.DOMAIN_META_DATA, "inbox")
     if not os.path.exists(self._outbox):
         self.log.error("HSM_Mailbox create failed - outbox %s does not "
                        "exist" % repr(self._outbox))
         raise RuntimeError("HSM_Mailbox create failed - outbox %s does "
                            "not exist" % repr(self._outbox))
     self._mailman = HSM_MailMonitor(self._inbox, self._outbox, hostID,
                                     self._queue, monitorInterval)
     self.log.debug('HSM_MailboxMonitor created for pool %s' % self._poolID)
Example #14
    def _setupVdsConnection(self):
        if self.hibernating:
            return

        # FIXME: The port will depend on the binding being used.
        # This assumes xmlrpc
        hostPort = vdscli.cannonizeHostPort(
            self._dst,
            config.get('addresses', 'management_port'))
        self.remoteHost, _ = hostPort.rsplit(':', 1)

        if config.getboolean('vars', 'ssl'):
            self.destServer = vdscli.connect(
                hostPort,
                useSSL=True,
                TransportClass=kaxmlrpclib.TcpkeepSafeTransport)
        else:
            self.destServer = kaxmlrpclib.Server('http://' + hostPort)
        self.log.debug('Destination server is: ' + hostPort)
        try:
            self.log.debug('Initiating connection with destination')
            status = self.destServer.getVmStats(self._vm.id)
            if not status['status']['code']:
                self.log.error("Machine already exists on the destination")
                self.status = errCode['exist']
        except Exception:
            self.log.error("Error initiating connection", exc_info=True)
            self.status = errCode['noConPeer']
Example #15
 def start(self):
     self.vdscli = vdscli.connect()
     self.netinfo = self._get_netinfo()
     if config.get("vars", "net_persistence") == "unified":
         self.config = RunningConfig()
     else:
         self.config = None
Example #16
 def _loadBindingJsonRpc(self):
     from BindingJsonRpc import BindingJsonRpc
     from Bridge import DynamicBridge
     ip = config.get('addresses', 'management_ip')
     port = config.getint('addresses', 'json_port')
     conf = [('tcp', {"ip": ip, "port": port})]
     self.bindings['json'] = BindingJsonRpc(DynamicBridge(), conf)
Example #17
 def __init__(self, inRollback=False):
     self.unifiedPersistence = \
         config.get('vars', 'net_persistence') == 'unified'
     super(Ifcfg, self).__init__(ConfigWriter(self.unifiedPersistence),
                                 inRollback)
     if self.unifiedPersistence:
         self.runningConfig = RunningConfig()
Example #18
def _get_persistence_module():
    persistence = config.get('vars', 'net_persistence')
    if persistence == 'unified':
        return netconfpersistence
    else:
        from .configurators import ifcfg
        return ifcfg
Example #19
def _del_broken_network(network, netAttr, configurator):
    """
    Adapts the network information of broken networks so that they can be
    deleted via _del_network.
    """
    _netinfo = CachingNetInfo()
    _netinfo.networks[network] = netAttr
    _netinfo.networks[network]['dhcpv4'] = False

    if _netinfo.networks[network]['bridged']:
        try:
            nets = configurator.runningConfig.networks
        except AttributeError:
            nets = {}  # ifcfg does not need net definitions
        _netinfo.networks[network]['ports'] = _persistence.configuredPorts(
            nets, network)
    elif not os.path.exists('/sys/class/net/' + netAttr['iface']):
        # Bridgeless broken network without underlying device
        libvirt.removeNetwork(network)
        if config.get('vars', 'net_persistence') == 'unified':
            configurator.runningConfig.removeNetwork(network)
        return
    canonicalize_networks({network: _netinfo.networks[network]})
    _del_network(network, configurator, bypass_validation=True,
                 _netinfo=_netinfo)
Example #20
 def __init__(self, net_info, inRollback=False):
     is_unipersistence = config.get('vars', 'net_persistence') == 'unified'
     super(Ifcfg, self).__init__(ConfigWriter(),
                                 net_info,
                                 is_unipersistence,
                                 inRollback)
     self.runningConfig = RunningConfig()
Example #21
 def wrapped(network, configurator, **kwargs):
     if config.get('vars', 'net_persistence') == 'unified':
         if func.__name__ == '_del_network':
             configurator.runningConfig.removeNetwork(network)
         else:
             configurator.runningConfig.setNetwork(network, kwargs)
     return func(network, configurator, **kwargs)
Example #22
 def _backup(self, filename):
     self._atomicBackup(filename)
     if config.get('vars', 'net_persistence') != 'unified':
         self._persistentBackup(filename)
     elif not self._ownedIfcfg(filename):
         # Backup non-VDSM network devices (BZ#1188251)
         self._persistentBackup(filename)
Example #23
 def _send_notification(self, message):
     try:
         self.bindings['jsonrpc'].reactor.server.send(
             message, config.get('addresses', 'event_queue'))
     except KeyError:
         self.log.warning("Attempt to send an event when jsonrpc binding"
                          " not available")
Example #24
    def _setupVdsConnection(self):
        if self.hibernating:
            return

        hostPort = vdscli.cannonizeHostPort(
            self._dst,
            config.getint('addresses', 'management_port'))
        self.remoteHost, port = hostPort.rsplit(':', 1)

        try:
            client = self._createClient(port)
            requestQueues = config.get('addresses', 'request_queues')
            requestQueue = requestQueues.split(",")[0]
            self._destServer = jsonrpcvdscli.connect(requestQueue, client)
            self.log.debug('Initiating connection with destination')
            self._destServer.ping()

        except (JsonRpcBindingsError, JsonRpcNoResponseError):
            if config.getboolean('vars', 'ssl'):
                self._destServer = vdscli.connect(
                    hostPort,
                    useSSL=True,
                    TransportClass=kaxmlrpclib.TcpkeepSafeTransport)
            else:
                self._destServer = kaxmlrpclib.Server('http://' + hostPort)

        self.log.debug('Destination server is: ' + hostPort)
Example #25
 def __init__(self, vm, dst='', dstparams='',
              mode=MODE_REMOTE, method=METHOD_ONLINE,
              tunneled=False, dstqemu='', abortOnError=False,
              compressed=False, autoConverge=False, **kwargs):
     self.log = vm.log
     self._vm = vm
     self._dst = dst
     self._mode = mode
     if method != METHOD_ONLINE:
         self.log.warning(
             'migration method %s is deprecated, forced to "online"',
             method)
     self._dstparams = dstparams
     self._machineParams = {}
     self._tunneled = utils.tobool(tunneled)
     self._abortOnError = utils.tobool(abortOnError)
     self._dstqemu = dstqemu
     self._downtime = kwargs.get('downtime') or \
         config.get('vars', 'migration_downtime')
     self._autoConverge = autoConverge
     self._compressed = compressed
     self.status = {
         'status': {
             'code': 0,
             'message': 'Migration in progress'}}
     self._progress = 0
     threading.Thread.__init__(self)
     self._preparingMigrationEvt = True
     self._migrationCanceledEvt = False
     self._monitorThread = None
Example #26
    def __init__(self, irs, log, scheduler):
        """
        Initialize the (single) clientIF instance

        :param irs: a Dispatcher object to be used as this object's irs.
        :type irs: :class:`storage.dispatcher.Dispatcher`
        :param log: a log object to be used for this object's logging.
        :type log: :class:`logging.Logger`
        """
        self.vmContainerLock = threading.Lock()
        self._networkSemaphore = threading.Semaphore()
        self._shutdownSemaphore = threading.Semaphore()
        self.irs = irs
        if self.irs:
            self._contEIOVmsCB = partial(clientIF.contEIOVms, proxy(self))
            self.irs.registerDomainStateChangeCallback(self._contEIOVmsCB)
        self.log = log
        self._recovery = True
        self.channelListener = Listener(self.log)
        self._generationID = str(uuid.uuid4())
        self.mom = None
        self.bindings = {}
        self._broker_client = None
        self._subscriptions = defaultdict(list)
        self._scheduler = scheduler
        if _glusterEnabled:
            self.gluster = gapi.GlusterApi(self, log)
        else:
            self.gluster = None
        try:
            self.vmContainer = {}
            self._hostStats = sampling.HostStatsThread(
                sampling.host_samples)
            self._hostStats.start()
            self.lastRemoteAccess = 0
            self._enabled = True
            self._netConfigDirty = False
            self._prepareMOM()
            secret.clear()
            concurrent.thread(self._recoverThread, name='clientIFinit').start()
            self.channelListener.settimeout(
                config.getint('vars', 'guest_agent_timeout'))
            self.channelListener.start()
            self.threadLocal = threading.local()
            self.threadLocal.client = ''

            host = config.get('addresses', 'management_ip')
            port = config.getint('addresses', 'management_port')

            self._createAcceptor(host, port)
            self._prepareXMLRPCBinding()
            self._prepareJSONRPCBinding()
            self._connectToBroker()
        except:
            self.log.error('failed to init clientIF, '
                           'shutting down storage dispatcher')
            if self.irs:
                self.irs.prepareForShutdown()
            raise
Example #27
 def __init__(self):
     self.vdscli = vdscli.connect()
     self.netinfo = \
         netinfo.NetInfo(self.vdscli.getVdsCapabilities()['info'])
     if config.get('vars', 'net_persistence') == 'unified':
         self.config = RunningConfig()
     else:
         self.config = None
Example #28
 def __init__(self,
              tpSize=config.getint('irs', 'thread_pool_size'),
              waitTimeout=3,
              maxTasks=config.getint('irs', 'max_tasks')):
     self.storage_repository = config.get('irs', 'repository')
     self.tp = ThreadPool(tpSize, waitTimeout, maxTasks)
     self._tasks = {}
     self._unqueuedTasks = []
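
Note that these keyword defaults are evaluated once, at import time, so the thread pool sizes are frozen even if the configuration is reloaded later. A late-binding variant that reads the config on every instantiation (an illustrative rewrite, not vdsm's code):

def __init__(self, tpSize=None, waitTimeout=3, maxTasks=None):
    # Read the config at call time instead of definition time.
    if tpSize is None:
        tpSize = config.getint('irs', 'thread_pool_size')
    if maxTasks is None:
        maxTasks = config.getint('irs', 'max_tasks')
    self.storage_repository = config.get('irs', 'repository')
    self.tp = ThreadPool(tpSize, waitTimeout, maxTasks)
    self._tasks = {}
    self._unqueuedTasks = []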
Example #29
def _getPersistenceModule():
    persistence = config.get("vars", "net_persistence")
    if persistence == "unified":
        return netconfpersistence
    else:
        from .configurators import ifcfg

        return ifcfg
Example #30
 def __init__(self, path=None):
     self._path = (
         config.get('sampling', 'collectd_sock_path')
         if path is None else path
     )
     self._sock = None
     self._fobjr = None
     self._fobjw = None
Example #31
    def setPolicyParameters(self, key_value_store):
        # mom.setNamedPolicy will raise an exception on failure.

        # Prepare in-memory policy file with tuning variables
        # this might need to convert certain python types to proper MoM
        # policy language
        self._policy.update(key_value_store)

        # Python bool values are defined in 00-defines.policy so need no
        # conversion here
        policy_string = "\n".join(
            ["(set %s %r)" % (k, v) for k, v in self._policy.iteritems()])

        try:
            self._mom.setNamedPolicy(config.get("mom", "tuning_policy"),
                                     policy_string)
        except (AttributeError, socket.error):
            self.log.warning("MOM not available, Policy could not be set.")
Example #32
    def _set_cache(self):
        # default
        self.cache = config.get('vars', 'qemu_drive_cache')

        # do we need overrides?
        if self.transientDisk:
            # Force the cache to be writethrough, which is qemu's default.
            # This ensures we never use cache=none for transient disks: we
            # create them in /var/run/vdsm, which may end up on tmpfs where
            # O_DIRECT is not supported, and qemu uses O_DIRECT when
            # cache=none, so hotplug could fail with an error that can take
            # an eternity to debug.
            self.cache = "writethrough"
        elif self.iface == 'virtio':
            try:
                self.cache = self.vm_custom['viodiskcache']
            except KeyError:
                pass  # Ignore if custom disk cache is missing
Example #33
 def __init__(self,
              vm,
              dst='',
              dstparams='',
              mode=MODE_REMOTE,
              method=METHOD_ONLINE,
              tunneled=False,
              dstqemu='',
              abortOnError=False,
              consoleAddress=None,
              compressed=False,
              autoConverge=False,
              **kwargs):
     self.log = vm.log
     self._vm = vm
     self._dst = dst
     self._mode = mode
     if method != METHOD_ONLINE:
         self.log.warning(
             'migration method %s is deprecated, forced to "online"',
             method)
     self._dstparams = dstparams
     self._machineParams = {}
     self._tunneled = utils.tobool(tunneled)
     self._abortOnError = utils.tobool(abortOnError)
     self._consoleAddress = consoleAddress
     self._dstqemu = dstqemu
     self._downtime = kwargs.get('downtime') or \
         config.get('vars', 'migration_downtime')
     self._autoConverge = autoConverge
     self._compressed = compressed
     self.status = {
         'status': {
             'code': 0,
             'message': 'Migration in progress'
         }
     }
     self._progress = 0
     threading.Thread.__init__(self)
     self._preparingMigrationEvt = True
     self._migrationCanceledEvt = False
     self._monitorThread = None
Example #34
    def __init__(self, log, **kwargs):
        if not kwargs.get('serial'):
            # Slice the looked-up image ID, not the 'imageID' key itself.
            self.serial = (kwargs.get('imageID') or '')[-20:]
        self._path = None
        super(Drive, self).__init__(log, **kwargs)
        if not hasattr(self, 'vm_custom'):
            self.vm_custom = {}
        self.device = getattr(self, 'device', 'disk')
        # Keep sizes as int
        self.reqsize = int(kwargs.get('reqsize', '0'))  # Backward compatible
        self.truesize = int(kwargs.get('truesize', '0'))
        self.apparentsize = int(kwargs.get('apparentsize', '0'))
        self.name = makeName(self.iface, self.index)
        self.cache = config.get('vars', 'qemu_drive_cache')
        self.discard = kwargs.get('discard', False)

        self._blockDev = None  # Lazy initialized

        self._customize()
        self._setExtSharedState()
Example #35
    def _legacy_convergence_schedule(self, max_downtime):
        # Simplified emulation of legacy non-scheduled migrations.
        if max_downtime is None:
            max_downtime = config.get('vars', 'migration_downtime')
        max_downtime = int(max_downtime)
        max_steps = config.getint('vars', 'migration_downtime_steps')
        downtimes = exponential_downtime(max_downtime, max_steps)

        def downtime_action(downtime):
            return {'params': [str(downtime)], 'name': 'setDowntime'}
        init = [downtime_action(next(downtimes))]
        stalling = []
        limit = 1
        for d in downtimes:
            stalling.append({'action': downtime_action(d), 'limit': limit})
            limit += 1
        stalling.append({'action': downtime_action(d), 'limit': 42})
        stalling.append({'action': {'params': [], 'name': 'abort'},
                         'limit': -1})
        return {'init': init, 'stalling': stalling}
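
exponential_downtime is consumed here as a generator yielding max_steps increasing downtime values that end near max_downtime. A sketch consistent with that usage (vdsm's exact formula may differ):

def exponential_downtime(downtime, steps):
    # Yield `steps` exponentially growing values ending at `downtime`.
    if steps > 1:
        factor = downtime ** (1.0 / (steps - 1))
        for i in range(steps):
            yield int(round(factor ** i))
    else:
        yield downtime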
Example #36
def zero(device_path, size=None, task=_NullTask()):
    """
    Zero a block device.

    Arguments:
        device_path (str): Path to block device to wipe
        size (int): Number of bytes to write. If not specified, use the device
            size. Size must be aligned to `vdsm.storage.constants.BLOCK_SIZE`.
        task (`storage.task.Task`): Task running this operation. If specified,
            the zero operation will be aborted if the task is aborted.

    Raises:
        `vdsm.common.exception.ActionStopped` if the wipe was aborted
        `vdsm.storage.exception.VolumesZeroingError` if writing to storage
            failed.
        `vdsm.storage.exception.InvalidParameterException` if size is not
            aligned to `vdsm.storage.constants.BLOCK_SIZE`.
    """
    if size is None:
        # Always aligned to LVM extent size (128MiB).
        size = fsutils.size(device_path)
    elif size % sc.BLOCK_SIZE:
        raise se.InvalidParameterException("size", size)

    log.info("Zeroing device %s (size=%d)", device_path, size)
    with utils.stopwatch("Zero device %s" % device_path,
                         level=logging.INFO,
                         log=log):
        zero_method = config.get('irs', 'zero_method')
        try:
            if zero_method == "blkdiscard":
                _zero_blkdiscard(device_path, size, task)
            elif zero_method == "dd":
                _zero_dd(device_path, size, task)
            else:
                raise exception.InvalidConfiguration(
                    reason="Unsupported value for irs:zero_method",
                    zero_method=zero_method)
        except se.StorageException as e:
            raise se.VolumesZeroingError("Zeroing device %s failed: %s" %
                                         (device_path, e))
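
Example invocations, following the contract in the docstring (the device path is illustrative):

# Wipe the whole device, using the size reported for it:
zero("/dev/mapper/vg0-lv0")

# Wipe only the first 128 MiB (the size must be aligned to
# vdsm.storage.constants.BLOCK_SIZE):
zero("/dev/mapper/vg0-lv0", size=128 * 1024**2)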
Example #37
def __set_cpu_affinity():
    cpu_affinity = config.get('vars', 'cpu_affinity')
    if cpu_affinity == "":
        return

    online_cpus = taskset.online_cpus()

    log = logging.getLogger('vds')

    if len(online_cpus) == 1:
        log.debug('Only one cpu detected: affinity disabled')
        return

    if cpu_affinity.lower() == taskset.AUTOMATIC:
        cpu_set = frozenset((taskset.pick_cpu(online_cpus), ))
    else:
        cpu_set = frozenset(
            int(cpu.strip()) for cpu in cpu_affinity.split(","))

    log.info('VDSM will run with cpu affinity: %s', cpu_set)
    taskset.set(os.getpid(), cpu_set, all_tasks=True)
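
The accepted cpu_affinity values follow from the code above: an empty string disables pinning, the taskset.AUTOMATIC keyword picks one online CPU, and a comma-separated list pins explicitly. The list parsing in isolation:

cpu_affinity = "1, 3"  # illustrative config value
cpu_set = frozenset(int(cpu.strip()) for cpu in cpu_affinity.split(","))
print(cpu_set)  # frozenset({1, 3})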
Example #38
def _getConfiguratorClass():
    configurator = config.get('vars', 'net_configurator')
    if configurator == 'iproute2':
        from .configurators.iproute2 import Iproute2
        return Iproute2
    elif configurator == 'pyroute2':
        try:
            from .configurators.pyroute_two import PyrouteTwo
            return PyrouteTwo
        except ImportError:
            logging.error('pyroute2 library for %s configurator is missing. '
                          'Use ifcfg instead.', configurator)
            from .configurators.ifcfg import Ifcfg
            return Ifcfg

    else:
        if configurator != 'ifcfg':
            logging.warn('Invalid config for network configurator: %s. '
                         'Use ifcfg instead.', configurator)
        from .configurators.ifcfg import Ifcfg
        return Ifcfg
Example #39
 def _maybe_connect_iser(self):
     """
     Tries to connect to the storage server over iSER.
     This applies if 'iser' is in the configuration option
     'iscsi_default_ifaces'.
     """
     # FIXME: remove this method when iface selection is in higher interface
     try:
         self._iface.initiatorName
     except KeyError:
         ifaces = config.get('irs', 'iscsi_default_ifaces').split(',')
         if 'iser' in ifaces:
             orig_iface = self._iface
             self._iface = iscsi.IscsiInterface('iser')
             try:
                 self._connect_iscsi()
                 self._disconnect_iscsi()
             except:
                 log.warning(
                     "Cannot connect to storage over iSER, using original "
                     "iface %r", orig_iface)
                 self._iface = orig_iface
Example #40
def restore(force):
    if not force and _nets_already_restored(NETS_RESTORED_MARK):
        logging.info('networks already restored. doing nothing.')
        return

    _restore_sriov_numvfs()
    unified = config.get('vars', 'net_persistence') == 'unified'
    logging.info('starting network restoration.')
    try:
        if unified:
            unified_restoration()
        else:
            ifcfg_restoration()
            _copy_persistent_over_running_config()
    except Exception:
        logging.exception('%s restoration failed.',
                          'unified' if unified else 'ifcfg')
        raise
    else:
        logging.info('restoration completed successfully.')

    touch_file(NETS_RESTORED_MARK)
Example #41
 def __init__(self, inbox, outbox, hostID, queue, monitorInterval):
     # Save arguments
     tpSize = config.getint('irs', 'thread_pool_size') // 2
     waitTimeout = 3
     maxTasks = config.getint('irs', 'max_tasks')
     self.tp = ThreadPool(tpSize, waitTimeout, maxTasks)
     self._stop = False
     self._flush = False
     self._queue = queue
     self._activeMessages = {}
     self._monitorInterval = monitorInterval
     self._hostID = int(hostID)
     self._used_slots_array = [0] * MESSAGES_PER_MAILBOX
     self._outgoingMail = EMPTYMAILBOX
     self._incomingMail = EMPTYMAILBOX
     # TODO: add support for multiple paths (multiple mailboxes)
     self._spmStorageDir = config.get('irs', 'repository')
     self._inCmd = [constants.EXT_DD,
                    'if=' + str(inbox),
                    'iflag=direct,fullblock',
                    'bs=' + str(BLOCK_SIZE),
                    'count=' + str(BLOCKS_PER_MAILBOX),
                    'skip=' + str(self._hostID * BLOCKS_PER_MAILBOX)
                    ]
     self._outCmd = [constants.EXT_DD,
                     'of=' + str(outbox),
                     'iflag=fullblock',
                     'oflag=direct',
                     'conv=notrunc',
                     'bs=' + str(BLOCK_SIZE),
                     'seek=' + str(self._hostID * BLOCKS_PER_MAILBOX)
                     ]
     self._init = False
     self._initMailbox()  # Read initial mailbox state
     self._msgCounter = 0
     self._sendMail()  # Clear outgoing mailbox
     self._thread = concurrent.thread(self.run, name="mailbox.HSMMonitor",
                                      logger=self.log.name)
     self._thread.start()
Example #42
def init_nets():
    persistence = config.get('vars', 'net_persistence')
    if persistence != 'unified':
        logging.info('Skipping: Unified persistence is not used.')
        return

    if _nets_already_restored(NETS_RESTORED_MARK):
        logging.info('Skipping: Networks were already restored.')
        return

    logging.info('Starting initial network setup.')

    persistent_config = PersistentConfig()

    nets = _persisted_ovs_entries(persistent_config.networks)
    logging.info('Restoring networks configuration: {}'.format(nets))
    _set_blocking_dhcp(nets)

    bonds = _persisted_ovs_entries(persistent_config.bonds)
    logging.info('Restoring bondings configuration: {}'.format(bonds))

    for net, attrs in six.iteritems(nets):
        with _try2execute('IPv6autoconf for {} failed.'.format(net)):
            netswitch.setup_ipv6autoconf({net: attrs})

    for net, attrs in six.iteritems(nets):
        with _try2execute('Setting links up for {} failed.'.format(net)):
            netswitch.set_ovs_links_up({net: attrs}, {}, {})

    for bond, attrs in six.iteritems(bonds):
        with _try2execute('Setting links up for {} failed.'.format(bond)):
            netswitch.set_ovs_links_up({}, {bond: attrs}, {})

    for net, attrs in six.iteritems(nets):
        with _try2execute('IP config for {} failed.'.format(net)):
            netswitch.setup_ovs_ip_config({net: attrs}, {})

    logging.info('Initial network setup is done.')
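
_try2execute is used above as a context manager that logs the given message on failure and lets the restoration loop continue. A plausible sketch of it (illustrative, not necessarily the project's exact helper):

from contextlib import contextmanager
import logging

@contextmanager
def _try2execute(message):
    # Log the failure message with the traceback and swallow the error.
    try:
        yield
    except Exception:
        logging.exception(message)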
Example #43
    def wrapped(*args, **kwargs):
        if not config.get('vars', 'net_persistence') == 'unified':
            return func(*args, **kwargs)

        # Get args and kwargs in a single dictionary
        attrs = kwargs.copy()
        attrs.update(dict(zip(spec.args, args)))

        isolatedCommand = attrs.get('configurator') is None
        # Detect if we are running an isolated command, i.e., a command
        # that is not called as part of a composed API operation like
        # setupNetworks or editNetwork, but rather as its own API verb.
        # This is necessary in order to maintain the behavior of the
        # addNetwork and delNetwork API verbs.
        if isolatedCommand:
            attrs['configurator'] = configurator = ConfiguratorClass()
            configurator.begin()
        else:
            configurator = attrs['configurator']

        ret = func(**attrs)

        nics = attrs.pop('nics', None)
        # Bond config is handled in the configurator so that operations
        # touching only bonds don't need special casing and this logic
        # stays simpler.
        if not attrs.get('bonding'):
            if nics:
                attrs['nic'], = nics

        if func.__name__ == '_delNetwork':
            configurator.runningConfig.removeNetwork(attrs.pop('network'))
        else:
            configurator.runningConfig.setNetwork(attrs.pop('network'), attrs)
        if isolatedCommand:  # Commit the no-rollback transaction.
            configurator.commit()
        return ret
Example #44
 def __init__(self, vm, dst='', dstparams='',
              mode='remote', method='online',
              tunneled=False, dstqemu='', abortOnError=False, **kwargs):
     self.log = vm.log
     self._vm = vm
     self._dst = dst
     self._mode = mode
     self._method = method
     self._dstparams = dstparams
     self._machineParams = {}
     self._tunneled = utils.tobool(tunneled)
     self._abortOnError = utils.tobool(abortOnError)
     self._dstqemu = dstqemu
     self._downtime = kwargs.get('downtime') or \
         config.get('vars', 'migration_downtime')
     self.status = {
         'status': {
             'code': 0,
             'message': 'Migration in progress'},
         'progress': 0}
     threading.Thread.__init__(self)
     self._preparingMigrationEvt = True
     self._migrationCanceledEvt = False
     self._monitorThread = None
Example #45
def main(domain, event, phase, stdin=sys.stdin, stdout=sys.stdout):
    if not tobool(config.get('vars', 'migration_ovs_hook_enabled')):
        sys.exit(0)

    if event not in ('migrate', 'restore'):
        sys.exit(0)

    with _logging(_DEBUG_MODE) as log:
        if log:
            print('\nHook input args are:', domain, event, phase, file=log)

        tree = ET.parse(stdin)

        try:
            _process_domxml(tree)
        except:
            traceback.print_exc(file=log)
            raise

        tree.write(stdout)

        if log:
            tree.write(log)
            print('\nEnd of hook', file=log)
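
tobool here (and utils.tobool in the migration examples above) maps common config spellings onto a boolean. A plausible sketch under that assumption:

def tobool(s):
    # Accept booleans, 'true'/'True', and numeric strings; anything else
    # (including None) is False. Illustrative, not vdsm's exact code.
    if s is None:
        return False
    if isinstance(s, bool):
        return s
    if isinstance(s, str) and s.lower() == 'true':
        return True
    try:
        return bool(int(s))
    except (ValueError, TypeError):
        return False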
Example #46
 def __init__(self, obj):
     self._obj = obj
     self.storage_repository = config.get('irs', 'repository')
     self._exposeFunctions(obj)
     self.log.info("Starting StorageDispatcher...")
Example #47
    def __init__(self, irs, log, scheduler):
        """
        Initialize the (single) clientIF instance

        :param irs: a Dispatcher object to be used as this object's irs.
        :type irs: :class:`vdsm.storage.dispatcher.Dispatcher`
        :param log: a log object to be used for this object's logging.
        :type log: :class:`logging.Logger`
        """
        self.vmContainerLock = threading.Lock()
        self._networkSemaphore = threading.Semaphore()
        self._shutdownSemaphore = threading.Semaphore()
        self.irs = irs
        if self.irs:
            self._contEIOVmsCB = partial(clientIF.contEIOVms, proxy(self))
            self.irs.registerDomainStateChangeCallback(self._contEIOVmsCB)
        self.log = log
        self._recovery = True
        # TODO: The guest agent related code is spread around too much.
        # There is QemuGuestAgentPoller and ChannelListener here, and then
        # many instances of GuestAgent per VM in vm.py. This should be
        # refactored and operated by a single object. Ideally the
        # distinction between what is served by QEMU-GA and what is served
        # by oVirt GA should not be visible to the rest of the code.
        self.channelListener = Listener(self.log)
        self.qga_poller = QemuGuestAgentPoller(self, log, scheduler)
        self.mom = None
        self.servers = {}
        self._broker_client = None
        self._subscriptions = defaultdict(list)
        self._scheduler = scheduler
        self._unknown_vm_ids = set()
        if _glusterEnabled:
            self.gluster = gapi.GlusterApi()
        else:
            self.gluster = None
        try:
            self.vmContainer = {}
            self.lastRemoteAccess = 0
            self._enabled = True
            self._netConfigDirty = False
            self.mom = MomClient(config.get("mom", "socket_path"))
            self.mom.connect()
            secret.clear()
            concurrent.thread(self._recoverThread, name='vmrecovery').start()
            self.channelListener.settimeout(
                config.getint('vars', 'guest_agent_timeout'))
            self.channelListener.start()
            self.qga_poller.start()
            self.threadLocal = threading.local()
            self.threadLocal.client = ''

            host = config.get('addresses', 'management_ip')
            port = config.getint('addresses', 'management_port')

            # When IPv6 is not enabled, fallback to listen on IPv4 address
            try:
                self._createAcceptor(host, port)
            except socket.error as e:
                if e.errno == errno.EAFNOSUPPORT and host in ('::', '::1'):
                    fallback_host = '0.0.0.0'
                    self._createAcceptor(fallback_host, port)
                else:
                    raise

            self._prepareHttpServer()
            self._prepareJSONRPCServer()
            self._connectToBroker()
        except:
            self.log.error('failed to init clientIF, '
                           'shutting down storage dispatcher')
            if self.irs:
                self.irs.prepareForShutdown()
            raise
Example #48
 def _send_notification(message):
     json_binding.reactor.server.send(
         message, config.get('addresses', 'event_queue'))
Example #49
class StorageDomain(object):
    log = logging.getLogger("storage.StorageDomain")
    mdBackupVersions = config.get('irs', 'md_backup_versions')
    mdBackupDir = config.get('irs', 'md_backup_dir')
    manifestClass = StorageDomainManifest

    supported_block_size = ()
    # Default supported domain versions unless overridden
    supported_versions = sc.SUPPORTED_DOMAIN_VERSIONS

    def __init__(self, manifest):
        self._manifest = manifest
        # Do not allow attaching SD with an unsupported version
        self.validate_version(manifest.getVersion())
        self._lock = threading.Lock()

    # Life cycle

    def setup(self):
        """
        Called after storage domain is produced in the storage domain monitor.
        """

    def teardown(self):
        """
        Called after storage domain monitor finished and will never access the
        storage domain object.
        """

    # Other

    @property
    def sdUUID(self):
        return self._manifest.sdUUID

    @property
    def alignment(self):
        return self._manifest.alignment

    @property
    def block_size(self):
        return self._manifest.block_size

    @property
    def domaindir(self):
        return self._manifest.domaindir

    @property
    def _metadata(self):
        # TODO: Remove this once refactoring is complete and it has no callers
        return self._manifest._metadata

    @property
    def mountpoint(self):
        return self._manifest.mountpoint

    @property
    def manifest(self):
        return self._manifest

    def replaceMetadata(self, md):
        """
        Used by FormatConverter to replace the metadata reader/writer
        """
        self._manifest.replaceMetadata(md)

    def getMonitoringPath(self):
        return self._manifest.getMonitoringPath()

    def getVolumeSize(self, imgUUID, volUUID):
        """
        Return VolumeSize named tuple for specified volume.
        """
        return self._manifest.getVolumeSize(imgUUID, volUUID)

    @deprecated
    def getVSize(self, imgUUID, volUUID):
        """
        Return volume apparent size.

        Deprecated - use getVolumeSize().apparentsize instead.
        """
        return self._manifest.getVSize(imgUUID, volUUID)

    @deprecated
    def getVAllocSize(self, imgUUID, volUUID):
        """
        Return volume true size.

        Deprecated - use getVolumeSize().truesize instead.
        """
        return self._manifest.getVAllocSize(imgUUID, volUUID)

    def deleteImage(self, sdUUID, imgUUID, volsImgs):
        self._manifest.deleteImage(sdUUID, imgUUID, volsImgs)

    def purgeImage(self, sdUUID, imgUUID, volsImgs, discard):
        self._manifest.purgeImage(sdUUID, imgUUID, volsImgs, discard)

    def getAllImages(self):
        return self._manifest.getAllImages()

    def getAllVolumes(self):
        return self._manifest.getAllVolumes()

    def dump(self):
        return self._manifest.dump()

    def iter_volumes(self):
        """
        Iterate over all volumes.

        Yields:
            Volume instance
        """
        all_volumes = self.getAllVolumes()
        for vol_id, (img_ids, _) in six.iteritems(all_volumes):
            # The first img_id is the id of the template or the only image
            # where the volume id appears.
            img_id = img_ids[0]

            yield self.produceVolume(img_id, vol_id)

    def prepareMailbox(self):
        """
        This method has been introduced in order to prepare the mailbox
        on those domains where the metadata for the inbox and outbox
        wasn't allocated on creation.
        """

    @property
    def supportsMailbox(self):
        return True

    @property
    def supportsSparseness(self):
        """
        This property advertises whether the storage domain supports
        sparseness or not.
        """
        return self._manifest.supportsSparseness

    def recommends_unordered_writes(self, format):
        return self._manifest.recommends_unordered_writes(format)

    @property
    def oop(self):
        return self._manifest.oop

    def qcow2_compat(self):
        return self._manifest.qcow2_compat()

    def _makeClusterLock(self, domVersion=None):
        return self._manifest._makeDomainLock(domVersion)

    @classmethod
    def create(cls,
               sdUUID,
               domainName,
               domClass,
               typeSpecificArg,
               version,
               block_size=sc.BLOCK_SIZE_512,
               max_hosts=sc.HOSTS_4K_1M):
        """
        Create a storage domain. The initial status is unattached.
        The storage domain's underlying storage must be visible (connected)
        at that point.
        """
        pass

    @classmethod
    def _validate_block_size(cls, block_size, version):
        """
        Validate that block size can be used with this storage domain class.
        """
        if version < 5:
            if block_size != sc.BLOCK_SIZE_512:
                raise se.InvalidParameterException('block_size', block_size)
        else:
            if block_size not in cls.supported_block_size:
                raise se.InvalidParameterException('block_size', block_size)

    @classmethod
    def _validate_storage_block_size(cls, block_size, storage_block_size):
        """
        Validate that block size matches storage block size, returning the
        block size that should be used with this storage.
        """
        # If we cannot detect the storage block size, use the user block size
        # or fallback to safe default.
        if storage_block_size == sc.BLOCK_SIZE_NONE:
            if block_size != sc.BLOCK_SIZE_AUTO:
                return block_size
            else:
                return sc.BLOCK_SIZE_512

        # If we can detect the storage block size and the user does not care
        # about it, use it.
        if block_size == sc.BLOCK_SIZE_AUTO:
            return storage_block_size

        # Otherwise verify that the user block size matches the storage block
        # size.
        if block_size == storage_block_size:
            return block_size

        raise se.StorageDomainBlockSizeMismatch(block_size, storage_block_size)
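
    # Worked examples of the negotiation above, derived from the code
    # (512 stands for sc.BLOCK_SIZE_512, 4096 for a detected 4k storage):
    #   _validate_storage_block_size(BLOCK_SIZE_AUTO, 4096) -> 4096
    #   _validate_storage_block_size(512, BLOCK_SIZE_NONE)  -> 512
    #   _validate_storage_block_size(512, 4096) -> raises
    #       se.StorageDomainBlockSizeMismatch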

    @classmethod
    def validate_version(cls, version):
        if version not in cls.supported_versions:
            raise se.UnsupportedDomainVersion(version)

    def _registerResourceNamespaces(self):
        """
        Register resources namespaces and create
        factories for it.
        """
        # Register image resource namespace
        imageResourceFactory = \
            resourceFactories.ImageResourceFactory(self.sdUUID)
        imageResourcesNamespace = rm.getNamespace(sc.IMAGE_NAMESPACE,
                                                  self.sdUUID)
        try:
            rm.registerNamespace(imageResourcesNamespace, imageResourceFactory)
        except rm.NamespaceRegistered:
            self.log.debug("Resource namespace %s already registered",
                           imageResourcesNamespace)

        volumeResourcesNamespace = rm.getNamespace(sc.VOLUME_NAMESPACE,
                                                   self.sdUUID)
        try:
            rm.registerNamespace(volumeResourcesNamespace,
                                 rm.SimpleResourceFactory())
        except rm.NamespaceRegistered:
            self.log.debug("Resource namespace %s already registered",
                           volumeResourcesNamespace)

    def produceVolume(self, imgUUID, volUUID):
        """
        Produce a type specific Volume object
        """
        return self.getVolumeClass()(self.mountpoint, self.sdUUID, imgUUID,
                                     volUUID)

    @classmethod
    def validateCreateVolumeParams(cls,
                                   volFormat,
                                   srcVolUUID,
                                   diskType=None,
                                   preallocate=None):
        return cls.manifestClass.validateCreateVolumeParams(
            volFormat, srcVolUUID, diskType=diskType, preallocate=preallocate)

    def createVolume(self,
                     imgUUID,
                     capacity,
                     volFormat,
                     preallocate,
                     diskType,
                     volUUID,
                     desc,
                     srcImgUUID,
                     srcVolUUID,
                     initial_size=None):
        """
        Create a new volume
        """
        return self.getVolumeClass().create(self._getRepoPath(),
                                            self.sdUUID,
                                            imgUUID,
                                            capacity,
                                            volFormat,
                                            preallocate,
                                            diskType,
                                            volUUID,
                                            desc,
                                            srcImgUUID,
                                            srcVolUUID,
                                            initial_size=initial_size)

    def getMDPath(self):
        return self._manifest.getMDPath()

    def initSPMlease(self):
        return self._manifest.initDomainLock()

    def getVersion(self):
        return self._manifest.getVersion()

    def getFormat(self):
        return self._manifest.getFormat()

    def getPools(self):
        return self._manifest.getPools()

    def getIdsFilePath(self):
        return self._manifest.getIdsFilePath()

    def getLeasesFilePath(self):
        return self._manifest.getLeasesFilePath()

    def getReservedId(self):
        return self._manifest.getReservedId()

    def acquireHostId(self, hostId, wait=True):
        self._manifest.acquireHostId(hostId, wait)

    def releaseHostId(self, hostId, wait=True, unused=False):
        self._manifest.releaseHostId(hostId, wait, unused)

    def hasHostId(self, hostId):
        return self._manifest.hasHostId(hostId)

    def getHostStatus(self, hostId):
        return self._manifest.getHostStatus(hostId)

    def hasVolumeLeases(self):
        return self._manifest.hasVolumeLeases()

    def getVolumeLease(self, imgUUID, volUUID):
        return self._manifest.getVolumeLease(imgUUID, volUUID)

    def getClusterLease(self):
        return self._manifest.getDomainLease()

    def acquireClusterLock(self, hostID):
        self._manifest.acquireDomainLock(hostID)

    def releaseClusterLock(self):
        self._manifest.releaseDomainLock()

    def inquireClusterLock(self):
        return self._manifest.inquireDomainLock()

    def attach(self, spUUID):
        self.invalidateMetadata()
        pools = self.getPools()
        if spUUID in pools:
            self.log.warn("domain `%s` is already attached to pool `%s`",
                          self.sdUUID, spUUID)
            return

        if len(pools) > 0 and not self.isISO():
            raise se.StorageDomainAlreadyAttached(pools[0], self.sdUUID)

        pools.append(spUUID)
        self.setMetaParam(DMDK_POOLS, pools)

    def detach(self, spUUID):
        self.log.info('detaching storage domain %s from pool %s', self.sdUUID,
                      spUUID)
        self.invalidateMetadata()
        pools = self.getPools()
        try:
            pools.remove(spUUID)
        except ValueError:
            self.log.error(
                "Can't remove pool %s from domain %s pool list %s, "
                "it does not exist", spUUID, self.sdUUID, str(pools))
            return
        # Make sure that ROLE is not MASTER_DOMAIN (just in case)
        with self._metadata.transaction():
            self.changeRole(REGULAR_DOMAIN)
            self.setMetaParam(DMDK_POOLS, pools)
        # Last thing to do is to remove pool from domain
        # do any required cleanup

    # I personally don't think there is a reason to pack these
    # but I already changed too much.
    def changeLeaseParams(self, leaseParamPack):
        self.setMetaParams(leaseParamPack)

    def getLeaseParams(self):
        keys = [
            DMDK_LOCK_RENEWAL_INTERVAL_SEC, DMDK_LEASE_TIME_SEC,
            DMDK_IO_OP_TIMEOUT_SEC, DMDK_LEASE_RETRIES
        ]
        params = {}
        for key in keys:
            params[key] = self.getMetaParam(key)
        return params

    def getMasterDir(self):
        return os.path.join(self.domaindir, MASTER_FS_DIR)

    def invalidate(self):
        """
        Make sure that storage domain is inaccessible
        """
        pass

    def validateMaster(self):
        """Validate that the master storage domain is correct.
        """
        stat = {'mount': True, 'valid': True}
        if not self.isMaster():
            return stat

        # If the host is the SPM, then at this point the masterFS should be
        # mounted. In the HSM case we can return False and the upper logic
        # should handle it.
        if not self.validateMasterMount():
            stat['mount'] = False
            return stat

        pdir = self.getVMsDir()
        if not self.oop.fileUtils.pathExists(pdir):
            stat['valid'] = False
            return stat
        pdir = self.getTasksDir()
        if not self.oop.fileUtils.pathExists(pdir):
            stat['valid'] = False
            return stat

        return stat

    def getVMsDir(self):
        return os.path.join(self.domaindir, MASTER_FS_DIR, VMS_DIR)

    def getTasksDir(self):
        return os.path.join(self.domaindir, MASTER_FS_DIR, TASKS_DIR)

    def getVMsList(self):
        vmsPath = self.getVMsDir()
        # find out VMs list
        VM_PATTERN = os.path.join(vmsPath, sc.UUID_GLOB_PATTERN)
        vms = self.oop.glob.glob(VM_PATTERN)
        vmList = [os.path.basename(i) for i in vms]
        self.log.info("vmList=%s", str(vmList))

        return vmList

    def getVMsInfo(self, vmList=None):
        """
        Get list of VMs with their info from the pool.
        If 'vmList' is given, get info only for those VMs.
        """

        vmsInfo = {}
        vmsPath = self.getVMsDir()

        # Find out relevant VMs
        if not vmList:
            vmList = self.getVMsList()

        self.log.info("vmList=%s", str(vmList))

        for vm in vmList:
            vm_path = os.path.join(vmsPath, vm)
            # If VM doesn't exist, ignore it silently
            if not os.path.exists(vm_path):
                continue
            ovfPath = os.path.join(vm_path, vm + '.ovf')
            if not os.path.lexists(ovfPath):
                raise se.MissingOvfFileFromVM(vm)

            ovf = codecs.open(ovfPath, encoding='utf8').read()
            vmsInfo[vm] = ovf

        return vmsInfo

    def createMasterTree(self):
        """
        Make tasks and vms directories on master directory.
        """
        vmsDir = self.getVMsDir()
        self.log.info("Creating vms dir: %s" % vmsDir)
        self.oop.fileUtils.createdir(vmsDir)
        tasksDir = self.getTasksDir()
        self.log.info("Creating task dir: %s" % tasksDir)
        self.oop.fileUtils.createdir(tasksDir)

    def activate(self):
        """
        Activate a storage domain that is already a member in a storage pool.
        """
        if self.isBackup():
            self.log.info(
                "Storage Domain %s is of type backup, "
                "adding master directory", self.sdUUID)
            self.mountMaster()
            self.createMasterTree()

    def _getRepoPath(self):
        return self._manifest.getRepoPath()

    def getImageDir(self, imgUUID):
        return self._manifest.getImageDir(imgUUID)

    getLinkBCImagePath = getImageDir

    def getImageRundir(self, imgUUID):
        return os.path.join(sc.P_VDSM_STORAGE, self.sdUUID, imgUUID)

    def getIsoDomainImagesDir(self):
        return self._manifest.getIsoDomainImagesDir()

    def supportsUnicode(self):
        return supportsUnicode(self.getVersion())

    def setDescription(self, descr):
        """
        Set storage domain description
            'descr' - domain description
        """
        self.log.info("sdUUID=%s descr=%s", self.sdUUID, descr)
        if not misc.isAscii(descr) and not self.supportsUnicode():
            raise se.UnicodeArgumentException()

        self.setMetaParam(DMDK_DESCRIPTION, descr)

    def getInfo(self):
        """
        Get storage domain info
        """
        info = {}
        info['uuid'] = self.sdUUID
        info['type'] = type2name(self.getMetaParam(DMDK_TYPE))
        info['class'] = class2name(self.getMetaParam(DMDK_CLASS))
        info['name'] = self.getMetaParam(DMDK_DESCRIPTION)
        info['role'] = self.getMetaParam(DMDK_ROLE)
        info['pool'] = self.getPools()
        info['version'] = str(self.getMetaParam(DMDK_VERSION))
        info['block_size'] = self.block_size
        info['alignment'] = self.alignment

        return info

    def getStats(self):
        """
        """
        pass

    def validateMasterMount(self):
        raise NotImplementedError

    def mountMaster(self):
        """
        Mount the master metadata file system. Should be called only by SPM.
        """
        pass

    def unmountMaster(self):
        """
        Unmount the master metadata file system. Should be called only by SPM.
        """
        pass

    def extendVolume(self, volumeUUID, size, isShuttingDown=None):
        pass

    def reduceVolume(self, imgUUID, volumeUUID, allowActive=False):
        pass

    @staticmethod
    def findDomainPath(sdUUID):
        raise NotImplementedError

    def getMetadata(self):
        return self._manifest.getMetadata()

    def setMetadata(self, newMetadata):
        with self._metadata.transaction():
            self._metadata.clear()
            self._metadata.update(newMetadata)

    def invalidateMetadata(self):
        self._metadata.invalidate()

    def getMetaParam(self, key):
        return self._manifest.getMetaParam(key)

    def getStorageType(self):
        return self._manifest.getStorageType()

    def getDomainRole(self):
        return self._manifest.getDomainRole()

    def getDomainClass(self):
        return self._manifest.getDomainClass()

    def getRemotePath(self):
        pass

    def templateRelink(self, imgUUID, volUUID):
        """
        Relink all hardlinks of the template 'volUUID' in all VMs based on it.
        No need to relink template for block domains.
        """
        self.log.debug(
            "Skipping relink of template, domain %s is not file "
            "based", self.sdUUID)

    def changeRole(self, newRole):
        # TODO: Move to a validator?
        if newRole not in [REGULAR_DOMAIN, MASTER_DOMAIN]:
            raise ValueError(newRole)

        self.setMetaParam(DMDK_ROLE, newRole)

    def setMetaParams(self, params):
        self._metadata.update(params)

    def setMetaParam(self, key, value):
        """
        Set new meta data KEY=VALUE pair
        """
        self.setMetaParams({key: value})

    def refreshDirTree(self):
        self._manifest.refreshDirTree()

    def refresh(self):
        self._manifest.refresh()

    def extend(self, devlist, force):
        pass

    def isMaster(self):
        return self.getMetaParam(DMDK_ROLE).capitalize() == MASTER_DOMAIN

    def initMaster(self, spUUID, leaseParams):
        self.invalidateMetadata()
        pools = self.getPools()

        if len(pools) > 1 or (len(pools) == 1 and pools[0] != spUUID):
            raise se.StorageDomainAlreadyAttached(pools[0], self.sdUUID)

        with self._metadata.transaction():
            self.changeLeaseParams(leaseParams)
            self.setMetaParam(DMDK_POOLS, [spUUID])
            self.changeRole(MASTER_DOMAIN)

    def isISO(self):
        return self._manifest.isISO()

    def isBackup(self):
        return self._manifest.isBackup()

    def isData(self):
        return self._manifest.isData()

    def imageGarbageCollector(self):
        """
        Image garbage collector.

        Remove the remnants of removed images; they can sometimes be left
        behind (mostly on NFS) due to lazy file removal.
        """
        pass

    def getVolumeClass(self):
        """
        Return a type specific volume generator object
        """
        raise NotImplementedError

    # External leases support

    @classmethod
    def supports_external_leases(cls, version):
        return cls.manifestClass.supports_external_leases(version)

    @classmethod
    def format_external_leases(cls,
                               lockspace,
                               path,
                               alignment=sc.ALIGNMENT_1M,
                               block_size=sc.BLOCK_SIZE_512):
        """
        Format the special xleases volume.

        Called when creating a new storage domain, or when upgrading storage
        domain to version 4.

        WARNING: destructive operation, must not be called on active external
        leases volume.

        TODO: should move to the storage domain subclasses so each subclass
        can use its own backend.

        Must be called only on the SPM.
        """
        with cls.manifestClass.external_leases_backend(lockspace,
                                                       path) as backend:
            xlease.format_index(lockspace,
                                backend,
                                alignment=alignment,
                                block_size=block_size)

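    # Hedged usage sketch (names hypothetical): on the SPM, a concrete
    # domain class could format a brand-new xleases volume like
    #
    #   SomeDomain.format_external_leases(sd_uuid,  # lockspace == domain UUID
    #                                     "/path/to/xleases",
    #                                     alignment=sc.ALIGNMENT_1M,
    #                                     block_size=sc.BLOCK_SIZE_512)
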
    @classmethod
    def is_block(cls):
        """
        Returns whether a Storage Domain is block-based
        """
        return False

    def external_leases_path(self):
        return self._manifest.external_leases_path()

    def create_external_leases(self):
        """
        Create the external leases special volume.

        Called during upgrade from version 3 to version 4.

        Must be called only on the SPM.
        """
        raise NotImplementedError

    def create_lease(self, lease_id):
        """
        Create an external lease on the external leases volume.

        Must be called only on the SPM.
        """
        with self._manifest.external_leases_lock.exclusive:
            with self._manifest.external_leases_volume() as vol:
                vol.add(lease_id)

    def delete_lease(self, lease_id):
        """
        Delete an external lease on the external leases volume.

        Must be called only on the SPM.
        """
        with self._manifest.external_leases_lock.exclusive:
            with self._manifest.external_leases_volume() as vol:
                vol.remove(lease_id)

    def rebuild_external_leases(self):
        """
        Rebuild the external leases volume index from volume contents.

        Must be called only on the SPM.
        """
        with self._manifest.external_leases_lock.exclusive:
            path = self.external_leases_path()
            backend = xlease.DirectFile(path)
            with utils.closing(backend):
                xlease.rebuild_index(self.sdUUID,
                                     backend,
                                     alignment=self._manifest.alignment,
                                     block_size=self._manifest.block_size)

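    # Hedged usage sketch (lease id hypothetical); both calls below are
    # SPM-only and serialize on the external leases lock:
    #
    #   dom.create_lease(lease_id)   # add a record to the xleases index
    #   dom.delete_lease(lease_id)   # and remove it again
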
    # Images

    def create_image(self, imgUUID):
        """
        Create placeholder for image's volumes
        """
        image_dir = self.getImageDir(imgUUID)
        if not os.path.isdir(image_dir):
            self.log.info("Create placeholder %s for image's volumes",
                          image_dir)
            task_name = "create image rollback: " + imgUUID
            recovery = task.Recovery(task_name, "sd", "StorageDomain",
                                     "create_image_rollback", [image_dir])
            vars.task.pushRecovery(recovery)
            os.mkdir(image_dir)
        return image_dir

    @classmethod
    def create_image_rollback(cls, task, image_dir):
        """
        Remove empty image folder
        """
        cls.log.info("create image rollback (image_dir=%s)", image_dir)
        if os.path.exists(image_dir):
            if not os.listdir(image_dir):
                cls.log.info("Removing image directory %r", image_dir)
                fileUtils.cleanupdir(image_dir)
            else:
                cls.log.error(
                    "create image rollback: Cannot remove dirty "
                    "image (image_dir=%s)", image_dir)

    # Format conversion

    def convert_volumes_metadata(self, target_version):
        """
        Add new keys for version target_version to volumes metadata. The
        operation must be completed by calling finalize_volumes_metadata().

        Must be called before the domain metadata is converted.

        Must be implemented by concrete storage domains.
        """
        raise NotImplementedError

    def convert_metadata(self, target_version):
        """
        Convert domain metadata to version target_version.

        Must be called after convert_volumes_metadata().
        """
        current_version = self.getVersion()

        if not (current_version == 4 and target_version == 5):
            raise RuntimeError(
                "Cannot convert domain {} from version {} to version {}".
                format(self.sdUUID, current_version, target_version))

        self.log.info(
            "Converting domain %s metadata from version %s to version %s",
            self.sdUUID, current_version, target_version)

        with self._metadata.transaction():
            self._metadata[DMDK_VERSION] = target_version

            # V4 domain never supported anything else, no need to probe
            # storage.
            self._metadata[DMDK_BLOCK_SIZE] = sc.BLOCK_SIZE_512
            self._metadata[DMDK_ALIGNMENT] = sc.ALIGNMENT_1M

            # Keys removed in v5; they may exist in block storage domains.
            if DMDK_LOGBLKSIZE in self._metadata:
                del self._metadata[DMDK_LOGBLKSIZE]
            if DMDK_PHYBLKSIZE in self._metadata:
                del self._metadata[DMDK_PHYBLKSIZE]

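    # Illustrative effect of convert_metadata() on a hypothetical v4 domain:
    #   before: DMDK_VERSION=4, DMDK_LOGBLKSIZE/DMDK_PHYBLKSIZE present
    #   after:  DMDK_VERSION=5, DMDK_BLOCK_SIZE=512 and
    #           DMDK_ALIGNMENT=1048576 (1 MiB) added, legacy block-size
    #           keys removed
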
    def finalize_volumes_metadata(self, target_version):
        """
        Rewrite volumes metadata, removing older keys kept during
        convert_volumes_metadata().

        Must be called after the domain version has been converted.

        Must be implemented by concrete storage domains.
        """
        raise NotImplementedError
Example #50
 def _networkBackup(self, network):
     self._atomicNetworkBackup(network)
     if config.get('vars', 'net_persistence') != 'unified':
         self._persistentNetworkBackup(network)
Example #51
import logging
import threading

from xmlrpclib import Transport, dumps, Fault
from contextlib import contextmanager
from itertools import product
from rpc.bindingxmlrpc import BindingXMLRPC, XmlDetector
from yajsonrpc.betterAsyncore import Reactor
from yajsonrpc.stompreactor import StompDetector, StompRpcClient
from yajsonrpc.stomp import (LEGACY_SUBSCRIPTION_ID_REQUEST,
                             LEGACY_SUBSCRIPTION_ID_RESPONSE)
from yajsonrpc import Notification
from protocoldetector import MultiProtocolAcceptor
from rpc.bindingjsonrpc import BindingJsonRpc
from vdsm.config import config
from vdsm import schedule
from vdsm import utils

if config.get('vars', 'ssl_implementation') == 'm2c':
    from integration.m2chelper import DEAFAULT_SSL_CONTEXT
else:
    from integration.sslhelper import DEAFAULT_SSL_CONTEXT

PERMUTATIONS = tuple(product((True, False), ("xml", "stomp")))
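# The product above expands to four (ssl, protocol) permutations:
#   (True, 'xml'), (True, 'stomp'), (False, 'xml'), (False, 'stomp')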

TIMEOUT = 3


class FakeClientIf(object):
    log = logging.getLogger("FakeClientIf")

    def __init__(self, binding, dest):
        self.threadLocal = threading.local()
        self.dest = dest
Example #52
 prioritise_write_locks=1
 wait_for_locks=1
 use_lvmetad=0
}

backup {
 retain_min = 50
 retain_days = 0
}
"""

VAR_RUN_VDSM = constants.P_VDSM_RUN
VDSM_LVM_SYSTEM_DIR = os.path.join(VAR_RUN_VDSM, "lvm")
VDSM_LVM_CONF = os.path.join(VDSM_LVM_SYSTEM_DIR, "lvm.conf")

USER_DEV_LIST = filter(None, config.get("irs", "lvm_dev_whitelist").split(","))


def _buildFilter(devices):
    strippeds = set(d.strip() for d in devices)
    strippeds.discard('')  # Who has put a blank here?
    strippeds = sorted(strippeds)
    dmPaths = [dev.replace(r'\x', r'\\x') for dev in strippeds]
    filt = '|'.join(dmPaths)
    if len(filt) > 0:
        filt = "'a|" + filt + "|', "

    filt = "filter = [ " + filt + "'r|.*|' ]"
    return filt

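# Worked example: _buildFilter(['/dev/mapper/a', ' /dev/mapper/b ']) returns
#   filter = [ 'a|/dev/mapper/a|/dev/mapper/b|', 'r|.*|' ]
# while an empty device list degenerates to the reject-everything filter
#   filter = [ 'r|.*|' ]
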
Example #53
        opts, args = getopt.getopt(sys.argv[1:], 'ht', ['help', 'test'])
    except getopt.GetoptError as err:
        print(str(err))
        _usage()
        sys.exit(1)

    for option, _ in opts:
        if option in ('-h', '--help'):
            _usage()
            sys.exit()
        elif option in ('-t', '--test'):
            _test()
            sys.exit()

    fake_kvm_support = config.getboolean('vars', 'fake_kvm_support')
    fake_kvm_arch = config.get('vars', 'fake_kvm_architecture')

    if fake_kvm_support:
        # Why here? So anyone can run -t and -h without setting the path.
        try:
            import hooking
        except ImportError:
            print('Could not import hooking module. You should only run this '
                  'script directly with an option specified.')
            _usage()
            sys.exit(1)

        caps = hooking.read_json()
        _fake_caps_arch(caps, fake_kvm_arch)
        hooking.write_json(caps)
Example #54
class GlusterFSConnection(MountConnection):

    # Run the mount command as a systemd service, so the glusterfs helper
    # runs in its own cgroup and will not die when vdsm is terminated.
    #
    # - vdsm.slice
    #   - vdsm-glusterfs.slice
    #     - run-22137.scope
    #       - 22180 /usr/bin/glusterfs ...
    #     - run-21649.scope
    #       - 21692 /usr/bin/glusterfs ...
    #
    CGROUP = "vdsm-glusterfs"
    DIR = "glusterSD"
    ALLOWED_REPLICA_COUNTS = tuple(
        config.get("gluster", "allowed_replica_counts").split(","))

    def __init__(self,
                 id,
                 spec,
                 vfsType=None,
                 options="",
                 mountClass=mount.Mount):
        super(GlusterFSConnection, self).__init__(id,
                                                  spec,
                                                  vfsType=vfsType,
                                                  options=options,
                                                  mountClass=mountClass)
        self._volinfo = None
        self._volfileserver, volname = self._remotePath.split(":", 1)
        self._volname = volname.strip('/')
        self._have_gluster_cli = gluster_cli.exists()

    @property
    def options(self):
        backup_servers_option = ""
        if "backup-volfile-servers" in self._options:
            self.log.warn("Using user specified backup-volfile-servers option")
        elif self._have_gluster_cli:
            backup_servers_option = self._get_backup_servers_option()
        return ",".join(p for p in (self._options, backup_servers_option) if p)

    @property
    def volinfo(self):
        if self._volinfo is None:
            self._volinfo = self._get_gluster_volinfo()
        return self._volinfo

    def validate(self):
        if not self._have_gluster_cli:
            self.log.warning("Required glusterfs-cli package is missing "
                             "on this host. Note that automatic detection "
                             "of backup servers will be disabled! Please "
                             "install the missing package in order to "
                             "automatically mount gluster storage backup "
                             "servers")
            return

        if not self.volinfo:
            return

        replicaCount = self.volinfo['replicaCount']
        if replicaCount not in self.ALLOWED_REPLICA_COUNTS:
            self.log.warning(
                "Unsupported replica count (%s) for volume %r, "
                "please upgrade volume to replica 3", replicaCount,
                self._volname)

    def _get_backup_servers_option(self):
        if not self.volinfo:
            return ""

        servers = utils.unique(
            brick.split(":")[0] for brick in self.volinfo['bricks'])
        self.log.debug("Using bricks: %s", servers)
        if self._volfileserver in servers:
            servers.remove(self._volfileserver)
        else:
            self.log.warning(
                "gluster server %r is not in bricks %s, possibly "
                "mounting duplicate servers", self._volfileserver, servers)

        if not servers:
            return ""

        return "backup-volfile-servers=" + ":".join(servers)

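    # Worked example (hypothetical bricks): with volinfo bricks
    # ['s1:/b1', 's2:/b2', 's3:/b3'] and a _volfileserver of 's1', the
    # method returns 'backup-volfile-servers=s2:s3'.
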
    def _get_gluster_volinfo(self):
        try:
            superVdsmProxy = supervdsm.getProxy()
            volinfo = superVdsmProxy.glusterVolumeInfo(self._volname,
                                                       self._volfileserver)
            return volinfo[self._volname]
        except ge.GlusterCmdExecFailedException as e:
            self.log.warning("Failed to get volume info: %s", e)
            return {}
Example #55
def setSafeNetworkConfig():
    """Declare current network configuration as 'safe'"""
    utils.execCmd([constants.EXT_VDSM_STORE_NET_CONFIG,
                  config.get('vars', 'net_persistence')])
Example #56
class ImageResourceFactory(rm.SimpleResourceFactory):
    """
    This factory produces resources for images.
    """
    storage_repository = config.get('irs', 'repository')
    # Resource timeouts are in seconds. The value is written in ms in the
    # config for backward compatibility reasons.
    resource_default_timeout = config.getint('irs',
                                             'prepare_image_timeout') / 1000.0
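    # e.g. a configured prepare_image_timeout of 600000 ms (hypothetical
    # value) yields a resource_default_timeout of 600.0 seconds.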

    def __init__(self, sdUUID):
        rm.SimpleResourceFactory.__init__(self)
        self.sdUUID = sdUUID
        self.volumeResourcesNamespace = rm.getNamespace(
            sc.VOLUME_NAMESPACE, self.sdUUID)

    def __getResourceCandidatesList(self, resourceName, lockType):
        """
        Return list of lock candidates (template and volumes)
        """
        volResourcesList = []
        template = None
        dom = sdCache.produce(sdUUID=self.sdUUID)
        # Get the list of the volumes
        repoPath = os.path.join(self.storage_repository, dom.getPools()[0])
        try:
            chain = image.Image(repoPath).getChain(sdUUID=self.sdUUID,
                                                   imgUUID=resourceName)
        except se.ImageDoesNotExistInSD:
            log.debug("Image %s does not exist in domain %s", resourceName,
                      self.sdUUID)
            return []

        # Check whether the chain is built on top of a template or standalone
        pvol = chain[0].getParentVolume()
        if pvol:
            template = pvol.volUUID
        elif chain[0].isShared():
            # Image of template itself,
            # with no other volumes in chain
            template = chain[0].volUUID
            del chain[:]

        volUUIDChain = [vol.volUUID for vol in chain]
        volUUIDChain.sort()

        # Activate all volumes in the chain at once.
        # We may attempt to activate the volumes again further down the flow
        # with no consequence, since they are already active.
        # TODO: Fix resource framework to hold images, instead of specific
        # vols. This assumes that a chain cannot span more than one SD.
        if dom.__class__.__name__ == "BlockStorageDomain":
            lvm.activateLVs(self.sdUUID, volUUIDChain)

        failed = False
        # Acquire template locks:
        # - 'lockType' for template's image itself
        # - Always 'shared' lock for image based on template
        try:
            if template:
                if len(volUUIDChain) > 0:
                    volRes = rm.acquireResource(
                        self.volumeResourcesNamespace,
                        template,
                        rm.SHARED,
                        timeout=self.resource_default_timeout)
                else:
                    volRes = rm.acquireResource(
                        self.volumeResourcesNamespace,
                        template,
                        lockType,
                        timeout=self.resource_default_timeout)
                volResourcesList.append(volRes)

            # Acquire 'lockType' volume locks
            for volUUID in volUUIDChain:
                volRes = rm.acquireResource(
                    self.volumeResourcesNamespace,
                    volUUID,
                    lockType,
                    timeout=self.resource_default_timeout)

                volResourcesList.append(volRes)
        except (rm.RequestTimedOutError, se.ResourceAcqusitionFailed) as e:
            log.debug("Cannot acquire volume resource (%s)", str(e))
            failed = True
            raise
        except Exception:
            log.debug("Cannot acquire volume resource", exc_info=True)
            failed = True
            raise
        finally:
            if failed:
                # Release already acquired template/volumes locks
                for volRes in volResourcesList:
                    volRes.release()

        return volResourcesList

    def createResource(self, resourceName, lockType):
        volResourcesList = self.__getResourceCandidatesList(
            resourceName, lockType)
        return ImageResource(volResourcesList)
Example #57
    def getUUIDs(self):
        import blockSD
        import fileSD

        uuids = []
        for mod in (blockSD, fileSD):
            uuids.extend(mod.getStorageDomainsList())

        return uuids

    def refresh(self):
        with self._syncroot:
            lvm.invalidateCache()
            self.__domainCache.clear()

    def manuallyAddDomain(self, domain):
        with self._syncroot:
            self.__domainCache[domain.sdUUID] = domain

    def manuallyRemoveDomain(self, sdUUID):
        with self._syncroot:
            try:
                del self.__domainCache[sdUUID]
            except KeyError:
                pass


storage_repository = config.get('irs', 'repository')
sdCache = StorageDomainCache(storage_repository)
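
# Hedged usage sketch: other modules import this singleton and resolve
# domains through it, e.g. dom = sdCache.produce(sdUUID=sd_uuid), where
# sd_uuid is a hypothetical domain UUID (see Example #56 above).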
Example #58
    def _prepareMOM(self):
        momconf = config.get("mom", "conf")

        self.mom = MomThread(momconf)
Example #59
class SafeLease(object):
    log = logging.getLogger("storage.Safelease")

    lockUtilPath = config.get('irs', 'lock_util_path')
    lockCmd = config.get('irs', 'lock_cmd')
    freeLockCmd = config.get('irs', 'free_lock_cmd')

    def __init__(self, sdUUID, idsPath, lease, lockRenewalIntervalSec,
                 leaseTimeSec, leaseFailRetry, ioOpTimeoutSec, **kwargs):
        """
        Note: kwargs are not used. They were added to keep forward
              compatibility with more recent locks.
        """
        self._lock = threading.Lock()
        self._sdUUID = sdUUID
        self._idsPath = idsPath
        self._lease = lease
        self.setParams(lockRenewalIntervalSec, leaseTimeSec, leaseFailRetry,
                       ioOpTimeoutSec)

    @property
    def supports_multiple_leases(self):
        return False

    def initLock(self, lease):
        if lease != self._lease:
            raise MultipleLeasesNotSupported("init", lease)
        lockUtil = constants.EXT_SAFELEASE
        initCommand = [lockUtil, "release", "-f", lease.path, "0"]
        rc, out, err = misc.execCmd(initCommand, cwd=self.lockUtilPath)
        if rc != 0:
            self.log.warn("could not initialise spm lease (%s): %s", rc, out)
            raise se.ClusterLockInitError()

    def setParams(self, lockRenewalIntervalSec, leaseTimeSec, leaseFailRetry,
                  ioOpTimeoutSec):
        self._lockRenewalIntervalSec = lockRenewalIntervalSec
        self._leaseTimeSec = leaseTimeSec
        self._leaseFailRetry = leaseFailRetry
        self._ioOpTimeoutSec = ioOpTimeoutSec

    def getReservedId(self):
        return 1000

    def acquireHostId(self, hostId, wait):
        self.log.debug("Host id for domain %s successfully acquired (id: %s)",
                       self._sdUUID, hostId)

    def releaseHostId(self, hostId, wait, unused):
        self.log.debug("Host id for domain %s released successfully (id: %s)",
                       self._sdUUID, hostId)

    def hasHostId(self, hostId):
        return True

    def getHostStatus(self, hostId):
        return HOST_STATUS_UNAVAILABLE

    def acquire(self, hostID, lease):
        if lease != self._lease:
            raise MultipleLeasesNotSupported("acquire", lease)
        leaseTimeMs = self._leaseTimeSec * 1000
        ioOpTimeoutMs = self._ioOpTimeoutSec * 1000
        with self._lock:
            self.log.debug("Acquiring cluster lock for domain %s" %
                           self._sdUUID)

            lockUtil = self.getLockUtilFullPath()
            acquireLockCommand = subprocess.list2cmdline([
                lockUtil, "start", self._sdUUID, str(hostID),
                str(self._lockRenewalIntervalSec), str(lease.path),
                str(leaseTimeMs), str(ioOpTimeoutMs),
                str(self._leaseFailRetry), str(os.getpid())
            ])

            cmd = [constants.EXT_SU, misc.IOUSER, '-s', constants.EXT_SH, '-c',
                   acquireLockCommand]
            (rc, out, err) = misc.execCmd(cmd, cwd=self.lockUtilPath,
                                          sudo=True,
                                          ioclass=utils.IOCLASS.REALTIME,
                                          ioclassdata=0, setsid=True)
            if rc != 0:
                raise se.AcquireLockFailure(self._sdUUID, rc, out, err)
            self.log.debug("Clustered lock acquired successfully")

    def inquire(self, lease):
        raise se.InquireNotSupportedError()

    def getLockUtilFullPath(self):
        return os.path.join(self.lockUtilPath, self.lockCmd)

    def release(self, lease):
        if lease != self._lease:
            raise MultipleLeasesNotSupported("release", lease)
        with self._lock:
            freeLockUtil = os.path.join(self.lockUtilPath, self.freeLockCmd)
            releaseLockCommand = [freeLockUtil, self._sdUUID]
            self.log.info("Releasing cluster lock for domain %s" %
                          self._sdUUID)
            (rc, out, err) = misc.execCmd(releaseLockCommand, raw=True,
                                          cwd=self.lockUtilPath)
            if rc != 0:
                # TODO: should raise
                self.log.error("Could not release cluster lock for domain %s "
                               "(rc=%d, out=%s, err=%s)",
                               self._sdUUID, rc, out, err)
                return

            self.log.debug("Cluster lock for domain %s released successfully",
                           self._sdUUID)
Example #60
def _vdscli():
    request_queues = config.get('addresses', 'request_queues')
    request_queue = request_queues.split(',')[0]
    return jsonrpcvdscli.connect(request_queue)
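
# For instance, with request_queues = 'queue.a,queue.b' (a hypothetical
# config value), the helper above connects to the first entry, 'queue.a'.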