Exemple #1
0
def get():
    """Return a dict describing the host capabilities.

    Collects CPU topology and flags, software versions, network info,
    hooks, storage inventory and memory figures for the engine.
    All values are strings or lists of strings.
    """
    caps = {}

    # kvm is "enabled" either for real (/dev/kvm) or when faked via config
    caps['kvmEnabled'] = \
        str(config.getboolean('vars', 'fake_kvm_support') or
            os.path.exists('/dev/kvm')).lower()

    cpuInfo = CpuInfo()
    cpuTopology = CpuTopology()
    if config.getboolean('vars', 'report_host_threads_as_cores'):
        caps['cpuCores'] = str(cpuTopology.threads())
    else:
        caps['cpuCores'] = str(cpuTopology.cores())

    caps['cpuThreads'] = str(cpuTopology.threads())
    caps['cpuSockets'] = str(cpuTopology.sockets())
    caps['cpuSpeed'] = cpuInfo.mhz()
    if config.getboolean('vars', 'fake_kvm_support'):
        caps['cpuModel'] = 'Intel(Fake) CPU'
        flags = set(cpuInfo.flags() + ['vmx', 'sse2', 'nx'])
        # BUG FIX: a ',' separator was missing between the joined flags and
        # the fake model list, fusing the last flag with 'model_486'.
        caps['cpuFlags'] = ','.join(flags) + ',model_486,model_pentium,' \
            'model_pentium2,model_pentium3,model_pentiumpro,model_qemu32,' \
            'model_coreduo,model_core2duo,model_n270,model_Conroe,' \
            'model_Penryn,model_Nehalem,model_Opteron_G1'
    else:
        caps['cpuModel'] = cpuInfo.model()
        caps['cpuFlags'] = ','.join(cpuInfo.flags() +
                                    _getCompatibleCpuModels())

    caps.update(dsaversion.version_info)
    caps.update(netinfo.get())

    try:
        caps['hooks'] = hooks.installed()
    except Exception:
        # best-effort: never let hook inspection break capability reporting
        # (was a bare `except:`, which also swallowed SystemExit)
        logging.debug('not reporting hooks', exc_info=True)

    caps['operatingSystem'] = osversion()
    caps['uuid'] = utils.getHostUUID()
    caps['packages2'] = _getKeyPackages()
    caps['emulatedMachines'] = _getEmulatedMachines()
    caps['ISCSIInitiatorName'] = _getIscsiIniName()
    caps['HBAInventory'] = storage.hba.HBAInventory()
    caps['vmTypes'] = ['kvm']

    caps['memSize'] = str(utils.readMemInfo()['MemTotal'] / 1024)
    caps['reservedMem'] = str(config.getint('vars', 'host_mem_reserve') +
                              config.getint('vars', 'extra_mem_reserve'))
    caps['guestOverhead'] = config.get('vars', 'guest_ram_overhead')

    return caps
Exemple #2
0
    def _prepareBindings(self):
        """Create the RPC bindings that are enabled in the configuration."""
        self.bindings = {}
        wanted = [
            ('xmlrpc_enable', self._loadBindingXMLRPC, self.log.error,
             'Unable to load the xmlrpc server module. '
             'Please make sure it is installed.'),
            ('jsonrpc_enable', self._loadBindingJsonRpc, self.log.warn,
             'Unable to load the json rpc server module. '
             'Please make sure it is installed.'),
        ]
        for option, loader, report, message in wanted:
            if config.getboolean('vars', option):
                try:
                    loader()
                except ImportError:
                    report(message)
Exemple #3
0
    def _setupVdsConnection(self):
        """Connect to the destination vdsm and sanity-check the peer."""
        if self.hibernating:
            # No remote peer involved when saving to a file.
            return

        # FIXME: The port will depend on the binding being used.
        # This assumes xmlrpc
        hostPort = vdscli.cannonizeHostPort(
            self._dst,
            config.getint('addresses', 'management_port'))
        self.remoteHost = hostPort.rsplit(':', 1)[0]

        if config.getboolean('vars', 'ssl'):
            self._destServer = vdscli.connect(
                hostPort,
                useSSL=True,
                TransportClass=kaxmlrpclib.TcpkeepSafeTransport)
        else:
            self._destServer = kaxmlrpclib.Server('http://' + hostPort)
        self.log.debug('Destination server is: ' + hostPort)

        try:
            self.log.debug('Initiating connection with destination')
            status = self._destServer.getVmStats(self._vm.id)
            if not status['status']['code']:
                self.log.error("Machine already exists on the destination")
                self.status = errCode['exist']
        except Exception:
            self.log.exception("Error initiating connection")
            self.status = errCode['noConPeer']
Exemple #4
0
 def _initIRS(self):
     """Create the storage dispatcher when enabled; leave None on failure."""
     self.irs = None
     if config.getboolean('irs', 'irs_enable'):
         try:
             self.irs = Dispatcher(HSM())
         except Exception:
             # was a bare `except:`, which also swallowed SystemExit and
             # KeyboardInterrupt; keep best-effort behavior but narrower
             self.log.error("Error initializing IRS", exc_info=True)
Exemple #5
0
    def configure(self):
        """Run a full (re)configuration of vdsm.

        Requires root; on ovirt-node the host certificate must already be
        registered. Wipes any previous configuration and rewrites every
        managed file from the current vdsm settings.
        """
        if os.getuid() != 0:
            raise NotRootError()

        # NOTE(review): presumably migrates legacy sysv services to
        # upstart before reconfiguring — confirm against its definition.
        self._sysvToUpstart()

        if utils.isOvirtNode():
            # A registered ovirt-node host must already have its cert.
            if not os.path.exists(P_VDSM_CERT):
                raise InvalidRun(
                    "vdsm: Missing certificate, vdsm not registered")
            validate_ovirt_certs.validate_ovirt_certs()

        # Remove a previous configuration (if present)
        self.removeConf()

        config.read(self._getFile('VDSM_CONF'))
        vdsmConfiguration = {
            # all three of CA, cert and key must be present
            'certs_exist': all(os.path.isfile(f) for f in [
                self.CA_FILE,
                self.CERT_FILE,
                self.KEY_FILE
            ]),
            'ssl_enabled': config.getboolean('vars', 'ssl'),
            'sanlock_enabled': SANLOCK_ENABLED,
            'libvirt_selinux': LIBVIRT_SELINUX
        }

        # write configuration
        for cfile, content in self.FILES.items():
            # each managed file supplies its own 'configure' callback
            content['configure'](self, content, vdsmConfiguration)
Exemple #6
0
def start(cif, scheduler):
    """
    Starts all the periodic Operations, to be run in one executor.Executor
    instance owned by the `periodic` module.
    There is no guarantee on the order on which the operations will be
    started; this function only guarantees that it will attempt to
    start every known Operation.
    """
    global _executor
    global _operations

    _executor = executor.Executor(
        name="periodic",
        workers_count=_WORKERS,
        max_tasks=_TASKS,
        scheduler=scheduler,
        max_workers=_MAX_WORKERS)
    _executor.start()

    _operations = _create(cif, scheduler)

    if config.getboolean('sampling', 'enable'):
        host.stats.start()

    # best-effort: a failing operation must not prevent the others
    for operation in _operations:
        try:
            operation.start()
        except Error as exc:
            logging.warning('Operation not started: %s', exc)
Exemple #7
0
    def _setupVdsConnection(self):
        """Connect to the destination over jsonrpc, falling back to
        xmlrpc when the jsonrpc binding is unreachable."""
        if self.hibernating:
            return

        hostPort = vdscli.cannonizeHostPort(
            self._dst,
            config.getint('addresses', 'management_port'))
        self.remoteHost, port = hostPort.rsplit(':', 1)

        try:
            client = self._createClient(port)
            queues = config.get('addresses', 'request_queues')
            self._destServer = jsonrpcvdscli.connect(
                queues.split(",")[0], client)
            self.log.debug('Initiating connection with destination')
            self._destServer.ping()
        except (JsonRpcBindingsError, JsonRpcNoResponseError):
            # jsonrpc not available on the peer; use the xmlrpc binding
            if config.getboolean('vars', 'ssl'):
                self._destServer = vdscli.connect(
                    hostPort,
                    useSSL=True,
                    TransportClass=kaxmlrpclib.TcpkeepSafeTransport)
            else:
                self._destServer = kaxmlrpclib.Server('http://' + hostPort)

        self.log.debug('Destination server is: ' + hostPort)
Exemple #8
0
def calculate_required_deallocation(vm_hugepages, vm_hugepagesz):
    """

    Args:
        vm_hugepages: The number of hugepages VM requires.
        vm_hugepagesz: VM's hugepage size.

    It is a responsibility of the caller to properly handle concurrency.

    Returns:
        Number of hugepages to be deallocated while making sure not to break
        any constraints (reserved and preallocated pages).
    """
    # Similar to allocation: hugepagesz == 0 indicates disabled hugepages.
    if vm_hugepagesz == 0:
        return 0

    if not config.getboolean('performance', 'use_preallocated_hugepages'):
        return vm_hugepages

    nr_hugepages = int(state()[vm_hugepagesz]['nr_hugepages'])

    # Never touch reserved or preallocated pages: some of the pages VDSM
    # allocated initially may have been moved to the reserved pool.
    untouchable = max(_reserved_hugepages(vm_hugepagesz),
                      _preallocated_hugepages(vm_hugepagesz))

    # At most deallocate the VM's own hugepages.
    return min(vm_hugepages, nr_hugepages - untouchable)
Exemple #9
0
File: vm.py  Project: ekohl/vdsm
 def _setupVdsConnection(self):
     """Connect to the destination vdsm over xmlrpc.

     No-op when migrating to a file (hibernation mode).
     """
     if self._mode == 'file':
         return
     self.remoteHost = self._dst.split(':')[0]
     # FIXME: The port will depend on the binding being used.
     # This assumes xmlrpc
     self.remotePort = self._vm.cif.bindings['xmlrpc'].serverPort
     try:
         self.remotePort = self._dst.split(':')[1]
     except IndexError:
         # destination spec has no explicit port; keep the binding's port
         # (was a bare `except: pass`, which hid unrelated errors)
         pass
     serverAddress = self.remoteHost + ':' + self.remotePort
     if config.getboolean('vars', 'ssl'):
         self.destServer = vdscli.connect(
             serverAddress, useSSL=True,
             TransportClass=kaxmlrpclib.TcpkeepSafeTransport)
     else:
         self.destServer = kaxmlrpclib.Server('http://' + serverAddress)
     self.log.debug('Destination server is: ' + serverAddress)
     try:
         self.log.debug('Initiating connection with destination')
         status = self.destServer.getVmStats(self._vm.id)
         if not status['status']['code']:
             self.log.error("Machine already exists on the destination")
             self.status = errCode['exist']
     except Exception:
         # was a bare `except:` — keep best-effort behavior but do not
         # swallow SystemExit/KeyboardInterrupt
         self.log.error("Error initiating connection", exc_info=True)
         self.status = errCode['noConPeer']
Exemple #10
0
    def _startUnderlyingMigration(self, startTime):
        # Drive the actual migration: either save the VM to a file
        # (hibernation) or create the VM on the destination host and
        # hand the state transfer to libvirt.
        if self.hibernating:
            hooks.before_vm_hibernate(self._vm._dom.XMLDesc(0), self._vm.conf)
            fname = self._vm.cif.prepareVolumePath(self._dst)
            try:
                self._vm._dom.save(fname)
            finally:
                # always release the volume, even if save() failed
                self._vm.cif.teardownVolumePath(self._dst)
        else:
            # run per-device and per-VM source-side migration hooks first
            for dev in self._vm._customDevices():
                hooks.before_device_migrate_source(
                    dev._deviceXML, self._vm.conf, dev.custom)
            hooks.before_vm_migrate_source(self._vm._dom.XMLDesc(0),
                                           self._vm.conf)

            # Do not measure the time spent for creating the VM on the
            # destination. In some cases some expensive operations can cause
            # the migration to get cancelled right after the transfer started.
            destCreateStartTime = time.time()
            result = self._destServer.migrationCreate(self._machineParams,
                                                      self._incomingLimit)
            destCreationTime = time.time() - destCreateStartTime
            startTime += destCreationTime
            self.log.info('Creation of destination VM took: %d seconds',
                          destCreationTime)

            if response.is_error(result):
                self.status = result
                if response.is_error(result, 'migrateLimit'):
                    # destination refused: incoming migration limit reached
                    raise MigrationLimitExceeded()
                else:
                    raise MigrationDestinationSetupError(
                        'migration destination error: ' +
                        result['status']['message'])
            # libvirt destination URI: tls or tcp per vdsm's ssl setting
            if config.getboolean('vars', 'ssl'):
                transport = 'tls'
            else:
                transport = 'tcp'
            duri = 'qemu+%s://%s/system' % (transport, self.remoteHost)
            # qemu data channel: use the explicit address when provided
            if self._vm.conf['_migrationParams']['dstqemu']:
                muri = 'tcp://%s' % \
                       self._vm.conf['_migrationParams']['dstqemu']
            else:
                muri = 'tcp://%s' % self.remoteHost

            self._vm.log.info('starting migration to %s '
                              'with miguri %s', duri, muri)

            self._monitorThread = MonitorThread(self._vm, startTime,
                                                self._convergence_schedule,
                                                self._use_convergence_schedule)

            if self._use_convergence_schedule:
                self._perform_with_conv_schedule(duri, muri)
            else:
                self._perform_with_downtime_thread(duri, muri)

            self.log.info("migration took %d seconds to complete",
                          (time.time() - startTime) + destCreationTime)
Exemple #11
0
    def _startUnderlyingMigration(self, startTime, migrationParams,
                                  machineParams):
        # Drive the actual migration: hibernate to storage, or create the
        # VM on the destination and hand the transfer to libvirt.
        if self.hibernating:
            self._started = True
            self._vm.hibernate(self._dst)
        else:
            self._vm.prepare_migration()

            # Do not measure the time spent for creating the VM on the
            # destination. In some cases some expensive operations can cause
            # the migration to get cancelled right after the transfer started.
            destCreateStartTime = time.time()
            result = self._destServer.migrationCreate(machineParams,
                                                      self._incomingLimit)
            destCreationTime = time.time() - destCreateStartTime
            startTime += destCreationTime
            self.log.info('Creation of destination VM took: %d seconds',
                          destCreationTime)

            if response.is_error(result):
                self.status = result
                if response.is_error(result, 'migrateLimit'):
                    # destination refused: incoming migration limit reached
                    raise MigrationLimitExceeded()
                else:
                    raise MigrationDestinationSetupError(
                        'migration destination error: ' +
                        result['status']['message'])

            self._started = True

            # libvirt destination URI: tls or tcp per vdsm's ssl setting
            if config.getboolean('vars', 'ssl'):
                transport = 'tls'
            else:
                transport = 'tcp'
            duri = 'qemu+{}://{}/system'.format(
                transport, normalize_literal_addr(self.remoteHost))

            # qemu data channel: use the explicit address when provided
            dstqemu = migrationParams['dstqemu']
            if dstqemu:
                muri = 'tcp://{}'.format(
                    normalize_literal_addr(dstqemu))
            else:
                muri = 'tcp://{}'.format(
                    normalize_literal_addr(self.remoteHost))

            self._vm.log.info('starting migration to %s '
                              'with miguri %s', duri, muri)

            self._monitorThread = MonitorThread(self._vm, startTime,
                                                self._convergence_schedule,
                                                self._use_convergence_schedule)

            if self._use_convergence_schedule:
                self._perform_with_conv_schedule(duri, muri)
            else:
                self._perform_with_downtime_thread(duri, muri)

            self.log.info("migration took %d seconds to complete",
                          (time.time() - startTime) + destCreationTime)
Exemple #12
0
def resume_paused_vm(vm_id):
    """Resume a VM previously marked for unpausing, then drop the marker."""
    marker = MARK_FOR_UNPAUSE_PATH % vm_id
    if not os.path.isfile(marker):
        return
    cli = client.connect('localhost',
                         use_tls=config.getboolean('vars', 'ssl'))
    with utils.closing(cli):
        cli.VM.cont(vmID=vm_id)
    os.remove(marker)
Exemple #13
0
    def _startUnderlyingMigration(self, startTime):
        # Drive the actual migration: save to a file (hibernation) or
        # create the VM on the destination and migrate via libvirt.
        if self.hibernating:
            hooks.before_vm_hibernate(self._vm._dom.XMLDesc(0), self._vm.conf)
            try:
                # pause stats collection while the domain is being saved
                self._vm._vmStats.pause()
                fname = self._vm.cif.prepareVolumePath(self._dst)
                try:
                    self._vm._dom.save(fname)
                finally:
                    # always release the volume, even if save() failed
                    self._vm.cif.teardownVolumePath(self._dst)
            except Exception:
                # resume stats collection on failure, then propagate
                self._vm._vmStats.cont()
                raise
        else:
            # run per-device and per-VM source-side migration hooks first
            for dev in self._vm._customDevices():
                hooks.before_device_migrate_source(
                    dev._deviceXML, self._vm.conf, dev.custom)
            hooks.before_vm_migrate_source(self._vm._dom.XMLDesc(0),
                                           self._vm.conf)

            # Do not measure the time spent for creating the VM on the
            # destination. In some cases some expensive operations can cause
            # the migration to get cancelled right after the transfer started.
            destCreateStartTime = time.time()
            result = self._destServer.migrationCreate(self._machineParams)
            destCreationTime = time.time() - destCreateStartTime
            startTime += destCreationTime
            self.log.info('Creation of destination VM took: %d seconds',
                          destCreationTime)

            if result['status']['code']:
                # nonzero code means destination setup failed
                self.status = result
                raise RuntimeError('migration destination error: ' +
                                   result['status']['message'])
            # libvirt destination URI: tls or tcp per vdsm's ssl setting
            if config.getboolean('vars', 'ssl'):
                transport = 'tls'
            else:
                transport = 'tcp'
            duri = 'qemu+%s://%s/system' % (transport, self.remoteHost)
            # qemu data channel: use the explicit address when provided
            if self._vm.conf['_migrationParams']['dstqemu']:
                muri = 'tcp://%s' % \
                       self._vm.conf['_migrationParams']['dstqemu']
            else:
                muri = 'tcp://%s' % self.remoteHost

            self._vm.log.info('starting migration to %s '
                              'with miguri %s', duri, muri)

            downtimeThread = DowntimeThread(self._vm, int(self._downtime))
            self._monitorThread = MonitorThread(self._vm, startTime)
            with utils.running(downtimeThread):
                with utils.running(self._monitorThread):
                    # we need to support python 2.6, so two nested with-s.
                    self._perform_migration(duri, muri)

            self.log.info("migration took %d seconds to complete",
                          (time.time() - startTime) + destCreationTime)
Exemple #14
0
 def _initIRS(self):
     """Create the storage dispatcher when enabled; leave None on failure.

     On success, register the EIO-resume callback for domain state changes.
     """
     self.irs = None
     if config.getboolean('irs', 'irs_enable'):
         try:
             self.irs = Dispatcher(HSM())
         except Exception:
             # was a bare `except:`, which also swallowed SystemExit and
             # KeyboardInterrupt; keep best-effort behavior but narrower
             self.log.error("Error initializing IRS", exc_info=True)
         else:
             self.irs.registerDomainStateChangeCallback(self.contEIOVms)
Exemple #15
0
def discard_enabled():
    """
    Report whether the user configured automatic discard of block devices.
    This reflects configuration only, regardless of device capabilities.

    Returns:
        bool
    """
    enabled = config.getboolean("irs", "discard_enable")
    return enabled
Exemple #16
0
    def __init__(self):
        """Load the API and event schemas and set up per-thread state."""
        strict = config.getboolean('devel', 'api_strict_mode')
        self._schema = vdsmapi.Schema.vdsm_api(
            strict, with_gluster=_glusterEnabled)
        self._event_schema = vdsmapi.Schema.vdsm_events(strict)
        self._threadLocal = threading.local()
        self.log = logging.getLogger('DynamicBridge')
Exemple #17
0
def _certsExist():
    """Return True when ssl is disabled or the vdsm certificate exists."""
    conf_file = os.path.join(SYSCONF_PATH, 'vdsm/vdsm.conf')
    config.read(conf_file)
    if not config.getboolean('vars', 'ssl'):
        return True
    return os.path.isfile(CERT_FILE)
Exemple #18
0
 def _createSSLContext(self):
     """Build an SSLContext from the trust store, or None when ssl is off."""
     if not config.getboolean('vars', 'ssl'):
         return None
     store = config.get('vars', 'trust_store_path')
     return SSLContext(
         os.path.join(store, 'certs', 'vdsmcert.pem'),
         os.path.join(store, 'keys', 'vdsmkey.pem'),
         os.path.join(store, 'certs', 'cacert.pem'))
Exemple #19
0
def get():
    """Return a dict describing the host capabilities.

    Collects CPU info and flags, software versions, network info, hooks,
    storage inventory and memory figures for the engine.
    """
    caps = {}

    caps["kvmEnabled"] = str(config.getboolean("vars", "fake_kvm_support") or os.path.exists("/dev/kvm")).lower()

    cpuInfo = CpuInfo()
    caps["cpuCores"] = str(cpuInfo.cores())
    caps["cpuSockets"] = str(cpuInfo.sockets())
    caps["cpuSpeed"] = cpuInfo.mhz()
    if config.getboolean("vars", "fake_kvm_support"):
        caps["cpuModel"] = "Intel(Fake) CPU"
        flags = set(cpuInfo.flags() + ["vmx", "sse2", "nx"])
        # BUG FIX: a "," separator was missing between the joined flags and
        # the fake model list, fusing the last flag with "model_486".
        caps["cpuFlags"] = (
            ",".join(flags) + ",model_486,model_pentium,"
            "model_pentium2,model_pentium3,model_pentiumpro,model_qemu32,"
            "model_coreduo,model_core2duo,model_n270,model_Conroe,"
            "model_Penryn,model_Nehalem,model_Opteron_G1"
        )
    else:
        caps["cpuModel"] = cpuInfo.model()
        caps["cpuFlags"] = ",".join(cpuInfo.flags() + _getCompatibleCpuModels())

    caps.update(dsaversion.version_info)
    caps.update(netinfo.get())

    try:
        caps["hooks"] = hooks.installed()
    except Exception:
        # best-effort: never let hook inspection break capability reporting
        # (was a bare `except:`, which also swallowed SystemExit)
        logging.debug("not reporting hooks", exc_info=True)

    caps["operatingSystem"] = osversion()
    caps["uuid"] = utils.getHostUUID()
    caps["packages2"] = _getKeyPackages()
    caps["emulatedMachines"] = _getEmulatedMachines()
    caps["ISCSIInitiatorName"] = _getIscsiIniName()
    caps["HBAInventory"] = storage.hba.HBAInventory()
    caps["vmTypes"] = ["kvm"]

    caps["memSize"] = str(utils.readMemInfo()["MemTotal"] / 1024)
    caps["reservedMem"] = str(config.getint("vars", "host_mem_reserve") + config.getint("vars", "extra_mem_reserve"))
    caps["guestOverhead"] = config.get("vars", "guest_ram_overhead")

    return caps
Exemple #20
0
def _reserved_hugepages(hugepagesz):
    """Number of hugepages of the given size reserved by configuration.

    Returns 0 unless preallocated hugepages are in use and the configured
    reserved size matches *hugepagesz*.
    """
    if not config.getboolean('performance', 'use_preallocated_hugepages'):
        return 0
    if config.get('performance', 'reserved_hugepage_size') != str(hugepagesz):
        return 0
    return config.getint('performance', 'reserved_hugepage_count')
Exemple #21
0
 def _prepareBindings(self):
     """Create the xmlrpc binding from the configured parameters."""
     self.bindings = {}
     params = {}
     params['ip'] = config.get('addresses', 'management_ip')
     params['port'] = config.get('addresses', 'management_port')
     params['ssl'] = config.getboolean('vars', 'ssl')
     params['vds_responsiveness_timeout'] = config.getint(
         'vars', 'vds_responsiveness_timeout')
     params['trust_store_path'] = config.get('vars', 'trust_store_path')
     params['default_bridge'] = config.get("vars", "default_bridge")
     self.bindings['xmlrpc'] = BindingXMLRPC(self, self.log, params)
Exemple #22
0
 def _loadBindingJsonRpc(self):
     """Create the json-rpc binding listening on the configured tcp port."""
     from BindingJsonRpc import BindingJsonRpc
     from Bridge import DynamicBridge
     bind_to = {
         "ip": config.get('addresses', 'management_ip'),
         "port": config.getint('addresses', 'json_port'),
     }
     if config.getboolean('vars', 'ssl'):
         truststore_path = config.get('vars', 'trust_store_path')
     else:
         truststore_path = None
     self.bindings['json'] = BindingJsonRpc(
         DynamicBridge(), [('tcp', bind_to)], truststore_path)
Exemple #23
0
 def _loadBindingXMLRPC(self):
     """Create the xmlrpc binding from the configured address/ssl values."""
     from BindingXMLRPC import BindingXMLRPC
     self.bindings['xmlrpc'] = BindingXMLRPC(
         self, self.log,
         config.get('addresses', 'management_ip'),
         config.get('addresses', 'management_port'),
         config.getboolean('vars', 'ssl'),
         config.getint('vars', 'vds_responsiveness_timeout'),
         config.get('vars', 'trust_store_path'),
         config.get("vars", "default_bridge"))
Exemple #24
0
def _isSslConflict():
    """
    return True if libvirt configuration files match ssl configuration of
    vdsm.conf.
    """
    config.read(_getFile('VDSM_CONF'))
    ssl = config.getboolean('vars', 'ssl')

    # libvirtd.conf values, with defaults applied when keys are missing
    lconf_p = ParserWrapper({
        'listen_tcp': '0',
        'auth_tcp': 'sasl',
        'listen_tls': '1',
    })
    lconf_p.read(_getFile('LCONF'))
    listen_tcp = lconf_p.getint('listen_tcp')
    auth_tcp = lconf_p.get('auth_tcp')
    listen_tls = lconf_p.getint('listen_tls')
    # qemu.conf: only spice_tls is relevant here
    qconf_p = ParserWrapper({'spice_tls': '0'})
    qconf_p.read(_getFile('QCONF'))
    spice_tls = qconf_p.getboolean('spice_tls')
    ret = True
    if ssl:
        # NOTE(review): these are negated comparisons ("not the plaintext
        # values") rather than exact matches against the values listed in
        # the failure message below — confirm this looseness is intended.
        if listen_tls != 0 and listen_tcp != 1 and auth_tcp != '"none"' and \
                spice_tls != 0:
            sys.stdout.write(
                "SUCCESS: ssl configured to true. No conflicts\n")
        else:
            sys.stdout.write(
                "FAILED: "
                "conflicting vdsm and libvirt-qemu tls configuration.\n"
                "vdsm.conf with ssl=True "
                "requires the following changes:\n"
                "libvirtd.conf: listen_tcp=0, auth_tcp=\"sasl\", "
                "listen_tls=1\nqemu.conf: spice_tls=1.\n"
            )
            ret = False
    else:
        # ssl disabled: require the exact plaintext configuration
        if listen_tls == 0 and listen_tcp == 1 and auth_tcp == '"none"' and \
                spice_tls == 0:
            sys.stdout.write(
                "SUCCESS: ssl configured to false. No conflicts.\n")
        else:
            sys.stdout.write(
                "FAILED: "
                "conflicting vdsm and libvirt-qemu tls configuration.\n"
                "vdsm.conf with ssl=False "
                "requires the following changes:\n"
                "libvirtd.conf: listen_tcp=1, auth_tcp=\"none\", "
                "listen_tls=0\n qemu.conf: spice_tls=0.\n"
            )
            ret = False
    return ret
Exemple #25
0
    def __init__(self):
        """Load the api (plus optional gluster) and event schemas."""
        strict = config.getboolean('devel', 'api_strict_mode')
        schema_paths = [vdsmapi.find_schema()]
        if _glusterEnabled:
            schema_paths.append(vdsmapi.find_schema('vdsm-api-gluster'))
        self._schema = vdsmapi.Schema(schema_paths, strict)

        self._event_schema = vdsmapi.Schema(
            [vdsmapi.find_schema('vdsm-events')], strict)

        self._threadLocal = threading.local()
        self.log = logging.getLogger('DynamicBridge')
Exemple #26
0
 def _prepareHttpServer(self):
     """Register the http server and its detector when enabled."""
     if not config.getboolean('vars', 'http_enable'):
         return
     try:
         from vdsm.rpc.http import HttpDetector
         from vdsm.rpc.http import Server
     except ImportError:
         self.log.error('Unable to load the http server module. '
                        'Please make sure it is installed.')
         return
     server = Server(self, self.log)
     self.servers['http'] = server
     self._acceptor.add_detector(HttpDetector(server))
Exemple #27
0
 def _prepareXMLRPCBinding(self):
     """Register the xmlrpc binding and its protocol detector if enabled."""
     if not config.getboolean('vars', 'xmlrpc_enable'):
         return
     try:
         from vdsm.rpc.bindingxmlrpc import BindingXMLRPC
         from vdsm.rpc.bindingxmlrpc import XmlDetector
     except ImportError:
         self.log.error('Unable to load the xmlrpc server module. '
                        'Please make sure it is installed.')
         return
     binding = BindingXMLRPC(self, self.log)
     self.bindings['xmlrpc'] = binding
     self._acceptor.add_detector(XmlDetector(binding))
Exemple #28
0
def configure():
    """Rewrite every managed configuration file from vdsm settings."""
    # Remove a previous configuration (if present)
    removeConf()

    vdsm_cfg = {
        'ssl_enabled': config.getboolean('vars', 'ssl'),
        'sanlock_enabled': constants.SANLOCK_ENABLED,
        'libvirt_selinux': constants.LIBVIRT_SELINUX,
    }

    # write configuration; each entry supplies its own 'configure' callback
    for content in FILES.values():
        content['configure'](content, vdsm_cfg)
Exemple #29
0
    def __init__(self, paths):
        """Load and merge one or more YAML schema files.

        Args:
            paths: iterable of filesystem paths to YAML schema documents.

        Raises:
            SchemaNotFound: when any of the paths cannot be opened.
        """
        self._strict_mode = config.getboolean('devel', 'api_strict_mode')
        self._methods = {}
        self._types = {}
        try:
            for path in paths:
                with open(path) as f:
                    # NOTE(review): yaml.load without an explicit Loader is
                    # unsafe on untrusted input; schema files are presumably
                    # local and trusted — confirm, or switch to safe_load.
                    loaded_schema = yaml.load(f)

                # 'types' entries are kept separately; every remaining
                # top-level entry is treated as a method definition.
                types = loaded_schema.pop('types')
                self._types.update(types)
                self._methods.update(loaded_schema)
        except EnvironmentError:
            raise SchemaNotFound("Unable to find API schema file")
Exemple #30
0
def _getLiveSnapshotSupport(arch, capabilities=None):
    """Return live snapshot support for *arch* from the capabilities XML,
    or None when no matching guest arch is present."""
    if capabilities is None:
        capabilities = _getCapsXMLStr()

    doc = minidom.parseString(capabilities)
    for guest in doc.getElementsByTagName('guest'):
        arch_tag = guest.getElementsByTagName('arch')[0]
        if arch_tag.getAttribute('name') == arch:
            return _findLiveSnapshotSupport(guest)

    # a real host should always expose its guest arch in the caps XML
    if not config.getboolean('vars', 'fake_kvm_support'):
        logging.error("missing guest arch tag in the capabilities XML")

    return None
Exemple #31
0
def main():
    """Validate the runtime environment and run vdsm.

    Fatal validation errors are logged to syslog and re-raised so they
    are visible when run from a shell.
    """
    try:
        # environment preconditions; each raises FatalError on failure
        # (names are class-private, so they are resolved via name mangling)
        __assertSingleInstance()
        __assertVdsmUser()
        __assertVdsmHome()
        __assertLogPermission()
        __assertSudoerPermissions()

        if not config.getboolean('vars', 'core_dump_enable'):
            # disable core dumps unless explicitly requested
            resource.setrlimit(resource.RLIMIT_CORE, (0, 0))

        run()
    except FatalError as e:
        syslog.syslog("VDSM failed to start: %s" % e)
        # Make it easy to debug via the shell
        raise
Exemple #32
0
 def start(self):
     """Begin periodic QEMU-GA polling unless disabled by configuration."""
     if not config.getboolean('guest_agent', 'enable_qga_poller'):
         self.log.info('Not starting QEMU-GA poller. It is disabled in'
                       ' configuration')
         return
     period = config.getint('guest_agent', 'qga_polling_period')
     self._operation = periodic.Operation(
         self._poller,
         period,
         self._scheduler,
         timeout=_TASK_TIMEOUT,
         executor=self._executor,
         exclusive=True)
     self.log.info("Starting QEMU-GA poller")
     self._executor.start()
     self._operation.start()
Exemple #33
0
def calculate_required_allocation(cif, vm_hugepages, vm_hugepagesz):
    """Return how many hugepages must be newly allocated for a VM.

    Args:
        cif: The ClientIF instance. Used as we need to iterate over VMs
            to reason about hugepages consumed by them.
        vm_hugepages: The number of hugepages the VM requires.
        vm_hugepagesz: The VM's hugepage size.

    Handling concurrency is the caller's responsibility.

    Returns:
        The number of hugepages to allocate, considering the system
        resources at our disposal.
    """
    # Hugepages of size 0 do not exist; 0 (False) acts as a marker for
    # disabled hugepages, in which case nothing is allocated.
    if vm_hugepagesz == 0:
        return 0

    if not config.getboolean('performance', 'use_preallocated_hugepages'):
        return vm_hugepages

    vm_total = _all_vm_hugepages(cif, vm_hugepages, vm_hugepagesz)
    pool = state()
    free = int(pool[vm_hugepagesz]['free_hugepages'])
    total = int(pool[vm_hugepagesz]['nr_hugepages'])

    # Free hugepages that are really available, i.e. outside the
    # reserved zone. Some of the pages may not be deallocated later;
    # that is acceptable since we only adjust to user's configuration.
    usable = min(free, total - vm_total - _reserved_hugepages(vm_hugepagesz))
    usable = max(usable, 0)  # clamp to non-negative

    # Whatever does not fit in the usable pool must be allocated.
    return max(vm_hugepages - usable, 0)
Exemple #34
0
    def _setExtSharedState(self):
        # tobool cannot be used here: "shared" may carry several values
        # (e.g. none, exclusive) that it would all collapse to False.
        value = str(getattr(self, "shared", "false")).lower()

        # Legacy boolean values (true/false) are still accepted.
        if value == 'true':
            self.extSharedState = DRIVE_SHARED_TYPE.SHARED
            return
        if value == 'false':
            use_leases = config.getboolean('irs', 'use_volume_leases')
            self.extSharedState = (DRIVE_SHARED_TYPE.EXCLUSIVE
                                   if use_leases
                                   else DRIVE_SHARED_TYPE.NONE)
            return
        if value in DRIVE_SHARED_TYPE.getAllValues():
            self.extSharedState = value
            return
        raise ValueError("Unknown shared value %s" % value)
Exemple #35
0
    def _isSslConflict(self):
        """
        return True if libvirt configuration files match ssl configuration of
        vdsm.conf.
        """
        # Learn whether vdsm itself has ssl enabled.
        config.read(self._getFile('VDSM_CONF'))
        ssl = config.getboolean('vars', 'ssl')

        # Parse libvirtd.conf with stock-install defaults.
        lconf_p = ParserWrapper({
            'listen_tcp': '0',
            'auth_tcp': 'sasl',
        })
        lconf_p.read(self._getFile('LCONF'))
        listen_tcp = lconf_p.getint('listen_tcp')
        auth_tcp = lconf_p.get('auth_tcp')
        # Parse qemu.conf for the spice TLS flag.
        qconf_p = ParserWrapper({'spice_tls': '0'})
        qconf_p.read(self._getFile('QCONF'))
        spice_tls = qconf_p.getboolean('spice_tls')
        ret = True
        if ssl:
            # NOTE(review): the success condition requires all three
            # settings to differ from the insecure values at once —
            # confirm it matches the remediation message below.
            if listen_tcp != 1 and auth_tcp != '"none"' and spice_tls != 0:
                sys.stdout.write(
                    "SUCCESS: ssl configured to true. No conflicts\n")
            else:
                sys.stdout.write(
                    "FAILED: "
                    "conflicting vdsm and libvirt-qemu tls configuration.\n"
                    "vdsm.conf with ssl=True "
                    "requires the following changes:\n"
                    "libvirtd.conf: listen_tcp=0, auth_tcp=\"sasl\", \n"
                    "qemu.conf: spice_tls=1.\n")
                ret = False
        else:
            if listen_tcp == 1 and auth_tcp == '"none"' and spice_tls == 0:
                sys.stdout.write(
                    "SUCCESS: ssl configured to false. No conflicts.\n")
            else:
                sys.stdout.write(
                    "FAILED: "
                    "conflicting vdsm and libvirt-qemu tls configuration.\n"
                    "vdsm.conf with ssl=False "
                    "requires the following changes:\n"
                    "libvirtd.conf: listen_tcp=1, auth_tcp=\"none\", \n"
                    "qemu.conf: spice_tls=0.\n")
                ret = False
        return ret
Exemple #36
0
    def _startUnderlyingMigration(self, startTime):
        """Perform the actual migration, or the memory save when
        hibernating.

        Hibernation saves the domain to the prepared volume; live
        migration first creates the VM on the destination, then starts
        libvirt migration towards it while monitor/downtime threads run.
        """
        if self.hibernating:
            # Let hooks act on the domain XML before saving.
            hooks.before_vm_hibernate(self._vm._dom.XMLDesc(0), self._vm.conf)
            try:
                # Stats collection must not run while the domain is saved.
                self._vm._vmStats.pause()
                fname = self._vm.cif.prepareVolumePath(self._dst)
                try:
                    self._vm._dom.save(fname)
                finally:
                    # Always tear down the volume, even if save failed.
                    self._vm.cif.teardownVolumePath(self._dst)
            except Exception:
                # Resume stats before propagating the failure.
                self._vm._vmStats.cont()
                raise
        else:
            # Per-device and per-VM migration source hooks run first.
            for dev in self._vm._customDevices():
                hooks.before_device_migrate_source(
                    dev._deviceXML, self._vm.conf, dev.custom)
            hooks.before_vm_migrate_source(self._vm._dom.XMLDesc(0),
                                           self._vm.conf)
            # Ask the destination host to create the incoming VM.
            response = self.destServer.migrationCreate(self._machineParams)
            if response['status']['code']:
                self.status = response
                raise RuntimeError('migration destination error: ' +
                                   response['status']['message'])
            if config.getboolean('vars', 'ssl'):
                transport = 'tls'
            else:
                transport = 'tcp'
            # duri: libvirt connection URI; muri: qemu migration URI.
            duri = 'qemu+%s://%s/system' % (transport, self.remoteHost)
            if self._vm.conf['_migrationParams']['dstqemu']:
                muri = 'tcp://%s' % \
                       self._vm.conf['_migrationParams']['dstqemu']
            else:
                muri = 'tcp://%s' % self.remoteHost

            self._vm.log.debug('starting migration to %s '
                               'with miguri %s', duri, muri)

            # Downtime thread tunes the allowed downtime while the
            # monitor thread tracks migration progress.
            t = DowntimeThread(self._vm, int(self._downtime))
            self._monitorThread = MonitorThread(self._vm, startTime)
            with utils.running(self._monitorThread):
                try:
                    self._perform_migration(duri, muri)
                finally:
                    t.cancel()
Exemple #37
0
    def start(self):
        """Create and start the periodic QEMU-GA polling operations,
        unless the poller is disabled in configuration."""
        if not config.getboolean('guest_agent', 'enable_qga_poller'):
            self.log.info('Not starting QEMU-GA poller. It is disabled in'
                          ' configuration')
            return

        def vm_check(job, period):
            # Dispatch the per-VM job over all VMs on every period.
            dispatcher = periodic.VmDispatcher(
                self._cif.getVMs, self._executor,
                lambda vm: job(vm, self),
                _TASK_TIMEOUT)
            return periodic.Operation(
                dispatcher, period, self._scheduler, timeout=_TASK_TIMEOUT,
                executor=self._executor)

        self._operations = [
            periodic.Operation(
                self._cleanup,
                config.getint('guest_agent', 'cleanup_period'),
                self._scheduler, executor=self._executor),

            # Monitor what QEMU-GA offers
            vm_check(CapabilityCheck,
                     config.getint('guest_agent', 'qga_info_period')),

            # Basic system information
            vm_check(SystemInfoCheck,
                     config.getint('guest_agent', 'qga_sysinfo_period')),
            vm_check(NetworkInterfacesCheck,
                     config.getint('guest_agent', 'qga_sysinfo_period')),

            # List of active users
            vm_check(ActiveUsersCheck,
                     config.getint('guest_agent', 'qga_active_users_period')),
        ]

        self.log.info("Starting QEMU-GA poller")
        self._executor.start()
        for operation in self._operations:
            operation.start()
Exemple #38
0
 def _prepareJSONRPCServer(self):
     """Create the JSON-RPC binding and register its stomp detector,
     when JSON-RPC is enabled in the configuration."""
     if not config.getboolean('vars', 'jsonrpc_enable'):
         return
     try:
         from vdsm.rpc import Bridge
         from vdsm.rpc.bindingjsonrpc import BindingJsonRpc
         from yajsonrpc.stompserver import StompDetector
     except ImportError:
         self.log.warn('Unable to load the json rpc server module. '
                       'Please make sure it is installed.')
         return
     json_binding = BindingJsonRpc(
         Bridge.DynamicBridge(), self._subscriptions,
         config.getint('vars', 'connection_stats_timeout'),
         self._scheduler, self)
     self.servers['jsonrpc'] = json_binding
     self._acceptor.add_detector(StompDetector(json_binding))
Exemple #39
0
    def _connectToBroker(self):
        """Connect to the message broker (optionally over ssl) and
        subscribe a StompRpcServer for each configured request queue.
        Does nothing when broker support is disabled."""
        if config.getboolean('vars', 'broker_enable'):
            broker_address = config.get('addresses', 'broker_address')
            broker_port = config.getint('addresses', 'broker_port')
            request_queues = config.get('addresses', 'request_queues')

            # Wrap the plain socket in ssl only when a context exists.
            sslctx = sslutils.create_ssl_context()
            sock = socket.socket()
            sock.connect((broker_address, broker_port))
            if sslctx:
                sock = sslctx.wrapSocket(sock)

            self._broker_client = StompClient(sock, self._reactor)
            # One subscription per comma-separated request queue.
            for destination in request_queues.split(","):
                self._subscriptions[destination] = StompRpcServer(
                    self.servers['jsonrpc'].server, self._broker_client,
                    destination, broker_address,
                    config.getint('vars', 'connection_stats_timeout'), self)
Exemple #40
0
def configure():
    """Write the vdsm-managed configuration files.

    On oVirt Node the host must already be registered (its vdsm
    certificate present), otherwise InvalidRun is raised.
    """
    if utils.isOvirtNode():
        if not os.path.exists(constants.P_VDSM_CERT):
            raise InvalidRun("vdsm: Missing certificate, vdsm not registered")
        validate_ovirt_certs()

    # Remove a previous configuration (if present)
    removeConf()

    vdsmConfiguration = {
        'ssl_enabled': config.getboolean('vars', 'ssl'),
        'sanlock_enabled': constants.SANLOCK_ENABLED,
        'libvirt_selinux': constants.LIBVIRT_SELINUX
    }

    # Write configuration. Only each entry's configure callback is
    # needed; the file-name key was unused, so iterate values directly.
    for content in FILES.values():
        content['configure'](content, vdsmConfiguration)
Exemple #41
0
 def __init__(self, cif):
     """Initialize the KSM monitor thread.

     The thread is started only when ksm_monitor_thread is enabled in
     the configuration and the ksmd kernel thread can be located.
     """
     threading.Thread.__init__(self, name='KsmMonitor')
     self.setDaemon(True)
     self._cif = cif
     # state/pages hold the last sampled KSM status.
     self.state, self.pages = False, 0
     self._lock = threading.Lock()
     if config.getboolean('ksm', 'ksm_monitor_thread'):
         # Locate the ksmd kernel thread to monitor its activity.
         pids = utils.execCmd([constants.EXT_PGREP, '-xf', 'ksmd'],
                              raw=False)[1]
         if pids:
             self._pid = pids[0].strip()
             self._cif.log.info(
                 'starting ksm monitor thread, ksm pid is %s', self._pid)
             self.start()
         else:
             self._cif.log.error('failed to find ksmd thread')
     else:
         self._cif.log.info('ksm monitor thread disabled, not starting')
     self.cpuUsage = 0
Exemple #42
0
    def _wait_for_events(self):
        """
        Wait until an event is received in the event block, or the monitor
        interval has passed.

        With the default monitor and event intervals, we expect to check
        event 3 times between mail checks.

        check mail   |---------------------|-------------------|
        check event       |     |     |        |     |     |

        With this configuration we run 3 event checks per 2 seconds,
        which is expected to consume less than 1% cpu.
        """
        # Without event support, just sleep the whole monitor interval.
        if not config.getboolean("mailbox", "events_enable"):
            time.sleep(self._monitorInterval)
            return

        now = time.monotonic()
        deadline = now + self._monitorInterval

        while now < deadline:
            remaining = deadline - now
            if remaining <= self._eventInterval:
                # The last interval before checking mail.
                time.sleep(remaining)
                return

            time.sleep(self._eventInterval)

            try:
                event = self._read_event()
            except ReadEventError as e:
                # A single bad read should not abort the wait loop.
                self.log.warning("Error reading event block: %s", e)
            else:
                if event != self._last_event:
                    # A new event ends the wait early so mail is
                    # checked promptly.
                    self.log.debug("Received event: %s", event)
                    self._last_event = event
                    return

            now = time.monotonic()
Exemple #43
0
 def _loadBindingJsonRpc(self):
     """Create the JSON-RPC binding listening on tcp, amqp and stomp."""
     from BindingJsonRpc import BindingJsonRpc
     from Bridge import DynamicBridge
     ip = config.get('addresses', 'management_ip')
     port = config.getint('addresses', 'json_port')
     if config.getboolean('vars', 'ssl'):
         truststore_path = config.get('vars', 'trust_store_path')
     else:
         truststore_path = None
     # TODO: update config.py
     conf = [
         ('tcp', {"ip": ip, "port": port}),
         ('amqp', {"ip": ip, "port": 5672}),
         ('stomp', {"ip": ip, "port": 61613}),
     ]
     self.bindings['json'] = BindingJsonRpc(DynamicBridge(), conf,
                                            truststore_path)
Exemple #44
0
def install_manhole(locals):
    """Install a manhole debugging shell exposing *locals*, when
    enabled in the devel configuration."""
    if not config.getboolean('devel', 'manhole_enable'):
        return

    import manhole  # pylint: disable=import-error

    socket_path = os.path.join(constants.P_VDSM_RUN, 'vdsmd.manhole')
    manhole.install(
        # Set the locals in the manhole shell.
        locals=locals,
        # Create a secure and easy to use manhole socket, instead of
        # /tmp/manhole-<vdsm-pid>.
        socket_path=socket_path,
        # Ensure that the manhole connection thread will not block
        # shutdown.
        daemon_connection=True,
        # Avoid creation of a manhole thread in the child process after
        # fork.
        patch_fork=False,
        # Avoid pointless modification of the process signal mask if the
        # signalfd module is available.
        sigmask=None,
        # Python already prints ignored exceptions to stderr.
        redirect_stderr=False)
Exemple #45
0
def configure():
    """Write vdsm configuration files and make sure the 1G hugepages
    mount point is active."""
    # Remove a previous configuration (if present)
    confutils.remove_conf(FILES, CONF_VERSION)

    vdsmConfiguration = {
        'ssl_enabled': config.getboolean('vars', 'ssl'),
        'sanlock_enabled': constants.SANLOCK_ENABLED,
        'libvirt_selinux': constants.LIBVIRT_SELINUX
    }

    # Write configuration. Only each entry's configure callback is
    # needed; the file-name key was unused, so iterate values directly.
    for content in FILES.values():
        content['configure'](content, CONF_VERSION, vdsmConfiguration)

    # Enable and activate the dev-hugepages1G mount path.
    if not _is_hugetlbfs_1g_mounted():
        try:
            service.service_start('dev-hugepages1G.mount')
        except service.ServiceOperationError:
            status = service.service_status('dev-hugepages1G.mount', False)
            # NOTE(review): re-raises only when the status check returns
            # 0 — confirm this is the intended condition.
            if status == 0:
                raise
Exemple #46
0
def _isSslConflict():
    """
    return True if libvirt configuration files match ssl configuration of
    vdsm.conf.
    """
    ssl = config.getboolean('vars', 'ssl')
    cfg = _read_libvirt_connection_config()

    if ssl:
        matches = (cfg.listen_tls != 0 and cfg.listen_tcp != 1
                   and cfg.auth_tcp != '"none"' and cfg.spice_tls != 0)
        if matches:
            sys.stdout.write("SUCCESS: ssl configured to true. No conflicts\n")
            return True
        sys.stdout.write(
            "FAILED: "
            "conflicting vdsm and libvirt-qemu tls configuration.\n"
            "vdsm.conf with ssl=True "
            "requires the following changes:\n"
            "libvirtd.conf: listen_tcp=0, auth_tcp=\"sasl\", "
            "listen_tls=1\nqemu.conf: spice_tls=1.\n")
        return False

    matches = (cfg.listen_tls == 0 and cfg.listen_tcp == 1
               and cfg.auth_tcp == '"none"' and cfg.spice_tls == 0)
    if matches:
        sys.stdout.write(
            "SUCCESS: ssl configured to false. No conflicts.\n")
        return True
    sys.stdout.write(
        "FAILED: "
        "conflicting vdsm and libvirt-qemu tls configuration.\n"
        "vdsm.conf with ssl=False "
        "requires the following changes:\n"
        "libvirtd.conf: listen_tcp=1, auth_tcp=\"none\", "
        "listen_tls=0\n qemu.conf: spice_tls=0.\n")
    return False
Exemple #47
0
    def _startUnderlyingMigration(self, startTime, machineParams):
        """Perform the actual migration, or hibernation.

        For live migration the destination VM is created first, legacy
        payload device paths are resolved for old destinations, and the
        transfer then follows the convergence schedule.
        """
        if self.hibernating:
            self._switch_state(State.STARTED)
            self._vm.hibernate(self._dst)
        else:
            self._vm.prepare_migration()
            self._switch_state(State.PREPARED)

            # Do not measure the time spent for creating the VM on the
            # destination. In some cases some expensive operations can cause
            # the migration to get cancelled right after the transfer started.
            destCreateStartTime = time.time()
            result = self._destServer.migrationCreate(machineParams,
                                                      self._incomingLimit)
            destCreationTime = time.time() - destCreateStartTime
            # Shift startTime so the elapsed-time measurement below
            # excludes destination VM creation.
            startTime += destCreationTime
            self.log.info('Creation of destination VM took: %d seconds',
                          destCreationTime)

            if response.is_error(result):
                self.status = result
                if response.is_error(result, 'migrateLimit'):
                    raise MigrationLimitExceeded()
                else:
                    raise MigrationDestinationSetupError(
                        'migration destination error: ' +
                        result['status']['message'])

            self._switch_state(State.STARTED)

            # REQUIRED_FOR: destination Vdsm < 4.3
            if not self._vm.min_cluster_version(4, 3):
                payload_drives = self._vm.payload_drives()
                if payload_drives:
                    # Currently, only a single payload device may be present
                    payload_alias = payload_drives[0].alias
                    result = self._destServer.fullList(vmList=(self._vm.id, ))
                    vm_list = result.get('items')
                    remote_devices = vm_list[0].get('devices')
                    if remote_devices is not None:
                        # Match the destination payload device by alias
                        # to learn its path.
                        payload_path = next(
                            (d['path'] for d in remote_devices
                             if d.get('alias') == payload_alias), None)
                        if payload_path is not None:
                            self._legacy_payload_path = \
                                (payload_alias, payload_path)

            if config.getboolean('vars', 'ssl'):
                transport = 'tls'
            else:
                transport = 'tcp'
            # duri: libvirt connection URI; muri: qemu migration URI.
            duri = 'qemu+{}://{}/system'.format(
                transport, normalize_literal_addr(self.remoteHost))
            dstqemu = self._dstqemu
            if dstqemu:
                muri = 'tcp://{}'.format(normalize_literal_addr(dstqemu))
            else:
                muri = 'tcp://{}'.format(
                    normalize_literal_addr(self.remoteHost))

            self._vm.log.info('starting migration to %s '
                              'with miguri %s', duri, muri)
            self._monitorThread = MonitorThread(self._vm, startTime,
                                                self._convergence_schedule)
            self._perform_with_conv_schedule(duri, muri)
            # Adding destCreationTime back reports the total time since
            # the original startTime, including destination creation.
            self.log.info("migration took %d seconds to complete",
                          (time.time() - startTime) + destCreationTime)
Exemple #48
0
def is_enabled():
    """Tell whether memory profiling is enabled in the devel config."""
    enabled = config.getboolean('devel', 'memory_profile_enable')
    return enabled
Exemple #49
0
import time

from vdsm import hugepages
from vdsm import numa
from vdsm import utils
import vdsm.common.time
from vdsm.common.units import KiB, MiB
from vdsm.config import config
from vdsm.constants import P_VDSM_RUN
from vdsm.host import api as hostapi
from vdsm.virt.utils import ExpiringCache

# Sysfs switch controlling transparent hugepages; older Red Hat kernels
# expose it under the redhat_-prefixed path, so fall back to that one.
_THP_STATE_PATH = '/sys/kernel/mm/transparent_hugepage/enabled'
if not os.path.exists(_THP_STATE_PATH):
    _THP_STATE_PATH = '/sys/kernel/mm/redhat_transparent_hugepage/enabled'
# Config flags read once at import time.
_METRICS_ENABLED = config.getboolean('metrics', 'enabled')
_NOWAIT_ENABLED = config.getboolean('vars', 'nowait_domain_stats')


class TotalCpuSample(object):
    """
    A sample of total CPU consumption.

    The sample is taken at initialization time and can't be updated.
    """
    def __init__(self):
        # First line of /proc/stat: "cpu user nice system idle ..."
        with open('/proc/stat') as stat:
            fields = stat.readline().split()
        user, nice, system, idle = (int(v) for v in fields[1:5])
        # Nice time is accounted as user time.
        self.user = user + nice
        self.sys = system
        self.idle = idle
Exemple #50
0
    def getXML(self):
        """
        Create domxml for a graphics framebuffer.

        <graphics type='spice' port='5900' tlsPort='5901' autoport='yes'
                  listen='0' keymap='en-us'
                  passwdValidTo='1970-01-01T00:00:01'>
          <listen type='address' address='0'/>
          <clipboard copypaste='no'/>
        </graphics>
        OR
        <graphics type='vnc' port='5900' autoport='yes' listen='0'
                  keymap='en-us' passwdValidTo='1970-01-01T00:00:01'>
          <listen type='address' address='0'/>
        </graphics>

        """

        graphicsAttrs = {
            'type': self.device,
            'port': self.port,
            'autoport': 'yes',
        }
        # Force the secure display mode when vdsm itself uses ssl.
        if config.getboolean('vars', 'ssl'):
            graphicsAttrs['defaultMode'] = 'secure'
        # the default, 'any', has automatic fallback to
        # insecure mode, so works with ssl off.

        if self.device == 'spice':
            # Spice uses an additional TLS port beside the plain one.
            graphicsAttrs['tlsPort'] = self.tlsPort

        self._setPasswd(graphicsAttrs)

        if 'keyMap' in self.specParams:
            graphicsAttrs['keymap'] = self.specParams['keyMap']

        graphics = vmxml.Element('graphics', **graphicsAttrs)

        # Copy/paste and file transfer default to enabled; the disabling
        # sub-elements are emitted only when explicitly turned off.
        if not conv.tobool(self.specParams.get('copyPasteEnable', True)):
            clipboard = vmxml.Element('clipboard', copypaste='no')
            graphics.appendChild(clipboard)

        if not conv.tobool(self.specParams.get('fileTransferEnable', True)):
            filetransfer = vmxml.Element('filetransfer', enable='no')
            graphics.appendChild(filetransfer)

        # This list could be dropped in 4.1. We should keep only
        # the default mode, which is both simpler and safer.
        if (self.device == 'spice'
                and 'spiceSecureChannels' in self.specParams):
            for chan in self._getSpiceChannels():
                graphics.appendChildWithArgs('channel',
                                             name=chan,
                                             mode='secure')

        display_network = self.specParams.get('displayNetwork')
        if display_network:
            graphics.appendChildWithArgs(
                'listen',
                type='network',
                network=libvirtnetwork.netname_o2l(display_network))
        else:
            # No display network configured: listen on address '0'.
            graphics.setAttrs(listen='0')

        return graphics
Exemple #51
0
def serve_clients(log):
    """Run the vdsm main loop.

    Starts storage (IRS), the scheduler, the client interface and the
    periodic operations, then waits for a termination signal and shuts
    everything down in reverse order.
    """
    cif = None
    irs = None
    scheduler = None
    # Mutable cell so the signal handler closure can flip the flag.
    running = [True]

    def sigtermHandler(signum, frame):
        log.info("Received signal %s, shutting down" % signum)
        running[0] = False

    def sigusr1Handler(signum, frame):
        # SIGUSR1 asks this host to give up the SPM role.
        if irs:
            log.info("Received signal %s, stopping SPM" % signum)
            # pylint: disable=no-member
            # TODO remove when side effect removed from HSM.__init__ and
            # initialize it in line #63
            irs.spmStop(irs.getConnectedStoragePoolsList()['poollist'][0])

    sigutils.register()
    signal.signal(signal.SIGTERM, sigtermHandler)
    signal.signal(signal.SIGUSR1, sigusr1Handler)
    zombiereaper.registerSignalHandler()

    profile.start()
    metrics.start()

    libvirtconnection.start_event_loop()

    try:
        if config.getboolean('irs', 'irs_enable'):
            try:
                irs = Dispatcher(HSM())
            # NOTE(review): bare except also catches SystemExit and
            # KeyboardInterrupt; consider `except Exception`.
            except:
                panic("Error initializing IRS")

        scheduler = schedule.Scheduler(name="vdsm.Scheduler",
                                       clock=time.monotonic_time)
        scheduler.start()

        from vdsm.clientIF import clientIF  # must import after config is read
        cif = clientIF.getInstance(irs, log, scheduler)

        jobs.start(scheduler, cif)

        install_manhole({'irs': irs, 'cif': cif})

        cif.start()

        init_unprivileged_network_components(cif)

        periodic.start(cif, scheduler)
        health.start()
        try:
            # Block until a signal flips the running flag.
            while running[0]:
                sigutils.wait_for_signal()

            profile.stop()
        finally:
            # Shutdown in reverse order of startup.
            metrics.stop()
            health.stop()
            periodic.stop()
            cif.prepareForShutdown()
            jobs.stop()
            scheduler.stop()
    finally:
        libvirtconnection.stop_event_loop(wait=False)
Exemple #52
0
import time

from vdsm import numa
from vdsm import utils
from vdsm.config import config
from vdsm.constants import P_VDSM_RUN, P_VDSM_CLIENT_LOG
from vdsm.host import api as hostapi
from vdsm.network import ipwrapper
from vdsm.network.netinfo import nics, bonding, vlans
from vdsm.virt import vmstats
from vdsm.virt.utils import ExpiringCache

# Sysfs switch controlling transparent hugepages; older Red Hat kernels
# expose it under the redhat_-prefixed path, so fall back to that one.
_THP_STATE_PATH = '/sys/kernel/mm/transparent_hugepage/enabled'
if not os.path.exists(_THP_STATE_PATH):
    _THP_STATE_PATH = '/sys/kernel/mm/redhat_transparent_hugepage/enabled'
# Metrics flag read once at import time.
_METRICS_ENABLED = config.getboolean('metrics', 'enabled')


class InterfaceSample(object):
    """
    A network interface sample.

    The sample is set at the time of initialization and can't be updated.
    """
    def readIfaceStat(self, ifid, stat):
        """
        Get and interface's stat.

        .. note::
            Really ugly implementation; from time to time, Linux returns an
            empty line. TODO: understand why this happens and fix it!
Exemple #53
0
def get():
    """Collect and return the host capabilities dictionary.

    Gathers CPU topology and flags, OS/package information, networking,
    storage and virtualization features as reported to Engine.
    """
    numa.update()
    caps = {}
    cpu_topology = numa.cpu_topology()

    caps['kvmEnabled'] = str(os.path.exists('/dev/kvm')).lower()

    if config.getboolean('vars', 'report_host_threads_as_cores'):
        caps['cpuCores'] = str(cpu_topology.threads)
    else:
        caps['cpuCores'] = str(cpu_topology.cores)

    caps['cpuThreads'] = str(cpu_topology.threads)
    caps['cpuSockets'] = str(cpu_topology.sockets)
    caps['onlineCpus'] = ','.join(
        [str(cpu_id) for cpu_id in cpu_topology.online_cpus])

    caps['cpuTopology'] = [{
        'cpu_id': cpu.cpu_id,
        'numa_cell_id': cpu.numa_cell_id,
        'socket_id': cpu.socket_id,
        'die_id': cpu.die_id,
        'core_id': cpu.core_id,
    } for cpu in numa.cpu_info()]

    caps['cpuSpeed'] = cpuinfo.frequency()
    caps['cpuModel'] = cpuinfo.model()
    caps['cpuFlags'] = ','.join(_getFlagsAndFeatures())
    caps['vdsmToCpusAffinity'] = list(taskset.get(os.getpid()))

    caps.update(dsaversion.version_info())

    proxy = supervdsm.getProxy()
    net_caps = proxy.network_caps()
    caps.update(net_caps)
    caps['ovnConfigured'] = proxy.is_ovn_configured()

    try:
        caps['hooks'] = hooks.installed()
    except Exception:
        # Hooks are optional and best-effort. Narrowed from a bare
        # except, which would also swallow SystemExit/KeyboardInterrupt.
        logging.debug('not reporting hooks', exc_info=True)

    caps['operatingSystem'] = osinfo.version()
    caps['uuid'] = host.uuid()
    caps['packages2'] = osinfo.package_versions()
    caps['realtimeKernel'] = osinfo.runtime_kernel_flags().realtime
    caps['kernelArgs'] = osinfo.kernel_args()
    caps['nestedVirtualization'] = osinfo.nested_virtualization().enabled
    caps['emulatedMachines'] = machinetype.emulated_machines(
        cpuarch.effective())
    caps['ISCSIInitiatorName'] = _getIscsiIniName()
    caps['HBAInventory'] = hba.HBAInventory()
    caps['vmTypes'] = ['kvm']

    caps['memSize'] = str(utils.readMemInfo()['MemTotal'] // 1024)
    caps['reservedMem'] = str(
        config.getint('vars', 'host_mem_reserve') +
        config.getint('vars', 'extra_mem_reserve'))
    caps['guestOverhead'] = config.get('vars', 'guest_ram_overhead')

    caps['rngSources'] = rngsources.list_available()

    caps['numaNodes'] = dict(numa.topology())
    caps['numaNodeDistance'] = dict(numa.distances())
    caps['autoNumaBalancing'] = numa.autonuma_status()

    caps['selinux'] = osinfo.selinux_status()

    caps['liveSnapshot'] = 'true'
    caps['liveMerge'] = 'true'
    caps['kdumpStatus'] = osinfo.kdump_status()
    caps["deferred_preallocation"] = True

    caps['hostdevPassthrough'] = str(hostdev.is_supported()).lower()
    # TODO This needs to be removed after adding engine side support
    # and adding gdeploy support to enable libgfapi on RHHI by default
    caps['additionalFeatures'] = ['libgfapi_supported']
    if osinfo.glusterEnabled:
        from vdsm.gluster.api import glusterAdditionalFeatures
        caps['additionalFeatures'].extend(glusterAdditionalFeatures())
    caps['hostedEngineDeployed'] = _isHostedEngineDeployed()
    caps['hugepages'] = hugepages.supported()
    caps['kernelFeatures'] = osinfo.kernel_features()
    caps['vncEncrypted'] = _isVncEncrypted()
    caps['backupEnabled'] = True
    caps['coldBackupEnabled'] = True
    caps['clearBitmapsEnabled'] = True
    caps['fipsEnabled'] = _getFipsEnabled()
    try:
        caps['boot_uuid'] = osinfo.boot_uuid()
    except Exception:
        logging.exception("Can not find boot uuid")
    caps['tscFrequency'] = _getTscFrequency()
    caps['tscScaling'] = _getTscScaling()

    try:
        caps["connector_info"] = managedvolume.connector_info()
    except se.ManagedVolumeNotSupported as e:
        logging.info("managedvolume not supported: %s", e)
    except se.ManagedVolumeHelperFailed as e:
        logging.exception("Error getting managedvolume connector info: %s", e)

    # Which domain versions are supported by this host.
    caps["domain_versions"] = sc.DOMAIN_VERSIONS

    caps["supported_block_size"] = backends.supported_block_size()
    caps["cd_change_pdiv"] = True
    caps["refresh_disk_supported"] = True

    return caps
Exemple #54
0
def _certsExist():
    """Tell whether the host certificate is in place, or is not needed
    because ssl is disabled in vdsm.conf."""
    config.read(os.path.join(SYSCONF_PATH, 'vdsm/vdsm.conf'))
    if not config.getboolean('vars', 'ssl'):
        return True
    return os.path.isfile(CERT_FILE)
Exemple #55
0
def _certsExist():
    """Tell whether the host certificate exists, or is not required
    because ssl is disabled."""
    ssl_enabled = config.getboolean('vars', 'ssl')
    return os.path.isfile(CERT_FILE) if ssl_enabled else True
Exemple #56
0
    # Parse command line options; -h/--help and -t/--test exit early.
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'ht', ['help', 'test'])
    except getopt.GetoptError as err:
        print(str(err))
        _usage()
        sys.exit(1)

    for option, _ in opts:
        if option in ('-h', '--help'):
            _usage()
            sys.exit()
        elif option in ('-t', '--test'):
            _test()
            sys.exit()

    fake_kvm_support = config.getboolean('vars', 'fake_kvm_support')
    fake_kvm_arch = config.get('vars', 'fake_kvm_architecture')

    if fake_kvm_support:
        # Why here? So anyone can run -t and -h without setting the path.
        try:
            import hooking
        except ImportError:
            print('Could not import hooking module. You should only run this '
                  'script directly with option specified.')
            _usage()
            sys.exit(1)

        # Rewrite the capabilities JSON passed by vdsm so they pretend
        # the configured fake architecture.
        caps = hooking.read_json()
        _fake_caps_arch(caps, fake_kvm_arch)
        hooking.write_json(caps)
Exemple #57
0
def _create(cif, scheduler):
    """Assemble the periodic operations that monitor this host's VMs.

    Returns a list of Operation instances; the sampling-related
    operations are included only when sampling is enabled in the
    configuration.
    """

    def _dispatched_op(func, period):
        # VmDispatcher fans the call out per-VM on _executor, so work
        # that may block (storage access, QEMU monitor) cannot stall
        # the periodic scheduler itself.
        dispatcher = VmDispatcher(
            cif.getVMs, _executor, func, _timeout_from(period))
        return Operation(dispatcher, period, scheduler)

    operations = [
        # Updating volume stats needs storage access, thus can block:
        # it must be dispatched.
        _dispatched_op(
            UpdateVolumes,
            config.getint('irs', 'vol_size_sample_interval')),

        # Job monitoring needs QEMU monitor access: dispatch it too.
        _dispatched_op(
            BlockjobMonitor,
            config.getint('vars', 'vm_sample_jobs_interval')),

        # Interim solution until we get high water mark notifications
        # from QEMU; touches storage and/or the QEMU monitor, so it
        # needs dispatching as well.
        _dispatched_op(
            DriveWatermarkMonitor,
            config.getint('vars', 'vm_watermark_interval')),

        Operation(
            lambda: recovery.lookup_external_vms(cif),
            config.getint('sampling', 'external_vm_lookup_interval'),
            scheduler,
            exclusive=True,
            discard=False),

        Operation(
            lambda: _kill_long_paused_vms(cif),
            config.getint('vars', 'vm_kill_paused_time') // 2,
            scheduler,
            exclusive=True,
            discard=False),

        Operation(
            containersconnection.monitor,
            config.getint('vars', 'vm_sample_interval'),
            scheduler),
    ]

    if config.getboolean('sampling', 'enable'):
        # Bulk-stats sampling can block on libvirt, but unresponsive
        # domains are handled inside VMBulkstatsMonitor for performance
        # reasons, so no dispatching is needed here.
        operations.append(Operation(
            sampling.VMBulkstatsMonitor(
                libvirtconnection.get(cif),
                cif.getVMs,
                sampling.stats_cache),
            config.getint('vars', 'vm_sample_interval'),
            scheduler))

        operations.append(Operation(
            sampling.HostMonitor(cif=cif),
            config.getint('vars', 'host_sample_stats_interval'),
            scheduler,
            timeout=config.getint('vars', 'host_sample_stats_interval'),
            exclusive=True,
            discard=False))

    return operations
Exemple #58
0
def get():
    """Collect and return host capabilities as a dict.

    The result (mostly string values, as expected by the engine) covers
    CPU topology and flags, version info, networking, storage, NUMA
    topology and various feature-support flags.  When fake KVM support
    is configured, synthetic CPU model/flags are reported instead of
    the real ones.

    Raises RuntimeError when fake KVM is enabled on an architecture
    this code cannot fake.
    """
    targetArch = getTargetArch()

    caps = {}

    # Reported as the lowercase strings 'true'/'false', not as a bool.
    caps['kvmEnabled'] = \
        str(config.getboolean('vars', 'fake_kvm_support') or
            os.path.exists('/dev/kvm')).lower()

    cpuInfo = CpuInfo()
    cpuTopology = CpuTopology()
    # Optionally report every hardware thread as a core.
    if config.getboolean('vars', 'report_host_threads_as_cores'):
        caps['cpuCores'] = str(cpuTopology.threads())
    else:
        caps['cpuCores'] = str(cpuTopology.cores())

    caps['cpuThreads'] = str(cpuTopology.threads())
    caps['cpuSockets'] = str(cpuTopology.sockets())
    caps['onlineCpus'] = ','.join(cpuTopology.onlineCpus())
    caps['cpuSpeed'] = cpuInfo.mhz()
    if config.getboolean('vars', 'fake_kvm_support'):
        if targetArch == Architecture.X86_64:
            caps['cpuModel'] = 'Intel(Fake) CPU'

            flagList = ['vmx', 'sse2', 'nx']

            # Mix in the real host flags only when faking the native
            # architecture; foreign-arch flags would make no sense.
            if targetArch == platform.machine():
                flagList += cpuInfo.flags()

            flags = set(flagList)

            caps['cpuFlags'] = ','.join(flags) + ',model_486,model_pentium,' \
                'model_pentium2,model_pentium3,model_pentiumpro,' \
                'model_qemu32,model_coreduo,model_core2duo,model_n270,' \
                'model_Conroe,model_Penryn,model_Nehalem,model_Opteron_G1'
        elif targetArch in Architecture.POWER:
            caps['cpuModel'] = 'POWER 8 (fake)'
            caps['cpuFlags'] = 'powernv,model_power8'
        else:
            raise RuntimeError('Unsupported architecture: %s' % targetArch)
    else:
        caps['cpuModel'] = cpuInfo.model()
        caps['cpuFlags'] = ','.join(cpuInfo.flags() +
                                    _getCompatibleCpuModels())

    caps.update(_getVersionInfo())
    caps.update(netinfo.get())
    _report_legacy_bondings(caps)
    _report_network_qos(caps)

    try:
        caps['hooks'] = hooks.installed()
    except Exception:
        # Hooks are best-effort; failure must not break capability
        # reporting.  (Was a bare 'except:', which also swallowed
        # SystemExit/KeyboardInterrupt.)
        logging.debug('not reporting hooks', exc_info=True)

    caps['operatingSystem'] = osversion()
    caps['uuid'] = utils.getHostUUID()
    caps['packages2'] = _getKeyPackages()
    caps['emulatedMachines'] = _getEmulatedMachines(targetArch)
    caps['ISCSIInitiatorName'] = _getIscsiIniName()
    caps['HBAInventory'] = storage.hba.HBAInventory()
    caps['vmTypes'] = ['kvm']

    # MemTotal is in KiB; report whole MiB.  Floor division avoids a
    # float string (e.g. '16000.5') under Python 3 and is identical to
    # '/' for these ints under Python 2.
    caps['memSize'] = str(utils.readMemInfo()['MemTotal'] // 1024)
    caps['reservedMem'] = str(
        config.getint('vars', 'host_mem_reserve') +
        config.getint('vars', 'extra_mem_reserve'))
    caps['guestOverhead'] = config.get('vars', 'guest_ram_overhead')

    # Verify that our libvirt supports virtio RNG (since 10.0.2-31)
    libvirtVer = LooseVersion('-'.join(
        (caps['packages2']['libvirt']['version'],
         caps['packages2']['libvirt']['release'])))
    requiredVer = LooseVersion('0.10.2-31')

    if libvirtVer >= requiredVer:
        caps['rngSources'] = _getRngSources()
    else:
        logging.debug('VirtioRNG DISABLED: libvirt version %s required >= %s',
                      libvirtVer, requiredVer)

    caps['numaNodes'] = getNumaTopology()
    caps['numaNodeDistance'] = getNumaNodeDistance()
    caps['autoNumaBalancing'] = getAutoNumaBalancingInfo()

    caps['selinux'] = _getSELinux()

    # liveSnapshot is reported only when support could be determined.
    liveSnapSupported = _getLiveSnapshotSupport(targetArch)
    if liveSnapSupported is not None:
        caps['liveSnapshot'] = str(liveSnapSupported).lower()
    caps['liveMerge'] = str(getLiveMergeSupport()).lower()
    caps['kdumpStatus'] = _getKdumpStatus()

    caps['hostdevPassthrough'] = str(_getHostdevPassthorughSupport()).lower()

    return caps
Exemple #59
0
    def _startUnderlyingMigration(self, startTime, machineParams):
        """Kick off the actual VM migration (or hibernation).

        For hibernation the VM state is saved to self._dst.  For a live
        migration the destination VM is first created via the destination
        Vdsm, then the libvirt migration is started and monitored
        according to the convergence schedule.
        """
        if self.hibernating:
            self._started = True
            self._vm.hibernate(self._dst)
        else:
            self._vm.prepare_migration()

            # Do not measure the time spent for creating the VM on the
            # destination. In some cases some expensive operations can cause
            # the migration to get cancelled right after the transfer started.
            destCreateStartTime = time.time()
            result = self._destServer.migrationCreate(machineParams,
                                                      self._incomingLimit)
            destCreationTime = time.time() - destCreateStartTime
            # Shift startTime so destination-creation time is excluded
            # from the measured migration duration.
            startTime += destCreationTime
            self.log.info('Creation of destination VM took: %d seconds',
                          destCreationTime)

            if response.is_error(result):
                self.status = result
                if response.is_error(result, 'migrateLimit'):
                    raise MigrationLimitExceeded()
                else:
                    raise MigrationDestinationSetupError(
                        'migration destination error: ' +
                        result['status']['message'])

            self._started = True

            # REQUIRED_FOR: destination Vdsm < 4.3
            if not self._vm.min_cluster_version(4, 3):
                payload_drives = self._vm.payload_drives()
                if payload_drives:
                    # Currently, only a single payload device may be present
                    payload_alias = payload_drives[0].alias
                    result = self._destServer.fullList(vmList=(self._vm.id, ))
                    vm_list = result.get('items')
                    remote_devices = vm_list[0].get('devices')
                    if remote_devices is not None:
                        payload_path = next(
                            (d['path'] for d in remote_devices
                             if d.get('alias') == payload_alias), None)
                        if payload_path is not None:
                            self._legacy_payload_path = \
                                (payload_alias, payload_path)

            # Libvirt driver URI: use TLS transport when SSL is enabled.
            if config.getboolean('vars', 'ssl'):
                transport = 'tls'
            else:
                transport = 'tcp'
            duri = 'qemu+{}://{}/system'.format(
                transport, normalize_literal_addr(self.remoteHost))

            if self._encrypted:
                # TODO: Stop using host names here and set the host
                # name based certificate verification parameter once
                # the corresponding functionality is available in
                # libvirt, see https://bugzilla.redhat.com/1754533
                #
                # When an encrypted migration is requested, we must
                # use the host name (stored in 'dst') rather than the
                # IP address (stored in 'dstqemu') in order to match
                # the target certificate.  That means that encrypted
                # migrations are incompatible with setups that require
                # an IP address to identify the host properly, such as
                # when a separate migration network should be used or
                # when using IPv4/IPv6 dual stack configurations.
                dstqemu = self.remoteHost
            else:
                dstqemu = self._dstqemu
            if dstqemu:
                muri = 'tcp://{}'.format(normalize_literal_addr(dstqemu))
            else:
                muri = 'tcp://{}'.format(
                    normalize_literal_addr(self.remoteHost))

            self._vm.log.info('starting migration to %s '
                              'with miguri %s', duri, muri)
            self._monitorThread = MonitorThread(self._vm, startTime,
                                                self._convergence_schedule)
            self._perform_with_conv_schedule(duri, muri)
            self.log.info("migration took %d seconds to complete",
                          (time.time() - startTime) + destCreationTime)
Exemple #60
0
    def _startUnderlyingMigration(self, startTime):
        """Start the migration: save to file (hibernate) in 'file' mode,
        otherwise perform a live peer-to-peer migration to the
        destination host via libvirt.
        """
        if self._mode == 'file':
            hooks.before_vm_hibernate(self._vm._dom.XMLDesc(0), self._vm.conf)
            try:
                # Pause stats collection while the domain state is saved.
                self._vm._vmStats.pause()
                fname = self._vm.cif.prepareVolumePath(self._dst)
                try:
                    self._vm._dom.save(fname)
                finally:
                    self._vm.cif.teardownVolumePath(self._dst)
            except Exception:
                # Resume stats on failure so the VM keeps being monitored.
                self._vm._vmStats.cont()
                raise
        else:
            # Run the pre-migration hooks for custom devices and the VM.
            for dev in self._vm._customDevices():
                hooks.before_device_migrate_source(
                    dev._deviceXML, self._vm.conf, dev.custom)
            hooks.before_vm_migrate_source(self._vm._dom.XMLDesc(0),
                                           self._vm.conf)
            response = self.destServer.migrationCreate(self._machineParams)
            if response['status']['code']:
                self.status = response
                raise RuntimeError('migration destination error: ' +
                                   response['status']['message'])
            # Libvirt driver URI: use TLS transport when SSL is enabled.
            if config.getboolean('vars', 'ssl'):
                transport = 'tls'
            else:
                transport = 'tcp'
            duri = 'qemu+%s://%s/system' % (transport, self.remoteHost)
            if self._vm.conf['_migrationParams']['dstqemu']:
                muri = 'tcp://%s' % \
                       self._vm.conf['_migrationParams']['dstqemu']
            else:
                muri = 'tcp://%s' % self.remoteHost

            self._vm.log.debug('starting migration to %s '
                               'with miguri %s', duri, muri)

            t = DowntimeThread(self._vm, int(self._downtime))

            self._monitorThread = MonitorThread(self._vm, startTime)
            self._monitorThread.start()

            try:
                if self._vm.hasSpice and self._vm.conf.get('clientIp'):
                    SPICE_MIGRATION_HANDOVER_TIME = 120
                    self._vm._reviveTicket(SPICE_MIGRATION_HANDOVER_TIME)

                maxBandwidth = config.getint('vars', 'migration_max_bandwidth')
                # FIXME: there still a race here with libvirt,
                # if we call stop() and libvirt migrateToURI2 didn't start
                # we may return migration stop but it will start at libvirt
                # side
                self._preparingMigrationEvt = False
                if not self._migrationCanceledEvt:
                    self._vm._dom.migrateToURI2(
                        duri, muri, None,
                        libvirt.VIR_MIGRATE_LIVE |
                        libvirt.VIR_MIGRATE_PEER2PEER |
                        (libvirt.VIR_MIGRATE_TUNNELLED if
                            self._tunneled else 0) |
                        (libvirt.VIR_MIGRATE_ABORT_ON_ERROR if
                            self._abortOnError else 0),
                        None, maxBandwidth)
                else:
                    self._raiseAbortError()

            finally:
                t.cancel()
                self._monitorThread.stop()