Example #1
def _alloc(count, size, path):
    """Helper to actually (de)allocate hugepages, called by public facing
        methods.

    Args:
        count: Number of hugepages to allocate (can be negative)
        size: The target hugepage size (must be supported by the system)
        path: Path to the hugepages directory.

    Returns: The number of pages allocated (can be negative,
        indicating deallocation).

    Raises:
        NonContiguousMemory: if the kernel could not satisfy the full
            request; any partial allocation is rolled back first.
    """
    if size is None:
        size = DEFAULT_HUGEPAGESIZE[cpuarch.real()]

    path = path.format(size)

    ret = supervdsm.getProxy().hugepages_alloc(count, path)
    if ret != count:
        supervdsm.getProxy().hugepages_alloc(-ret, path)
        raise NonContiguousMemory

    return ret
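For context, a public-facing wrapper would pass the sysfs nr_hugepages path into _alloc. The sketch below is illustrative only; the alloc/dealloc names and the _PATH value are assumptions, not code from the example above.

_PATH = '/sys/kernel/mm/hugepages/hugepages-{}kB/nr_hugepages'

def alloc(count, size=None, path=_PATH):
    # Hypothetical public wrapper; allocates hugepages via the helper.
    return _alloc(count, size, path)

def dealloc(count, size=None, path=_PATH):
    # Hypothetical public wrapper; frees hugepages by allocating a
    # negative count and reporting the freed amount as positive.
    return -(_alloc(-count, size, path))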
Example #2
File: iscsi.py Project: EdDev/vdsm
def setRpFilterIfNeeded(netIfaceName, hostname, loose_mode):
    """
    Set rp_filter to loose or strict mode if there's no session using the
    netIfaceName device and it's not the device used by the OS to reach the
    'hostname'.
    Loose mode is needed to allow multiple iSCSI connections in a
    multiple-NICs-per-subnet configuration. Strict mode is needed to avoid a
    security hole where an untrusted VM could DoS the host by sending it
    packets with spoofed random source addresses.

    Arguments:
        netIfaceName: the device used by the iSCSI session
        hostname: the portal hostname against which the OS routing device
            is checked
        loose_mode: boolean
    """
    if netIfaceName is None:
        log.debug("iface.net_ifacename not provided, skipping rp filter setup")
        return

    sessions = _sessionsUsingNetiface(netIfaceName)

    if not any(sessions) and netIfaceName != getRouteDeviceTo(hostname):
        if loose_mode:
            log.info("Setting loose mode rp_filter for device %r." %
                     netIfaceName)
            supervdsm.getProxy().set_rp_filter_loose(netIfaceName)
        else:
            log.info("Setting strict mode rp_filter for device %r." %
                     netIfaceName)
            supervdsm.getProxy().set_rp_filter_strict(netIfaceName)
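An illustrative call site (the device and portal address are hypothetical): before logging in to a portal over a second NIC on the same subnet, loose mode would be requested.

# Hypothetical usage; 'eth2' and the portal hostname are examples only.
setRpFilterIfNeeded('eth2', 'iscsi-portal.example.com', loose_mode=True)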
Example #3
def reattach_detachable(device_name):
    libvirt_device, device_params = _get_device_ref_and_params(device_name)

    if CAPABILITY_TO_XML_ATTR[device_params['capability']] == 'pci':
        try:
            iommu_group = device_params['iommu_group']
        except KeyError:
            raise NoIOMMUSupportException
        supervdsm.getProxy().rmAppropriateIommuGroup(iommu_group)
        libvirt_device.reAttach()
Example #4
File: hba.py Project: kanalun/vdsm
def rescan():
    """
    Rescan HBAs discovering new devices.
    """
    log.debug("Starting scan")
    try:
        supervdsm.getProxy().hbaRescan()
    except Error as e:
        log.error("Scan failed: %s", e)
    else:
        log.debug("Scan finished")
Example #5
def detach_detachable(device_name):
    libvirt_device, device_params = _get_device_ref_and_params(device_name)

    if CAPABILITY_TO_XML_ATTR[device_params['capability']] == 'pci':
        try:
            iommu_group = device_params['iommu_group']
        except KeyError:
            raise NoIOMMUSupportException('hostdev passthrough without iommu')
        supervdsm.getProxy().appropriateIommuGroup(iommu_group)
        libvirt_device.detachFlags(None)

    return device_params
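Paired with reattach_detachable from Example #3, a caller can ensure a detached device is always returned to the host. A minimal sketch under that assumption; the wrapper name is hypothetical and not part of the examples.

# Hypothetical helper pairing detach/reattach; illustrative only.
def run_with_passthrough(device_name, use_device):
    device_params = detach_detachable(device_name)
    try:
        use_device(device_params)
    finally:
        # Return the device to the host even if use_device raises.
        reattach_detachable(device_name)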
Example #6
def main():
    setup_nets_config = hooking.read_json()

    in_rollback = setup_nets_config['request']['options'].get('_inRollback')

    if in_rollback:
        log('Configuration failed with _inRollback=True.')
    else:
        log('Configuration failed. At this point, non-OVS rollback should be '
            'done. Executing OVS rollback.')
        supervdsm.getProxy().setupNetworks(
            {}, {}, {'connectivityCheck': False, '_inRollback': True,
                     '_inOVSRollback': True})
Example #7
    def teardown(self):
        if self.is_hostdevice:
            self.log.info('Reattaching device %s to host.', self.hostdev)
            try:
                # TODO: avoid reattach when Engine can tell free VFs otherwise
                reattach_detachable(self.hostdev)
            except NoIOMMUSupportException:
                self.log.exception('Could not reattach device %s back to host '
                                   'due to missing IOMMU support.',
                                   self.hostdev)

            device_params = get_device_params(self.hostdev)
            supervdsm.getProxy().rmAppropriateIommuGroup(
                device_params['iommu_group'])
Example #8
 def umount(self, force=False, lazy=False, freeloop=False, timeout=None):
     umount = supervdsm.getProxy().umount if os.geteuid() != 0 else _umount
     self.log.info("unmounting %s", self.fs_file)
     with utils.stopwatch("%s unmounted" % self.fs_file, log=self.log):
         umount(self.fs_file, force=force, lazy=lazy, freeloop=freeloop,
                timeout=timeout)
     self._wait_for_events()
Example #9
 def mount(self, mntOpts=None, vfstype=None, timeout=None, cgroup=None):
     mount = supervdsm.getProxy().mount if os.geteuid() != 0 else _mount
     self.log.info("mounting %s at %s", self.fs_spec, self.fs_file)
     with utils.stopwatch("%s mounted" % self.fs_file, log=self.log):
         mount(self.fs_spec, self.fs_file, mntOpts=mntOpts, vfstype=vfstype,
               timeout=timeout, cgroup=cgroup)
     self._wait_for_events()
Example #10
File: fileSD.py Project: EdDev/vdsm
def validateDirAccess(dirPath):
    try:
        getProcPool().fileUtils.validateAccess(dirPath)
        supervdsm.getProxy().validateAccess(
            constants.VDSM_USER,
            (constants.VDSM_GROUP,), dirPath,
            (os.R_OK | os.W_OK | os.X_OK))
        supervdsm.getProxy().validateAccess(
            constants.QEMU_PROCESS_USER,
            (constants.DISKIMAGE_GROUP, constants.METADATA_GROUP), dirPath,
            (os.R_OK | os.X_OK))
    except OSError as e:
        if e.errno == errno.EACCES:
            raise se.StorageServerAccessPermissionError(dirPath)
        raise

    return True
Example #11
 def _get_gluster_volinfo(self):
     try:
         superVdsmProxy = supervdsm.getProxy()
         volinfo = superVdsmProxy.glusterVolumeInfo(self._volname,
                                                    self._volfileserver)
         return volinfo[self._volname]
     except ge.GlusterCmdExecFailedException as e:
         self.log.warning("Failed to get volume info: %s", e)
         return {}
Example #12
def _resize_if_needed(guid):
    name = devicemapper.getDmId(guid)
    slaves = [(slave, getDeviceSize(slave))
              for slave in devicemapper.getSlaves(name)]

    if len(slaves) == 0:
        log.warning("Map %r has no slaves" % guid)
        return False

    if len(set(size for slave, size in slaves)) != 1:
        raise Error("Map %r slaves size differ %s" % (guid, slaves))

    map_size = getDeviceSize(name)
    slave_size = slaves[0][1]
    if map_size == slave_size:
        return False

    log.info("Resizing map %r (map_size=%d, slave_size=%d)",
             guid, map_size, slave_size)
    supervdsm.getProxy().resizeMap(name)
    return True
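A caller can use the boolean return value to count how many maps were actually resized. A minimal sketch; the resize_devices name and the guid source are assumptions, not part of the example.

def resize_devices(guids):
    # Hypothetical driver loop over multipath guids.
    resized = 0
    for guid in guids:
        if _resize_if_needed(guid):
            resized += 1
    log.info("Resized %d of %d maps", resized, len(guids))
    return resized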
Example #13
def rollback(running_config, initial_config):
    diff = running_config.diffFrom(initial_config)
    if diff:
        for libvirt_ovs_nets in (iter_ovs_nets(running_config.networks),
                                 iter_ovs_nets(initial_config.networks)):
            for net, attrs in libvirt_ovs_nets:
                with suppress(libvirtError):  # network not found
                    libvirt.removeNetwork(net)

        destroy_ovs_bridge()
        for net, attrs in running_config.networks.items():
            if is_ovs_network(attrs):
                running_config.networks.pop(net)
        for bond, attrs in running_config.bonds.items():
            if is_ovs_bond(attrs):
                running_config.bonds.pop(bond)
        running_config.save()

        supervdsm.getProxy().setupNetworks(
            initial_config.networks, initial_config.bonds,
            {'connectivityCheck': False, '_inRollback': True})
Example #14
    def testKsmAction(self):
        dropPrivileges()
        proxy = supervdsm.getProxy()
        ksmParams = {"run": 0,
                     "merge_across_nodes": 1,
                     "sleep_millisecs": 0xffff,
                     "pages_to_scan": 0xffff}
        proxy.ksmTune(ksmParams)

        for k, v in ksmParams.iteritems():
            self.assertEqual(str(v),
                             open("/sys/kernel/mm/ksm/%s" % k, "r").read())
Example #15
def getVmNumaNodeRuntimeInfo(vm):
    """
    Collect runtime information about which host NUMA nodes each VM NUMA
    node is pinned to.

    Host NUMA node topology:
    'numaNodes': {'<nodeIndex>': {'cpus': [int], 'totalMemory': 'str'},
                  ...}
    This tells us which host NUMA node each physical CPU core belongs to.

    VM NUMA node configuration:
    'guestNumaNodes': [{'cpus': 'str', 'memory': 'str'}, ...]
    This tells us which VM NUMA node each vCPU belongs to.

    Runtime pinning of vCPUs to physical CPU cores:
    ([(0, 1, 19590000000L, 1), (1, 1, 10710000000L, 1)],
     [(True, True, True, True), (True, True, True, True)])
    Each tuple in the first list describes one vCPU (element 0) and the
    physical CPU core it currently runs on (element 3).

    The mapping between vCPUs and pids comes from
    /var/run/libvirt/qemu/<vmName>.xml

    The host NUMA nodes backing each vCPU's (pid's) memory come from
    /proc/<vm_pid>/<vcpu_pid>/numa_maps

    From all of the above we can calculate which host NUMA nodes each VM
    NUMA node is effectively pinned to. The output is a map like:
    '<vm numa node index>': [<host numa node index>, ...]
    """

    vmNumaNodeRuntimeMap = {}

    vcpu_to_pcpu = _get_mapping_vcpu_to_pcpu(
        _get_vcpu_positioning(vm))
    if vcpu_to_pcpu:
        vm_numa_placement = defaultdict(set)

        vcpu_to_pnode = supervdsm.getProxy().getVcpuNumaMemoryMapping(
            vm.conf['vmName'].encode('utf-8'))
        pcpu_to_pnode = _get_mapping_pcpu_to_pnode()
        vcpu_to_vnode = _get_mapping_vcpu_to_vnode(vm)

        for vcpu_id, pcpu_id in vcpu_to_pcpu.iteritems():
            vnode_index = str(vcpu_to_vnode[vcpu_id])
            vm_numa_placement[vnode_index].add(pcpu_to_pnode[pcpu_id])
            vm_numa_placement[vnode_index].update(
                vcpu_to_pnode.get(vcpu_id, ()))

        vmNumaNodeRuntimeMap = dict((k, list(v)) for k, v in
                                    vm_numa_placement.iteritems())

    return vmNumaNodeRuntimeMap
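To make the output shape concrete: for a guest with two NUMA nodes whose vCPUs run on host nodes 0 and 1, the returned map might look like the following (values are illustrative, not taken from the example).

# Illustrative result only:
# {'0': [0],       # all vcpus of vm node 0 run on host node 0
#  '1': [0, 1]}    # vcpus of vm node 1 are spread across host nodes 0 and 1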
Example #16
File: numa.py Project: rexhsu/vdsm
def getVmNumaNodeRuntimeInfo(vm):
    """
    Collect runtime information about which host NUMA nodes each VM NUMA
    node is pinned to.

    Host NUMA node topology:
    'numaNodes': {'<nodeIndex>': {'cpus': [int], 'totalMemory': 'str'},
                  ...}
    This tells us which host NUMA node each physical CPU core belongs to.

    VM NUMA node configuration:
    'guestNumaNodes': [{'cpus': 'str', 'memory': 'str'}, ...]
    This tells us which VM NUMA node each vCPU belongs to.

    Runtime pinning of vCPUs to physical CPU cores:
    ([(0, 1, 19590000000L, 1), (1, 1, 10710000000L, 1)],
     [(True, True, True, True), (True, True, True, True)])
    Each tuple in the first list describes one vCPU (element 0) and the
    physical CPU core it currently runs on (element 3).

    The mapping between vCPUs and pids comes from
    /var/run/libvirt/qemu/<vmName>.xml

    The host NUMA nodes backing each vCPU's (pid's) memory come from
    /proc/<vm_pid>/<vcpu_pid>/numa_maps

    From all of the above we can calculate which host NUMA nodes each VM
    NUMA node is effectively pinned to. The output is a map like:
    '<vm numa node index>': [<host numa node index>, ...]
    """

    vmNumaNodeRuntimeMap = {}

    vcpu_to_pcpu = _get_mapping_vcpu_to_pcpu(_get_vcpu_positioning(vm))
    if vcpu_to_pcpu:
        vm_numa_placement = defaultdict(set)

        vcpu_to_pnode = supervdsm.getProxy().getVcpuNumaMemoryMapping(
            vm.conf['vmName'].encode('utf-8'))
        pcpu_to_pnode = _get_mapping_pcpu_to_pnode()
        vcpu_to_vnode = _get_mapping_vcpu_to_vnode(vm)

        for vcpu_id, pcpu_id in vcpu_to_pcpu.iteritems():
            vnode_index = str(vcpu_to_vnode[vcpu_id])
            vm_numa_placement[vnode_index].add(pcpu_to_pnode[pcpu_id])
            vm_numa_placement[vnode_index].update(
                vcpu_to_pnode.get(vcpu_id, ()))

        vmNumaNodeRuntimeMap = dict(
            (k, list(v)) for k, v in vm_numa_placement.iteritems())

    return vmNumaNodeRuntimeMap
Example #17
    def testKsmAction(self):
        self.dropPrivileges()
        proxy = supervdsm.getProxy()
        ksmParams = {
            "run": 0,
            "merge_across_nodes": 1,
            "sleep_millisecs": 0xffff,
            "pages_to_scan": 0xffff
        }
        proxy.ksmTune(ksmParams)

        for k, v in ksmParams.iteritems():
            self.assertEqual(str(v),
                             open("/sys/kernel/mm/ksm/%s" % k, "r").read())
Example #18
 def _prepareVolumePathFromPayload(self, vmId, device, payload):
     """
     param vmId:
         VM UUID or None
     param device:
         either 'floppy' or 'cdrom'
     param payload:
         a dict formed like this:
         {'volId': 'volume id',   # volId is optional
          'file': {'filename': 'content', ...}}
     """
     funcs = {'cdrom': 'mkIsoFs', 'floppy': 'mkFloppyFs'}
     if device not in funcs:
         raise vm.VolumeError("Unsupported 'device': %s" % device)
     func = getattr(supervdsm.getProxy(), funcs[device])
     return func(vmId, payload['file'], payload.get('volId'))
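A hypothetical call site building a cdrom payload in the shape the docstring describes; the vmId, volId, and file content are examples only.

# Hypothetical payload; all values are illustrative.
payload = {'volId': 'config-2',
           'file': {'meta_data.json': '{"hostname": "vm01"}'}}
iso_path = self._prepareVolumePathFromPayload(vmId, 'cdrom', payload)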
Example #19
 def _prepareVolumePathFromPayload(self, vmId, device, payload):
     """
     param vmId:
         VM UUID or None
     param device:
         either 'floppy' or 'cdrom'
     param payload:
         a dict formed like this:
         {'volId': 'volume id',   # volId is optional
          'file': {'filename': 'content', ...}}
     """
     funcs = {'cdrom': 'mkIsoFs', 'floppy': 'mkFloppyFs'}
     if device not in funcs:
         raise vm.VolumeError("Unsupported 'device': %s" % device)
     func = getattr(supervdsm.getProxy(), funcs[device])
     return func(vmId, payload['file'], payload.get('volId'))
Example #20
    def getVmVolumeInfo(self):
        """
        Send info to represent Gluster volume as a network block device
        """
        rpath = sdCache.produce(self.sdUUID).getRealPath()
        volfileServer, volname = rpath.rsplit(":", 1)
        volname = volname.strip('/')

        # Volume transport to Libvirt transport mapping
        VOLUME_TRANS_MAP = {'TCP': 'tcp', 'RDMA': 'rdma'}

        # Extract the volume's transport using gluster cli
        svdsmProxy = svdsm.getProxy()

        try:
            volInfo = svdsmProxy.glusterVolumeInfo(volname, volfileServer)
            volTrans = VOLUME_TRANS_MAP[volInfo[volname]['transportType'][0]]
        except GlusterException:
            # In case of issues with finding transport type, default to tcp
            self.log.warning(
                "Unable to find transport type for GlusterFS"
                " volume %s. GlusterFS server = %s."
                " Defaulting to tcp", volname, volfileServer,
                exc_info=True)
            volTrans = VOLUME_TRANS_MAP['TCP']

        # Use default port
        volPort = "0"

        imgFilePath = self.getVolumePath()
        imgFilePath_list = imgFilePath.rsplit("/")

        # Extract path to the image, relative to the gluster mount
        imgFileRelPath = "/".join(imgFilePath_list[-4:])

        glusterPath = volname + '/' + imgFileRelPath

        return {
            'volType': VmVolumeInfo.TYPE_NETWORK,
            'path': glusterPath,
            'protocol': 'gluster',
            'volPort': volPort,
            'volTransport': volTrans,
            'volfileServer': volfileServer
        }
Example #21
def fixNetworks(xml_str):
    networks = set(re.findall(r'(?<=NIC-BRIDGE:)[\w:-]+', xml_str))
    for network in networks:
        ovs_bridge = supervdsm.getProxy().ovs_bridge(network)
        if ovs_bridge:
            new_str = ("<source bridge='{bridge}'/>"
                       "<virtualport type='openvswitch'/>"
                       ).format(bridge=ovs_bridge)
            vlan_tag = net_api.net2vlan(network)
            if vlan_tag:
                new_str = new_str + \
                    "<vlan><tag id='{tag_id}'/></vlan>" \
                    .format(tag_id=str(vlan_tag))
            xml_str = xml_str.replace(
                '<source bridge="NIC-BRIDGE:' + network + '"/>', new_str)
        else:
            xml_str = xml_str.replace('NIC-BRIDGE:' + network, network)
    return xml_str
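With the formatting fixed as above, an input stub is rewritten as follows; the network name, bridge name, and vlan tag are hypothetical.

# Hypothetical transformation: if network 'net1' maps to OVS bridge 'br-int'
# with vlan tag 100, then
#   <source bridge="NIC-BRIDGE:net1"/>
# becomes
#   <source bridge='br-int'/><virtualport type='openvswitch'/>
#   <vlan><tag id='100'/></vlan>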
Example #22
    def getVmVolumeInfo(self):
        """
        Send info to represent Gluster volume as a network block device
        """
        rpath = sdCache.produce(self.sdUUID).getRealPath()
        volfileServer, volname = rpath.rsplit(":", 1)
        volname = volname.strip('/')

        # Volume transport to Libvirt transport mapping
        VOLUME_TRANS_MAP = {'TCP': 'tcp', 'RDMA': 'rdma'}

        # Extract the volume's transport using gluster cli
        svdsmProxy = svdsm.getProxy()

        try:
            volInfo = svdsmProxy.glusterVolumeInfo(volname, volfileServer)
            volTrans = VOLUME_TRANS_MAP[volInfo[volname]['transportType'][0]]
        except GlusterException:
            # In case of issues with finding transport type, default to tcp
            self.log.warning("Unable to find transport type for GlusterFS"
                             " volume %s. GlusterFS server = %s."
                             "Defaulting to tcp",
                             (volname, volfileServer), exc_info=True)
            volTrans = VOLUME_TRANS_MAP['TCP']

        # Use default port
        volPort = "0"

        imgFilePath = self.getVolumePath()
        imgFilePath_list = imgFilePath.rsplit("/")

        # Extract path to the image, relative to the gluster mount
        imgFileRelPath = "/".join(imgFilePath_list[-4:])

        glusterPath = volname + '/' + imgFileRelPath

        return {'volType': VmVolumeInfo.TYPE_NETWORK, 'path': glusterPath,
                'protocol': 'gluster', 'volPort': volPort,
                'volTransport': volTrans,
                'volfileServer': volfileServer}
Example #23
def get():
    caps = {}
    cpu_topology = numa.cpu_topology()

    caps['kvmEnabled'] = str(os.path.exists('/dev/kvm')).lower()

    if config.getboolean('vars', 'report_host_threads_as_cores'):
        caps['cpuCores'] = str(cpu_topology.threads)
    else:
        caps['cpuCores'] = str(cpu_topology.cores)

    caps['cpuThreads'] = str(cpu_topology.threads)
    caps['cpuSockets'] = str(cpu_topology.sockets)
    caps['onlineCpus'] = ','.join(cpu_topology.online_cpus)
    caps['cpuSpeed'] = cpuinfo.frequency()
    caps['cpuModel'] = cpuinfo.model()
    caps['cpuFlags'] = ','.join(cpuinfo.flags() +
                                machinetype.compatible_cpu_models())

    caps.update(_getVersionInfo())

    net_caps = supervdsm.getProxy().network_caps()
    caps.update(net_caps)

    try:
        caps['hooks'] = hooks.installed()
    except Exception:
        logging.debug('not reporting hooks', exc_info=True)

    caps['operatingSystem'] = osinfo.version()
    caps['uuid'] = host.uuid()
    caps['packages2'] = osinfo.package_versions()
    caps['realtimeKernel'] = osinfo.runtime_kernel_flags().realtime
    caps['kernelArgs'] = osinfo.kernel_args()
    caps['nestedVirtualization'] = osinfo.nested_virtualization().enabled
    caps['emulatedMachines'] = machinetype.emulated_machines(
        cpuarch.effective())
    caps['ISCSIInitiatorName'] = _getIscsiIniName()
    caps['HBAInventory'] = hba.HBAInventory()
    caps['vmTypes'] = ['kvm']

    caps['memSize'] = str(utils.readMemInfo()['MemTotal'] / 1024)
    caps['reservedMem'] = str(
        config.getint('vars', 'host_mem_reserve') +
        config.getint('vars', 'extra_mem_reserve'))
    caps['guestOverhead'] = config.get('vars', 'guest_ram_overhead')

    caps['rngSources'] = rngsources.list_available()

    caps['numaNodes'] = dict(numa.topology())
    caps['numaNodeDistance'] = dict(numa.distances())
    caps['autoNumaBalancing'] = numa.autonuma_status()

    caps['selinux'] = osinfo.selinux_status()

    liveSnapSupported = _getLiveSnapshotSupport(cpuarch.effective())
    if liveSnapSupported is not None:
        caps['liveSnapshot'] = str(liveSnapSupported).lower()
    caps['liveMerge'] = str(getLiveMergeSupport()).lower()
    caps['kdumpStatus'] = osinfo.kdump_status()

    caps['hostdevPassthrough'] = str(hostdev.is_supported()).lower()
    caps['additionalFeatures'] = []
    if osinfo.glusterEnabled:
        from vdsm.gluster.api import glusterAdditionalFeatures
        caps['additionalFeatures'].extend(glusterAdditionalFeatures())
    caps['containers'] = containersconnection.is_supported()
    caps['hostedEngineDeployed'] = _isHostedEngineDeployed()
    caps['hugepages'] = hugepages.supported()
    return caps
Example #24
def getVmNumaNodeRuntimeInfo(vm):
    """
    Collect runtime information about which host NUMA nodes each VM NUMA
    node is pinned to.

    Host NUMA node topology:
    'numaNodes': {'<nodeIndex>': {'cpus': [int], 'totalMemory': 'str'},
                  ...}
    This tells us which host NUMA node each physical CPU core belongs to.

    VM NUMA node configuration:
    'guestNumaNodes': [{'cpus': 'str', 'memory': 'str'}, ...]
    This tells us which VM NUMA node each vCPU belongs to.

    Runtime pinning of vCPUs to physical CPU cores:
    ([(0, 1, 19590000000L, 1), (1, 1, 10710000000L, 1)],
     [(True, True, True, True), (True, True, True, True)])
    Each tuple in the first list describes one vCPU (element 0) and the
    physical CPU core it currently runs on (element 3).

    The mapping between vCPUs and pids comes from
    /var/run/libvirt/qemu/<vmName>.xml

    The host NUMA nodes backing each vCPU's (pid's) memory come from
    /proc/<vm_pid>/<vcpu_pid>/numa_maps

    From all of the above we can calculate which host NUMA nodes each VM
    NUMA node is effectively pinned to. The output is a map like:
    '<vm numa node index>': [<host numa node index>, ...]
    """

    vmNumaNodeRuntimeMap = {}

    vcpu_to_pcpu = _get_mapping_vcpu_to_pcpu(
        _get_vcpu_positioning(vm))
    if vcpu_to_pcpu:
        vm_numa_placement = defaultdict(set)

        vcpu_to_pnode = supervdsm.getProxy().getVcpuNumaMemoryMapping(
            vm.conf['vmName'].encode('utf-8'))
        pcpu_to_pnode = _get_mapping_pcpu_to_pnode()
        vcpu_to_vnode = _get_mapping_vcpu_to_vnode(vm)

        for vcpu_id, pcpu_id in vcpu_to_pcpu.iteritems():
            try:
                vnode_index = str(vcpu_to_vnode[vcpu_id])
            except KeyError:
                # Not all CPUs are mapped to NUMA nodes, e.g.:
                # - We don't assign hotplugged CPUs to NUMA nodes.
                # - When Engine assigns equal number of CPUs to each of the
                #   NUMA nodes, the contingent remaining CPUs are left
                #   unassigned.
                # We simply skip the unassigned CPUs here.
                log = logging.getLogger('NUMA')
                log.debug("Virtual CPU #%s not assigned to any virtual "
                          "NUMA node",
                          vcpu_id)
                continue
            vm_numa_placement[vnode_index].add(pcpu_to_pnode[pcpu_id])
            vm_numa_placement[vnode_index].update(
                vcpu_to_pnode.get(vcpu_id, ()))

        vmNumaNodeRuntimeMap = dict((k, list(v)) for k, v in
                                    vm_numa_placement.iteritems())

    return vmNumaNodeRuntimeMap
Example #25
 def _create_vhost_port(self, ovs_bridge):
     port = self._get_vhostuser_port_name()
     socket_path = os.path.join(VHOST_SOCK_DIR, port)
     supervdsm.getProxy().add_ovs_vhostuser_port(ovs_bridge, port,
                                                 socket_path)
Example #26
 def _prepare_socket(self):
     supervdsm.getProxy().prepareVmChannel(self._socketName)
Example #27
 def restoreNetConfig(self):
     supervdsm.getProxy().restoreNetworks()
Example #28
if 'mdev_type' in os.environ:
    domxml = hooking.read_domxml()

    vm_name = str(domxml.getElementsByTagName('name')[0].firstChild.nodeValue)

    target_mdev_type = os.environ['mdev_type']
    # Sufficient as the hook only supports a single mdev instance per VM.
    mdev_uuid = str(uuid.uuid3(_OVIRT_MDEV_NAMESPACE, vm_name))
    device = _suitable_device_for_mdev_type(target_mdev_type)
    if device is None:
        sys.stderr.write('vgpu: No device with type {} is available.\n'.format(
            target_mdev_type))
        sys.exit(1)
    try:
        supervdsm.getProxy().mdev_create(device, target_mdev_type, mdev_uuid)
    except IOError:
        sys.stderr.write(
            'vgpu: Failed to create mdev type {}.\n'.format(target_mdev_type))
        sys.exit(1)

    supervdsm.getProxy().appropriateIommuGroup(
        os.path.basename(
            os.path.realpath(
                os.path.join(_MDEV_PATH, device, mdev_uuid, 'iommu_group'))))

    hostdev = domxml.createElement('hostdev')
    hostdev.setAttribute('mode', 'subsystem')
    hostdev.setAttribute('type', 'mdev')
    hostdev.setAttribute('model', 'vfio-pci')
Example #29
File: api.py Project: igoihman/vdsm
 def __init__(self, cif, log):
     self.cif = cif
     self.log = log
     self.svdsmProxy = svdsm.getProxy()
Example #30
 def _add_sourceroute(iface, ip, mask, route):
     supervdsm.getProxy().add_sourceroute(iface, ip, mask, route)
Example #31
 def testPingCall(self):
     dropPrivileges()
     proxy = supervdsm.getProxy()
     self.assertTrue(proxy.ping())
Example #32
def systemctl_stop(name):
    return _result(supervdsm.getProxy().systemctl_stop(name))
Example #33
def docker_net_create(subnet, gw, nic, network):
    return _result(supervdsm.getProxy().docker_net_create(
        subnet, gw, nic, network))
Example #34
def getPathsStatus():
    return getProxy().getPathsStatus()
Example #35
 def testPingCall(self):
     self.dropPrivileges()
     proxy = supervdsm.getProxy()
     self.assertTrue(proxy.ping())
Example #36
 def prepare(self):
     if self._path:
         supervdsm.getProxy().prepareVmChannel(
             self._path,
             constants.OVIRT_VMCONSOLE_GROUP)
Example #37
 def recover(self):
     if self.network:
         bridge_info = supervdsm.getProxy().ovs_bridge(self.network)
         if bridge_info and bridge_info['dpdk_enabled']:
             self._is_vhostuser = True
Example #38
File: utils.py Project: mykaul/vdsm
 def restoreNetConfig(self):
     supervdsm.getProxy().restoreNetworks()
Example #39
 def prepare(self):
     if self._path:
         supervdsm.getProxy().prepareVmChannel(
             self._path,
             constants.OVIRT_VMCONSOLE_GROUP)
Example #40
def systemd_run(unit_name, cgroup_slice, *args):
    return _result(
        supervdsm.getProxy().systemd_run(unit_name, cgroup_slice, *args)
    )
Example #41
def getSessionInfo(sessionID):
    return supervdsm.getProxy().readSessionInfo(sessionID)
Example #42
def docker_net_inspect(network):
    return _result(
        supervdsm.getProxy().docker_net_inspect(network)
    )
Example #43
def pathListIter(filterGuids=()):
    filterLen = len(filterGuids) if filterGuids else -1
    devsFound = 0

    knownSessions = {}

    svdsm = supervdsm.getProxy()
    pathStatuses = devicemapper.getPathsStatus()

    for dmId, guid in getMPDevsIter():
        if devsFound == filterLen:
            break

        if filterGuids and guid not in filterGuids:
            continue

        devsFound += 1

        devInfo = {
            "guid": guid,
            "dm": dmId,
            "capacity": str(getDeviceSize(dmId)),
            "serial": svdsm.getScsiSerial(dmId),
            "paths": [],
            "connections": [],
            "devtypes": [],
            "devtype": "",
            "vendor": "",
            "product": "",
            "fwrev": "",
            "logicalblocksize": "",
            "physicalblocksize": "",
            "discard_max_bytes": getDeviceDiscardMaxBytes(dmId),
            "discard_zeroes_data": getDeviceDiscardZeroesData(dmId),
        }

        for slave in devicemapper.getSlaves(dmId):
            if not devicemapper.isBlockDevice(slave):
                log.warning("No such physdev '%s' is ignored" % slave)
                continue

            if not devInfo["vendor"]:
                try:
                    devInfo["vendor"] = getVendor(slave)
                except Exception:
                    log.warn("Problem getting vendor from device `%s`",
                             slave, exc_info=True)

            if not devInfo["product"]:
                try:
                    devInfo["product"] = getModel(slave)
                except Exception:
                    log.warn("Problem getting model name from device `%s`",
                             slave, exc_info=True)

            if not devInfo["fwrev"]:
                try:
                    devInfo["fwrev"] = getFwRev(slave)
                except Exception:
                    log.warn("Problem getting fwrev from device `%s`",
                             slave, exc_info=True)

            if (not devInfo["logicalblocksize"] or
                    not devInfo["physicalblocksize"]):
                try:
                    logBlkSize, phyBlkSize = getDeviceBlockSizes(slave)
                    devInfo["logicalblocksize"] = str(logBlkSize)
                    devInfo["physicalblocksize"] = str(phyBlkSize)
                except Exception:
                    log.warn("Problem getting blocksize from device `%s`",
                             slave, exc_info=True)

            pathInfo = {}
            pathInfo["physdev"] = slave
            pathInfo["state"] = pathStatuses.get(slave, "failed")
            pathInfo["capacity"] = str(getDeviceSize(slave))
            try:
                hbtl = getHBTL(slave)
            except OSError as e:
                if e.errno == errno.ENOENT:
                    log.warn("Device has no hbtl: %s", slave)
                    pathInfo["lun"] = 0
                else:
                    log.error("Error: %s while trying to get hbtl of device: "
                              "%s", str(e.message), slave)
                    raise
            else:
                pathInfo["lun"] = hbtl.lun

            if iscsi.devIsiSCSI(slave):
                devInfo["devtypes"].append(DEV_ISCSI)
                pathInfo["type"] = DEV_ISCSI
                sessionID = iscsi.getiScsiSession(slave)
                if sessionID not in knownSessions:
                    # FIXME: This entire part is for BC. It should be moved to
                    # hsm and not preserved for new APIs. New APIs should keep
                    # numeric types and sane field names.
                    sess = iscsi.getSessionInfo(sessionID)
                    sessionInfo = {
                        "connection": sess.target.portal.hostname,
                        "port": str(sess.target.portal.port),
                        "iqn": sess.target.iqn,
                        "portal": str(sess.target.tpgt),
                        "initiatorname": sess.iface.name
                    }

                    # Note that credentials must be sent back in order for
                    # the engine to tell vdsm how to reconnect later
                    if sess.credentials:
                        cred = sess.credentials
                        sessionInfo['user'] = cred.username
                        sessionInfo['password'] = cred.password

                    knownSessions[sessionID] = sessionInfo
                devInfo["connections"].append(knownSessions[sessionID])
            else:
                devInfo["devtypes"].append(DEV_FCP)
                pathInfo["type"] = DEV_FCP

            if devInfo["devtype"] == "":
                devInfo["devtype"] = pathInfo["type"]
            elif (devInfo["devtype"] != DEV_MIXED and
                  devInfo["devtype"] != pathInfo["type"]):
                devInfo["devtype"] == DEV_MIXED

            devInfo["paths"].append(pathInfo)

        yield devInfo
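Since pathListIter is a generator, callers can consume it lazily. An illustrative sketch collecting only iSCSI-backed devices; the filtering shown is hypothetical, not part of the example.

# Illustrative consumption of the generator.
iscsi_devs = [dev for dev in pathListIter()
              if DEV_ISCSI in dev["devtypes"]]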
Example #44
def docker_net_remove(network):
    return _result(
        supervdsm.getProxy().docker_net_remove(network)
    )
Example #45
    def getXML(self):
        """
        Create domxml for network interface.

        <interface type="bridge">
            <mac address="aa:bb:dd:dd:aa:bb"/>
            <model type="virtio"/>
            <source bridge="engine"/>
            [<driver name="vhost/qemu" queues="int"/>]
            [<filterref filter='filter name'>
              [<parameter name='parameter name' value='parameter value'>]
             </filterref>]
            [<tune><sndbuf>0</sndbuf></tune>]
            [<link state='up|down'/>]
            [<bandwidth>
              [<inbound average="int" [burst="int"]  [peak="int"]/>]
              [<outbound average="int" [burst="int"]  [peak="int"]/>]
             </bandwidth>]
        </interface>

        -- or -- a slightly different SR-IOV network interface
        <interface type='hostdev' managed='no'>
          <driver name='vfio'/>
          <source>
           <address type='pci' domain='0x0000' bus='0x00' slot='0x07'
           function='0x0'/>
          </source>
          <mac address='52:54:00:6d:90:02'/>
          <vlan>
           <tag id=100/>
          </vlan>
          <address type='pci' domain='0x0000' bus='0x00' slot='0x07'
          function='0x0'/>
          <boot order='1'/>
         </interface>
        """
        iface = self.createXmlElem('interface', self.device, ['address'])
        iface.appendChildWithArgs('mac', address=self.macAddr)

        if hasattr(self, 'nicModel'):
            iface.appendChildWithArgs('model', type=self.nicModel)

        if self.is_hostdevice:
            # SR-IOV network interface
            iface.setAttrs(managed='no')
            host_address = get_device_params(self.hostdev)['address']
            source = iface.appendChildWithArgs('source')
            source.appendChildWithArgs('address', type='pci', **host_address)

            if self.vlanId is not None:
                vlan = iface.appendChildWithArgs('vlan')
                vlan.appendChildWithArgs('tag', id=str(self.vlanId))
        else:
            ovs_bridge = supervdsm.getProxy().ovs_bridge(self.network)
            if ovs_bridge:
                self._source_ovs_bridge(iface, ovs_bridge)
            else:
                iface.appendChildWithArgs('source', bridge=self.network)

        if hasattr(self, 'filter'):
            filter = iface.appendChildWithArgs('filterref', filter=self.filter)
            self._set_parameters_filter(filter)

        if hasattr(self, 'linkActive'):
            iface.appendChildWithArgs('link', state='up'
                                      if utils.tobool(self.linkActive)
                                      else 'down')

        if hasattr(self, 'bootOrder'):
            iface.appendChildWithArgs('boot', order=self.bootOrder)

        if self.driver:
            iface.appendChildWithArgs('driver', **self.driver)
        elif self.is_hostdevice:
            iface.appendChildWithArgs('driver', name='vfio')

        if self.sndbufParam:
            tune = iface.appendChildWithArgs('tune')
            tune.appendChildWithArgs('sndbuf', text=self.sndbufParam)

        if 'inbound' in self.specParams or 'outbound' in self.specParams:
            iface.appendChild(self.paramsToBandwidthXML(self.specParams))

        return iface
Example #46
 def _remove_sourceroute(iface):
     supervdsm.getProxy().remove_sourceroute(iface)
Example #47
def systemctl_stop(name):
    return _result(
        supervdsm.getProxy().systemctl_stop(name)
    )
Example #48
 def teardown(self):
     if self.uses_source('/dev/hwrng'):
         supervdsm.getProxy().rmAppropriateHwrngDevice(self.vmid)
Example #49
def docker_net_create(subnet, gw, nic, network):
    return _result(
        supervdsm.getProxy().docker_net_create(subnet, gw, nic, network)
    )
Example #50
 def _prepare_socket(self):
     supervdsm.getProxy().prepareVmChannel(self._socketName)
Example #51
def systemd_run(unit_name, cgroup_slice, *args):
    return _result(supervdsm.getProxy().systemd_run(unit_name, cgroup_slice,
                                                    *args))
Example #52
def removeMapping(deviceName):
    return getProxy().removeDeviceMapping(deviceName)
Example #53
def docker_net_inspect(network):
    return _result(supervdsm.getProxy().docker_net_inspect(network))
Example #54
    vm_name = str(
        domxml.getElementsByTagName('name')[0].firstChild.nodeValue
    )

    target_mdev_type = os.environ['mdev_type']
    # Sufficient as the hook only supports a single mdev instance per VM.
    mdev_uuid = str(uuid.uuid3(_OVIRT_MDEV_NAMESPACE, vm_name))
    device = _suitable_device_for_mdev_type(target_mdev_type)
    if device is None:
        sys.stderr.write('vgpu: No device with type {} is available.\n'.format(
            target_mdev_type)
        )
        sys.exit(1)
    try:
        supervdsm.getProxy().mdev_create(device, target_mdev_type, mdev_uuid)
    except IOError:
        sys.stderr.write('vgpu: Failed to create mdev type {}.\n'.format(
            target_mdev_type)
        )
        sys.exit(1)

    supervdsm.getProxy().appropriateIommuGroup(
        os.path.basename(os.path.realpath(
            os.path.join(_MDEV_PATH, device, mdev_uuid, 'iommu_group')
        ))
    )

    hostdev = domxml.createElement('hostdev')
    hostdev.setAttribute('mode', 'subsystem')
    hostdev.setAttribute('type', 'mdev')
Example #55
def docker_net_remove(network):
    return _result(supervdsm.getProxy().docker_net_remove(network))
Example #56
    def getXML(self):
        """
        Create domxml for network interface.

        <interface type="bridge">
            <mac address="aa:bb:dd:dd:aa:bb"/>
            <model type="virtio"/>
            <source bridge="engine"/>
            [<driver name="vhost/qemu" queues="int"/>]
            [<filterref filter='filter name'>
              [<parameter name='parameter name' value='parameter value'>]
             </filterref>]
            [<tune><sndbuf>0</sndbuf></tune>]
            [<link state='up|down'/>]
            [<bandwidth>
              [<inbound average="int" [burst="int"]  [peak="int"]/>]
              [<outbound average="int" [burst="int"]  [peak="int"]/>]
             </bandwidth>]
        </interface>

        -- or -- a slightly different SR-IOV network interface
        <interface type='hostdev' managed='no'>
          <driver name='vfio'/>
          <source>
           <address type='pci' domain='0x0000' bus='0x00' slot='0x07'
           function='0x0'/>
          </source>
          <mac address='52:54:00:6d:90:02'/>
          <vlan>
           <tag id=100/>
          </vlan>
          <address type='pci' domain='0x0000' bus='0x00' slot='0x07'
          function='0x0'/>
          <boot order='1'/>
         </interface>

         -- In case of an ovs dpdk bridge --

        <interface type="vhostuser">
          <address bus="0x00" domain="0x0000" slot="0x04" type="pci"/>
          <mac address="00:1a:4a:16:01:54"/>
          <model type="virtio"/>
          <source mode="server" path='socket path' type="unix"/>
        </interface>


        """
        devtype = 'vhostuser' if self._is_vhostuser else self.device
        iface = self.createXmlElem('interface', devtype, ['address'])
        iface.appendChildWithArgs('mac', address=self.macAddr)

        if hasattr(self, 'nicModel'):
            iface.appendChildWithArgs('model', type=self.nicModel)

        if self.is_hostdevice:
            # SR-IOV network interface
            iface.setAttrs(managed='no')
            host_address = self._device_params['address']
            source = iface.appendChildWithArgs('source')
            source.appendChildWithArgs(
                'address',
                type='pci',
                **validate.normalize_pci_address(**host_address))

            if self.vlanId is not None:
                vlan = iface.appendChildWithArgs('vlan')
                vlan.appendChildWithArgs('tag', id=str(self.vlanId))
        else:
            ovs_bridge = supervdsm.getProxy().ovs_bridge(self.network)
            if ovs_bridge:
                if ovs_bridge['dpdk_enabled']:
                    self._source_ovsdpdk_bridge(iface, ovs_bridge['name'])
                else:
                    self._source_ovs_bridge(iface, ovs_bridge['name'])
            else:
                iface.appendChildWithArgs('source', bridge=self.network)

        if hasattr(self, 'filter'):
            filter = iface.appendChildWithArgs('filterref', filter=self.filter)
            self._set_parameters_filter(filter)

        if hasattr(self, 'linkActive'):
            iface.appendChildWithArgs(
                'link', state='up' if conv.tobool(self.linkActive) else 'down')

        if hasattr(self, 'bootOrder'):
            iface.appendChildWithArgs('boot', order=self.bootOrder)

        if self.driver:
            iface.appendChildWithArgs('driver', **self.driver)
        elif self.is_hostdevice:
            iface.appendChildWithArgs('driver', name='vfio')

        if self.sndbufParam:
            tune = iface.appendChildWithArgs('tune')
            tune.appendChildWithArgs('sndbuf', text=self.sndbufParam)

        if 'inbound' in self.specParams or 'outbound' in self.specParams:
            iface.appendChild(self.get_bandwidth_xml(self.specParams))

        return iface
Example #57
 def setup(self):
     if self.uses_source('/dev/hwrng'):
         supervdsm.getProxy().appropriateHwrngDevice(self.vmid)
Example #58
 def teardown(self):
     supervdsm.getProxy().rmAppropriateIommuGroup(
         self._deviceParams['iommu_group'])