def usb_attach(vm, backend_vm, device, frontend=None, auto_detach=False, wait=True):
    """Attach USB *device* exported by *backend_vm* to *vm* (xenstore-based path).

    Reads the device's USB version from the backend's xenstore subtree,
    picks (or validates) a frontend slot, optionally detaches the device
    from its current VM, and finally delegates the actual attach to the
    xl-qvm-usb-attach.py helper script.

    NOTE(review): *wait* is accepted but never used in this body.
    Raises QubesException on an invalid USB version string or when the
    device is already attached and auto_detach is False.
    """
    device_attach_check(vm, backend_vm, device, frontend)
    # All xenstore reads below happen inside one transaction so the
    # device state cannot change under us; the transaction is always
    # ended before raising.
    xs_trans = vmm.xs.transaction_start()
    xs_encoded_device = usb_encode_device_for_xs(device)
    usb_ver = vmm.xs.read(xs_trans, '/local/domain/%s/qubes-usb-devices/%s/usb-ver' % (backend_vm.xid, xs_encoded_device))
    if usb_ver is None or not usb_ver_re.match(usb_ver):
        vmm.xs.transaction_end(xs_trans)
        raise QubesException("Invalid %s device USB version in VM '%s'" % (device, backend_vm.name))
    if frontend is None:
        frontend = usb_find_unused_frontend(xs_trans, backend_vm.xid, vm.xid, usb_ver)
    else:
        # Check if any device attached at this frontend
        #if usb_check_frontend_busy(vm, frontend):
        #    raise QubesException("Frontend %s busy in VM %s, detach it first" % (frontend, vm.name))
        vmm.xs.transaction_end(xs_trans)
        raise NotImplementedError("Explicit USB frontend specification is not implemented yet")
    # Check if this device is attached to some domain
    attached_vm = usb_check_attached(xs_trans, backend_vm.xid, device)
    vmm.xs.transaction_end(xs_trans)
    if attached_vm:
        if auto_detach:
            # NOTE(review): called with (backend_vm, attached_vm) — this
            # matches the xenstore-era usb_detach signature, not the
            # 3-argument qrexec-based one elsewhere in this file; confirm.
            usb_detach(backend_vm, attached_vm)
        else:
            raise QubesException("Device %s from %s already connected to VM %s as %s" % (device, backend_vm.name, attached_vm['vm'], attached_vm['frontend']))
    # Run helper script
    xl_cmd = ['/usr/lib/qubes/xl-qvm-usb-attach.py', str(vm.xid), device, frontend, str(backend_vm.xid)]
    subprocess.check_call(xl_cmd)
def usb_attach(qvmc, vm, device, auto_detach=False, wait=True):
    """Attach USB *device* to *vm* via the qubes.USBAttach qrexec service.

    *device* is a dict describing the exported device (keys used here:
    'vm' — backend VM object, 'device', 'name').  A temporary qrexec
    policy entry allowing vm -> backend is installed for the duration of
    the attach call and removed (or the file deleted, if we created it)
    afterwards.

    NOTE(review): *wait* is accepted but never used in this body.
    Raises QubesException if either VM is not running, the device is
    already connected (and auto_detach is False), or the service fails.
    """
    if not vm.is_running():
        raise QubesException("VM {} not running".format(vm.name))
    if not device['vm'].is_running():
        raise QubesException("VM {} not running".format(device['vm'].name))
    connected_to = usb_check_attached(qvmc, device)
    if connected_to:
        if auto_detach:
            usb_detach(qvmc, device)
        else:
            raise QubesException("Device {} already connected, to {}".format(
                device['name'], connected_to))
    # set qrexec policy to allow this device
    policy_line = '{} {} allow\n'.format(vm.name, device['vm'].name)
    policy_path = '/etc/qubes-rpc/policy/qubes.USB+{}'.format(device['device'])
    policy_exists = os.path.exists(policy_path)
    if not policy_exists:
        try:
            # O_CREAT|O_EXCL: fail rather than clobber a file created by
            # a concurrent attach between the exists() check and here
            fd = os.open(policy_path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
            with os.fdopen(fd, 'w') as f:
                f.write(policy_line)
        except OSError as e:
            if e.errno == errno.EEXIST:
                # someone else created it in the meantime; their line is
                # not ours, but attach proceeds regardless
                pass
            else:
                raise
    else:
        # prepend our allow line to the existing policy file
        with open(policy_path, 'r+') as f:
            policy = f.readlines()
            policy.insert(0, policy_line)
            f.truncate(0)
            f.seek(0)
            f.write(''.join(policy))
    try:
        # and actual attach
        p = vm.run_service('qubes.USBAttach', passio_popen=True,
                           user='******')
        (stdout, stderr) = p.communicate('{} {}\n'.format(device['vm'].name,
                                                          device['device']))
        if p.returncode != 0:
            # TODO: sanitize and include stdout
            raise QubesException('Device attach failed')
    finally:
        # FIXME: there is a race condition here - some other process might
        # modify the file in the meantime. This may result in unexpected
        # denials, but will not allow too much
        if not policy_exists:
            os.unlink(policy_path)
        else:
            # remove only the line we inserted; NOTE(review): raises
            # ValueError if another process already removed it
            with open(policy_path, 'r+') as f:
                policy = f.readlines()
                policy.remove('{} {} allow\n'.format(vm.name,
                                                     device['vm'].name))
                f.truncate(0)
                f.seek(0)
                f.write(''.join(policy))
def device_attach_check(vm, backend_vm, device, frontend, mode):
    """Validate block-attach parameters; raise QubesException on any error."""
    # both the target VM and the device's backend must be running
    for domain in (vm, backend_vm):
        if not domain.is_running():
            raise QubesException("VM %s not running" % domain.name)
    # refuse read-write attachment of a device exported read-only
    if mode == 'w' and device['mode'] == 'r':
        raise QubesException("Cannot attach read-only device in read-write "
                             "mode")
def block_attach(qvmc, vm, device, frontend=None, mode="w", auto_detach=False, wait=True):
    """Attach a block *device* (dict with 'vm' and 'device' keys) to *vm*.

    Picks an unused frontend when none is given, refuses a busy explicit
    frontend, optionally detaches the device from its current holder, and
    hot-plugs it through libvirt's attachDevice with an XML <disk> element.

    NOTE(review): *wait* is accepted but never used in this body.
    Raises QubesException on validation failure, busy frontend, or when
    the device is already attached and auto_detach is False.
    """
    backend_vm = qvmc.get_vm_by_name(device['vm'])
    device_attach_check(vm, backend_vm, device, frontend, mode)
    if frontend is None:
        frontend = block_find_unused_frontend(vm)
        if frontend is None:
            raise QubesException("No unused frontend found")
    else:
        # Check if any device attached at this frontend
        xml = vm.libvirt_domain.XMLDesc()
        parsed_xml = etree.fromstring(xml)
        disks = parsed_xml.xpath("//domain/devices/disk/target[@dev='%s']" %
                                 frontend)
        if len(disks):
            raise QubesException("Frontend %s busy in VM %s, detach it first" %
                                 (frontend, vm.name))
    # Check if this device is attached to some domain
    attached_vm = block_check_attached(qvmc, device)
    if attached_vm:
        if auto_detach:
            block_detach(attached_vm['vm'], attached_vm['frontend'])
        else:
            raise QubesException("Device %s from %s already connected to VM "
                                 "%s as %s" % (device['device'],
                                               backend_vm.name,
                                               attached_vm['vm'],
                                               attached_vm['frontend']))
    # Build the libvirt <disk> element describing the attachment
    disk = Element("disk")
    disk.set('type', 'block')
    disk.set('device', 'disk')
    SubElement(disk, 'driver').set('name', 'phy')
    SubElement(disk, 'source').set('dev', device['device'])
    SubElement(disk, 'target').set('dev', frontend)
    if backend_vm.qid != 0:
        # device served by another VM, not dom0 — name the backend domain
        SubElement(disk, 'backenddomain').set('name', device['vm'])
    if mode == "r":
        SubElement(disk, 'readonly')
    vm.libvirt_domain.attachDevice(etree.tostring(disk, encoding='utf-8'))
    try:
        # trigger watches to update device status
        # FIXME: this should be removed once libvirt will report such
        # events itself
        vm.qdb.write('/qubes-block-devices', '')
    except Error:
        pass
def usb_detach(qvmc, vm, device):
    """Disconnect *device* from *vm* via the qubes.USBDetach service.

    Re-checks the attachment first so a stale request fails loudly.
    Raises QubesException when the device is not attached to *vm* or the
    detach service exits non-zero.
    """
    # detect race conditions; there is still race here, but much smaller
    current_holder = usb_check_attached(qvmc, device)
    if current_holder is None or current_holder.qid != vm.qid:
        raise QubesException("Device {} not connected to VM {}".format(
            device['name'], vm.name))
    proc = device['vm'].run_service('qubes.USBDetach', passio_popen=True,
                                    user='******')
    proc.communicate('{}\n'.format(device['device']))
    if proc.returncode != 0:
        # TODO: sanitize and include stdout
        raise QubesException('Device detach failed')
def vmdir_path(self, vm, pool_dir):
    """Return the absolute path of the directory holding *vm*'s images.

    The default QubesOS file storage keeps VM images in subdirectories of
    *pool_dir* chosen by VM type:

    * ``appvms`` for ``QubesAppVm``/``QubesHvm`` (DisposableVMs also live
      here, under their template's name with a ``-dvm`` suffix)
    * ``vm-templates`` for template VMs
    * ``servicevms`` for ``QubesNetVm`` subclasses

    Args:
        vm: a QubesVM
        pool_dir: the root directory of the pool

    Returns:
        str: absolute path to the VM's storage directory

    Raises:
        QubesException: for an unrecognized VM type.
    """
    if vm.is_appvm():
        return os.path.join(pool_dir, 'appvms', vm.name)
    if vm.is_template():
        return os.path.join(pool_dir, 'vm-templates', vm.name)
    if vm.is_netvm():
        return os.path.join(pool_dir, 'servicevms', vm.name)
    if vm.is_disposablevm():
        return os.path.join(pool_dir, 'appvms', vm.template.name + '-dvm')
    raise QubesException(vm.type() + ' unknown vm type')
def run(self, command, **kwargs):
    """Run *command* in the HVM, prefixing "nogui:" when gui=False.

    Requires the qrexec agent inside the VM; raises QubesException when
    it is not installed.
    """
    if not self.qrexec_installed:
        raise QubesException("Needs qrexec agent installed in VM to use this function. See also qvm-prefs.")
    # '== False' (not 'is False') kept deliberately: matches the original
    # comparison semantics for e.g. gui=0
    if kwargs.get('gui', True) == False:
        command = "nogui:" + command
    return super(QubesHVm, self).run(command, **kwargs)
def start(self, *args, **kwargs):
    """Start the HVM template, refusing while any VM based on it runs."""
    if any(appvm.is_running() for appvm in self.appvms.values()):
        raise QubesException(
            "Cannot start HVM template while VMs based on it are running"
        )
    return super(QubesTemplateHVm, self).start(*args, **kwargs)
def resize_root_img(self, size, allow_start=False):
    """Grow root.img to *size* bytes.

    Args:
        size: new size of root.img in bytes; must not be smaller than the
            current size (shrinking is refused for safety).
        allow_start: unused here; honored by subclasses that must boot the
            VM to finish the resize (e.g. to run resize2fs).

    Raises:
        QubesException: for template-based VMs, running VMs, or an
            attempt to shrink the image.
    """
    if self.template:
        raise QubesException("Cannot resize root.img of template-based VM"
                             ". Resize the root.img of the template "
                             "instead.")
    if self.is_running():
        raise QubesException("Cannot resize root.img of running VM")
    if size < self.get_root_img_sz():
        raise QubesException(
            # fixed typo: "shringing" -> "shrinking"
            "For your own safety shrinking of root.img is disabled. If "
            "you really know what you are doing, use 'truncate' manually.")
    # 'with' guarantees the descriptor is closed even if truncate() fails
    # (the original leaked the handle on error)
    with open(self.root_img, "a+b") as f_root:
        f_root.truncate(size)
def suspend(self):
    """Pause the VM (no-op under dry_run).

    Raises QubesException when the VM is neither running nor paused.
    """
    if dry_run:
        return
    if not (self.is_running() or self.is_paused()):
        raise QubesException("VM not running!")
    self.pause()
def verify_files(self):
    # Verify on-disk/OS state backing this WNI VM: the VM directory must
    # exist and, for non-template VMs, the matching Windows user account
    # must be present.
    if not os.path.exists(self.vmdir):
        raise QubesException (
            "VM directory doesn't exist: {0}".\
            format(self.vmdir))
    try:
        # TemplateVm in WNI is quite virtual, so do not require the user
        if not self.vm.is_template():
            win32net.NetUserGetInfo(None, self._get_username(), 0)
    except pywintypes.error, details:
        # 2221 == NERR_UserNotFound: "The user name cannot be found."
        if details[0] == 2221:
            raise QubesException("User %s doesn't exist" % self._get_username())
        else:
            raise
def shutdown(self, force=False):
    """Shut down this NetVM; refuse (unless *force*) while clients run."""
    if dry_run:
        return
    running_clients = [client for client in self.connected_vms.values()
                       if client.is_running()]
    if running_clients and not force:
        raise QubesException("There are other VMs connected to this VM: "
                             + str([vm.name for vm in running_clients]))
    super(QubesNetVm, self).shutdown(force=force)
def verify_files(self):
    """Check that the directory and image files backing this VM exist."""
    if not os.path.exists(self.vmdir):
        raise QubesException(
            "VM directory doesn't exist: {0}".format(self.vmdir))
    if self.root_img and not os.path.exists(self.root_img):
        raise QubesException(
            "VM root image file doesn't exist: {0}".format(self.root_img))
    if self.private_img and not os.path.exists(self.private_img):
        raise QubesException(
            "VM private image file doesn't exist: {0}".format(
                self.private_img))
    # modules_img is optional; checked only when configured at all
    if self.modules_img is not None and not os.path.exists(self.modules_img):
        raise QubesException(
            "VM kernel modules image does not exists: {0}".format(
                self.modules_img))
def block_check_attached(qvmc, device):
    """Find which VM (if any) currently has *device* attached.

    Scans the libvirt XML of every running VM (dom0 excluded) for a
    <disk> whose backend domain and source path match *device*.

    @type qvmc: QubesVmCollection
    @return: dict with 'frontend' (target dev name) and 'vm' (the VM
        object) for the first match, or None when not attached anywhere.
    """
    if qvmc is None:
        # TODO: ValueError
        raise QubesException("You need to pass qvmc argument")
    for vm in qvmc.values():
        if vm.qid == 0:
            # Connecting devices to dom0 not supported
            continue
        if not vm.is_running():
            continue
        try:
            libvirt_domain = vm.libvirt_domain
            if libvirt_domain:
                xml = libvirt_domain.XMLDesc()
            else:
                xml = None
        except libvirt.libvirtError:
            # the domain may have disappeared between is_running() and here
            if vmm.libvirt_conn.virConnGetLastError(
                    )[0] == libvirt.VIR_ERR_NO_DOMAIN:
                xml = None
            else:
                raise
        if xml:
            parsed_xml = etree.fromstring(xml)
            disks = parsed_xml.xpath("//domain/devices/disk")
            for disk in disks:
                # no <backenddomain> element means the disk is served by dom0
                backend_name = 'dom0'
                if disk.find('backenddomain') is not None:
                    backend_name = disk.find('backenddomain').get('name')
                source = disk.find('source')
                if disk.get('type') == 'file':
                    path = source.get('file')
                elif disk.get('type') == 'block':
                    path = source.get('dev')
                else:
                    # TODO: logger
                    print >>sys.stderr, "Unknown disk type '%s' attached to " \
                        "VM '%s'" % (source.get('type'), vm.name)
                    continue
                # match either by raw device path or, for non-/dev paths,
                # by the device description field
                if backend_name == device['vm'] and (
                        path == device['device'] or
                        not path.startswith('/dev/') and
                        path == device['desc']):
                    return {
                        "frontend": disk.find('target').get('dev'),
                        "vm": vm}
    return None
def unpause(self):
    """Resume this VM from the paused state.

    Raises QubesException if the VM is not currently paused; after
    resuming, re-applies proxy-ARP and own firewall rules.
    """
    self.log.debug('unpause()')
    if dry_run:
        return
    if self.is_paused():
        self.libvirt_domain.resume()
        self.adjust_proxy_arp()
        self.adjust_own_firewall_rules()
    else:
        raise QubesException("VM not paused!")
def prepare_for_vm_startup(self, verbose):
    """Pre-start storage checks; verify a non-dom0 drive backend is up.

    Raises QubesException when the VM holding the configured drive does
    not exist or is not running.
    """
    super(QubesXenVmStorage, self).prepare_for_vm_startup(verbose=verbose)
    if self.drive is None:
        return
    (drive_type, drive_domain, drive_path) = self.drive.split(":")
    if drive_domain.lower() == "dom0":
        # dom0-backed drives need no backend VM check
        return
    try:
        # FIXME: find a better way to access QubesVmCollection
        drive_vm = self.vm._collection.get_vm_by_name(drive_domain)
        # prepare for improved QubesVmCollection
        if drive_vm is None:
            raise KeyError
        if not drive_vm.is_running():
            raise QubesException(
                "VM '{}' holding '{}' isn't running".format(
                    drive_domain, drive_path))
    except KeyError:
        raise QubesException(
            "VM '{}' holding '{}' does not exists".format(
                drive_domain, drive_path))
def start(self, *args, **kwargs):
    """Start the HVM, reserving stubdomain memory.

    Refuses to start while the VM's template is running.  When the
    underlying start fails, checks libvirt capabilities to produce a
    clearer error if hardware virtualization (VT-x/AMD-v) is missing.
    """
    # make it available to storage.prepare_for_vm_startup, which is
    # called before actually building VM libvirt configuration
    self.storage.drive = self.drive
    if self.template and self.template.is_running():
        raise QubesException("Cannot start the HVM while its template is running")
    try:
        if 'mem_required' not in kwargs:
            # Reserve 44MB for stubdomain
            kwargs['mem_required'] = (self.memory + 44) * 1024 * 1024
        return super(QubesHVm, self).start(*args, **kwargs)
    except QubesException as e:
        # distinguish "no HVM support at all" from other start failures
        capabilities = vmm.libvirt_conn.getCapabilities()
        tree = ElementTree.fromstring(capabilities)
        os_types = tree.findall('./guest/os_type')
        if 'hvm' not in map(lambda x: x.text, os_types):
            raise QubesException("Cannot start HVM without VT-x/AMD-v enabled")
        else:
            raise
def drive(self, value):
    """Set the VM's emulated drive.

    Accepted forms: ``None`` (no drive), ``[hd:|cdrom:][domain:]/path``.
    The type prefix defaults to ``cdrom`` and the domain to ``dom0``; the
    normalized value is stored as ``type:domain:/path`` in ``self._drive``.

    Raises:
        QubesException: for an unsupported drive type or a relative path.
    """
    if value is None:
        self._drive = None
        return

    # strip type for a moment
    drv_type = "cdrom"
    if value.startswith("hd:") or value.startswith("cdrom:"):
        (drv_type, unused, value) = value.partition(":")
        drv_type = drv_type.lower()

    # sanity check
    if drv_type not in ['hd', 'cdrom']:
        # fixed: originally formatted the builtin `type` instead of the
        # offending drv_type value, producing a useless message
        raise QubesException("Unsupported drive type: %s" % drv_type)

    if value.count(":") == 0:
        value = "dom0:" + value
    if value.count(":/") == 0:
        # FIXME: when Windows backend will be supported, improve this
        raise QubesException("Drive path must be absolute")
    self._drive = drv_type + ":" + value
def block_list(qvmc=None, vm=None, system_disks=False):
    """List exported block devices of one VM or of the whole collection.

    Returns a dict merging block_list_vm() results; an empty list when a
    single non-running VM is given.  Raises QubesException when neither
    *qvmc* nor *vm* is supplied.
    """
    if vm is not None:
        if not vm.is_running():
            return []
        scan_targets = [vm]
    elif qvmc is not None:
        scan_targets = qvmc.values()
    else:
        raise QubesException("You must pass either qvm or vm argument")

    devices = {}
    for domain in scan_targets:
        devices.update(block_list_vm(domain, system_disks))
    return devices
def start_stubdom_guid(self, verbose=True):
    """Launch qubes-guid attached to this HVM's stubdomain.

    Raises QubesException when the qubes-guid process exits non-zero.
    """
    cmd = [system_path["qubes_guid_path"],
           "-d", str(self.stubdom_xid),
           "-t", str(self.xid),
           "-N", self.name,
           "-c", self.label.color,
           "-i", self.label.icon_path,
           "-l", str(self.label.index)]
    if self.debug:
        cmd.extend(['-v', '-v'])
    elif not verbose:
        cmd.append('-q')
    if subprocess.call(cmd) != 0:
        raise QubesException("Cannot start qubes-guid!")
def parse_size(size):
    """Parse a human-readable size like "512", "10K" or "2 GB" into bytes.

    Suffixes K/KB, M/MB and G/GB (case-insensitive) denote binary
    multiples.  Raises QubesException for an unrecognized suffix.
    """
    multipliers = [
        ('K', 1024), ('KB', 1024),
        ('M', 1024 * 1024), ('MB', 1024 * 1024),
        ('G', 1024 * 1024 * 1024), ('GB', 1024 * 1024 * 1024),
    ]
    size = size.strip().upper()
    if size.isdigit():
        return int(size)
    for suffix, factor in multipliers:
        if size.endswith(suffix):
            return int(size[:-len(suffix)].strip()) * factor
    raise QubesException("Invalid size: {0}.".format(size))
def block_devid_to_name(devid):
    """Convert a Xen block devid into a device name (e.g. 51712 -> 'xvda').

    Major 202 maps to the xvd* class, major 8 to sd*; each disk owns 16
    minors, minor%16 being the partition number (0 means whole disk).

    Uses floor division ``//`` instead of the original ``/``: identical
    on Python 2 ints, but on Python 3 ``/`` yields a float and breaks the
    ``%c`` formatting below.

    Raises:
        QubesException: for an unknown major number.
    """
    major, minor = divmod(devid, 256)
    if major == 202:
        dev_class = "xvd"
    elif major == 8:
        dev_class = "sd"
    else:
        raise QubesException("Unknown device class %d" % major)
    disk_index, partition = divmod(minor, 16)
    if partition == 0:
        return "%s%c" % (dev_class, ord('a') + disk_index)
    return "%s%c%d" % (dev_class, ord('a') + disk_index, partition)
def resize_root_img(self, size, allow_start=False):
    """Grow root.img, then boot the VM to expand the filesystem.

    Delegates the image resize to the parent class, then (only when
    *allow_start* permits) starts the VM without GUI, runs resize2fs on
    the root device, shuts down and waits for the VM to stop.

    Raises QubesException when a required VM start is not allowed.
    """
    super(QubesResizableVmWithResize2fs, self).resize_root_img(
        size, allow_start=allow_start)
    if not allow_start:
        raise QubesException("VM start required to complete the "
                             "operation, but not allowed. Either run the "
                             "operation again allowing VM start this "
                             "time, or run resize2fs in the VM manually.")
    self.start(start_guid=False)
    self.run("resize2fs /dev/mapper/dmroot", user="******", wait=True,
             gui=False)
    self.shutdown()
    # wait until the domain is actually gone before returning
    while self.is_running():
        sleep(1)
def usb_check_attached(qvmc, device):
    """Reread device attachment status

    Reads the 'connected-to' entry from the backend's QubesDB subtree for
    *device* and resolves it to a VM object via *qvmc*.  Returns the VM,
    or None when not attached (or the named VM no longer exists).
    The value read from QubesDB is untrusted input and is validated
    against usb_connected_to_re before use.
    """
    vm = device['vm']
    untrusted_connected_to = vm.qdb.read('{}/connected-to'.format(
        device['qdb_path']))
    if untrusted_connected_to:
        if not usb_connected_to_re.match(untrusted_connected_to):
            raise QubesException(
                "Invalid %s device 'connected-to' in VM '%s'" % (
                    device['device'], vm.name))
        connected_to = qvmc.get_vm_by_name(untrusted_connected_to)
        if connected_to is None:
            # stale entry: the named VM was removed; report and fall
            # through returning None
            print >>sys.stderr, \
                "Device {} appears to be connected to {}, " \
                "but such VM doesn't exist".format(
                    device['device'], untrusted_connected_to)
    else:
        connected_to = None
    return connected_to
def shutdown(self, force=False):
    """Shut down this NetVM, detaching clients' network interfaces first.

    Refuses (unless *force*) while any connected VM is running.
    """
    if dry_run:
        return
    running_clients = [client for client in self.connected_vms.values()
                       if client.is_running()]
    if running_clients and not force:
        raise QubesException("There are other VMs connected to this VM: "
                             + str([vm.name for vm in running_clients]))
    # detach network interfaces of connected VMs before shutting down,
    # otherwise libvirt will not notice it and will try to detach them
    # again (which would fail, obviously).
    # This code can be removed when #1426 got implemented
    for client in self.connected_vms.values():
        if not client.is_running():
            continue
        try:
            client.detach_network()
        except (QubesException, libvirt.libvirtError):
            # ignore errors
            pass
    super(QubesNetVm, self).shutdown(force=force)
def block_name_to_majorminor(name):
    """Map a block device name (or numeric devid) to a (major, minor) pair.

    *name* may be an int or digit-string devid, an existing /dev node
    (then the real st_rdev is used), or a bare device name such as
    'xvdb', 'sda1', 'mmcblk0p1', 'loop0', 'dm-3'.

    Uses floor division ``//`` instead of the original ``/``: identical
    on Python 2 ints but correct (non-float) on Python 3.

    Returns:
        (major, minor) tuple; (0, 0) for an unrecognized device name.

    Raises:
        QubesException: recognized prefix but malformed remainder.
    """
    # check if it is already devid
    if isinstance(name, int):
        return (name // 256, name % 256)
    if name.isdigit():
        return (int(name) // 256, int(name) % 256)

    # prefer the kernel's own numbering when the node actually exists
    if os.path.exists('/dev/%s' % name):
        blk_info = os.stat(os.path.realpath('/dev/%s' % name))
        if stat.S_ISBLK(blk_info.st_mode):
            return (blk_info.st_rdev // 256, blk_info.st_rdev % 256)

    major = 0
    minor = 0
    dXpY_style = False   # names like mmcblk0p1 (digit disk, 'p' partition)
    disk = True          # partitionable device class

    if name.startswith("xvd"):
        major = 202
    elif name.startswith("sd"):
        major = 8
    elif name.startswith("mmcblk"):
        dXpY_style = True
        major = 179
    elif name.startswith("scd"):
        disk = False
        major = 11
    elif name.startswith("sr"):
        disk = False
        major = 11
    elif name.startswith("loop"):
        dXpY_style = True
        disk = False
        major = 7
    elif name.startswith("md"):
        dXpY_style = True
        major = 9
    elif name.startswith("dm-"):
        disk = False
        major = 253
    else:
        # Unknown device
        return (0, 0)

    if not dXpY_style:
        name_match = re.match(r"^([a-z]+)([a-z-])([0-9]*)$", name)
    else:
        name_match = re.match(r"^([a-z]+)([0-9]*)(?:p([0-9]+))?$", name)
    if not name_match:
        raise QubesException("Invalid device name: %s" % name)

    if disk:
        if dXpY_style:
            # 8 minors reserved per mmcblk/md device
            minor = int(name_match.group(2)) * 8
        else:
            # 16 minors per letter-indexed disk
            minor = (ord(name_match.group(2)) - ord('a')) * 16
    else:
        minor = 0
    if name_match.group(3):
        minor += int(name_match.group(3))

    return (major, minor)
def start(self, verbose=False, **kwargs):
    """Start this DispVM by restoring it from the saved snapshot.

    Starts the NetVM first if needed, restores the paused domain from
    self.disp_savefile via libvirt, sets up QubesDB, fires start hooks,
    resumes the domain and finally launches the GUI daemon and qrexec.

    Returns the new domain's xid.  Raises QubesException when the VM is
    not halted.
    """
    self.log.debug('start()')
    if dry_run:
        return

    # Intentionally not used is_running(): eliminate also "Paused",
    # "Crashed", "Halting"
    if self.get_power_state() != "Halted":
        raise QubesException("VM is already running!")

    if self.netvm is not None:
        if self.netvm.qid != 0:
            if not self.netvm.is_running():
                if verbose:
                    print >> sys.stderr, "--> Starting NetVM {0}...".\
                        format(self.netvm.name)
                self.netvm.start(verbose=verbose, **kwargs)

    if verbose:
        print >> sys.stderr, "--> Loading the VM (type = {0})...".format(
            self.type)
    print >> sys.stderr, "time=%s, creating config file" % (str(
        time.time()))
    # refresh config file
    domain_config = self.create_config_file()

    qmemman_client = self.request_memory()

    # dispvm cannot have PCI devices
    assert (len(self.pcidevs) == 0), "DispVM cannot have PCI devices"

    print >> sys.stderr, "time=%s, calling restore" % (str(time.time()))
    # restore in paused state; resumed only after QubesDB/hooks are ready
    vmm.libvirt_conn.restoreFlags(self.disp_savefile,
                                  domain_config,
                                  libvirt.VIR_DOMAIN_SAVE_PAUSED)
    print >> sys.stderr, "time=%s, done" % (str(time.time()))
    # force re-lookup of the freshly restored domain on next access
    self._libvirt_domain = None

    if verbose:
        print >> sys.stderr, "--> Starting Qubes DB..."
    self.start_qubesdb()
    self.services['qubes-dvm'] = True
    if verbose:
        print >> sys.stderr, "--> Setting Qubes DB info for the VM..."
    self.create_qubesdb_entries()
    print >> sys.stderr, "time=%s, done qubesdb" % (str(time.time()))

    # fire hooks
    for hook in self.hooks_start:
        hook(self, verbose=verbose, **kwargs)

    if verbose:
        print >> sys.stderr, "--> Starting the VM..."
    self.libvirt_domain.resume()
    print >> sys.stderr, "time=%s, resumed" % (str(time.time()))

    # close() is not really needed, because the descriptor is close-on-exec
    # anyway, the reason to postpone close() is that possibly xl is not done
    # constructing the domain after its main process exits
    # so we close() when we know the domain is up
    # the successful unpause is some indicator of it
    # NOTE(review): qmemman_present is a module-level flag not visible in
    # this chunk — presumably set at import time; confirm.
    if qmemman_present:
        qmemman_client.close()

    if kwargs.get('start_guid', True) and os.path.exists('/var/run/shm.id'):
        self.start_guid(verbose=verbose, before_qrexec=True,
                        notify_function=kwargs.get('notify_function', None))

    self.start_qrexec_daemon(verbose=verbose,
                             notify_function=kwargs.get(
                                 'notify_function', None))
    print >> sys.stderr, "time=%s, qrexec done" % (str(time.time()))

    if kwargs.get('start_guid', True) and os.path.exists('/var/run/shm.id'):
        self.start_guid(verbose=verbose,
                        notify_function=kwargs.get('notify_function', None))
    print >> sys.stderr, "time=%s, guid done" % (str(time.time()))

    return self.xid
def start(self, **kwargs):
    """Dom0 is a placeholder domain; starting it is always an error."""
    raise QubesException("Cannot start Dom0 fake domain!")
def validate_drive_path(self, drive):
    """Check that a dom0-backed drive string points at an existing path.

    *drive* has the form ``type:domain:path``; paths inside other VMs
    cannot be verified from here and are accepted as-is.
    Raises QubesException when a dom0 path does not exist.
    """
    drive_type, drive_domain, drive_path = drive.split(':', 2)
    if drive_domain != 'dom0':
        return
    if not os.path.exists(drive_path):
        raise QubesException(
            "Invalid drive path '{}'".format(drive_path))
def resize_root_img(self, size):
    """Resize the template's root.img, refusing while based-on VMs run."""
    if any(appvm.is_running() for appvm in self.appvms.values()):
        # runtime message reproduced verbatim (including original wording)
        raise QubesException("Cannot resize root.img while any VM "
                             "based on this tempate is running")
    return super(QubesTemplateHVm, self).resize_root_img(size)