class FilesystemController(SubiquityController):
    """Drive storage probing and filesystem configuration.

    Probes the machine's block devices (via the app's prober and udev
    monitoring), applies either guided or fully manual partitioning, and
    supports the "storage" autoinstall section.
    """

    autoinstall_key = "storage"
    autoinstall_schema = {'type': 'object'}  # ...

    model_name = "filesystem"

    def __init__(self, app):
        # ai_data holds the parsed "storage" autoinstall section (if any).
        self.ai_data = {}
        super().__init__(app)
        self.model.target = app.base_model.target
        # In dry-run mode the bootloader kind can be forced from the
        # command line for testing.
        if self.opts.dry_run and self.opts.bootloader:
            name = self.opts.bootloader.upper()
            self.model.bootloader = getattr(Bootloader, name)
        self.answers.setdefault('guided', False)
        self.answers.setdefault('guided-index', 0)
        self.answers.setdefault('manual', [])
        # pyudev monitor, created in _start().
        self._monitor = None
        # Maps restricted (bool) -> apport report for a failed probe.
        self._crash_reports = {}
        self._probe_once_task = SingleInstanceTask(
            self._probe_once, propagate_errors=False)
        self._probe_task = SingleInstanceTask(
            self._probe, propagate_errors=False)
        # NOTE(review): resilient boot (boot assets on several disks)
        # appears to be supported on 20.04+ except on PReP systems —
        # confirm against the release notes.
        if self.model.bootloader == Bootloader.PREP:
            self.supports_resilient_boot = False
        else:
            release = lsb_release()['release']
            self.supports_resilient_boot = release >= '20.04'

    def load_autoinstall_data(self, data):
        """Store the "storage" autoinstall section.

        A missing section in a non-interactive install defaults to the
        guided LVM layout; interactively it defaults to empty (UI-driven).
        """
        log.debug("load_autoinstall_data %s", data)
        if data is None:
            if not self.interactive():
                data = {
                    'layout': {
                        'name': 'lvm',
                        },
                    }
            else:
                data = {}
        log.debug("self.ai_data = %s", data)
        self.ai_data = data

    @with_context()
    async def apply_autoinstall_config(self, context):
        """Apply the autoinstall storage config after probing finishes."""
        self.stop_listening_udev()
        await self._start_task
        await self._probe_task.wait()
        self.convert_autoinstall_config()
        # Sanity-check the resulting config before install proceeds.
        if not self.model.is_root_mounted():
            raise Exception("autoinstall config did not mount root")
        if self.model.needs_bootloader_partition():
            raise Exception(
                "autoinstall config did not create needed bootloader "
                "partition")

    async def _probe_once(self, restricted):
        """Run one storage probe and dump the results to the block log dir.

        A restricted probe only looks at block devices (used as a fallback
        when the full probe fails/crashes).
        """
        if restricted:
            probe_types = {'blockdev'}
            fname = 'probe-data-restricted.json'
            key = "ProbeDataRestricted"
        else:
            probe_types = None
            fname = 'probe-data.json'
            key = "ProbeData"
        storage = await run_in_thread(self.app.prober.get_storage, probe_types)
        fpath = os.path.join(self.app.block_log_dir, fname)
        with open(fpath, 'w') as fp:
            json.dump(storage, fp, indent=4)
        # Attach the probe dump to any future apport crash report.
        self.app.note_file_for_apport(key, fpath)
        self.model.load_probe_data(storage)

    async def _probe(self):
        """Probe storage, falling back to a restricted probe on failure.

        Each attempt gets 15 seconds; failures produce (but do not show)
        a crash report, stored in self._crash_reports.
        """
        with self.context.child("_probe") as context:
            async with self.app.install_lock_file.shared():
                self._crash_reports = {}
                if isinstance(self.ui.body, ProbingFailed):
                    self.ui.set_body(SlowProbing(self))
                    schedule_task(self._wait_for_probing())
                for (restricted, kind) in [
                        (False, ErrorReportKind.BLOCK_PROBE_FAIL),
                        (True, ErrorReportKind.DISK_PROBE_FAIL),
                        ]:
                    try:
                        desc = "restricted={}".format(restricted)
                        with context.child("probe_once", desc):
                            await self._probe_once_task.start(restricted)
                            # We wait on the task directly here, not
                            # self._probe_once_task.wait as if _probe_once_task
                            # gets cancelled, we should be cancelled too.
                            await asyncio.wait_for(
                                self._probe_once_task.task, 15.0)
                    except asyncio.CancelledError:
                        # asyncio.CancelledError is a subclass of Exception in
                        # Python 3.6 (sadface)
                        raise
                    except Exception:
                        block_discover_log.exception(
                            "block probing failed restricted=%s", restricted)
                        report = self.app.make_apport_report(
                            kind, "block probing", interrupt=False)
                        self._crash_reports[restricted] = report
                        # Try the next (more restricted) probe.
                        continue
                    # Success: no need for the fallback probe.
                    break

    def convert_autoinstall_config(self):
        """Turn self.ai_data into filesystem model state.

        'layout' dispatches to a guided_* method on a matching disk;
        'config' is a full curtin-style action list applied directly.
        """
        log.debug("self.ai_data = %s", self.ai_data)
        if 'layout' in self.ai_data:
            layout = self.ai_data['layout']
            with self.context.child("applying_autoinstall"):
                meth = getattr(self, "guided_" + layout['name'])
                disk = self.model.disk_for_match(
                    self.model.all_disks(),
                    layout.get("match", {'size': 'largest'}))
                meth(disk)
        elif 'config' in self.ai_data:
            with self.context.child("applying_autoinstall"):
                self.model.apply_autoinstall_config(self.ai_data['config'])
            self.model.grub = self.ai_data.get('grub', {})
            self.model.swap = self.ai_data.get('swap')

    def start(self):
        self._start_task = schedule_task(self._start())

    async def _start(self):
        """Set up udev monitoring for block devices and kick off probing."""
        context = pyudev.Context()
        self._monitor = pyudev.Monitor.from_netlink(context)
        self._monitor.filter_by(subsystem='block')
        self._monitor.enable_receiving()
        self.start_listening_udev()
        await self._probe_task.start()

    def start_listening_udev(self):
        loop = asyncio.get_event_loop()
        loop.add_reader(self._monitor.fileno(), self._udev_event)

    def stop_listening_udev(self):
        loop = asyncio.get_event_loop()
        loop.remove_reader(self._monitor.fileno())

    def _udev_event(self):
        """Handle a udev block event by re-running the probe.

        If udev has not settled yet, back off for 0.1s instead of probing
        a system that is still changing.
        """
        cp = run_command(['udevadm', 'settle', '-t', '0'])
        if cp.returncode != 0:
            log.debug("waiting 0.1 to let udev event queue settle")
            self.stop_listening_udev()
            loop = asyncio.get_event_loop()
            loop.call_later(0.1, self.start_listening_udev)
            return
        # Drain the udev events in the queue -- if we stopped listening to
        # allow udev to settle, it's good bet there is more than one event to
        # process and we don't want to kick off a full block probe for each
        # one. It's a touch unfortunate that pyudev doesn't have a
        # non-blocking read so we resort to select().
        while select.select([self._monitor.fileno()], [], [], 0)[0]:
            action, dev = self._monitor.receive_device()
            log.debug("_udev_event %s %s", action, dev)
        self._probe_task.start_sync()

    async def _wait_for_probing(self):
        await self._start_task
        await self._probe_task.wait()
        # Only refresh the UI if we're still showing the probing screen.
        if isinstance(self.ui.body, SlowProbing):
            self.start_ui()

    def start_ui(self):
        """Show the appropriate screen for the current probing state."""
        if self._probe_task.task is None or not self._probe_task.task.done():
            self.ui.set_body(SlowProbing(self))
            schedule_task(self._wait_for_probing())
        elif True in self._crash_reports:
            # Even the restricted probe failed; give up on storage config.
            self.ui.set_body(ProbingFailed(self))
            self.ui.body.show_error()
        else:
            # Once we've shown the filesystem UI, we stop listening for udev
            # events as merging system changes with configuration the user has
            # performed would be tricky.  Possibly worth doing though! Just
            # not today.
            self.convert_autoinstall_config()
            self.stop_listening_udev()
            self.ui.set_body(GuidedDiskSelectionView(self))
            # If only the full probe failed, show (non-fatally) its report.
            pr = self._crash_reports.get(False)
            if pr is not None:
                self.app.show_error_report(pr)
            if self.answers['guided']:
                disk = self.model.all_disks()[self.answers['guided-index']]
                method = self.answers.get('guided-method')
                self.ui.body.form.guided_choice.value = {
                    'disk': disk,
                    'use_lvm': method == "lvm",
                    }
                self.ui.body.done(self.ui.body.form)
            elif self.answers['manual']:
                self.manual()

    def _action_get(self, id):
        """Resolve an answers-file device reference to a model object.

        id is a list like ["disk index 0"] or ["raid name md0", "part 1"].
        """
        dev_spec = id[0].split()
        dev = None
        if dev_spec[0] == "disk":
            if dev_spec[1] == "index":
                dev = self.model.all_disks()[int(dev_spec[2])]
            elif dev_spec[1] == "serial":
                dev = self.model._one(type='disk', serial=dev_spec[2])
        elif dev_spec[0] == "raid":
            if dev_spec[1] == "name":
                for r in self.model.all_raids():
                    if r.name == dev_spec[2]:
                        dev = r
                        break
        elif dev_spec[0] == "volgroup":
            if dev_spec[1] == "name":
                for r in self.model.all_volgroups():
                    if r.name == dev_spec[2]:
                        dev = r
                        break
        if dev is None:
            raise Exception("could not resolve {}".format(id))
        if len(id) > 1:
            part, index = id[1].split()
            if part == "part":
                return dev.partitions()[int(index)]
        else:
            return dev
        raise Exception("could not resolve {}".format(id))

    def _action_clean_devices_raid(self, devices):
        # devices alternates [device-ref, role, device-ref, role, ...].
        r = {
            self._action_get(d): v
            for d, v in zip(devices[::2], devices[1::2])
            }
        for d in r:
            assert d.ok_for_raid
        return r

    def _action_clean_devices_vg(self, devices):
        r = {self._action_get(d): 'active' for d in devices}
        for d in r:
            assert d.ok_for_lvm_vg
        return r

    def _action_clean_level(self, level):
        return raidlevels_by_value[level]

    def _answers_action(self, action):
        """Drive one answers-file action against the UI (generator).

        Each yield hands control back so the UI can process the queued
        input before the next step inspects self.ui.body.
        """
        from subiquitycore.ui.stretchy import StretchyOverlay
        from subiquity.ui.views.filesystem.delete import ConfirmDeleteStretchy
        log.debug("_answers_action %r", action)
        if 'obj' in action:
            obj = self._action_get(action['obj'])
            action_name = action['action']
            # Old answers files may still use the pre-rename action name.
            if action_name == "MAKE_BOOT":
                action_name = "TOGGLE_BOOT"
            meth = getattr(
                self.ui.body.avail_list,
                "_{}_{}".format(obj.type, action_name))
            meth(obj)
            yield
            body = self.ui.body._w
            if not isinstance(body, StretchyOverlay):
                return
            if isinstance(body.stretchy, ConfirmDeleteStretchy):
                if action.get("submit", True):
                    body.stretchy.done()
            else:
                yield from self._enter_form_data(
                    body.stretchy.form,
                    action['data'],
                    action.get("submit", True))
        elif action['action'] == 'create-raid':
            self.ui.body.create_raid()
            yield
            body = self.ui.body._w
            yield from self._enter_form_data(
                body.stretchy.form,
                action['data'],
                action.get("submit", True),
                clean_suffix='raid')
        elif action['action'] == 'create-vg':
            self.ui.body.create_vg()
            yield
            body = self.ui.body._w
            yield from self._enter_form_data(
                body.stretchy.form,
                action['data'],
                action.get("submit", True),
                clean_suffix='vg')
        elif action['action'] == 'done':
            if not self.ui.body.done.enabled:
                raise Exception("answers did not provide complete fs config")
            self.app.confirm_install()
            self.finish()
        else:
            raise Exception("could not process action {}".format(action))

    def manual(self):
        """Switch to the manual partitioning screen."""
        self.ui.set_body(FilesystemView(self.model, self))
        if self.answers['guided']:
            self.app.confirm_install()
            self.finish()
        if self.answers['manual']:
            self._run_iterator(self._run_actions(self.answers['manual']))
            self.answers['manual'] = []

    def guided(self, method):
        """Show the guided disk selection screen for 'direct' or 'lvm'."""
        v = GuidedDiskSelectionView(self.model, self, method)
        self.ui.set_body(v)
        if self.answers['guided']:
            index = self.answers['guided-index']
            v.form.guided.value = True
            v.form.guided_choice.disk.widget.index = index
            v.form._emit('done')

    def reset(self):
        log.info("Resetting Filesystem model")
        self.model.reset()
        self.manual()

    def cancel(self):
        self.app.prev_screen()

    def finish(self):
        log.debug("FilesystemController.finish next_screen")
        self.configured()
        self.app.next_screen()

    def create_mount(self, fs, spec):
        """Mount fs at spec['mount'], adding a boot disk if one is needed."""
        if spec.get('mount') is None:
            return
        mount = self.model.add_mount(fs, spec['mount'])
        if self.model.needs_bootloader_partition():
            vol = fs.volume
            if vol.type == "partition" and vol.device.type == "disk":
                if vol.device._can_be_boot_disk():
                    self.add_boot_disk(vol.device)
        return mount

    def delete_mount(self, mount):
        if mount is None:
            return
        self.model.remove_mount(mount)

    def create_filesystem(self, volume, spec):
        """Create (or preserve) a filesystem on volume per spec.

        spec['fstype'] is None means "keep the existing filesystem";
        otherwise the volume is wiped and formatted with the given type.
        """
        if spec['fstype'] is None:
            # prep partitions are always wiped (and never have a filesystem)
            if getattr(volume, 'flag', None) != 'prep':
                volume.wipe = None
            fstype = volume.original_fstype()
            if fstype is None:
                return None
            preserve = True
        else:
            fstype = spec['fstype']
            volume.wipe = 'superblock'
            preserve = False
        fs = self.model.add_filesystem(volume, fstype, preserve)
        if isinstance(volume, Partition):
            if fstype == "swap":
                volume.flag = "swap"
            elif volume.flag == "swap":
                volume.flag = ""
        # Swap gets an empty mount point to mark it for use.
        if spec['fstype'] == "swap":
            self.model.add_mount(fs, "")
        if spec['fstype'] is None and spec['use_swap']:
            self.model.add_mount(fs, "")
        self.create_mount(fs, spec)
        return fs

    def delete_filesystem(self, fs):
        if fs is None:
            return
        self.delete_mount(fs.mount())
        self.model.remove_filesystem(fs)
    delete_format = delete_filesystem

    def create_partition(self, device, spec, flag="", wipe=None,
                         grub_device=None):
        part = self.model.add_partition(
            device, spec["size"], flag, wipe, grub_device)
        self.create_filesystem(part, spec)
        return part

    def delete_partition(self, part):
        self.clear(part)
        self.model.remove_partition(part)

    def _create_boot_partition(self, disk):
        """Create the bootloader partition appropriate to the firmware."""
        bootloader = self.model.bootloader
        if bootloader == Bootloader.UEFI:
            part_size = UEFI_GRUB_SIZE_BYTES
            # Don't let the ESP take over a tiny disk.
            if UEFI_GRUB_SIZE_BYTES * 2 >= disk.size:
                part_size = disk.size // 2
            log.debug('_create_boot_partition - adding EFI partition')
            spec = dict(size=part_size, fstype='fat32')
            if self.model._mount_for_path("/boot/efi") is None:
                spec['mount'] = '/boot/efi'
            part = self.create_partition(
                disk, spec, flag="boot", grub_device=True)
        elif bootloader == Bootloader.PREP:
            log.debug('_create_boot_partition - adding PReP partition')
            part = self.create_partition(
                disk,
                dict(size=PREP_GRUB_SIZE_BYTES, fstype=None, mount=None),
                # must be wiped or grub-install will fail
                wipe='zero',
                flag='prep', grub_device=True)
        elif bootloader == Bootloader.BIOS:
            log.debug('_create_boot_partition - adding bios_grub partition')
            part = self.create_partition(
                disk,
                dict(size=BIOS_GRUB_SIZE_BYTES, fstype=None, mount=None),
                flag='bios_grub')
            disk.grub_device = True
        return part

    def create_raid(self, spec):
        for d in spec['devices'] | spec['spare_devices']:
            self.clear(d)
        raid = self.model.add_raid(
            spec['name'],
            spec['level'].value,
            spec['devices'],
            spec['spare_devices'])
        return raid

    def delete_raid(self, raid):
        if raid is None:
            return
        self.clear(raid)
        for p in list(raid.partitions()):
            self.delete_partition(p)
        # Wipe the component devices so the array isn't reassembled.
        for d in set(raid.devices) | set(raid.spare_devices):
            d.wipe = 'superblock'
        self.model.remove_raid(raid)

    def create_volgroup(self, spec):
        """Create a VG; if spec has a password, wrap each PV in dm-crypt."""
        devices = set()
        key = spec.get('password')
        for device in spec['devices']:
            self.clear(device)
            if key:
                device = self.model.add_dm_crypt(device, key)
            devices.add(device)
        return self.model.add_volgroup(name=spec['name'], devices=devices)
    create_lvm_volgroup = create_volgroup

    def delete_volgroup(self, vg):
        for lv in list(vg.partitions()):
            self.delete_logical_volume(lv)
        for d in vg.devices:
            d.wipe = 'superblock'
            if d.type == "dm_crypt":
                self.model.remove_dm_crypt(d)
        self.model.remove_volgroup(vg)
    delete_lvm_volgroup = delete_volgroup

    def create_logical_volume(self, vg, spec):
        lv = self.model.add_logical_volume(
            vg=vg,
            name=spec['name'],
            size=spec['size'])
        self.create_filesystem(lv, spec)
        return lv
    create_lvm_partition = create_logical_volume

    def delete_logical_volume(self, lv):
        self.clear(lv)
        self.model.remove_logical_volume(lv)
    delete_lvm_partition = delete_logical_volume

    def delete(self, obj):
        # Dispatch to delete_<type> (delete_partition, delete_raid, ...).
        if obj is None:
            return
        getattr(self, 'delete_' + obj.type)(obj)

    def clear(self, obj):
        """Remove anything built on top of obj (fs or constructed device)."""
        if obj.type == "disk":
            obj.preserve = False
            obj.wipe = 'superblock'
        for subobj in obj.fs(), obj.constructed_device():
            self.delete(subobj)

    def reformat(self, disk):
        disk.grub_device = False
        for p in list(disk.partitions()):
            self.delete_partition(p)
        self.clear(disk)

    def partition_disk_handler(self, disk, partition, spec):
        """Create or edit a partition on disk according to spec.

        partition is None to create; otherwise edit in place.
        """
        log.debug('partition_disk_handler: %s %s %s', disk, partition, spec)
        log.debug('disk.freespace: {}'.format(disk.free_for_partitions))

        if partition is not None:
            if 'size' in spec:
                partition.size = align_up(spec['size'])
                if disk.free_for_partitions < 0:
                    raise Exception("partition size too large")
            self.delete_filesystem(partition.fs())
            self.create_filesystem(partition, spec)
            return

        if len(disk.partitions()) == 0:
            if disk.type == "disk":
                disk.preserve = False
                disk.wipe = 'superblock-recursive'

        needs_boot = self.model.needs_bootloader_partition()
        log.debug('model needs a bootloader partition? {}'.format(needs_boot))
        can_be_boot = DeviceAction.TOGGLE_BOOT in disk.supported_actions
        if needs_boot and len(disk.partitions()) == 0 and can_be_boot:
            part = self._create_boot_partition(disk)

            # adjust downward the partition size (if necessary) to accommodate
            # bios/grub partition
            if spec['size'] > disk.free_for_partitions:
                log.debug(
                    "Adjusting request down: %s - %s = %s",
                    spec['size'], part.size, disk.free_for_partitions)
                spec['size'] = disk.free_for_partitions

        self.create_partition(disk, spec)

        log.debug("Successfully added partition")

    def logical_volume_handler(self, vg, lv, spec):
        """Create or edit a logical volume on vg according to spec."""
        log.debug('logical_volume_handler: %s %s %s', vg, lv, spec)
        log.debug('vg.freespace: {}'.format(vg.free_for_partitions))

        if lv is not None:
            if 'name' in spec:
                lv.name = spec['name']
            if 'size' in spec:
                lv.size = align_up(spec['size'])
                if vg.free_for_partitions < 0:
                    raise Exception("lv size too large")
            self.delete_filesystem(lv.fs())
            self.create_filesystem(lv, spec)
            return

        self.create_logical_volume(vg, spec)

    def add_format_handler(self, volume, spec):
        log.debug('add_format_handler %s %s', volume, spec)
        self.clear(volume)
        self.create_filesystem(volume, spec)

    def raid_handler(self, existing, spec):
        """Create a RAID or update an existing one's devices/level/name."""
        log.debug("raid_handler %s %s", existing, spec)
        if existing is not None:
            for d in existing.devices | existing.spare_devices:
                d._constructed_device = None
            for d in spec['devices'] | spec['spare_devices']:
                self.clear(d)
                d._constructed_device = existing
            existing.name = spec['name']
            existing.raidlevel = spec['level'].value
            existing.devices = spec['devices']
            existing.spare_devices = spec['spare_devices']
        else:
            self.create_raid(spec)

    def volgroup_handler(self, existing, spec):
        """Create a VG or update an existing one's name/devices."""
        if existing is not None:
            key = spec.get('password')
            for d in existing.devices:
                if d.type == "dm_crypt":
                    self.model.remove_dm_crypt(d)
                    d = d.volume
                d._constructed_device = None
            devices = set()
            for d in spec['devices']:
                self.clear(d)
                if key:
                    d = self.model.add_dm_crypt(d, key)
                d._constructed_device = existing
                devices.add(d)
            existing.name = spec['name']
            existing.devices = devices
        else:
            self.create_volgroup(spec)

    def _mount_esp(self, part):
        if part.fs() is None:
            self.model.add_filesystem(part, 'fat32')
        self.model.add_mount(part.fs(), '/boot/efi')

    def remove_boot_disk(self, boot_disk):
        """Undo add_boot_disk: remove/restore bootloader partitions."""
        if self.model.bootloader == Bootloader.BIOS:
            boot_disk.grub_device = False
            flag = 'bios_grub'
        elif self.model.bootloader == Bootloader.UEFI:
            flag = 'boot'
        elif self.model.bootloader == Bootloader.PREP:
            flag = 'prep'
        partitions = [
            p for p in boot_disk.partitions() if p.flag == flag]
        remount = False
        if boot_disk.preserve:
            # Pre-existing disk: restore partitions rather than delete.
            if self.model.bootloader == Bootloader.BIOS:
                return
            for p in partitions:
                p.grub_device = False
                if self.model.bootloader == Bootloader.PREP:
                    p.wipe = None
                elif self.model.bootloader == Bootloader.UEFI:
                    if p.fs():
                        if p.fs().mount():
                            self.delete_mount(p.fs().mount())
                            remount = True
                        if not p.fs().preserve and p.original_fstype():
                            self.delete_filesystem(p.fs())
                            self.model.add_filesystem(
                                p, p.original_fstype(), preserve=True)
        else:
            full = boot_disk.free_for_partitions == 0
            tot_size = 0
            for p in partitions:
                tot_size += p.size
                if p.fs() and p.fs().mount():
                    remount = True
                self.delete_partition(p)
            if full:
                # Give the freed space back to the largest partition.
                largest_part = max(
                    boot_disk.partitions(), key=lambda p: p.size)
                largest_part.size += tot_size
        if self.model.bootloader == Bootloader.UEFI and remount:
            part = self.model._one(type='partition', grub_device=True)
            if part:
                self._mount_esp(part)

    def add_boot_disk(self, new_boot_disk):
        """Make new_boot_disk bootable, reusing or creating partitions."""
        bootloader = self.model.bootloader
        if not self.supports_resilient_boot:
            # Only one boot disk allowed: demote any existing one first.
            for disk in self.model.all_disks():
                if disk._is_boot_device():
                    self.remove_boot_disk(disk)
        if new_boot_disk._has_preexisting_partition():
            if bootloader == Bootloader.BIOS:
                new_boot_disk.grub_device = True
            elif bootloader == Bootloader.UEFI:
                should_mount = self.model._mount_for_path('/boot/efi') is None
                for p in new_boot_disk.partitions():
                    if p.flag == 'boot':
                        p.grub_device = True
                        if should_mount:
                            self._mount_esp(p)
                            should_mount = False
            elif bootloader == Bootloader.PREP:
                for p in new_boot_disk.partitions():
                    if p.flag == 'prep':
                        p.wipe = 'zero'
                        p.grub_device = True
        else:
            new_boot_disk.preserve = False
            if bootloader == Bootloader.UEFI:
                part_size = UEFI_GRUB_SIZE_BYTES
                if UEFI_GRUB_SIZE_BYTES * 2 >= new_boot_disk.size:
                    part_size = new_boot_disk.size // 2
            elif bootloader == Bootloader.PREP:
                part_size = PREP_GRUB_SIZE_BYTES
            elif bootloader == Bootloader.BIOS:
                part_size = BIOS_GRUB_SIZE_BYTES
            if part_size > new_boot_disk.free_for_partitions:
                # Shrink the largest partition to make room.
                largest_part = max(
                    new_boot_disk.partitions(), key=lambda p: p.size)
                largest_part.size -= (
                    part_size - new_boot_disk.free_for_partitions)
            self._create_boot_partition(new_boot_disk)

    def guided_direct(self, disk):
        """Guided layout: single ext4 root partition filling the disk."""
        self.reformat(disk)
        result = {
            "size": disk.free_for_partitions,
            "fstype": "ext4",
            "mount": "/",
            }
        self.partition_disk_handler(disk, None, result)

    def guided_lvm(self, disk, lvm_options=None):
        """Guided layout: /boot partition plus an LVM VG with one root LV."""
        self.reformat(disk)
        if DeviceAction.TOGGLE_BOOT in disk.supported_actions:
            self.add_boot_disk(disk)
        self.create_partition(
            device=disk, spec=dict(
                size=dehumanize_size('1G'),
                fstype="ext4",
                mount='/boot'))
        part = self.create_partition(
            device=disk, spec=dict(
                size=disk.free_for_partitions,
                fstype=None,
                ))
        spec = dict(name="ubuntu-vg", devices=set([part]))
        if lvm_options and lvm_options['encrypt']:
            spec['password'] = lvm_options['luks_options']['password']
        vg = self.create_volgroup(spec)
        # There's no point using LVM and unconditionally filling the
        # VG with a single LV, but we should use more of a smaller
        # disk to avoid the user running into out of space errors
        # earlier than they probably expect to.
        if vg.size < 10 * (2 << 30):
            # Use all of a small (<10G) disk.
            lv_size = vg.size
        elif vg.size < 20 * (2 << 30):
            # Use 10G of a smallish (<20G) disk.
            lv_size = 10 * (2 << 30)
        elif vg.size < 200 * (2 << 30):
            # Use half of a larger (<200G) disk.
            lv_size = vg.size // 2
        else:
            # Use at most 100G of a large disk.
            lv_size = 100 * (2 << 30)
        self.create_logical_volume(
            vg=vg, spec=dict(
                size=lv_size,
                name="ubuntu-lv",
                fstype="ext4",
                mount="/",
                ))

    def make_autoinstall(self):
        """Render the current model as an autoinstall "storage" section."""
        rendered = self.model.render()
        r = {'config': rendered['storage']['config']}
        if 'swap' in rendered:
            r['swap'] = rendered['swap']
        return r
class RefreshController(SubiquityController):
    """Offer and drive a refresh (snap update) of the installer itself.

    Talks to snapd's REST API to find out whether a newer version of the
    installer snap is available and, if so, to switch channel and start
    the refresh. Also exposed over the HTTP API via ``endpoint``.
    """

    endpoint = API.refresh

    autoinstall_key = "refresh-installer"
    autoinstall_schema = {
        'type': 'object',
        'properties': {
            'update': {'type': 'boolean'},
            'channel': {'type': 'string'},
            },
        'additionalProperties': False,
        }

    signals = [
        ('snapd-network-change', 'snapd_network_changed'),
    ]

    def __init__(self, app):
        super().__init__(app)
        # Parsed "refresh-installer" autoinstall section.
        self.ai_data = {}
        # SNAP_NAME is set in the environment when running from a snap.
        self.snap_name = os.environ.get("SNAP_NAME", "subiquity")
        self.configure_task = None
        self.check_task = None
        self.status = RefreshStatus(availability=RefreshCheckState.UNKNOWN)

    def load_autoinstall_data(self, data):
        """Record the "refresh-installer" autoinstall section, if present."""
        if data is not None:
            self.ai_data = data

    @property
    def active(self):
        """Whether this controller should do anything at all.

        An explicit 'update' key in autoinstall data wins; otherwise
        only act when running interactively.
        """
        if 'update' in self.ai_data:
            return self.ai_data['update']
        else:
            return self.interactive()

    def start(self):
        if not self.active:
            return
        self.configure_task = schedule_task(self.configure_snapd())
        self.check_task = SingleInstanceTask(
            self.check_for_update, propagate_errors=False)
        self.check_task.start_sync()

    @with_context()
    async def apply_autoinstall_config(self, context, index=1):
        """If an update is available, apply it and wait to be restarted.

        Raises if snapd reports the refresh change in a failed state.
        """
        if not self.active:
            return
        try:
            await asyncio.wait_for(self.check_task.wait(), 60)
        except asyncio.TimeoutError:
            # Not getting an answer in a minute counts as "no update".
            return
        if self.status.availability != RefreshCheckState.AVAILABLE:
            return
        change_id = await self.start_update(context=context)
        # Poll until the refresh fails or we get restarted (get_progress
        # calls app.restart() when the change reaches 'Done'; on a real
        # system snapd restarts us).
        while True:
            change = await self.get_progress(change_id)
            if change['status'] not in ['Do', 'Doing', 'Done']:
                # Fixed: previously the status was passed as a second
                # positional argument to Exception ("update failed: %s"),
                # so the message was never interpolated.
                raise Exception(
                    "update failed: {}".format(change['status']))
            await asyncio.sleep(0.1)

    @with_context()
    async def configure_snapd(self, context):
        """Record current snap details and switch to the refresh channel."""
        with context.child("get_details") as subcontext:
            try:
                r = await self.app.snapd.get(
                    'v2/snaps/{snap_name}'.format(snap_name=self.snap_name))
            except requests.exceptions.RequestException:
                # Best effort: no snapd, no refresh.
                log.exception("getting snap details")
                return
            self.status.current_snap_version = r['result']['version']
            for k in 'channel', 'revision', 'version':
                self.app.note_data_for_apport(
                    "Snap" + k.title(), r['result'][k])
            subcontext.description = "current version of snap is: %r" % (
                self.status.current_snap_version)
        channel = self.get_refresh_channel()
        desc = "switching {} to {}".format(self.snap_name, channel)
        with context.child("switching", desc) as subcontext:
            try:
                await self.app.snapd.post_and_wait(
                    'v2/snaps/{}'.format(self.snap_name),
                    {'action': 'switch', 'channel': channel})
            except requests.exceptions.RequestException:
                log.exception("switching channels")
                return
            subcontext.description = "switched to " + channel

    def get_refresh_channel(self):
        """Return the channel we should refresh subiquity to."""
        # Precedence: kernel cmdline, then autoinstall data, then a
        # channel derived from the release on the install media.
        prefix = "subiquity-channel="
        for arg in self.app.kernel_cmdline:
            if arg.startswith(prefix):
                log.debug(
                    "get_refresh_channel: found %s on kernel cmdline", arg)
                return arg[len(prefix):]
        if 'channel' in self.ai_data:
            return self.ai_data['channel']
        info_file = '/cdrom/.disk/info'
        try:
            fp = open(info_file)
        except FileNotFoundError:
            if self.opts.dry_run:
                info = (
                    'Ubuntu-Server 18.04.2 LTS "Bionic Beaver" - '
                    'Release amd64 (20190214.3)')
            else:
                log.debug(
                    "get_refresh_channel: failed to find .disk/info file")
                return
        else:
            with fp:
                info = fp.read()
        # Second word of .disk/info is the release number, e.g. "18.04.2".
        release = info.split()[1]
        return 'stable/ubuntu-' + release

    def snapd_network_changed(self):
        # Re-run the availability check once the network comes up.
        if self.active and \
           self.status.availability == RefreshCheckState.UNKNOWN:
            self.check_task.start_sync()

    @with_context()
    async def check_for_update(self, context):
        """Query snapd for a pending refresh and update self.status."""
        await asyncio.shield(self.configure_task)
        if self.app.updated:
            context.description = "not offered update when already updated"
            self.status.availability = RefreshCheckState.UNAVAILABLE
            return
        try:
            result = await self.app.snapd.get('v2/find', select='refresh')
        except requests.exceptions.RequestException:
            log.exception("checking for snap update failed")
            context.description = "checking for snap update failed"
            self.status.availability = RefreshCheckState.UNKNOWN
            return
        log.debug("check_for_update received %s", result)
        for snap in result["result"]:
            if snap["name"] == self.snap_name:
                self.status.new_snap_version = snap["version"]
                context.description = (
                    "new version of snap available: %r"
                    % self.status.new_snap_version)
                self.status.availability = RefreshCheckState.AVAILABLE
                return
        else:
            # Our snap was not in the refresh list: nothing to do.
            context.description = "no new version of snap available"
            self.status.availability = RefreshCheckState.UNAVAILABLE

    @with_context()
    async def start_update(self, context):
        """Ask snapd to refresh us; returns the snapd change id."""
        # Marker file tells the restarted installer it has been updated.
        open(self.app.state_path('updating'), 'w').close()
        change = await self.app.snapd.post(
            'v2/snaps/{}'.format(self.snap_name),
            {'action': 'refresh'})
        context.description = "change id: {}".format(change)
        return change

    async def get_progress(self, change):
        """Return the state of a snapd change, restarting when it's done."""
        result = await self.app.snapd.get('v2/changes/{}'.format(change))
        change = result['result']
        if change['status'] == 'Done':
            # Clearly if we got here we didn't get restarted by
            # snapd/systemctl (dry-run mode)
            self.app.restart()
        return change

    async def GET(self, wait: bool = False) -> RefreshStatus:
        """API: report refresh availability (optionally waiting for it)."""
        if wait:
            await self.check_task.wait()
        return self.status

    async def POST(self, context) -> str:
        """API: start the refresh; returns the snapd change id."""
        return await self.start_update(context=context)

    async def progress_GET(self, change_id: str) -> dict:
        """API: report the progress of a previously started refresh."""
        return await self.get_progress(change_id)
class NetworkController(BaseController):
    """Configure networking by writing and applying netplan config.

    Watches the prober's netlink observer for device changes, renders the
    model to a netplan YAML file, and runs ``netplan apply`` (or just
    ``netplan generate`` in dry-run mode).
    """

    model_name = "network"
    root = "/"

    def __init__(self, app):
        super().__init__(app)
        self.view = None
        self.view_shown = False
        self.apply_config_task = SingleInstanceTask(self._apply_config)
        if self.opts.dry_run:
            # In dry-run mode work on a scratch tree with a fresh default
            # netplan config instead of the real /etc/netplan.
            self.root = os.path.abspath(".subiquity")
            netplan_path = self.netplan_path
            netplan_dir = os.path.dirname(netplan_path)
            if os.path.exists(netplan_dir):
                import shutil
                shutil.rmtree(netplan_dir)
            os.makedirs(netplan_dir)
            with open(netplan_path, 'w') as fp:
                fp.write(default_netplan)
        self.parse_netplan_configs()
        self._watching = False
        self.network_event_receiver = SubiquityNetworkEventReceiver(self.model)
        self.network_event_receiver.add_default_route_watcher(
            self.route_watcher)

    def parse_netplan_configs(self):
        self.model.parse_netplan_configs(self.root)

    def route_watcher(self, routes):
        if routes:
            self.signal.emit_signal('network-change')

    def start(self):
        self._observer_handles = []
        self.observer, self._observer_fds = (
            self.app.prober.probe_network(self.network_event_receiver))
        self.start_watching()

    def stop_watching(self):
        if not self._watching:
            return
        loop = asyncio.get_event_loop()
        for fd in self._observer_fds:
            loop.remove_reader(fd)
        self._watching = False

    def start_watching(self):
        if self._watching:
            return
        loop = asyncio.get_event_loop()
        for fd in self._observer_fds:
            loop.add_reader(fd, self._data_ready, fd)
        self._watching = True

    def _data_ready(self, fd):
        """Feed observer data to the model once udev has settled."""
        cp = run_command(['udevadm', 'settle', '-t', '0'])
        if cp.returncode != 0:
            # udev still busy: back off briefly instead of processing a
            # system that is mid-change.
            log.debug("waiting 0.1 to let udev event queue settle")
            self.stop_watching()
            loop = asyncio.get_event_loop()
            loop.call_later(0.1, self.start_watching)
            return
        self.observer.data_ready(fd)
        v = self.ui.body
        if hasattr(v, 'refresh_model_inputs'):
            v.refresh_model_inputs()

    def start_scan(self, dev):
        self.observer.trigger_scan(dev.ifindex)

    def done(self):
        log.debug("NetworkController.done next_screen")
        self.model.has_network = bool(
            self.network_event_receiver.default_routes)
        self.app.next_screen()

    def cancel(self):
        self.app.prev_screen()

    def _action_get(self, id):
        """Resolve an answers-file device reference to a model object.

        id is a list like ["interface index 0"] or ["interface name eth0"].
        """
        dev_spec = id[0].split()
        dev = None
        if dev_spec[0] == "interface":
            if dev_spec[1] == "index":
                dev = self.model.get_all_netdevs()[int(dev_spec[2])]
            elif dev_spec[1] == "name":
                dev = self.model.get_netdev_by_name(dev_spec[2])
        if dev is None:
            raise Exception("could not resolve {}".format(id))
        if len(id) > 1:
            part, index = id[1].split()
            if part == "part":
                return dev.partitions()[int(index)]
        else:
            return dev
        raise Exception("could not resolve {}".format(id))

    def _action_clean_devices(self, devices):
        return [self._action_get(device) for device in devices]

    def _answers_action(self, action):
        """Drive one answers-file action against the UI (generator).

        Each yield hands control back so the UI can process the queued
        input before the next step inspects self.ui.body.
        """
        from subiquitycore.ui.stretchy import StretchyOverlay
        log.debug("_answers_action %r", action)
        if 'obj' in action:
            obj = self._action_get(action['obj'])
            meth = getattr(
                self.ui.body,
                "_action_{}".format(action['action']))
            action_obj = getattr(NetDevAction, action['action'])
            self.ui.body._action(None, (action_obj, meth), obj)
            yield
            body = self.ui.body._w
            if not isinstance(body, StretchyOverlay):
                return
            # Keys like "data" or "edit-data" name forms on the stretchy;
            # a "<prefix>-data" key maps to "<prefix>_form"/"<prefix>-submit".
            for k, v in action.items():
                if not k.endswith('data'):
                    continue
                form_name = "form"
                submit_key = "submit"
                if '-' in k:
                    prefix = k.split('-')[0]
                    form_name = prefix + "_form"
                    submit_key = prefix + "-submit"
                yield from self._enter_form_data(
                    getattr(body.stretchy, form_name),
                    v,
                    action.get(submit_key, True))
        elif action['action'] == 'create-bond':
            self.ui.body._create_bond()
            yield
            body = self.ui.body._w
            yield from self._enter_form_data(
                body.stretchy.form,
                action['data'],
                action.get("submit", True))
        elif action['action'] == 'done':
            self.ui.body.done()
        else:
            raise Exception("could not process action {}".format(action))

    def update_initial_configs(self):
        # Any device that does not have a (global) address by the time
        # we get to the network screen is marked as disabled, with an
        # explanation.
        log.debug("updating initial NIC config")
        for dev in self.model.get_all_netdevs():
            has_global_address = False
            if dev.info is None or not dev.config:
                continue
            for a in dev.info.addresses.values():
                if a.scope == "global":
                    has_global_address = True
                    break
            if not has_global_address:
                dev.remove_ip_networks_for_version(4)
                dev.remove_ip_networks_for_version(6)
                log.debug("disabling %s", dev.name)
                dev.disabled_reason = _("autoconfiguration failed")

    def start_ui(self):
        if not self.view_shown:
            self.update_initial_configs()
        self.view = NetworkView(self.model, self)
        if not self.view_shown:
            self.apply_config(silent=True)
            self.view_shown = True
        self.network_event_receiver.view = self.view
        self.ui.set_body(self.view)

    def end_ui(self):
        self.view = self.network_event_receiver.view = None

    @property
    def netplan_path(self):
        """Path of the netplan file this controller owns under self.root."""
        if self.opts.project == "subiquity":
            netplan_config_file_name = '00-installer-config.yaml'
        else:
            netplan_config_file_name = '00-snapd-config.yaml'
        return os.path.join(self.root, 'etc/netplan', netplan_config_file_name)

    def apply_config(self, context=None, silent=False):
        self.apply_config_task.start_sync(context=context, silent=silent)

    async def _down_devs(self, devs):
        for dev in devs:
            try:
                log.debug('downing %s', dev.name)
                self.observer.rtlistener.unset_link_flags(dev.ifindex, IFF_UP)
            except RuntimeError:
                # We don't actually care very much about this
                log.exception('unset_link_flags failed for %s', dev.name)

    async def _delete_devs(self, devs):
        for dev in devs:
            # XXX would be nicer to do this via rtlistener eventually.
            log.debug('deleting %s', dev.name)
            cmd = ['ip', 'link', 'delete', 'dev', dev.name]
            try:
                await arun_command(cmd, check=True)
            except subprocess.CalledProcessError as cp:
                log.info("deleting %s failed with %r", dev.name, cp.stderr)

    def _write_config(self):
        """Render the model to our netplan file, shelving any other config."""
        config = self.model.render_config()

        log.debug("network config: \n%s",
                  yaml.dump(sanitize_config(config), default_flow_style=False))

        # Move aside any netplan config we don't own so ours is the only
        # one in effect.
        for p in netplan.configs_in_root(self.root, masked=True):
            if p == self.netplan_path:
                continue
            os.rename(p, p + ".dist-" + self.opts.project)

        write_file(
            self.netplan_path, self.model.stringify_config(config), omode="w")

        self.parse_netplan_configs()

    @with_context(
        name="apply_config", description="silent={silent}", level="INFO")
    async def _apply_config(self, *, context, silent):
        """Write and apply netplan config, then wait for DHCP leases.

        With silent=True no spinner/error UI is shown (used for the
        initial automatic apply).
        """
        devs_to_delete = []
        devs_to_down = []
        dhcp_device_versions = []
        dhcp_events = set()
        for dev in self.model.get_all_netdevs(include_deleted=True):
            dev.dhcp_events = {}
            for v in 4, 6:
                if dev.dhcp_enabled(v):
                    # Fixed: dhcp_device_versions was never populated, so
                    # the timeout loop at the end of this method could
                    # never mark a lease as TIMEDOUT.
                    dhcp_device_versions.append((dev, v))
                    if not silent:
                        dev.set_dhcp_state(v, "PENDING")
                        self.network_event_receiver.update_link(dev.ifindex)
                    else:
                        dev.set_dhcp_state(v, "RECONFIGURE")
                    dev.dhcp_events[v] = e = asyncio.Event()
                    dhcp_events.add(e)
            if dev.info is None:
                continue
            # Devices whose config changed must be deleted (virtual) or
            # downed (physical) before netplan apply.
            if dev.config != self.model.config.config_for_device(dev.info):
                if dev.is_virtual:
                    devs_to_delete.append(dev)
                else:
                    devs_to_down.append(dev)

        self._write_config()

        if not silent and self.view:
            self.view.show_apply_spinner()

        try:
            def error(stage):
                if not silent and self.view:
                    self.view.show_network_error(stage)

            if self.opts.dry_run:
                delay = 1/self.app.scale_factor
                await arun_command(['sleep', str(delay)])
                if os.path.exists('/lib/netplan/generate'):
                    # If netplan appears to be installed, run generate to
                    # at least test that what we wrote is acceptable to
                    # netplan.
                    await arun_command(
                        ['netplan', 'generate', '--root', self.root],
                        check=True)
            else:
                if devs_to_down or devs_to_delete:
                    # Stop networkd while we reshape devices so it doesn't
                    # fight us; masked so netplan apply can't restart it.
                    try:
                        await arun_command(
                            ['systemctl', 'mask', '--runtime',
                             'systemd-networkd.service',
                             'systemd-networkd.socket'],
                            check=True)
                        await arun_command(
                            ['systemctl', 'stop',
                             'systemd-networkd.service',
                             'systemd-networkd.socket'],
                            check=True)
                    except subprocess.CalledProcessError:
                        error("stop-networkd")
                        raise
                if devs_to_down:
                    await self._down_devs(devs_to_down)
                if devs_to_delete:
                    await self._delete_devs(devs_to_delete)
                if devs_to_down or devs_to_delete:
                    await arun_command(
                        ['systemctl', 'unmask', '--runtime',
                         'systemd-networkd.service',
                         'systemd-networkd.socket'],
                        check=True)
                try:
                    await arun_command(['netplan', 'apply'], check=True)
                except subprocess.CalledProcessError:
                    error("apply")
                    raise
                if devs_to_down or devs_to_delete:
                    # It's probably running already, but just in case.
                    await arun_command(
                        ['systemctl', 'start', 'systemd-networkd.socket'],
                        check=False)
        finally:
            if not silent and self.view:
                self.view.hide_apply_spinner()

        if self.answers.get('accept-default', False):
            self.done()
        elif self.answers.get('actions', False):
            actions = self.answers['actions']
            self.answers.clear()
            self._run_iterator(self._run_actions(actions))

        if not dhcp_events:
            return

        # Give DHCP 10 seconds; anything still without an address is
        # marked TIMEDOUT below.
        try:
            await asyncio.wait_for(
                asyncio.wait({e.wait() for e in dhcp_events}),
                10)
        except asyncio.TimeoutError:
            pass

        for dev, v in dhcp_device_versions:
            dev.dhcp_events = {}
            if not dev.dhcp_addresses()[v]:
                dev.set_dhcp_state(v, "TIMEDOUT")
                self.network_event_receiver.update_link(dev.ifindex)

    def add_vlan(self, device, vlan):
        return self.model.new_vlan(device, vlan)

    def add_or_update_bond(self, existing, result):
        """Create a bond from form data, or update an existing one."""
        mode = result['mode']
        params = {
            'mode': mode,
            }
        if mode in BondParameters.supports_xmit_hash_policy:
            params['transmit-hash-policy'] = result['xmit_hash_policy']
        if mode in BondParameters.supports_lacp_rate:
            params['lacp-rate'] = result['lacp_rate']
        # Member devices lose their own config when bonded.
        for device in result['devices']:
            device.config = {}
        interfaces = [d.name for d in result['devices']]
        if existing is None:
            return self.model.new_bond(result['name'], interfaces, params)
        else:
            existing.config['interfaces'] = interfaces
            existing.config['parameters'] = params
            existing.name = result['name']
            return existing
class RefreshController(SubiquityController):
    """Offer to refresh the installer snap to its latest version.

    Talks to snapd to switch the snap to the right channel, check whether
    an update is available, and — during autoinstalls — drive the refresh
    and poll its progress.
    """

    autoinstall_key = "refresh-installer"

    signals = [
        ('snapd-network-change', 'snapd_network_changed'),
    ]

    def __init__(self, app):
        super().__init__(app)
        self.snap_name = os.environ.get("SNAP_NAME", "subiquity")
        self.configure_task = None
        self.check_task = None
        self.current_snap_version = "unknown"
        self.new_snap_version = ""
        self.offered_first_time = False
        # Only check for updates in interactive installs, unless the
        # autoinstall data explicitly requests a refresh (see below).
        self.active = self.interactive()

    def load_autoinstall_data(self, data):
        if data is not None and data.get('refresh'):
            self.active = True

    def start(self):
        if not self.active:
            return
        self.configure_task = schedule_task(self.configure_snapd())
        self.check_task = SingleInstanceTask(
            self.check_for_update, propagate_errors=False)
        self.check_task.start_sync()

    async def apply_autoinstall_config(self, index=1):
        """Perform the refresh (if one is available) for an autoinstall."""
        if not self.active:
            return
        try:
            await asyncio.wait_for(self.check_task.wait(), 60)
        except asyncio.TimeoutError:
            return
        if self.check_state != CheckState.AVAILABLE:
            return
        change_id = await self.start_update()
        while True:
            # Fixed: this previously called self.controller.get_progress,
            # but no `controller` attribute is ever set on this class;
            # get_progress is defined on this controller itself.  (The
            # old `except RequestException as e: raise e` wrapper was a
            # no-op and has been dropped.)
            change = await self.get_progress(change_id)
            if change['status'] == 'Done':
                # Will only get here dry run mode as part of the refresh is us
                # getting restarted by snapd...
                return
            if change['status'] not in ['Do', 'Doing']:
                raise Exception("update failed")
            await asyncio.sleep(0.1)

    @property
    def check_state(self):
        """The current update-check status, as a CheckState value."""
        if not self.active:
            return CheckState.UNAVAILABLE
        task = self.check_task.task
        if not task.done() or task.cancelled():
            return CheckState.UNKNOWN
        if task.exception():
            return CheckState.UNAVAILABLE
        return task.result()

    async def configure_snapd(self):
        """Record snap details for apport and switch to the right channel."""
        with self.context.child("configure_snapd") as context:
            with context.child("get_details") as subcontext:
                try:
                    r = await self.app.snapd.get(
                        'v2/snaps/{snap_name}'.format(
                            snap_name=self.snap_name))
                except requests.exceptions.RequestException:
                    log.exception("getting snap details")
                    return
                self.current_snap_version = r['result']['version']
                for k in 'channel', 'revision', 'version':
                    self.app.note_data_for_apport(
                        "Snap" + k.title(), r['result'][k])
                subcontext.description = "current version of snap is: %r" % (
                    self.current_snap_version)
            channel = self.get_refresh_channel()
            desc = "switching {} to {}".format(self.snap_name, channel)
            with context.child("switching", desc) as subcontext:
                try:
                    await self.app.snapd.post_and_wait(
                        'v2/snaps/{}'.format(self.snap_name),
                        {'action': 'switch', 'channel': channel})
                except requests.exceptions.RequestException:
                    log.exception("switching channels")
                    return
                subcontext.description = "switched to " + channel

    def get_refresh_channel(self):
        """Return the channel we should refresh subiquity to."""
        if 'channel' in self.answers:
            return self.answers['channel']
        with open('/proc/cmdline') as fp:
            cmdline = fp.read()
        prefix = "subiquity-channel="
        for arg in cmdline.split():
            if arg.startswith(prefix):
                log.debug(
                    "get_refresh_channel: found %s on kernel cmdline", arg)
                return arg[len(prefix):]

        info_file = '/cdrom/.disk/info'
        try:
            fp = open(info_file)
        except FileNotFoundError:
            if self.opts.dry_run:
                info = (
                    'Ubuntu-Server 18.04.2 LTS "Bionic Beaver" - '
                    'Release amd64 (20190214.3)')
            else:
                # Returns None: no channel could be determined.
                log.debug(
                    "get_refresh_channel: failed to find .disk/info file")
                return
        else:
            with fp:
                info = fp.read()
        release = info.split()[1]
        return 'stable/ubuntu-' + release

    def snapd_network_changed(self):
        # Retry the update check once the network comes up.
        if self.check_state == CheckState.UNKNOWN:
            self.check_task.start_sync()

    async def check_for_update(self):
        """Ask snapd whether a newer snap exists; returns a CheckState."""
        await asyncio.shield(self.configure_task)
        with self.context.child("check_for_update") as context:
            if self.app.updated:
                context.description = (
                    "not offered update when already updated")
                return CheckState.UNAVAILABLE
            result = await self.app.snapd.get('v2/find', select='refresh')
            log.debug("check_for_update received %s", result)
            for snap in result["result"]:
                if snap["name"] == self.snap_name:
                    self.new_snap_version = snap["version"]
                    context.description = (
                        "new version of snap available: %r"
                        % self.new_snap_version)
                    return CheckState.AVAILABLE
            else:
                context.description = ("no new version of snap available")
                return CheckState.UNAVAILABLE

    async def start_update(self):
        """Kick off the snap refresh; returns the snapd change id."""
        update_marker = os.path.join(self.app.state_dir, 'updating')
        # Leave a marker so the restarted installer knows it was updated.
        open(update_marker, 'w').close()
        with self.context.child("starting_update") as context:
            change = await self.app.snapd.post(
                'v2/snaps/{}'.format(self.snap_name),
                {'action': 'refresh'})
            context.description = "change id: {}".format(change)
            return change

    async def get_progress(self, change):
        """Return the state of the given snapd change."""
        result = await self.app.snapd.get('v2/changes/{}'.format(change))
        return result['result']

    def start_ui(self, index=1):
        from subiquity.ui.views.refresh import RefreshView
        if self.app.updated:
            raise Skip()
        show = False
        if index == 1:
            if self.check_state == CheckState.AVAILABLE:
                show = True
                self.offered_first_time = True
        elif index == 2:
            if not self.offered_first_time:
                if self.check_state in [
                        CheckState.UNKNOWN, CheckState.AVAILABLE]:
                    show = True
        else:
            raise AssertionError("unexpected index {}".format(index))
        if show:
            self.ui.set_body(RefreshView(self))
        else:
            raise Skip()

    def done(self, sender=None):
        log.debug("RefreshController.done next_screen")
        self.app.next_screen()

    def cancel(self, sender=None):
        self.app.prev_screen()
class BaseNetworkController(BaseController):
    """Base controller for network configuration.

    Observes kernel network state via the app's prober, renders the model
    to a netplan config file and applies it with ``netplan apply``,
    stopping/restarting systemd-networkd around device downs/deletions.
    """

    model_name = "network"
    root = "/"

    def __init__(self, app):
        super().__init__(app)
        self.apply_config_task = SingleInstanceTask(self._apply_config)
        if self.opts.dry_run:
            # Dry runs work on a scratch tree so the host's real netplan
            # configuration is never touched.
            self.root = os.path.abspath(".subiquity")
            netplan_path = self.netplan_path
            netplan_dir = os.path.dirname(netplan_path)
            if os.path.exists(netplan_dir):
                import shutil
                shutil.rmtree(netplan_dir)
            os.makedirs(netplan_dir)
            with open(netplan_path, 'w') as fp:
                fp.write(default_netplan)
        self.parse_netplan_configs()
        self._watching = False
        self.network_event_receiver = SubiquityNetworkEventReceiver(self)

    def parse_netplan_configs(self):
        """(Re)load all netplan configs under self.root into the model."""
        self.model.parse_netplan_configs(self.root)

    def start(self):
        self._observer_handles = []
        self.observer, self._observer_fds = (
            self.app.prober.probe_network(self.network_event_receiver))
        self.start_watching()

    def stop_watching(self):
        if not self._watching:
            return
        loop = asyncio.get_event_loop()
        for fd in self._observer_fds:
            loop.remove_reader(fd)
        self._watching = False

    def start_watching(self):
        if self._watching:
            return
        loop = asyncio.get_event_loop()
        for fd in self._observer_fds:
            loop.add_reader(fd, self._data_ready, fd)
        self._watching = True

    def _data_ready(self, fd):
        cp = run_command(['udevadm', 'settle', '-t', '0'])
        if cp.returncode != 0:
            # udev is still busy; back off briefly before reading more.
            log.debug("waiting 0.1 to let udev event queue settle")
            self.stop_watching()
            loop = asyncio.get_event_loop()
            loop.call_later(0.1, self.start_watching)
            return
        self.observer.data_ready(fd)

    def update_initial_configs(self):
        # Any device that does not have a (global) address by the time
        # we get to the network screen is marked as disabled, with an
        # explanation.
        log.debug("updating initial NIC config")
        for dev in self.model.get_all_netdevs():
            has_global_address = False
            if dev.info is None or not dev.config:
                continue
            for a in dev.info.addresses.values():
                if a.scope == "global":
                    has_global_address = True
                    break
            if not has_global_address:
                dev.remove_ip_networks_for_version(4)
                dev.remove_ip_networks_for_version(6)
                log.debug("disabling %s", dev.name)
                dev.disabled_reason = _("autoconfiguration failed")

    @property
    def netplan_path(self):
        if self.opts.project == "subiquity":
            netplan_config_file_name = '00-installer-config.yaml'
        else:
            netplan_config_file_name = '00-snapd-config.yaml'
        return os.path.join(
            self.root, 'etc/netplan', netplan_config_file_name)

    def apply_config(self, context=None, silent=False):
        """Schedule (re)application of the current network config."""
        self.apply_config_task.start_sync(context=context, silent=silent)

    async def _down_devs(self, devs):
        for dev in devs:
            try:
                log.debug('downing %s', dev.name)
                self.observer.rtlistener.unset_link_flags(dev.ifindex, IFF_UP)
            except RuntimeError:
                # We don't actually care very much about this
                log.exception('unset_link_flags failed for %s', dev.name)

    async def _delete_devs(self, devs):
        for dev in devs:
            # XXX would be nicer to do this via rtlistener eventually.
            log.debug('deleting %s', dev.name)
            cmd = ['ip', 'link', 'delete', 'dev', dev.name]
            try:
                await arun_command(cmd, check=True)
            except subprocess.CalledProcessError as cp:
                log.info("deleting %s failed with %r", dev.name, cp.stderr)

    def _write_config(self):
        """Render the model to the netplan file, shelving other configs."""
        config = self.model.render_config()

        log.debug(
            "network config: \n%s",
            yaml.dump(
                netplan.sanitize_config(config), default_flow_style=False))

        # Move any other netplan config out of the way so ours wins.
        for p in netplan.configs_in_root(self.root, masked=True):
            if p == self.netplan_path:
                continue
            os.rename(p, p + ".dist-" + self.opts.project)

        write_file(
            self.netplan_path, self.model.stringify_config(config),
            omode="w")

        self.parse_netplan_configs()

    @with_context(
        name="apply_config", description="silent={silent}", level="INFO")
    async def _apply_config(self, *, context, silent):
        """Write and apply the netplan config, then wait for DHCP leases.

        When silent is false, UI callbacks (apply_starting/stopping/error)
        and link updates are emitted along the way.
        """
        devs_to_delete = []
        devs_to_down = []
        dhcp_device_versions = []
        dhcp_events = set()
        for dev in self.model.get_all_netdevs(include_deleted=True):
            dev.dhcp_events = {}
            for v in 4, 6:
                if dev.dhcp_enabled(v):
                    if not silent:
                        dev.set_dhcp_state(v, DHCPState.PENDING)
                        self.network_event_receiver.update_link(dev.ifindex)
                    else:
                        dev.set_dhcp_state(v, DHCPState.RECONFIGURE)
                    dev.dhcp_events[v] = e = asyncio.Event()
                    dhcp_events.add(e)
                    # Fixed: remember the (device, version) pair so the
                    # timeout pass at the end of this method can mark it
                    # TIMED_OUT; previously this list was never populated
                    # so that loop never ran.
                    dhcp_device_versions.append((dev, v))
            if dev.info is None:
                continue
            if dev.config != self.model.config.config_for_device(dev.info):
                if dev.is_virtual:
                    devs_to_delete.append(dev)
                else:
                    devs_to_down.append(dev)

        self._write_config()

        if not silent:
            self.apply_starting()

        try:
            def error(stage):
                if not silent:
                    self.apply_error(stage)

            if self.opts.dry_run:
                delay = 1 / self.app.scale_factor
                await arun_command(['sleep', str(delay)])
                if os.path.exists('/lib/netplan/generate'):
                    # If netplan appears to be installed, run generate to
                    # at least test that what we wrote is acceptable to
                    # netplan.
                    await arun_command(
                        ['netplan', 'generate', '--root', self.root],
                        check=True)
            else:
                if devs_to_down or devs_to_delete:
                    # Stop networkd while we fiddle with links so it does
                    # not interfere; masked so it can't be re-triggered.
                    try:
                        await arun_command([
                            'systemctl', 'mask', '--runtime',
                            'systemd-networkd.service',
                            'systemd-networkd.socket',
                            ], check=True)
                        await arun_command([
                            'systemctl', 'stop',
                            'systemd-networkd.service',
                            'systemd-networkd.socket',
                            ], check=True)
                    except subprocess.CalledProcessError:
                        error("stop-networkd")
                        raise
                if devs_to_down:
                    await self._down_devs(devs_to_down)
                if devs_to_delete:
                    await self._delete_devs(devs_to_delete)
                if devs_to_down or devs_to_delete:
                    await arun_command([
                        'systemctl', 'unmask', '--runtime',
                        'systemd-networkd.service',
                        'systemd-networkd.socket',
                        ], check=True)
                try:
                    await arun_command(['netplan', 'apply'], check=True)
                except subprocess.CalledProcessError:
                    error("apply")
                    raise
                if devs_to_down or devs_to_delete:
                    # It's probably running already, but just in case.
                    await arun_command(
                        ['systemctl', 'start', 'systemd-networkd.socket'],
                        check=False)
        finally:
            if not silent:
                self.apply_stopping()

        if not dhcp_events:
            return

        # Give DHCP up to 10 seconds to produce addresses.
        try:
            await asyncio.wait_for(
                asyncio.wait({e.wait() for e in dhcp_events}), 10)
        except asyncio.TimeoutError:
            pass

        for dev, v in dhcp_device_versions:
            dev.dhcp_events = {}
            if not dev.dhcp_addresses()[v]:
                dev.set_dhcp_state(v, DHCPState.TIMED_OUT)
                self.network_event_receiver.update_link(dev.ifindex)

    def set_static_config(self, dev_name: str, ip_version: int,
                          static_config: StaticConfig) -> None:
        dev = self.model.get_netdev_by_name(dev_name)
        dev.remove_ip_networks_for_version(ip_version)
        dev.config.setdefault('addresses', []).extend(static_config.addresses)
        gwkey = 'gateway{v}'.format(v=ip_version)
        if static_config.gateway:
            dev.config[gwkey] = static_config.gateway
        else:
            dev.config.pop(gwkey, None)
        ns = dev.config.setdefault('nameservers', {})
        ns.setdefault('addresses', []).extend(static_config.nameservers)
        ns.setdefault('search', []).extend(static_config.searchdomains)
        self.update_link(dev)
        self.apply_config()

    def enable_dhcp(self, dev_name: str, ip_version: int) -> None:
        dev = self.model.get_netdev_by_name(dev_name)
        dev.remove_ip_networks_for_version(ip_version)
        dhcpkey = 'dhcp{v}'.format(v=ip_version)
        dev.config[dhcpkey] = True
        self.update_link(dev)
        self.apply_config()

    def disable_network(self, dev_name: str, ip_version: int) -> None:
        dev = self.model.get_netdev_by_name(dev_name)
        dev.remove_ip_networks_for_version(ip_version)
        self.update_link(dev)
        self.apply_config()

    def add_vlan(self, dev_name: str, id: int):
        new = self.model.new_vlan(dev_name, id)
        self.new_link(new)
        dev = self.model.get_netdev_by_name(dev_name)
        self.update_link(dev)
        self.apply_config()

    def delete_link(self, dev_name: str):
        dev = self.model.get_netdev_by_name(dev_name)
        touched_devices = set()
        if dev.type == "bond":
            for device_name in dev.config['interfaces']:
                interface = self.model.get_netdev_by_name(device_name)
                touched_devices.add(interface)
        elif dev.type == "vlan":
            link = self.model.get_netdev_by_name(dev.config['link'])
            touched_devices.add(link)
        dev.config = None
        self.del_link(dev)
        for dev in touched_devices:
            self.update_link(dev)
        self.apply_config()

    def add_or_update_bond(self, existing_name: Optional[str],
                           new_name: str, new_info: BondConfig) -> None:
        get_netdev_by_name = self.model.get_netdev_by_name
        touched_devices = set()
        for device_name in new_info.interfaces:
            device = get_netdev_by_name(device_name)
            device.config = {}
            touched_devices.add(device)
        if existing_name is None:
            new_dev = self.model.new_bond(new_name, new_info)
            self.new_link(new_dev)
        else:
            existing = get_netdev_by_name(existing_name)
            for interface in existing.config['interfaces']:
                touched_devices.add(get_netdev_by_name(interface))
            existing.config.update(new_info.to_config())
            if existing.name != new_name:
                # Renaming a device means deleting and recreating the link;
                # stash the config across the del/new cycle.
                config = existing.config
                existing.config = None
                self.del_link(existing)
                existing.config = config
                existing.name = new_name
                self.new_link(existing)
            else:
                touched_devices.add(existing)
        for dev in touched_devices:
            self.update_link(dev)
        self.apply_config()

    def get_info_for_netdev(self, dev_name: str) -> str:
        device = self.model.get_netdev_by_name(dev_name)
        if device.info is not None:
            return yaml.dump(
                device.info.serialize(), default_flow_style=False)
        else:
            return "Configured but not yet created {type} interface.".format(
                type=device.type)

    def set_wlan(self, dev_name: str, wlan: WLANConfig) -> None:
        device = self.model.get_netdev_by_name(dev_name)
        device.set_ssid_psk(wlan.ssid, wlan.psk)
        self.update_link(device)

    def start_scan(self, dev_name: str) -> None:
        device = self.model.get_netdev_by_name(dev_name)
        self.observer.trigger_scan(device.ifindex)
        self.update_link(device)

    @abc.abstractmethod
    def apply_starting(self):
        pass

    @abc.abstractmethod
    def apply_stopping(self):
        pass

    @abc.abstractmethod
    def apply_error(self, stage):
        pass

    @abc.abstractmethod
    def update_default_routes(self, routes):
        if routes:
            self.signal.emit_signal('network-change')

    @abc.abstractmethod
    def new_link(self, netdev):
        pass

    @abc.abstractmethod
    def update_link(self, netdev):
        # Shared logic for subclasses calling super(): resolve pending
        # DHCP events once an address for that version shows up.
        for v, e in netdev.dhcp_events.items():
            if netdev.dhcp_addresses()[v]:
                netdev.set_dhcp_state(v, DHCPState.CONFIGURED)
                e.set()

    @abc.abstractmethod
    def del_link(self, netdev):
        pass
class MirrorController(SubiquityController):
    """Configure the apt mirror, optionally localized via a geoip lookup."""

    autoinstall_key = "apt"
    model_name = "mirror"

    signals = [
        ('snapd-network-change', 'snapd_network_changed'),
    ]

    def __init__(self, app):
        self.ai_data = {}
        super().__init__(app)
        self.check_state = CheckState.NOT_STARTED
        if 'country-code' in self.answers:
            # An answers-provided country short-circuits the geoip lookup.
            self.check_state = CheckState.DONE
            self.model.set_country(self.answers['country-code'])
        self.lookup_task = SingleInstanceTask(self.lookup)
        self.geoip_enabled = True

    def load_autoinstall_data(self, data):
        if data is None:
            return
        geoip = data.pop('geoip', True)
        merge_config(self.model.config, data)
        # Only bother with geoip when the config is still the default one.
        self.geoip_enabled = geoip and self.model.is_default()

    async def apply_autoinstall_config(self):
        if not self.geoip_enabled:
            return
        if self.lookup_task.task is None:
            # Fixed: the lookup never started (e.g. the network never came
            # up), so there is nothing to wait for; previously this always
            # blocked for the full 10 second timeout.  Matches the guard in
            # the API variant of this controller.
            return
        try:
            await asyncio.wait_for(self.lookup_task.wait(), 10)
        except asyncio.TimeoutError:
            pass

    def snapd_network_changed(self):
        if not self.geoip_enabled:
            return
        if self.check_state != CheckState.DONE:
            self.check_state = CheckState.CHECKING
            self.lookup_task.start_sync()

    async def lookup(self):
        """Query geoip.ubuntu.com and set the model's country on success."""
        with self.context.child("lookup"):
            try:
                response = await run_in_thread(
                    requests.get, "https://geoip.ubuntu.com/lookup")
                response.raise_for_status()
            except requests.exceptions.RequestException:
                log.exception("geoip lookup failed")
                self.check_state = CheckState.FAILED
                return
            try:
                e = ElementTree.fromstring(response.text)
            except ElementTree.ParseError:
                log.exception("parsing %r failed", response.text)
                self.check_state = CheckState.FAILED
                return
            cc = e.find("CountryCode")
            if cc is None:
                log.debug("no CountryCode found in %r", response.text)
                self.check_state = CheckState.FAILED
                return
            cc = cc.text.lower()
            if len(cc) != 2:
                log.debug("bogus CountryCode found in %r", response.text)
                self.check_state = CheckState.FAILED
                return
            self.check_state = CheckState.DONE
            self.model.set_country(cc)

    def start_ui(self):
        self.check_state = CheckState.DONE
        self.ui.set_body(MirrorView(self.model, self))
        if 'mirror' in self.answers:
            self.done(self.answers['mirror'])
        elif 'country-code' in self.answers \
                or 'accept-default' in self.answers:
            self.done(self.model.get_mirror())

    def cancel(self):
        self.app.prev_screen()

    def serialize(self):
        return self.model.get_mirror()

    def deserialize(self, data):
        super().deserialize(data)
        self.model.set_mirror(data)

    def done(self, mirror):
        log.debug("MirrorController.done next_screen mirror=%s", mirror)
        if mirror != self.model.get_mirror():
            self.model.set_mirror(mirror)
        self.configured()
        self.app.next_screen()
class MirrorController(SubiquityController):
    """API-driven controller for the apt mirror configuration.

    Optionally localizes the default mirror using a geoip country lookup,
    triggered when snapd reports the network is up.
    """

    endpoint = API.mirror

    autoinstall_key = "apt"
    autoinstall_schema = {
        # This is obviously incomplete.
        'type': 'object',
        'properties': {
            'preserve_sources_list': {'type': 'boolean'},
            'primary': {'type': 'array'},
            'geoip': {'type': 'boolean'},
            'sources': {'type': 'object'},
            },
        }
    model_name = "mirror"

    signals = [
        ('snapd-network-change', 'snapd_network_changed'),
    ]

    def __init__(self, app):
        super().__init__(app)
        self.geoip_enabled = True
        self.check_state = CheckState.NOT_STARTED
        self.lookup_task = SingleInstanceTask(self.lookup)

    def load_autoinstall_data(self, data):
        if data is None:
            return
        want_geoip = data.pop('geoip', True)
        merge_config(self.model.config, data)
        # geoip only makes sense while the mirror config is untouched.
        self.geoip_enabled = want_geoip and self.model.is_default()

    @with_context()
    async def apply_autoinstall_config(self, context):
        # Nothing to wait for if geoip is off or the lookup never started.
        if not self.geoip_enabled or self.lookup_task.task is None:
            return
        try:
            with context.child('waiting'):
                await asyncio.wait_for(self.lookup_task.wait(), 10)
        except asyncio.TimeoutError:
            pass

    def snapd_network_changed(self):
        if not self.geoip_enabled:
            return
        if self.check_state != CheckState.DONE:
            self.check_state = CheckState.CHECKING
            self.lookup_task.start_sync()

    async def _lookup_country_code(self):
        """Fetch and parse the geoip response; return None on any failure."""
        try:
            response = await run_in_thread(
                requests.get, "https://geoip.ubuntu.com/lookup")
            response.raise_for_status()
        except requests.exceptions.RequestException:
            log.exception("geoip lookup failed")
            return None
        try:
            tree = ElementTree.fromstring(response.text)
        except ElementTree.ParseError:
            log.exception("parsing %r failed", response.text)
            return None
        node = tree.find("CountryCode")
        if node is None:
            log.debug("no CountryCode found in %r", response.text)
            return None
        code = node.text.lower()
        if len(code) != 2:
            log.debug("bogus CountryCode found in %r", response.text)
            return None
        return code

    @with_context()
    async def lookup(self, context):
        """Run the geoip lookup and record the outcome in check_state."""
        code = await self._lookup_country_code()
        if code is None:
            self.check_state = CheckState.FAILED
            return
        self.check_state = CheckState.DONE
        self.model.set_country(code)

    def serialize(self):
        return self.model.get_mirror()

    def deserialize(self, data):
        self.model.set_mirror(data)

    def make_autoinstall(self):
        config = self.model.render()['apt']
        config['geoip'] = self.geoip_enabled
        return config

    async def GET(self) -> str:
        return self.model.get_mirror()

    async def POST(self, data: str):
        self.model.set_mirror(data)
        self.configured()
class FilesystemController(SubiquityController, FilesystemManipulator):
    """API-driven storage controller.

    Probes block devices (via a background task, re-triggered on udev
    events), applies guided or fully-manual autoinstall storage layouts,
    and serves the storage HTTP API.
    """

    endpoint = API.storage

    autoinstall_key = "storage"
    autoinstall_schema = {'type': 'object'}  # ...
    model_name = "filesystem"

    def __init__(self, app):
        self.ai_data = {}
        super().__init__(app)
        self.model.target = app.base_model.target
        if self.opts.dry_run and self.opts.bootloader:
            name = self.opts.bootloader.upper()
            self.model.bootloader = getattr(Bootloader, name)
        self._monitor = None
        # Maps restricted (bool) -> (exception, error report): False is
        # the full probe, True the restricted (blockdev-only) retry.
        self._errors = {}
        self._probe_once_task = SingleInstanceTask(
            self._probe_once, propagate_errors=False)
        self._probe_task = SingleInstanceTask(
            self._probe, propagate_errors=False)

    def load_autoinstall_data(self, data):
        log.debug("load_autoinstall_data %s", data)
        if data is None:
            if not self.interactive():
                # Non-interactive installs default to a guided LVM layout.
                data = {
                    'layout': {
                        'name': 'lvm',
                        },
                    }
            else:
                data = {}
        log.debug("self.ai_data = %s", data)
        self.ai_data = data

    def configured(self):
        super().configured()
        self.stop_listening_udev()

    @with_context()
    async def apply_autoinstall_config(self, context=None):
        await self._start_task
        await self._probe_task.wait()
        if False in self._errors:
            raise self._errors[False][0]
        if True in self._errors:
            raise self._errors[True][0]
        self.convert_autoinstall_config(context=context)
        if not self.model.is_root_mounted():
            raise Exception("autoinstall config did not mount root")
        if self.model.needs_bootloader_partition():
            raise Exception(
                "autoinstall config did not create needed bootloader "
                "partition")

    def guided_direct(self, disk):
        """Use the whole disk directly: one ext4 partition mounted at /."""
        self.reformat(disk)
        # Fixed: add the bootloader partition when the disk supports it,
        # exactly as guided_lvm does below -- without this, a guided
        # direct autoinstall can fail apply_autoinstall_config's
        # needs_bootloader_partition() check.
        if DeviceAction.TOGGLE_BOOT in DeviceAction.supported(disk):
            self.add_boot_disk(disk)
        result = {
            "size": disk.free_for_partitions,
            "fstype": "ext4",
            "mount": "/",
            }
        self.partition_disk_handler(disk, None, result)

    def guided_lvm(self, disk, lvm_options=None):
        """Use the whole disk for LVM: /boot partition plus a root LV."""
        self.reformat(disk)
        if DeviceAction.TOGGLE_BOOT in DeviceAction.supported(disk):
            self.add_boot_disk(disk)
        self.create_partition(
            device=disk, spec=dict(
                size=dehumanize_size('1G'),
                fstype="ext4",
                mount='/boot'
                ))
        part = self.create_partition(
            device=disk, spec=dict(
                size=disk.free_for_partitions,
                fstype=None,
                ))
        # Pick a VG name that doesn't collide with an existing one.
        vg_name = 'ubuntu-vg'
        i = 0
        while self.model._one(type='lvm_volgroup', name=vg_name) is not None:
            i += 1
            vg_name = 'ubuntu-vg-{}'.format(i)
        spec = dict(name=vg_name, devices=set([part]))
        if lvm_options and lvm_options['encrypt']:
            spec['password'] = lvm_options['luks_options']['password']
        vg = self.create_volgroup(spec)
        # There's no point using LVM and unconditionally filling the
        # VG with a single LV, but we should use more of a smaller
        # disk to avoid the user running into out of space errors
        # earlier than they probably expect to.
        # NOTE(review): (2 << 30) is 2 GiB, so these thresholds are twice
        # what the comments below state -- confirm which is intended
        # before "fixing" either.
        if vg.size < 10 * (2 << 30):
            # Use all of a small (<10G) disk.
            lv_size = vg.size
        elif vg.size < 20 * (2 << 30):
            # Use 10G of a smallish (<20G) disk.
            lv_size = 10 * (2 << 30)
        elif vg.size < 200 * (2 << 30):
            # Use half of a larger (<200G) disk.
            lv_size = vg.size // 2
        else:
            # Use at most 100G of a large disk.
            lv_size = 100 * (2 << 30)
        self.create_logical_volume(
            vg=vg, spec=dict(
                size=lv_size,
                name="ubuntu-lv",
                fstype="ext4",
                mount="/",
                ))

    async def _probe_response(self, wait, resp_cls):
        """Return an early response while probing/failed, else None."""
        if self._probe_task.task is None or not self._probe_task.task.done():
            if wait:
                await self._start_task
                await self._probe_task.wait()
            else:
                return resp_cls(status=ProbeStatus.PROBING)
        if True in self._errors:
            # Even the restricted probe failed: storage is unusable.
            return resp_cls(
                status=ProbeStatus.FAILED,
                error_report=self._errors[True][1].ref())
        return None

    def full_probe_error(self):
        """The report for a failed full probe, if any (restricted worked)."""
        if False in self._errors:
            return self._errors[False][1].ref()
        else:
            return None

    async def GET(self, wait: bool = False) -> StorageResponse:
        probe_resp = await self._probe_response(wait, StorageResponse)
        if probe_resp is not None:
            return probe_resp
        return StorageResponse(
            status=ProbeStatus.DONE,
            bootloader=self.model.bootloader,
            error_report=self.full_probe_error(),
            orig_config=self.model._orig_config,
            config=self.model._render_actions(include_all=True),
            blockdev=self.model._probe_data['blockdev'],
            dasd=self.model._probe_data.get('dasd', {}))

    async def POST(self, config: list):
        self.model._actions = self.model._actions_from_config(
            config, self.model._probe_data['blockdev'], is_probe_data=False)
        self.configured()

    async def guided_GET(self, min_size: int = None, wait: bool = False) \
            -> GuidedStorageResponse:
        probe_resp = await self._probe_response(wait, GuidedStorageResponse)
        if probe_resp is not None:
            return probe_resp
        if not min_size:
            min_size = DEFAULT_MIN_SIZE_GUIDED
        return GuidedStorageResponse(
            status=ProbeStatus.DONE,
            error_report=self.full_probe_error(),
            disks=[
                labels.for_client(device, min_size=min_size)
                for device in self.model._actions
                if boot.can_be_boot_device(device, with_reformatting=True)
            ])

    async def guided_POST(self, choice: Optional[GuidedChoice]) \
            -> StorageResponse:
        if choice is not None:
            disk = self.model._one(type='disk', id=choice.disk_id)
            if choice.use_lvm:
                lvm_options = None
                if choice.password is not None:
                    lvm_options = {
                        'encrypt': True,
                        'luks_options': {
                            'password': choice.password,
                            },
                        }
                self.guided_lvm(disk, lvm_options)
            else:
                self.guided_direct(disk)
        return await self.GET()

    async def reset_POST(self, context, request) -> StorageResponse:
        log.info("Resetting Filesystem model")
        self.model.reset()
        # NOTE(review): context is passed as GET's `wait` parameter here
        # (truthy, so it waits for probing) -- confirm that is intended.
        return await self.GET(context)

    async def has_rst_GET(self) -> bool:
        """Whether any NVMe device is remapped behind Intel RST/AHCI."""
        search = '/sys/module/ahci/drivers/pci:ahci/*/remapped_nvme'
        for remapped_nvme in glob.glob(search):
            with open(remapped_nvme, 'r') as f:
                if int(f.read()) > 0:
                    return True
        return False

    @with_context(name='probe_once', description='restricted={restricted}')
    async def _probe_once(self, *, context, restricted):
        """Run one storage probe and load the result into the model.

        Restricted probes only look at block devices, used as a fallback
        when the full probe fails.
        """
        if restricted:
            probe_types = {'blockdev'}
            fname = 'probe-data-restricted.json'
            key = "ProbeDataRestricted"
        else:
            probe_types = None
            fname = 'probe-data.json'
            key = "ProbeData"
        storage = await run_in_thread(
            self.app.prober.get_storage, probe_types)
        fpath = os.path.join(self.app.block_log_dir, fname)
        with open(fpath, 'w') as fp:
            json.dump(storage, fp, indent=4)
        self.app.note_file_for_apport(key, fpath)
        self.model.load_probe_data(storage)

    @with_context()
    async def _probe(self, *, context=None):
        self._errors = {}
        for (restricted, kind) in [
                (False, ErrorReportKind.BLOCK_PROBE_FAIL),
                (True, ErrorReportKind.DISK_PROBE_FAIL),
                ]:
            try:
                await self._probe_once_task.start(
                    context=context, restricted=restricted)
                # We wait on the task directly here, not
                # self._probe_once_task.wait as if _probe_once_task
                # gets cancelled, we should be cancelled too.
                await asyncio.wait_for(self._probe_once_task.task, 15.0)
            except asyncio.CancelledError:
                # asyncio.CancelledError is a subclass of Exception in
                # Python 3.6 (sadface)
                raise
            except Exception as exc:
                block_discover_log.exception(
                    "block probing failed restricted=%s", restricted)
                report = self.app.make_apport_report(kind, "block probing")
                if report is not None:
                    self._errors[restricted] = (exc, report)
                continue
            break

    @with_context()
    def convert_autoinstall_config(self, context=None):
        """Turn self.ai_data into model actions (guided or manual)."""
        log.debug("self.ai_data = %s", self.ai_data)
        if 'layout' in self.ai_data:
            layout = self.ai_data['layout']
            meth = getattr(self, "guided_" + layout['name'])
            disk = self.model.disk_for_match(
                self.model.all_disks(),
                layout.get("match", {'size': 'largest'}))
            meth(disk)
        elif 'config' in self.ai_data:
            self.model.apply_autoinstall_config(self.ai_data['config'])
            self.model.grub = self.ai_data.get('grub')
            self.model.swap = self.ai_data.get('swap')

    def start(self):
        if self.model.bootloader == Bootloader.PREP:
            self.supports_resilient_boot = False
        else:
            release = lsb_release()['release']
            self.supports_resilient_boot = release >= '20.04'
        self._start_task = schedule_task(self._start())

    async def _start(self):
        context = pyudev.Context()
        self._monitor = pyudev.Monitor.from_netlink(context)
        self._monitor.filter_by(subsystem='block')
        self._monitor.enable_receiving()
        self.start_listening_udev()
        await self._probe_task.start()

    def start_listening_udev(self):
        loop = asyncio.get_event_loop()
        loop.add_reader(self._monitor.fileno(), self._udev_event)

    def stop_listening_udev(self):
        loop = asyncio.get_event_loop()
        loop.remove_reader(self._monitor.fileno())

    def _udev_event(self):
        cp = run_command(['udevadm', 'settle', '-t', '0'])
        if cp.returncode != 0:
            log.debug("waiting 0.1 to let udev event queue settle")
            self.stop_listening_udev()
            loop = asyncio.get_event_loop()
            loop.call_later(0.1, self.start_listening_udev)
            return
        # Drain the udev events in the queue -- if we stopped listening to
        # allow udev to settle, it's good bet there is more than one event to
        # process and we don't want to kick off a full block probe for each
        # one.  It's a touch unfortunate that pyudev doesn't have a
        # non-blocking read so we resort to select().
        while select.select([self._monitor.fileno()], [], [], 0)[0]:
            action, dev = self._monitor.receive_device()
            log.debug("_udev_event %s %s", action, dev)
        self._probe_task.start_sync()

    def make_autoinstall(self):
        rendered = self.model.render()
        r = {
            'config': rendered['storage']['config']
            }
        if 'swap' in rendered:
            r['swap'] = rendered['swap']
        return r
class GeoIP:
    """Look up the country code and timezone via geoip.ubuntu.com.

    Retries the lookup whenever the network (or proxy) comes up, and
    broadcasts changed values over the on_countrycode / on_timezone
    EventCallbacks.
    """

    def __init__(self, app):
        self.app = app
        self.element = None
        self.cc = None
        self.tz = None
        self.check_state = CheckState.NOT_STARTED
        self.on_countrycode = EventCallback()
        self.on_timezone = EventCallback()
        self.lookup_task = SingleInstanceTask(self.lookup)
        self.app.hub.subscribe('network-up', self.maybe_start_check)
        self.app.hub.subscribe('network-proxy-set', self.maybe_start_check)

    def maybe_start_check(self):
        if self.check_state != CheckState.DONE:
            self.check_state = CheckState.CHECKING
            self.lookup_task.start_sync()

    async def lookup(self):
        rv = await self._lookup()
        if rv:
            self.check_state = CheckState.DONE
        else:
            self.check_state = CheckState.FAILED
        return rv

    async def _lookup(self):
        """Fetch and parse the geoip response; True on full success."""
        try:
            response = await run_in_thread(
                requests.get, "https://geoip.ubuntu.com/lookup")
            response.raise_for_status()
        except requests.exceptions.RequestException:
            log.exception("geoip lookup failed")
            return False
        self.response_text = response.text
        try:
            self.element = ElementTree.fromstring(self.response_text)
        except ElementTree.ParseError:
            log.exception("parsing %r failed", self.response_text)
            return False

        cc = self.element.find("CountryCode")
        if cc is None or cc.text is None:
            log.debug("no CountryCode found in %r", self.response_text)
            return False
        cc = cc.text.lower()
        if len(cc) != 2:
            log.debug("bogus CountryCode found in %r", self.response_text)
            return False
        if cc != self.cc:
            self.on_countrycode.broadcast(cc)
            self.cc = cc

        tz = self.element.find("TimeZone")
        if tz is None or not tz.text:
            log.debug("no TimeZone found in %r", self.response_text)
            return False
        # Fixed: compare and broadcast the element's *text*, mirroring the
        # CountryCode handling above.  Previously the Element object was
        # compared against the stored string (always unequal, so every
        # lookup re-broadcast) and the Element itself was broadcast to
        # subscribers expecting a timezone string.
        tz = tz.text
        if tz != self.tz:
            self.on_timezone.broadcast(tz)
            self.tz = tz

        return True

    @property
    def countrycode(self):
        return self.cc

    @property
    def timezone(self):
        return self.tz
class FilesystemController(SubiquityController, FilesystemManipulator):
    """Server controller for the storage step.

    Visible responsibilities: start block-device probing (pyudev monitor
    + prober thread) from start(), serve the storage API (GET / POST /
    reset_POST), and convert autoinstall "storage" data into filesystem
    model actions.
    """

    endpoint = API.storage

    autoinstall_key = "storage"
    autoinstall_schema = {'type': 'object'}  # ...
    model_name = "filesystem"

    def __init__(self, app):
        # Set before super().__init__() — the base class may invoke
        # load_autoinstall_data() during initialization, which stores
        # into ai_data.
        self.ai_data = {}
        super().__init__(app)
        self.model.target = app.base_model.target
        if self.opts.dry_run and self.opts.bootloader:
            # Dry-run only: force the bootloader named on the command line.
            name = self.opts.bootloader.upper()
            self.model.bootloader = getattr(Bootloader, name)
        # pyudev Monitor; created in _start().
        self._monitor = None
        # Probe failures keyed by the `restricted` flag of the attempt:
        # {restricted: (exception, apport report)}.
        self._errors = {}
        # propagate_errors=False: probe failures are recorded in
        # _errors/logged instead of being raised out of the task.
        self._probe_once_task = SingleInstanceTask(
            self._probe_once, propagate_errors=False)
        self._probe_task = SingleInstanceTask(
            self._probe, propagate_errors=False)

    def load_autoinstall_data(self, data):
        """Store the autoinstall "storage" section for later conversion.

        A missing section defaults to the guided LVM layout on a
        non-interactive install, and to empty data (UI-driven) otherwise.
        """
        log.debug("load_autoinstall_data %s", data)
        if data is None:
            if not self.interactive():
                data = {
                    'layout': {
                        'name': 'lvm',
                        },
                    }
            else:
                data = {}
        log.debug("self.ai_data = %s", data)
        self.ai_data = data

    def configured(self):
        # Once the storage configuration is final there is no point
        # reacting to further udev events.
        super().configured()
        self.stop_listening_udev()

    @with_context()
    async def apply_autoinstall_config(self, context=None):
        """Wait for probing, then apply the autoinstall storage config.

        Re-raises the exception recorded for a failed probe attempt, and
        raises if the resulting config leaves root unmounted or the
        needed bootloader partition missing.
        """
        await self._start_task
        await self._probe_task.wait()
        # _errors is keyed by the `restricted` flag; either recorded
        # failure aborts the autoinstall.
        if False in self._errors:
            raise self._errors[False][0]
        if True in self._errors:
            raise self._errors[True][0]
        self.convert_autoinstall_config(context=context)
        if not self.model.is_root_mounted():
            raise Exception("autoinstall config did not mount root")
        if self.model.needs_bootloader_partition():
            raise Exception(
                "autoinstall config did not create needed bootloader "
                "partition")

    async def GET(self, wait: bool = False) -> StorageResponse:
        """Report probe status and, once probing is done, storage state.

        With wait=False (default) returns PROBING immediately when
        probing has not finished; with wait=True blocks until it has.
        """
        if self._probe_task.task is None or not self._probe_task.task.done():
            if wait:
                await self._start_task
                await self._probe_task.wait()
            else:
                return StorageResponse(status=ProbeStatus.PROBING)
        if True in self._errors:
            # Even the restricted (fallback) probe failed.
            return StorageResponse(
                status=ProbeStatus.FAILED,
                error_report=self._errors[True][1].ref())
        else:
            # A failed full probe with a successful restricted retry is
            # still DONE, but carries the error report reference.
            if False in self._errors:
                err_ref = self._errors[False][1].ref()
            else:
                err_ref = None
            return StorageResponse(
                status=ProbeStatus.DONE,
                bootloader=self.model.bootloader,
                error_report=err_ref,
                orig_config=self.model._orig_config,
                config=self.model._render_actions(include_all=True),
                blockdev=self.model._probe_data['blockdev'],
                dasd=self.model._probe_data.get('dasd', {}))

    async def POST(self, config: list):
        """Replace the model's actions with a client-supplied config and
        mark this controller configured."""
        self.model._actions = self.model._actions_from_config(
            config, self.model._probe_data['blockdev'], is_probe_data=False)
        self.configured()

    async def reset_POST(self, context, request) -> StorageResponse:
        log.info("Resetting Filesystem model")
        self.model.reset()
        # NOTE(review): GET's first parameter is `wait`; passing the
        # (truthy) context object makes this call wait for probing.
        # Confirm that is intended rather than e.g. self.GET(wait=True).
        return await self.GET(context)

    @with_context(name='probe_once', description='restricted={restricted}')
    async def _probe_once(self, *, context, restricted):
        """Run one probe pass and load the results into the model.

        restricted=True limits probing to 'blockdev' data only (used as
        a fallback after a full probe fails). The raw probe data is also
        dumped to block_log_dir and registered with apport.
        """
        if restricted:
            probe_types = {'blockdev'}
            fname = 'probe-data-restricted.json'
            key = "ProbeDataRestricted"
        else:
            probe_types = None
            fname = 'probe-data.json'
            key = "ProbeData"
        # Probing is blocking work; run it off the event loop.
        storage = await run_in_thread(
            self.app.prober.get_storage, probe_types)
        fpath = os.path.join(self.app.block_log_dir, fname)
        with open(fpath, 'w') as fp:
            json.dump(storage, fp, indent=4)
        self.app.note_file_for_apport(key, fpath)
        self.model.load_probe_data(storage)

    @with_context()
    async def _probe(self, *, context=None):
        """Try a full probe, falling back to a restricted one on failure.

        Each failure is logged, turned into an apport report and recorded
        in self._errors; the first attempt to succeed ends the loop.
        """
        self._errors = {}
        for (restricted, kind) in [
                (False, ErrorReportKind.BLOCK_PROBE_FAIL),
                (True, ErrorReportKind.DISK_PROBE_FAIL),
                ]:
            try:
                await self._probe_once_task.start(
                    context=context, restricted=restricted)
                # We wait on the task directly here, not
                # self._probe_once_task.wait as if _probe_once_task
                # gets cancelled, we should be cancelled too.
                await asyncio.wait_for(self._probe_once_task.task, 15.0)
            except asyncio.CancelledError:
                # asyncio.CancelledError is a subclass of Exception in
                # Python 3.6 (sadface)
                raise
            except Exception as exc:
                block_discover_log.exception(
                    "block probing failed restricted=%s", restricted)
                report = self.app.make_apport_report(kind, "block probing")
                if report is not None:
                    self._errors[restricted] = (exc, report)
                continue
            break

    @with_context()
    def convert_autoinstall_config(self, context=None):
        """Translate stored autoinstall data into model actions.

        'layout' dispatches to a guided_<name> method applied to a
        matched disk (largest by default); otherwise an explicit
        'config' action list plus optional grub/swap settings is applied.
        """
        log.debug("self.ai_data = %s", self.ai_data)
        if 'layout' in self.ai_data:
            layout = self.ai_data['layout']
            meth = getattr(self, "guided_" + layout['name'])
            disk = self.model.disk_for_match(
                self.model.all_disks(),
                layout.get("match", {'size': 'largest'}))
            meth(disk)
        elif 'config' in self.ai_data:
            self.model.apply_autoinstall_config(self.ai_data['config'])
            self.model.grub = self.ai_data.get('grub', {})
            self.model.swap = self.ai_data.get('swap')

    def start(self):
        if self.model.bootloader == Bootloader.PREP:
            # NOTE(review): resilient boot is presumably unavailable on
            # PReP — confirm against this flag's consumers.
            self.supports_resilient_boot = False
        else:
            # String comparison against '20.04'; presumably the feature
            # landed in focal. All supported releases sort correctly.
            release = lsb_release()['release']
            self.supports_resilient_boot = release >= '20.04'
        self._start_task = schedule_task(self._start())

    async def _start(self):
        # Watch block-device hotplug via udev, then kick off probing.
        context = pyudev.Context()
        self._monitor = pyudev.Monitor.from_netlink(context)
        self._monitor.filter_by(subsystem='block')
        self._monitor.enable_receiving()
        self.start_listening_udev()
        await self._probe_task.start()

    def start_listening_udev(self):
        loop = asyncio.get_event_loop()
        loop.add_reader(self._monitor.fileno(), self._udev_event)

    def stop_listening_udev(self):
        loop = asyncio.get_event_loop()
        loop.remove_reader(self._monitor.fileno())

    def _udev_event(self):
        # If udev has not settled yet, back off briefly instead of
        # reprobing against a still-changing device set.
        cp = run_command(['udevadm', 'settle', '-t', '0'])
        if cp.returncode != 0:
            log.debug("waiting 0.1 to let udev event queue settle")
            self.stop_listening_udev()
            loop = asyncio.get_event_loop()
            loop.call_later(0.1, self.start_listening_udev)
            return
        # Drain the udev events in the queue -- if we stopped listening to
        # allow udev to settle, it's good bet there is more than one event to
        # process and we don't want to kick off a full block probe for each
        # one. It's a touch unfortunate that pyudev doesn't have a
        # non-blocking read so we resort to select().
        while select.select([self._monitor.fileno()], [], [], 0)[0]:
            action, dev = self._monitor.receive_device()
            log.debug("_udev_event %s %s", action, dev)
        self._probe_task.start_sync()

    def make_autoinstall(self):
        """Render the applied storage config back as autoinstall data."""
        rendered = self.model.render()
        r = {'config': rendered['storage']['config']}
        if 'swap' in rendered:
            r['swap'] = rendered['swap']
        return r