def run(self, load=SNAPSHOT_BASE_NAME, guest_forwards=[], extra_args=[]):
    """Launch QEMU for this VM, optionally resuming a named snapshot.

    :param load: snapshot name passed to ``-loadvm``; falsy for a cold boot.
    :param guest_forwards: extra ``(ip, port, cmd)`` guestfwd triples.
    :param extra_args: extra raw QEMU command-line arguments.

    NOTE: the ``[]`` defaults are only read/rebound, never mutated in
    place, so the shared-mutable-default pitfall does not bite here.
    """
    if load:
        # resuming a snapshot leaves the guest clock far behind real time
        self.vm.time_desync.report(self.vm.time_desync.LARGE)
    run_args = ['-loadvm', load] if load else []

    self.monitor = Monitor(self.vm)
    run_args += ['-qmp', (f'tcp:127.0.0.1:{self.monitor.port},'
                          'server,nowait,nodelay')]

    # TODO: extract SSH into a separate plugin?
    self.vm.ssh = SSH(self.vm,
                      key=path.fingertip('ssh_key', 'fingertip.paramiko'))
    self.vm.shared_directory = SharedDirectory(self.vm)
    self.vm.exec = self.vm.ssh.exec

    # host port -> guest sshd, plus a guestfwd that tunnels the
    # well-known cache ip:port to the host-side http cache via nc
    ssh_host_forward = f'hostfwd=tcp:127.0.0.1:{self.vm.ssh.port}-:22'
    cache_guest_forward = (CACHE_INTERNAL_IP, CACHE_INTERNAL_PORT,
                          f'nc 127.0.0.1 {self.vm.http_cache.port}')
    guest_forwards = guest_forwards + [cache_guest_forward]
    run_args += ['-device', 'virtio-net,netdev=net0',
                 '-netdev', ','.join(['user', 'id=net0', ssh_host_forward] +
                                     (['restrict=yes'] if self.vm.sealed
                                      else []) +
                                     [f'guestfwd=tcp:{ip}:{port}-cmd:{cmd}'
                                      for ip, port, cmd in guest_forwards])]

    image = os.path.join(self.vm.path, 'image.qcow2')
    if self._image_to_clone:
        required_space = os.path.getsize(self._image_to_clone) + 2**30
        # FIX: use the lock as a context manager; the previous manual
        # acquire()/release() pair leaked the inter-process lock when
        # temp.disappearing_file()/reflink.auto() raised while it was held.
        cloned_to_tmp = False
        with fasteners.process_lock.InterProcessLock('/tmp/.fingertip'):
            if self.vm._transient and temp.has_space(required_space):
                image = temp.disappearing_file('/tmp',
                                               hint='fingertip-qemu')
                reflink.auto(self._image_to_clone, image)
                cloned_to_tmp = True
        if not cloned_to_tmp:
            # cloning into self.vm.path does not touch /tmp,
            # so (as in the original) it happens outside the lock
            reflink.auto(self._image_to_clone, image)
        self._image_to_clone = None
    run_args += ['-drive',
                 f'file={image},cache=unsafe,if=virtio,discard=unmap']

    run_args += ['-m', self.ram_size]

    os.makedirs(path.SHARED, exist_ok=True)

    args = QEMU_COMMON_ARGS + self.custom_args + run_args + extra_args
    self.vm.log.debug(' '.join(args))
    if self.vm._backend_mode == 'pexpect':
        # interactive console captured through pexpect, logged at INFO
        pexp = self.vm.log.pseudofile_powered(pexpect.spawn,
                                              logfile=logging.INFO)
        self.vm.console = pexp(self._qemu, args, echo=False, timeout=None,
                               encoding='utf-8', codec_errors='ignore')
        self.live = True
    elif self.vm._backend_mode == 'direct':
        # foreground run with the serial console multiplexed onto stdio
        subprocess.run([self._qemu, '-serial', 'mon:stdio'] + args,
                       check=True)
        self.live = False
        self._go_down()
def hint():
    """Preserve the log file and tell the user (on stderr) where it went."""
    if not os.path.exists(to_file):
        return
    stamp = datetime.datetime.utcnow().isoformat()
    preserved = path.logs(f'{name}-{stamp}.txt', makedirs=True)
    reflink.auto(to_file, preserved)
    # abbreviate $HOME to ~ for display
    home = os.path.expanduser('~')
    if preserved.startswith(home):
        preserved = preserved.replace(home, '~')
    if DEBUG:
        message = f'Logfile: {preserved}'
    else:
        message = f'Check {preserved} for more details or set FINGERTIP_DEBUG=1'
    sys.stderr.write(message + '\n')
def hint(self):
    """Preserve this log and print (to stderr) where to find the copy.

    Warns and bails out early if the log file has vanished.
    """
    if not os.path.exists(self.path):
        self.warning(f'{self.path} missing!')
        # FIX: return here -- the original fell through and
        # reflink.auto() below would fail on the missing source file
        return
    fname = f'{datetime.datetime.utcnow().isoformat()}.txt'
    t = path.logs(fname, makedirs=True)
    reflink.auto(self.path, t)
    # abbreviate $HOME to ~ for display
    home = os.path.expanduser('~')
    t = t if not t.startswith(home) else t.replace(home, '~')
    m = (f'For an intermediate log, check {t} or set FINGERTIP_DEBUG=1.'
         if not DEBUG else f'Logfile: {t}')
    sys.stderr.write(m + '\n')
def fetch(self, url, out_path):
    """Fetch url into out_path from the first usable saviour source.

    Sources are tried in order; the last one is attempted even if it
    does not look fetcheable.
    """
    sources = saviour_sources()
    last = len(sources) - 1
    for i, (source, cache) in enumerate(sources):
        if not is_fetcheable(source, url) and i != last:
            continue
        if source == 'local':
            # local mirror: just clone/copy the file, no HTTP involved
            reflink.auto(path.saviour(url), out_path)
            return
        sess = self._get_requests_session(direct=not cache)
        if source == 'direct':
            surl = url
        elif '://' in source:
            surl = source + '/' + url
        else:
            surl = 'http://' + source + '/' + url
        log.debug(f'fetching{"/caching" if cache else ""} '
                  f'{os.path.basename(url)} from {surl}')
        r = sess.get(surl)  # not raw because that punctures cache
        with open(out_path, 'wb') as f:
            f.write(r.content)
        return
def __init__(self, backend_name, sealed=True, expire_in='7d'):
    """Set up a fresh machine working directory, hooks and logging.

    :param backend_name: name of the backend plugin driving this machine.
    :param sealed: whether the machine's networking is restricted.
    :param expire_in: expiration period, e.g. '7d'.
    """
    self.hooks = hooks.HookManager()
    os.makedirs(path.MACHINES, exist_ok=True)
    # per-machine scratch directory that vanishes unless saved
    self.path = temp.disappearing_dir(path.MACHINES)
    self._parent_path = path.MACHINES
    # States: loaded -> spun_up -> spun_down -> saved/dropped
    self._state = 'spun_down'
    self._transient = False
    self._up_counter = 0
    self.sealed = sealed
    self.expiration = expiration.Expiration(expire_in)
    self.time_desync = time_desync.TimeDesync(self)
    self.backend = backend_name
    # log both to the sublogger hierarchy and to a file inside self.path
    self.log = log.Sublogger(f'plugins.backend.{backend_name}',
                             os.path.join(self.path, 'log.txt'))
    self.log.debug(f'created {backend_name}')
    # carry the log file over whenever this machine gets cloned
    self.hooks.clone.append(lambda to: reflink.auto(
        os.path.join(self.path, 'log.txt'),
        os.path.join(to, 'log.txt')))
def run(self, load=SNAPSHOT_BASE_NAME, guest_forwards=[], extra_args=[]):
    """Launch QEMU for this VM, optionally resuming a named snapshot.

    :param load: snapshot name passed to ``-loadvm``; falsy for a cold boot.
    :param guest_forwards: extra ``(ip, port, cmd)`` guestfwd triples.
    :param extra_args: extra raw QEMU command-line arguments.

    NOTE(review): the ``[]`` defaults are only read/rebound here, never
    mutated in place, so the shared-mutable-default pitfall does not bite.
    """
    if load:
        # resuming a snapshot leaves the guest clock far behind real time
        self.vm.time_desync.report(self.vm.time_desync.LARGE)
    run_args = ['-loadvm', load] if load else []
    self.monitor = Monitor(self.vm)
    run_args += [
        '-qmp', (f'tcp:127.0.0.1:{self.monitor.port},'
                 'server,nowait,nodelay')
    ]
    self.vm.ssh.port = free_port.find()
    self.vm.shared_directory = SharedDirectory(self.vm)
    self.vm.exec = self.vm.ssh.exec
    # sshd forward plus any extra host forwards requested on the machine
    host_forwards = [(self.vm.ssh.port, 22)] + self.vm._host_forwards
    host_forwards = [
        f'hostfwd=tcp:127.0.0.1:{h}-:{g}' for h, g in host_forwards
    ]
    # guestfwd tunnelling the well-known cache ip:port to the host-side
    # http cache through nc
    cache_guest_forward = (CACHE_INTERNAL_IP, CACHE_INTERNAL_PORT,
                           f'nc 127.0.0.1 {self.vm.http_cache.port}')
    guest_forwards = guest_forwards + [cache_guest_forward]
    run_args += [
        '-device', 'virtio-net,netdev=net0',
        '-netdev', ','.join(['user', 'id=net0'] + host_forwards +
                            (['restrict=yes'] if self.vm.sealed else []) +
                            [
                                f'guestfwd=tcp:{ip}:{port}-cmd:{cmd}'
                                for ip, port, cmd in guest_forwards
                            ])
    ]
    self.image = os.path.join(self.vm.path, 'image.qcow2')
    if self._image_to_clone:
        # let's try to use /tmp (which is, hopefully, tmpfs) for transients
        # if it looks empty enough
        cloned_to_tmp = False
        required_space = os.path.getsize(self._image_to_clone) + 2 * 2**30
        if self.vm._transient:
            # Would be ideal to have it global (and multiuser-ok)
            tmp_free_lock = path.cache('.tmp-free-space-check-lock')
            with fasteners.process_lock.InterProcessLock(tmp_free_lock):
                if temp.has_space(required_space, where='/tmp'):
                    self.image = temp.disappearing_file(
                        '/tmp', hint='fingertip-qemu')
                    self.vm.log.info('preloading image to /tmp...')
                    reflink.auto(self._image_to_clone, self.image)
                    self.vm.log.info('preloading image to /tmp completed')
                    cloned_to_tmp = True
        # fall back to a clone inside the machine directory
        if not cloned_to_tmp:
            reflink.auto(self._image_to_clone, self.image)
        self._image_to_clone = None
    if self.virtio_scsi:
        run_args += [
            '-device', 'virtio-scsi-pci', '-device', 'scsi-hd,drive=hd',
            '-drive', f'file={self.image},cache=unsafe,'
                      'if=none,id=hd,discard=unmap'
        ]
    else:
        run_args += [
            '-drive', f'file={self.image},cache=unsafe,'
                      'if=virtio,discard=unmap'
        ]
    # RAM ceiling in MiB
    run_args += ['-m', str(self.vm.ram.max // 2**20)]
    os.makedirs(path.SHARED, exist_ok=True)
    args = QEMU_COMMON_ARGS + self.custom_args + run_args + extra_args
    self.vm.log.debug(' '.join(args))
    if self.vm._backend_mode == 'pexpect':
        # start connecting/negotiating QMP, later starts auto-ballooning
        threading.Thread(target=self.monitor.connect, daemon=True).start()
        pexp = self.vm.log.pseudofile_powered(pexpect.spawn,
                                              logfile=logging.INFO)
        self.vm.console = pexp(self._qemu, args, echo=False, timeout=None,
                               encoding='utf-8', codec_errors='ignore')
        self.live = True
    elif self.vm._backend_mode == 'direct':
        subprocess.run([self._qemu, '-serial', 'mon:stdio'] + args,
                       check=True)
        # FIXME: autoballooning won't start w/o the monitor connection!
        self.live = False
        self._go_down()
def main(m=None, url=None):
    """Build a machine from a cloud image at ``url`` via cloud-init NoCloud.

    Fetches (or copies) the image, grows it to the configured disk size,
    serves generated meta-data/user-data over the internal http cache,
    boots with an SMBIOS-provided NoCloud seed URL and waits for
    cloud-init to finish, then detects the shell prompt.

    NOTE(review): the '******' runs below look like scrubbed/redacted
    source (credential literals and whole console-interaction statements
    appear to have been lost); they are preserved verbatim and this block
    is NOT valid Python as-is -- recover the originals from history.
    """
    m = m or fingertip.build('backend.qemu')
    assert url
    assert hasattr(m, 'qemu')
    # because we have no idea how to unseal it later
    m.sealed = False
    m.expiration.cap('4h')
    image_file = os.path.join(m.path, os.path.basename(url))
    if '://' in url:
        m.log.info(f'fetching {url}...')
        m.http_cache.fetch(url, image_file)
    else:
        m.log.info(f'copying {url}...')
        reflink.auto(url, image_file)
        m.expiration.depend_on_a_file(url)
    m.log.info('resizing image...')
    run = m.log.pipe_powered(subprocess.run,
                             stdout=logging.INFO, stderr=logging.ERROR)
    run(['qemu-img', 'resize', image_file, m.qemu.disk_size], check=True)
    m.qemu._image_to_clone = image_file
    m.qemu.virtio_scsi = True  # in case it's Linux <5
    with m:
        # derive hostname/FQDN from the image file name
        hostname = url.rsplit('/', 1)[-1].rsplit('.', 1)[0].replace('.', '_')
        hostname = hostname.replace('x86_64', '')
        fqdn = hostname + '.fingertip.local'
        meta_data = META_TEMPLATE.format(FQDN=fqdn, HOSTNAME=hostname,
                                         SSH_PUBKEY=m.ssh.pubkey)
        meta_file = os.path.join(m.path, 'meta-data')
        with open(meta_file, 'w') as f:
            f.write(meta_data)
        m.http_cache.serve_local_file('/cloud-init/meta-data', meta_file)
        user_data = USER_TEMPLATE.format(FQDN=fqdn, HOSTNAME=hostname,
                                         SSH_PUBKEY=m.ssh.pubkey)
        user_file = os.path.join(m.path, 'user-data')
        with open(user_file, 'w') as f:
            f.write(user_data)
        m.http_cache.serve_local_file('/cloud-init/user-data', user_file)
        # NoCloud datasource seed URL smuggled in through SMBIOS serial
        init_url = m.http_cache.internal_url + '/cloud-init/'
        seed = ['-smbios', f'type=1,serial=ds=nocloud-net;s={init_url}']
        m.qemu.run(load=None, extra_args=seed)
        m.console.expect_exact('cloud-config final message')
        m.console.sendline('')
        m.console.sendline('')
        # scrubbed statements preserved verbatim below (see docstring)
        m.console.expect(f'login:'******'root')
        m.console.expect('Password: '******'fingertip')
        m.console.sendline(' echo prompt" "detection\n')
        m.console.expect_exact('prompt detection')
        # capture whatever preceded ' echo prompt' as the shell prompt
        m.prompt = re.search(r'\n(.+?) echo prompt',
                             m.console.before).group(1)
        m.log.debug(f'm.prompt = {repr(m.prompt)}')
        m.console.sendline('')
        m.console.expect_exact(m.prompt)
        m.ram.safeguard = '512M'  # sane for 2020, and it's overrideable anyway

        def login(username='******', password='******'):
            # body scrubbed; fragments preserved verbatim (see docstring)
            m.console.expect(f'login: '******'Password: '******'hwclock -s'))
        m.log.info('cloud-init finished')
        return m
def __init__(self, url, *path_components, enough_to_have=None):
    """Clone a git repo mirror from the best available saviour source.

    Tries each configured source in order; reuses the local cache when it
    already contains ``enough_to_have`` (a tag/head/commit), otherwise
    clones and refreshes the cache. The instance itself ends up being a
    git.Repo over a disappearing working path.

    :param url: upstream repository URL.
    :param path_components: cache subpath; defaults to url with '/'->'::'.
    :param enough_to_have: revision whose presence makes the cache enough.
    """
    if not path_components:
        path_components = [url.replace('/', '::')]
    self.url = url
    cache_path = path.downloads('git', *path_components, makedirs=True)
    self.path = temp.disappearing_dir(os.path.dirname(cache_path),
                                      path_components[-1])
    lock_working_copy_path = self.path + '-lock'
    lock_cache_path = cache_path + '-lock'
    lock.Lock.__init__(self, lock_working_copy_path)
    sources = saviour_sources()
    self.self_destruct = False
    with lock.Lock(lock_cache_path), lock.Lock(lock_working_copy_path):
        # a corrupt cache is logged but treated as not-enough, not fatal
        cache_is_enough = False
        if os.path.exists(cache_path):
            try:
                cr = git.Repo(cache_path)
                cache_is_enough = (enough_to_have and
                                   _has_rev(cr, enough_to_have))
            except git.GitError as e:
                log.error(f'something wrong with git cache {cache_path}')
                log.error(str(e))
        _remove(self.path)
        for i, (source, cache) in enumerate(sources):
            last_source = i == len(sources) - 1
            if cache and cache_is_enough:
                log.info(f'not re-fetching {url} from {source} '
                         f'because {enough_to_have} '
                         'is already present in cache')
                git.Repo.clone_from(cache_path, self.path, mirror=True)
                break
            if source == 'local':
                surl = path.saviour(url).replace('//', '/')  # workaround
                # a missing local mirror only matters for the last source
                if not os.path.exists(surl) and not last_source:
                    continue
                log.info(f'cloning {url} from local saviour mirror')
                git.Repo.clone_from(surl, self.path, mirror=True)
                break
            elif source == 'direct':
                surl = url
            else:
                surl = source + '/' + url
                surl = 'http://' + surl if '://' not in source else surl
            log.info(f'cloning {url} from {source} '
                     f'cache_exists={os.path.exists(cache_path)}...')
            try:
                # TODO: bare clone
                # no harm in referencing cache, even w/o cached+
                git.Repo.clone_from(surl, self.path, mirror=True,
                                    dissociate=True,
                                    reference_if_able=cache_path)
            except git.GitError:
                log.warning(f'could not clone {url} from {source}')
                # only the last source failing is fatal
                if last_source:
                    raise
                continue
            break
        # refresh the cache from the freshly cloned working copy
        _remove(cache_path)
        reflink.auto(self.path, cache_path)
    git.Repo.__init__(self, self.path)
    self.remotes[0].set_url(url)
    self.self_destruct = True
def clone(to):
    """Carry snapshot.tar over into a machine clone's directory."""
    # no starting image yet -> nothing to copy
    if hasattr(m.container, 'starting_image'):
        m.log.debug(f'{m} {to}')
        src = os.path.join(m.path, 'snapshot.tar')
        dst = os.path.join(to, 'snapshot.tar')
        reflink.auto(src, dst)
def __init__(self, url, *path_components, enough_to_have=None):
    """Clone a git repo mirror from the best available saviour source.

    Tries each configured source in order; reuses the local cache when it
    already contains ``enough_to_have`` (checked inline against tags,
    heads and commit hashes), otherwise clones and refreshes the cache.
    The instance itself ends up being a git.Repo over a disappearing
    working path.

    :param url: upstream repository URL.
    :param path_components: required cache subpath components.
    :param enough_to_have: revision whose presence makes the cache enough.
    """
    assert path_components
    self.url = url
    cache_path = path.downloads('git', *path_components, makedirs=True)
    cache_exists = os.path.exists(cache_path)
    self.path = temp.disappearing_dir(os.path.dirname(cache_path),
                                      path_components[-1])
    lock_working_copy_path = self.path + '-lock'
    lock_cache_path = cache_path + '-lock'
    lock.Lock.__init__(self, lock_working_copy_path)
    update_not_needed = None
    sources = saviour_sources()
    self.self_destruct = False
    with lock.Lock(lock_cache_path), lock.Lock(lock_working_copy_path):
        _remove(self.path)
        for i, (source, cache) in enumerate(sources):
            last_source = i == len(sources) - 1
            # evaluate the cache at most once (update_not_needed is None)
            if cache and cache_exists and update_not_needed is None:
                cr = git.Repo(cache_path)
                update_not_needed = enough_to_have and (
                    enough_to_have in (t.name for t in cr.tags) or
                    enough_to_have in (h.name for h in cr.heads) or
                    enough_to_have in (c.hexsha for c in cr.iter_commits())
                    # that's not all revspecs, but best-effort is fine
                )
                if update_not_needed:
                    log.info(f'not re-fetching {url} from {source} '
                             f'because {enough_to_have} '
                             'is already present in cache')
                    git.Repo.clone_from(cache_path, self.path, mirror=True)
                    break
            if source == 'local':
                surl = path.saviour(url).replace('//', '/')  # workaround
                # a missing local mirror only matters for the last source
                if not os.path.exists(surl) and not last_source:
                    continue
                log.info(f'cloning {url} from local saviour mirror')
                git.Repo.clone_from(surl, self.path, mirror=True)
                break
            elif source == 'direct':
                surl = url
            else:
                surl = source + '/' + url
                surl = 'http://' + surl if '://' not in source else surl
            log.info(f'cloning {url} from {source} '
                     f'cache_exists={cache_exists}...')
            try:
                # TODO: bare clone
                # no harm in referencing cache, even w/o cached+
                git.Repo.clone_from(surl, self.path, mirror=True,
                                    dissociate=True,
                                    reference_if_able=cache_path)
            except git.GitError:
                log.warning(f'could not clone {url} from {source}')
                # only the last source failing is fatal
                if last_source:
                    raise
                continue
            break
        # refresh the cache from the freshly cloned working copy
        _remove(cache_path)
        reflink.auto(self.path, cache_path)
    git.Repo.__init__(self, self.path)
    self.remotes[0].set_url(url)
    self.self_destruct = True