def test_gen_obj_reg(self):
    """gen_obj on a regular file yields a regular-file fs object with matching attrs."""
    path = os.path.join(self.dir, "reg_obj")
    open(path, "w").close()
    o = livefs.gen_obj(path)
    self.assertTrue(fs.isreg(o))
    self.check_attrs(o, path)
    # Passing inode=None overrides the stat-derived inode; the object should
    # still describe the on-disk path, but carry a different inode value.
    o2 = livefs.gen_obj(path, inode=None)
    # was check_attrs(o, path): it re-validated the first object instead of o2
    self.check_attrs(o2, path)
    self.assertNotEqual(o.inode, o2.inode)
def test_identification(self):
    """Exercise set_state()/saved_mtimes tracking over dirs, files and symlinks."""
    o = [gen_obj(self.dir)]
    t = self.kls()
    t.set_state([self.dir])
    self.assertEqual(list(t.saved_mtimes), o)
    # only directories are tracked; a plain file must be ignored.
    # .close() added: the bare open() leaked the file handle.
    open(pjoin(self.dir, 'file'), 'w').close()
    t.set_state([self.dir, pjoin(self.dir, 'file')])
    self.assertEqual(list(t.saved_mtimes), o)
    loc = pjoin(self.dir, 'dir')
    os.mkdir(loc)
    o.append(gen_obj(pjoin(self.dir, 'dir')))
    o.sort()
    t.set_state([x.location for x in o])
    self.assertEqual(sorted(t.saved_mtimes), o)
    # test syms.
    src = pjoin(self.dir, 'dir2')
    os.mkdir(src)
    loc = pjoin(self.dir, 'foo')
    os.symlink(src, loc)
    locs = [x.location for x in o]

    # insert a crap location to ensure it handles it.
    locs.append(pjoin(self.dir, "asdfasdfasdfasfdasdfasdfasdfasdf"))
    locs.append(src)
    i = gen_obj(src, stat=os.stat(src))
    o.append(i)
    o.sort()
    t.set_state(locs)
    self.assertEqual(sorted(t.saved_mtimes), o)
    # the sym should be derefed to its target dir when stat is used.
    locs[-1] = loc
    o.remove(i)
    i = i.change_attributes(location=loc)
    o.append(i)
    o.sort()
    t.set_state(locs)
    self.assertEqual(sorted(t.saved_mtimes), o)
    o.remove(i)
    os.rmdir(src)

    # check stat_func usage; if lstat, the sym won't be derefed,
    # thus ignored.
    t.set_state(locs, stat_func=os.lstat)
    self.assertEqual(sorted(t.saved_mtimes), o)
    # .close() added: the bare open() leaked the file handle.
    open(pjoin(self.dir, 'bar'), 'w').close()
    self.assertTrue(t.check_state())

    # test dead sym filtering for stat.
    t.set_state(locs)
    self.assertEqual(sorted(t.saved_mtimes), o)
    self.assertFalse(t.check_state())
def trigger(self, engine, existing_cset, install_cset):
    """Divert updates to CONFIG_PROTECT'd files into ``._cfg%04d_<name>`` entries.

    :param engine: merge engine supplying the filesystem offset
    :param existing_cset: contents already on the livefs
    :param install_cset: contents about to be installed; protected entries are
        renamed in place within this set, with the mapping recorded in
        ``self.renames``.
    """
    # hackish, but it works.
    protected_filter = gen_config_protect_filter(
        engine.offset, self.extra_protects, self.extra_disables).match
    ignore_filter = gen_collision_ignore_filter(engine.offset).match
    protected = {}

    # group the to-be-protected replacements by their target directory.
    for x in existing_cset.iterfiles():
        if not ignore_filter(x.location) and protected_filter(x.location):
            replacement = install_cset[x]
            if not simple_chksum_compare(replacement, x):
                protected.setdefault(
                    pjoin(engine.offset,
                          os.path.dirname(x.location).lstrip(os.path.sep)),
                    []).append((os.path.basename(replacement.location),
                                replacement))

    # .items(): dict.iteritems() was removed in python 3.
    for dir_loc, entries in protected.items():
        updates = {x[0]: [] for x in entries}
        try:
            existing = sorted(x for x in listdir_files(dir_loc)
                              if x.startswith("._cfg"))
        except OSError as oe:
            if oe.errno != errno.ENOENT:
                raise
            # this shouldn't occur.
            continue

        for x in existing:
            try:
                # ._cfg0000_filename
                count = int(x[5:9])
                if x[9] != "_":
                    raise ValueError
                fn = x[10:]
            except (ValueError, IndexError):
                continue
            if fn in updates:
                updates[fn].append((count, fn))

        # now we rename.
        for fname, entry in entries:
            # check for any updates with the same chksums.
            count = 0
            for cfg_count, cfg_fname in updates[fname]:
                if simple_chksum_compare(livefs.gen_obj(
                        pjoin(dir_loc, cfg_fname)), entry):
                    # identical content already staged; reuse its number.
                    count = cfg_count
                    break
                count = max(count, cfg_count + 1)
            try:
                install_cset.remove(entry)
            except KeyError:
                # this shouldn't occur...
                continue
            new_fn = pjoin(dir_loc, "._cfg%04i_%s" % (count, fname))
            new_entry = entry.change_attributes(location=new_fn)
            install_cset.add(new_entry)
            self.renames[new_entry] = entry
        del updates
def merge_contents(cset, offset=None, callback=None):

    """
    merge a :class:`pkgcore.fs.contents.contentsSet` instance to the livefs

    :param cset: :class:`pkgcore.fs.contents.contentsSet` instance
    :param offset: if not None, offset to prefix all locations with.
        Think of it as target dir.
    :param callback: callable to report each entry being merged; given a single arg,
        the fs object being merged.
    :raise EnvironmentError: Thrown for permission failures.
    """

    # NOTE(review): this variant covers only the directory-merge phase; a
    # fuller implementation of merge_contents exists elsewhere in this file.
    if callback is None:
        callback = lambda obj: None

    # fs operations are pluggable so alternate implementations can be swapped in.
    ensure_perms = get_plugin("fs_ops.ensure_perms")
    copyfile = get_plugin("fs_ops.copyfile")
    mkdir = get_plugin("fs_ops.mkdir")

    if not isinstance(cset, contents.contentsSet):
        raise TypeError("cset must be a contentsSet, got %r" % (cset,))

    if offset is not None:
        if os.path.exists(offset):
            if not os.path.isdir(offset):
                raise TypeError("offset must be a dir, or not exist: %s" % offset)
        else:
            mkdir(fs.fsDir(offset, strict=False))
        iterate = partial(contents.offset_rewriter, offset.rstrip(os.path.sep))
    else:
        iterate = iter

    # merge directories first, sorted so parents exist before children.
    d = list(iterate(cset.iterdirs()))
    d.sort()
    for x in d:
        callback(x)
        try:
            # we pass in the stat ourselves, using stat instead of
            # lstat gen_obj uses internally; this is the equivalent of
            # "deference that link"
            obj = gen_obj(x.location, stat=os.stat(x.location))
            if not fs.isdir(obj):
                raise Exception(
                    "%s exists and needs to be a dir, but is a %s" % (
                        x.location, obj))
            ensure_perms(x, obj)
        # "except OSError as oe": the original "except OSError, oe" is
        # python-2-only syntax and a SyntaxError on python 3.
        except OSError as oe:
            if oe.errno != errno.ENOENT:
                raise
            try:
                # we do this form to catch dangling symlinks
                mkdir(x)
            except OSError as oe:
                if oe.errno != errno.EEXIST:
                    raise
                os.unlink(x.location)
                mkdir(x)
            ensure_perms(x)
def default_copyfile(obj, mkdirs=False):
    """
    copy a :class:`pkgcore.fs.fs.fsBase` to its stated location.

    :param obj: :class:`pkgcore.fs.fs.fsBase` instance, exempting :class:`fsDir`
    :param mkdirs: if True, create missing parent directories of the target
    :return: true if success, else an exception is thrown
    :raise EnvironmentError: permission errors
    """
    # NOTE(review): this variant ends after the existence/parent-dir check; a
    # complete implementation of default_copyfile exists elsewhere in this file.
    existent = False
    ensure_perms = get_plugin("fs_ops.ensure_perms")
    if not fs.isfs_obj(obj):
        raise TypeError("obj must be fsBase derivative: %r" % obj)
    elif fs.isdir(obj):
        raise TypeError("obj must not be a fsDir instance: %r" % obj)

    try:
        existing = gen_obj(obj.location)
        if fs.isdir(existing):
            raise CannotOverwrite(obj, existing)
        existent = True
    except OSError as oe:
        # verify the parent dir is there at least
        basefp = os.path.dirname(obj.location)
        if basefp.strip(os.path.sep) and not os.path.exists(basefp):
            if mkdirs:
                # 0o750 replaces the py2-only literal 0750, which is a
                # SyntaxError on python 3.
                if not ensure_dirs(basefp, mode=0o750, minimal=True):
                    raise FailedCopy(obj, str(oe))
def default_copyfile(obj, mkdirs=False):
    """
    copy a :class:`pkgcore.fs.fs.fsBase` to its stated location.

    :param obj: :class:`pkgcore.fs.fs.fsBase` instance, exempting :class:`fsDir`
    :param mkdirs: if True, create missing parent directories of the target
    :return: true if success, else an exception is thrown
    :raise EnvironmentError: permission errors
    """
    ensure_perms = get_plugin("fs_ops.ensure_perms")
    if not fs.isfs_obj(obj):
        raise TypeError(f'obj must be fsBase derivative: {obj!r}')
    elif fs.isdir(obj):
        raise TypeError(f'obj must not be a fsDir instance: {obj!r}')

    # Determine whether something already occupies the target location;
    # an existing directory can never be clobbered by a non-dir entry.
    existent = False
    try:
        existing = gen_obj(obj.location)
        if fs.isdir(existing):
            raise CannotOverwrite(obj, existing)
        existent = True
    except OSError as oe:
        # Target absent; make sure at least the parent directory is there.
        parent = os.path.dirname(obj.location)
        if parent.strip(os.path.sep) and not os.path.exists(parent):
            if not mkdirs:
                raise
            if not ensure_dirs(parent, mode=0o750, minimal=True):
                raise FailedCopy(obj, str(oe))
        existent = False

    # When the target exists, stage into a "#new" sibling and atomically
    # rename over the original afterwards.
    fp = obj.location + "#new" if existent else obj.location

    if fs.isreg(obj):
        obj.data.transfer_to_path(fp)
    elif fs.issym(obj):
        os.symlink(obj.target, fp)
    elif fs.isfifo(obj):
        os.mkfifo(fp)
    elif fs.isdev(obj):
        dev = os.makedev(obj.major, obj.minor)
        os.mknod(fp, obj.mode, dev)
    else:
        # unknown node type; fall back to cp for a recursive, attr-preserving copy.
        ret = spawn([CP_BINARY, "-Rp", obj.location, fp])
        if ret != 0:
            raise FailedCopy(obj, f'got {ret} from {CP_BINARY} -Rp')

    ensure_perms(obj.change_attributes(location=fp))

    if existent:
        os.rename(fp, obj.location)
    return True
def test_puke_on_dirs(self):
    """copyfile must refuse directory entries and dir-shaped targets."""
    path = pjoin(self.dir, "puke_dir")
    self.assertRaises(TypeError, ops.default_copyfile,
                      fs.fsDir(path, strict=False))
    os.mkdir(path)
    fp = pjoin(self.dir, "foon")
    open(fp, "w").close()
    # pass the bound method plus kwargs so assertRaises performs the call;
    # the original invoked change_attributes eagerly, handing assertRaises a
    # non-callable result and never exercising the assertion.
    self.assertRaises(TypeError,
                      livefs.gen_obj(fp).change_attributes, location=path)
    # test sym over a directory.
    # 0o644 replaces the py2-only literal 0644 (SyntaxError on python 3).
    f = fs.fsSymlink(path, fp, mode=0o644, mtime=0,
                     uid=os.getuid(), gid=os.getgid())
    self.assertRaises(TypeError, ops.default_copyfile, f)
    os.unlink(fp)
    os.mkdir(fp)
    self.assertRaises(ops.CannotOverwrite, ops.default_copyfile, f)
def test_gen_obj_sym(self):
    """A symlink path must yield an fsSymlink whose target matches readlink."""
    base = os.path.join(self.dir, "test_sym")
    os.mkdir(base)
    target = os.path.join(base, "s")
    link = os.path.join(base, "t")
    open(target, "w").close()
    os.symlink(target, link)
    sym = livefs.gen_obj(link)
    self.assertInstance(sym, fs.fsSymlink)
    self.check_attrs(sym, link)
    self.assertEqual(os.readlink(link), sym.target)
def test_sym_over_dir(self):
    """Merging a symlink over an existing dir fails until the target dir exists."""
    path = pjoin(self.dir, "sym")
    fp = pjoin(self.dir, "trg")
    os.mkdir(path)
    # test sym over a directory.
    # 0o644 replaces the py2-only literal 0644 (SyntaxError on python 3).
    f = fs.fsSymlink(path, fp, mode=0o644, mtime=0,
                     uid=os.getuid(), gid=os.getgid())
    cset = contents.contentsSet([f])
    self.assertRaises(ops.FailedCopy, ops.merge_contents, cset)
    # the existing directory must be left untouched by the failed merge.
    self.assertTrue(fs.isdir(livefs.gen_obj(path)))
    os.mkdir(fp)
    ops.merge_contents(cset)
def test_read_ld_so_conf(self):
    """read_ld_so_conf falls back to defaults, creating etc/ld.so.conf on demand."""
    # test the defaults first.  should create etc and the file.
    self.assertPaths(self.trigger.read_ld_so_conf(self.dir),
                     [pjoin(self.dir, x) for x in self.trigger.default_ld_path])
    o = gen_obj(pjoin(self.dir, 'etc'))
    # 0o755 replaces the py2-only literal 0755 (SyntaxError on python 3).
    self.assertEqual(o.mode, 0o755)
    self.assertTrue(fs.isdir(o))
    self.assertTrue(os.path.exists(pjoin(self.dir, 'etc/ld.so.conf')))

    # test normal functioning.
    with open(pjoin(self.dir, 'etc/ld.so.conf'), 'w') as f:
        f.write("\n".join(["/foon", "dar", "blarnsball", "#comment"]))
    self.assertPaths(self.trigger.read_ld_so_conf(self.dir),
                     [pjoin(self.dir, x) for x in ["foon", "dar", "blarnsball"]])
def test_read_ld_so_conf(self):
    """read_ld_so_conf falls back to defaults, creating etc/ld.so.conf on demand."""
    # test the defaults first.  should create etc and the file.
    self.assertPaths(self.trigger.read_ld_so_conf(self.dir),
                     [pjoin(self.dir, x) for x in self.trigger.default_ld_path])
    o = gen_obj(pjoin(self.dir, 'etc'))
    # 0o755 replaces the py2-only literal 0755 (SyntaxError on python 3).
    self.assertEqual(o.mode, 0o755)
    self.assertTrue(fs.isdir(o))
    self.assertTrue(os.path.exists(pjoin(self.dir, 'etc/ld.so.conf')))

    # test normal functioning.
    # with-block replaces the chained open(...).write(...): the handle was
    # never closed, so the data was not guaranteed flushed before read-back.
    with open(pjoin(self.dir, 'etc/ld.so.conf'), 'w') as f:
        f.write("\n".join(["/foon", "dar", "blarnsball", "#comment"]))
    self.assertPaths(self.trigger.read_ld_so_conf(self.dir),
                     [pjoin(self.dir, x) for x in ["foon", "dar", "blarnsball"]])
def copy_main(options, out, err):
    """Copy pkgs between repos."""
    src_repo = options.source_repo
    if src_repo is None:
        src_repo = options.domain.all_source_repos
    dst_repo = options.target_repo

    failures = False
    for pkg in src_repo.itermatch(options.query):
        if options.ignore_existing and pkg.versioned_atom in dst_repo:
            out.write(f"skipping existing pkg: {pkg.cpvstr}")
            continue
        # TODO: remove this once we limit src repos to non-virtual (pkg.provided) repos
        if not getattr(pkg, 'package_is_real', True):
            out.write(f"skipping virtual pkg: {pkg.cpvstr}")
            continue

        out.write(f"copying {pkg}... ")
        if getattr(getattr(pkg, 'repo', None), 'livefs', False):
            # a livefs-backed source pkg may have drifted from its recorded
            # contents, so rebuild the contents set from the live filesystem.
            out.write("forcing regen of contents due to src being livefs..")
            regen = contents.contentsSet(mutable=True)
            aborted = False
            for fsobj in pkg.contents:
                try:
                    regen.add(livefs.gen_obj(fsobj.location))
                except FileNotFoundError:
                    err.write(
                        f"warning: dropping fs obj {fsobj!r} since it doesn't exist")
                except OSError as oe:
                    err.write(
                        f"failed accessing fs obj {fsobj!r}; {oe}\n"
                        "aborting this copy")
                    failures = True
                    aborted = True
                    break
            if aborted:
                continue
            pkg = mutated.MutatedPkg(pkg, {'contents': regen})

        dst_repo.operations.install_or_replace(pkg).finish()
        out.write("completed\n")

    return 1 if failures else 0
def merge_contents(cset, offset=None, callback=None):

    """
    merge a :class:`pkgcore.fs.contents.contentsSet` instance to the livefs

    :param cset: :class:`pkgcore.fs.contents.contentsSet` instance
    :param offset: if not None, offset to prefix all locations with.
        Think of it as target dir.
    :param callback: callable to report each entry being merged; given a single arg,
        the fs object being merged.
    :raise EnvironmentError: Thrown for permission failures.
    """

    if callback is None:
        callback = lambda obj:None

    # fs operations are pluggable, allowing alternate implementations.
    ensure_perms = get_plugin("fs_ops.ensure_perms")
    copyfile = get_plugin("fs_ops.copyfile")
    mkdir = get_plugin("fs_ops.mkdir")

    if not isinstance(cset, contents.contentsSet):
        raise TypeError(f'cset must be a contentsSet, got {cset!r}')

    if offset is not None:
        if os.path.exists(offset):
            if not os.path.isdir(offset):
                raise TypeError(f'offset must be a dir, or not exist: {offset}')
        else:
            mkdir(fs.fsDir(offset, strict=False))
        # every entry's location gets rewritten to live under the offset.
        iterate = partial(contents.offset_rewriter, offset.rstrip(os.path.sep))
    else:
        iterate = iter

    # merge directories first, sorted so parents are handled before children.
    d = list(iterate(cset.iterdirs()))
    d.sort()
    for x in d:
        callback(x)
        try:
            # we pass in the stat ourselves, using stat instead of
            # lstat gen_obj uses internally; this is the equivalent of
            # "deference that link"
            obj = gen_obj(x.location, stat=os.stat(x.location))
            if not fs.isdir(obj):
                # according to the spec, dirs can't be merged over files
                # that aren't dirs or symlinks to dirs
                raise CannotOverwrite(x.location, obj)
            ensure_perms(x, obj)
        except FileNotFoundError:
            try:
                # we do this form to catch dangling symlinks
                mkdir(x)
            except FileExistsError:
                os.unlink(x.location)
                mkdir(x)
            ensure_perms(x)
    del d

    # might look odd, but what this does is minimize the try/except cost
    # to one time, assuming everything behaves, rather then per item.
    i = iterate(cset.iterdirs(invert=True))
    merged_inodes = {}
    while True:
        try:
            for x in i:
                callback(x)

                if x.is_reg:
                    # regular files sharing (dev, inode) are hardlinks; try to
                    # recreate the link instead of copying the data again.
                    key = (x.dev, x.inode)
                    # This logic could be made smarter- instead of
                    # blindly trying candidates, we could inspect the st_dev
                    # of the final location.  This however can be broken by
                    # overlayfs's potentially.  Brute force is in use either
                    # way.
                    candidates = merged_inodes.setdefault(key, [])
                    if any(target._can_be_hardlinked(x) and do_link(target, x)
                           for target in candidates):
                        continue
                    candidates.append(x)

                copyfile(x, mkdirs=True)
            break
        except CannotOverwrite as cf:
            if not fs.issym(x):
                raise

            # by this time, all directories should've been merged.
            # thus we can check the target
            try:
                if not fs.isdir(gen_obj(pjoin(x.location, x.target))):
                    raise
            except OSError:
                raise cf
    return True
def merge_contents(cset, offset=None, callback=None):

    """
    merge a :class:`pkgcore.fs.contents.contentsSet` instance to the livefs

    :param cset: :class:`pkgcore.fs.contents.contentsSet` instance
    :param offset: if not None, offset to prefix all locations with.
        Think of it as target dir.
    :param callback: callable to report each entry being merged; given a single arg,
        the fs object being merged.
    :raise EnvironmentError: Thrown for permission failures.
    """

    if callback is None:
        callback = lambda obj: None

    # fs operations are pluggable, allowing alternate implementations.
    ensure_perms = get_plugin("fs_ops.ensure_perms")
    copyfile = get_plugin("fs_ops.copyfile")
    mkdir = get_plugin("fs_ops.mkdir")

    if not isinstance(cset, contents.contentsSet):
        raise TypeError("cset must be a contentsSet, got %r" % (cset, ))

    if offset is not None:
        if os.path.exists(offset):
            if not os.path.isdir(offset):
                raise TypeError("offset must be a dir, or not exist: %s" %
                    offset)
        else:
            mkdir(fs.fsDir(offset, strict=False))
        # every entry's location gets rewritten to live under the offset.
        iterate = partial(contents.offset_rewriter, offset.rstrip(os.path.sep))
    else:
        iterate = iter

    # merge directories first, sorted so parents are handled before children.
    d = list(iterate(cset.iterdirs()))
    d.sort()
    for x in d:
        callback(x)
        try:
            # we pass in the stat ourselves, using stat instead of
            # lstat gen_obj uses internally; this is the equivalent of
            # "deference that link"
            obj = gen_obj(x.location, stat=os.stat(x.location))
            if not fs.isdir(obj):
                raise Exception(
                    "%s exists and needs to be a dir, but is a %s" % (
                        x.location, obj))
            ensure_perms(x, obj)
        except OSError as oe:
            if oe.errno != errno.ENOENT:
                raise
            try:
                # we do this form to catch dangling symlinks
                mkdir(x)
            except OSError as oe:
                if oe.errno != errno.EEXIST:
                    raise
                os.unlink(x.location)
                mkdir(x)
            ensure_perms(x)
    del d

    # might look odd, but what this does is minimize the try/except cost
    # to one time, assuming everything behaves, rather then per item.
    i = iterate(cset.iterdirs(invert=True))
    merged_inodes = {}
    while True:
        try:
            for x in i:
                callback(x)

                if x.is_reg:
                    # regular files sharing (dev, inode) are hardlinks; the
                    # first one seen becomes the link target for the rest.
                    key = (x.dev, x.inode)
                    link_target = merged_inodes.get(key)
                    if link_target is not None and \
                            link_target._can_be_hardlinked(x):
                        if do_link(link_target, x):
                            continue
                        # TODO: should notify that hardlinking failed.
                    merged_inodes.setdefault(key, x)

                copyfile(x, mkdirs=True)
            break
        except CannotOverwrite as cf:
            if not fs.issym(x):
                raise

            # by this time, all directories should've been merged.
            # thus we can check the target
            try:
                if not fs.isdir(gen_obj(pjoin(x.location, x.target))):
                    raise
            except OSError:
                raise cf
    return True
def trigger(self, engine, existing_cset, install_cset):
    """Divert updates to CONFIG_PROTECT'd files into ``._cfg%04d_<name>`` entries.

    :param engine: merge engine supplying the filesystem offset
    :param existing_cset: contents already on the livefs
    :param install_cset: contents about to be installed; protected entries are
        renamed in place within this set, with the mapping recorded in
        ``self.renames``.
    """
    # hackish, but it works.
    protected_filter = gen_config_protect_filter(engine.offset,
        self.extra_protects, self.extra_disables).match
    ignore_filter = gen_collision_ignore_filter(engine.offset).match
    protected = {}

    # group the to-be-protected replacements by their target directory.
    for x in existing_cset.iterfiles():
        if not ignore_filter(x.location) and protected_filter(x.location):
            replacement = install_cset[x]
            if not simple_chksum_compare(replacement, x):
                protected.setdefault(
                    pjoin(engine.offset,
                          os.path.dirname(x.location).lstrip(os.path.sep)),
                    []).append((os.path.basename(replacement.location),
                                replacement))

    # .items(): dict.iteritems() was removed in python 3.
    for dir_loc, entries in protected.items():
        updates = {x[0]: [] for x in entries}
        try:
            existing = sorted(x for x in listdir_files(dir_loc)
                              if x.startswith("._cfg"))
        except OSError as oe:
            if oe.errno != errno.ENOENT:
                raise
            # this shouldn't occur.
            continue

        for x in existing:
            try:
                # ._cfg0000_filename
                count = int(x[5:9])
                if x[9] != "_":
                    raise ValueError
                fn = x[10:]
            except (ValueError, IndexError):
                continue
            if fn in updates:
                updates[fn].append((count, fn))

        # now we rename.
        for fname, entry in entries:
            # check for any updates with the same chksums.
            count = 0
            for cfg_count, cfg_fname in updates[fname]:
                if simple_chksum_compare(
                        livefs.gen_obj(pjoin(dir_loc, cfg_fname)), entry):
                    # identical content already staged; reuse its number.
                    count = cfg_count
                    break
                count = max(count, cfg_count + 1)
            try:
                install_cset.remove(entry)
            except KeyError:
                # this shouldn't occur...
                continue
            new_fn = pjoin(dir_loc, "._cfg%04i_%s" % (count, fname))
            new_entry = entry.change_attributes(location=new_fn)
            install_cset.add(new_entry)
            self.renames[new_entry] = entry
        del updates
def test_relative_sym(self):
    """A relative symlink's target string must round-trip through gen_obj."""
    f = os.path.join(self.dir, "relative-symlink-test")
    os.symlink("../sym1/blah", f)
    o = livefs.gen_obj(f)
    # assertEqual instead of assertTrue(a == b): reports both values on failure.
    self.assertEqual(o.target, "../sym1/blah")
def merge_contents(cset, offset=None, callback=None):

    """
    merge a :class:`pkgcore.fs.contents.contentsSet` instance to the livefs

    :param cset: :class:`pkgcore.fs.contents.contentsSet` instance
    :param offset: if not None, offset to prefix all locations with.
        Think of it as target dir.
    :param callback: callable to report each entry being merged; given a single arg,
        the fs object being merged.
    :raise EnvironmentError: Thrown for permission failures.
    """

    if callback is None:
        callback = lambda obj:None

    # fs operations are pluggable, allowing alternate implementations.
    ensure_perms = get_plugin("fs_ops.ensure_perms")
    copyfile = get_plugin("fs_ops.copyfile")
    mkdir = get_plugin("fs_ops.mkdir")

    if not isinstance(cset, contents.contentsSet):
        raise TypeError("cset must be a contentsSet, got %r" % (cset,))

    if offset is not None:
        if os.path.exists(offset):
            if not os.path.isdir(offset):
                raise TypeError("offset must be a dir, or not exist: %s" %
                    offset)
        else:
            mkdir(fs.fsDir(offset, strict=False))
        # every entry's location gets rewritten to live under the offset.
        iterate = partial(contents.offset_rewriter, offset.rstrip(os.path.sep))
    else:
        iterate = iter

    # merge directories first, sorted so parents are handled before children.
    d = list(iterate(cset.iterdirs()))
    d.sort()
    for x in d:
        callback(x)
        try:
            # we pass in the stat ourselves, using stat instead of
            # lstat gen_obj uses internally; this is the equivalent of
            # "deference that link"
            obj = gen_obj(x.location, stat=os.stat(x.location))
            if not fs.isdir(obj):
                raise Exception(
                    "%s exists and needs to be a dir, but is a %s" % (
                        x.location, obj))
            ensure_perms(x, obj)
        except OSError as oe:
            if oe.errno != errno.ENOENT:
                raise
            try:
                # we do this form to catch dangling symlinks
                mkdir(x)
            except OSError as oe:
                if oe.errno != errno.EEXIST:
                    raise
                os.unlink(x.location)
                mkdir(x)
            ensure_perms(x)
    del d

    # might look odd, but what this does is minimize the try/except cost
    # to one time, assuming everything behaves, rather then per item.
    i = iterate(cset.iterdirs(invert=True))
    merged_inodes = {}
    while True:
        try:
            for x in i:
                callback(x)

                if x.is_reg:
                    # regular files sharing (dev, inode) are hardlinks; the
                    # first one seen becomes the link target for the rest.
                    key = (x.dev, x.inode)
                    link_target = merged_inodes.get(key)
                    if link_target is not None and \
                            link_target._can_be_hardlinked(x):
                        if do_link(link_target, x):
                            continue
                        # TODO: should notify that hardlinking failed.
                    merged_inodes.setdefault(key, x)

                copyfile(x, mkdirs=True)
            break
        except CannotOverwrite as cf:
            if not fs.issym(x):
                raise

            # by this time, all directories should've been merged.
            # thus we can check the target
            try:
                if not fs.isdir(gen_obj(pjoin(x.location, x.target))):
                    raise
            except OSError:
                raise cf
    return True
link_target._can_be_hardlinked(x): if do_link(link_target, x): continue # TODO: should notify that hardlinking failed. merged_inodes.setdefault(key, x) copyfile(x, mkdirs=True) break except CannotOverwrite, cf: if not fs.issym(x): raise # by this time, all directories should've been merged. # thus we can check the target try: if not fs.isdir(gen_obj(pjoin(x.location, x.target))): raise except OSError: raise cf return True def unmerge_contents(cset, offset=None, callback=None): """ unmerge a :obj:`pkgcore.fs.contents.contentsSet` instance to the livefs :param cset: :obj:`pkgcore.fs.contents.contentsSet` instance :param offset: if not None, offset to prefix all locations with. Think of it as target dir. :param callback: callable to report each entry being unmerged
def test_data_source(self):
    """real_location must back the data source while location stays as given."""
    o = livefs.gen_obj("/tmp/etc/passwd", real_location="/etc/passwd")
    # assertEqual instead of assertTrue(a, b): the two-arg assertTrue form
    # treats the second argument as a failure message and passes for any
    # truthy first argument, so these checks never compared anything.
    self.assertEqual(o.location, "/tmp/etc/passwd")
    self.assertEqual(o.data.path, "/etc/passwd")
    with open("/etc/passwd", "rb") as f:
        self.assertEqual(o.data.bytes_fileobj().read(), f.read())
def test_gen_obj_dir(self):
    """A directory path must yield a dir fs object with matching attributes."""
    obj = livefs.gen_obj(self.dir)
    self.assertTrue(fs.isdir(obj))
    self.check_attrs(obj, self.dir)
def test_gen_obj_fifo(self):
    """gen_obj must cope with fifo nodes."""
    fifo_path = os.path.join(self.dir, "fifo")
    os.mkfifo(fifo_path)
    self.check_attrs(livefs.gen_obj(fifo_path), fifo_path)
def _split(self, iterable, observer, engine, cset):
    """Split debug info out of ELF objects into a parallel debug-store tree.

    :param iterable: pairs of (fs objects sharing an inode, file type string)
    :param observer: UI observer used for info/warn/debug reporting
    :param engine: merge engine supplying the filesystem offset
    :param cset: contents set; entries already holding a debug file are skipped
    """
    debug_store = pjoin(engine.offset, self._debug_storage.lstrip('/'))

    objcopy_args = [self.objcopy_binary, '--only-keep-debug']
    if self._compress:
        objcopy_args.append('--compress-debug-sections')

    for fs_objs, ftype in iterable:
        if 'ar archive' in ftype:
            continue
        if 'relocatable' in ftype:
            # relocatable objects are only split when they're kernel modules.
            if not any(x.basename.endswith(".ko") for x in fs_objs):
                continue
        # first hardlink stands in for the whole inode group.
        fs_obj = fs_objs[0]
        debug_loc = pjoin(debug_store, fs_obj.location.lstrip('/') + ".debug")
        if debug_loc in cset:
            continue
        fpath = fs_obj.data.path
        debug_ondisk = pjoin(
            os.path.dirname(fpath), os.path.basename(fpath) + ".debug")

        # note that we tell the UI the final pathway- not the intermediate one.
        observer.info(f"splitdebug'ing {fs_obj.location} into {debug_loc}")
        ret = spawn.spawn(objcopy_args + [fpath, debug_ondisk])
        if ret != 0:
            observer.warn(
                f"splitdebug'ing {fs_obj.location} failed w/ exitcode {ret}")
            continue

        # note that the given pathway to the debug file /must/ be relative to ${D};
        # it must exist at the time of invocation.
        ret = spawn.spawn([
            self.objcopy_binary, '--add-gnu-debuglink', debug_ondisk, fpath])
        if ret != 0:
            observer.warn(
                f"splitdebug created debug file {debug_ondisk!r}, but "
                f"failed adding links to {fpath!r} ({ret!r})")
            observer.debug(
                "failed splitdebug command was %r",
                (self.objcopy_binary, '--add-gnu-debuglink', debug_ondisk, fpath))
            continue

        debug_obj = gen_obj(
            debug_loc, real_location=debug_ondisk,
            uid=os_data.root_uid, gid=os_data.root_gid)

        stripped_fsobj = self._strip_fsobj(fs_obj, ftype, observer, quiet=True)

        self._modified.add(stripped_fsobj)
        self._modified.add(debug_obj)

        # remaining hardlinks just point at the debug/stripped objects made above.
        for fs_obj in fs_objs[1:]:
            debug_loc = pjoin(
                debug_store, fs_obj.location.lstrip('/') + ".debug")
            linked_debug_obj = debug_obj.change_attributes(location=debug_loc)
            observer.info(
                f"splitdebug hardlinking {debug_obj.location} to {debug_loc}")
            self._modified.add(linked_debug_obj)
            self._modified.add(
                stripped_fsobj.change_attributes(location=fs_obj.location))