def _mount_archive(self):
    """Mount the squashfs archive onto the repo in a mount namespace."""
    # Root only needs a mount namespace; everyone else also needs a user
    # namespace to gain mount privileges.
    try:
        simple_unshare(mount=True, user=os.getuid() != 0)
    except OSError as e:
        raise repo_errors.InitializationError(
            f'namespace support unavailable: {e.strerror}')

    # Prefer the mount(8) binary since it transparently sets up the loop
    # device. That path only works with real root perms: loopback setup
    # (losetup) is unavailable inside a user namespace.
    result = subprocess.run(['mount', self._sqfs, self.location],
                            capture_output=True)
    if result.returncode == 0:
        return
    if result.returncode not in (1, 32):
        # Anything other than a permissions failure (regular or loopback
        # failure inside a userns) is fatal.
        self._failed_cmd(result, 'mounting')

    # Fall back to squashfuse, which works without root.
    try:
        result = subprocess.run(
            ['squashfuse', '-o', 'nonempty', self._sqfs, self.location],
            capture_output=True)
    except FileNotFoundError as e:
        raise repo_errors.InitializationError(
            f'failed mounting squashfs archive: {e.filename} required')
    if result.returncode:
        self._failed_cmd(result, 'mounting')
def namespace(mount=False, uts=False, ipc=False, net=False, pid=False,
              user=False, hostname=None):
    """Enter new namespaces for the duration of the block, then restore.

    Saves fds for the current namespaces (and, for mount namespaces, the
    current root and cwd), unshares into fresh namespaces, yields, and on
    exit rejoins the saved namespaces via setns().

    NOTE(review): this is a generator that yields once — presumably wrapped
    with ``contextlib.contextmanager`` at the definition site; confirm the
    decorator exists outside this view.

    :param mount: create a new mount namespace (also triggers root/cwd
        save/restore)
    :param uts: create a new UTS namespace
    :param ipc: create a new IPC namespace
    :param net: create a new network namespace
    :param pid: create a new PID namespace
    :param user: create a new user namespace
    :param hostname: hostname to set inside the new UTS namespace
    """
    # Map (enabled-flag, /proc name) -> saved namespace file object.
    # Keys use the /proc/self/ns entry name, e.g. mount -> "mnt".
    namespaces = {
        (mount, "mnt"): None,
        (uts, "uts"): None,
        (ipc, "ipc"): None,
        (net, "net"): None,
        (pid, "pid"): None,
        (user, "user"): None,
    }
    # Saved fds for the pre-unshare root and cwd (mount namespace only).
    dirs = {
        "root": None,
        "cwd": None,
    }
    # Save fds of current namespaces
    for ns in [ns for ns in namespaces if ns[0]]:
        fp = open(f"/proc/self/ns/{ns[1]}")
        namespaces[ns] = fp
    # Save fds of current directories
    if mount:
        for d in dirs:
            dirs[d] = os.open(f"/proc/self/{d}", os.O_RDONLY)
    simple_unshare(mount=mount, uts=uts, ipc=ipc, net=net, pid=pid,
                   user=user, hostname=hostname)
    try:
        yield
    finally:
        # Rejoin each saved namespace, then release its fd.
        for ns in [ns for ns in namespaces if ns[0]]:
            fp = namespaces[ns]
            setns(fp.fileno(), 0)
            fp.close()
        if mount:
            # Restore original root and cwd. Since we cannot directly chroot to
            # a fd, first change the current directory to the fd of the
            # original root, then chroot to "."
            os.fchdir(dirs["root"])
            os.chroot(".")
            os.fchdir(dirs["cwd"])
            for fd in dirs.values():
                os.close(fd)
def _child_setup(self):
    """Unshare into fresh namespaces, mount, and chroot into the target."""
    ns_kwargs = {}
    if os.getuid():
        # Enable a user namespace if we're not root. Note that this also
        # requires a network namespace in order to mount sysfs and use
        # network devices; however, we currently only provide a basic
        # loopback interface if iproute2 is installed in the chroot so
        # regular connections out of the namespaced environment won't work
        # by default.
        ns_kwargs['user'] = True
        ns_kwargs['net'] = True
    simple_unshare(pid=True, hostname=self.hostname, **ns_kwargs)
    self._mount()
    os.chroot(self.path)
    if not self.skip_chdir:
        os.chdir('/')
def child_setup(self):
    """Unshare into fresh namespaces, mount, and chroot into the target."""
    ns_kwargs = {}
    if os.getuid():
        # Enable a user namespace if we're not root. Note that this also
        # requires a network namespace in order to mount sysfs and use
        # network devices; however, we currently only provide a basic
        # loopback interface if iproute2 is installed in the chroot so
        # regular connections out of the namespaced environment won't work
        # by default.
        ns_kwargs['user'] = True
        ns_kwargs['net'] = True
    simple_unshare(pid=True, hostname=self.hostname, **ns_kwargs)
    self.mount()
    os.chroot(self.path)
    if not self.skip_chdir:
        os.chdir('/')
def _main(parser, opts):
    """The "main" main function so we can trace/profile.

    Sets up logging, merges config/spec/cmdline values into the module-level
    ``conf_values``, validates digest/hash settings, unshares namespaces on
    Linux, and runs the build. Exits the process with 0 on success, 2 on
    build failure.
    """
    # Initialize the logger before anything else.
    log_level = opts.log_level
    if log_level is None:
        if opts.debug:
            log_level = 'debug'
        elif opts.verbose:
            log_level = 'info'
        else:
            log_level = 'notice'
    log.setup_logging(log_level, output=opts.log_file, debug=opts.debug,
                      color=opts.color)

    # Parse the command line options.
    myconfigs = opts.configs
    if not myconfigs:
        myconfigs = [DEFAULT_CONFIG_FILE]
    myspecfile = opts.file
    mycmdline = opts.cli[:]

    if opts.snapshot:
        mycmdline.append('target=snapshot')
        mycmdline.append('version_stamp=' + opts.snapshot)

    conf_values['DEBUG'] = opts.debug
    conf_values['VERBOSE'] = opts.debug or opts.verbose

    options = set()
    if opts.fetchonly:
        options.add('fetch')
    if opts.purge:
        options.add('purge')
    if opts.purgeonly:
        options.add('purgeonly')
    if opts.purgetmponly:
        options.add('purgetmponly')
    if opts.clear_autoresume:
        options.add('clear-autoresume')

    # Make sure we have some work before moving further.
    if not myspecfile and not mycmdline:
        parser.error('please specify one of either -f or -C or -s')

    # made it this far so start by outputting our version info
    version()

    # import configuration file and import our main module using those settings
    parse_config(myconfigs)

    conf_values["options"].update(options)
    log.notice('conf_values[options] = %s', conf_values['options'])

    # initialize our contents generator
    contents_map = ContentsMap(CONTENTS_DEFINITIONS,
                               comp_prog=conf_values['comp_prog'],
                               decomp_opt=conf_values['decomp_opt'],
                               list_xattrs_opt=conf_values['list_xattrs_opt'])
    conf_values["contents_map"] = contents_map

    # initialize our hash and contents generators
    hash_map = HashMap(HASH_DEFINITIONS)
    conf_values["hash_map"] = hash_map

    # initialize our (de)compression definitions
    conf_values['decompress_definitions'] = DECOMPRESS_DEFINITIONS
    conf_values['compress_definitions'] = COMPRESS_DEFINITIONS
    # TODO add capability to config/spec new definitions

    # Start checking that digests are valid now that hash_map is initialized
    if "digests" in conf_values:
        digests = set(conf_values['digests'].split())
        valid_digests = set(HASH_DEFINITIONS.keys())

        # Use the magic keyword "auto" to use all algos that are available.
        skip_missing = False
        if 'auto' in digests:
            skip_missing = True
            digests.remove('auto')
            if not digests:
                digests = set(valid_digests)

        # First validate all the requested digests are valid keys.
        if digests - valid_digests:
            log.critical(
                'These are not valid digest entries:\n'
                '%s\n'
                'Valid digest entries:\n'
                '%s', ', '.join(digests - valid_digests),
                ', '.join(sorted(valid_digests)))

        # Then check for any programs that the hash func requires.
        # BUGFIX: iterate over a snapshot so digests.remove() below cannot
        # raise "Set changed size during iteration".
        for digest in list(digests):
            try:
                process.find_binary(hash_map.hash_map[digest].cmd)
            except process.CommandNotFound:
                # In auto mode, just ignore missing support.
                if skip_missing:
                    digests.remove(digest)
                    continue
                log.critical(
                    'The "%s" binary needed by digest "%s" was not found. '
                    'It needs to be in your system path.',
                    hash_map.hash_map[digest].cmd, digest)

        # Now reload the config with our updated value.
        conf_values['digests'] = ' '.join(digests)

    if "hash_function" in conf_values:
        if conf_values["hash_function"] not in HASH_DEFINITIONS:
            log.critical(
                '%s is not a valid hash_function entry\n'
                'Valid hash_function entries:\n'
                '%s', conf_values["hash_function"], HASH_DEFINITIONS.keys())
        try:
            process.find_binary(
                hash_map.hash_map[conf_values["hash_function"]].cmd)
        except process.CommandNotFound:
            log.critical(
                'The "%s" binary needed by hash_function "%s" was not found. '
                'It needs to be in your system path.',
                hash_map.hash_map[conf_values['hash_function']].cmd,
                conf_values['hash_function'])

    # detect GNU sed
    for sed in ('/usr/bin/gsed', '/bin/sed', '/usr/bin/sed'):
        if os.path.exists(sed):
            conf_values["sed"] = sed
            break

    addlargs = {}

    if myspecfile:
        log.notice("Processing spec file: %s", myspecfile)
        spec = catalyst.config.SpecParser(myspecfile)
        addlargs.update(spec.get_values())

    if mycmdline:
        try:
            cmdline = catalyst.config.ConfigParser()
            cmdline.parse_lines(mycmdline)
            addlargs.update(cmdline.get_values())
        except CatalystError:
            log.critical('Could not parse commandline')

    if "target" not in addlargs:
        raise CatalystError("Required value \"target\" not specified.")

    if os.getuid() != 0:
        # catalyst cannot be run as a normal user due to chroots, mounts, etc
        log.critical('This script requires root privileges to operate')

    # Namespaces aren't supported on *BSDs at the moment. So let's check
    # whether we're on Linux.
    if os.uname().sysname in ["Linux", "linux"]:
        # Start off by creating unique namespaces to run in. Would be nice to
        # use pid & user namespaces, but snakeoil's namespace module has signal
        # transfer issues (CTRL+C doesn't propagate), and user namespaces need
        # more work due to Gentoo build process (uses sudo/root/portage).
        namespaces.simple_unshare(
            mount=True, uts=True, ipc=True, pid=False, net=False, user=False,
            hostname='catalyst')

    # everything is setup, so the build is a go
    try:
        success = build_target(addlargs)
    except KeyboardInterrupt:
        log.critical('Catalyst build aborted due to user interrupt (Ctrl-C)')

    if not success:
        sys.exit(2)
    sys.exit(0)
def _main(parser, opts):
    """The "main" main function so we can trace/profile."""
    # Initialize the logger before anything else.
    log_level = opts.log_level
    if log_level is None:
        if opts.debug:
            log_level = 'debug'
        elif opts.verbose:
            log_level = 'info'
        else:
            log_level = 'notice'
    log.setup_logging(log_level, output=opts.log_file, debug=opts.debug,
                      color=opts.color)

    # Parse the command line options.
    myconfigs = opts.configs
    if not myconfigs:
        myconfigs = [DEFAULT_CONFIG_FILE]
    myspecfile = opts.file
    # Synthesized spec-style lines ("key: value") fed to the SpecParser below.
    mycmdline = list()

    if opts.snapshot:
        mycmdline.append('target: snapshot')
        mycmdline.append('snapshot_treeish: ' + opts.snapshot)

    conf_values['DEBUG'] = opts.debug
    conf_values['VERBOSE'] = opts.debug or opts.verbose

    # Behavior flags requested on the command line.
    options = []
    if opts.fetchonly:
        options.append('fetch')
    if opts.purge:
        options.append('purge')
    if opts.purgeonly:
        options.append('purgeonly')
    if opts.purgetmponly:
        options.append('purgetmponly')
    if opts.clear_autoresume:
        options.append('clear-autoresume')

    # Make sure we have some work before moving further.
    if not myspecfile and not mycmdline:
        parser.error('please specify one of either -f or -C or -s')

    # made it this far so start by outputting our version info
    version()

    # import configuration file and import our main module using those settings
    parse_config(myconfigs)

    # NOTE(review): .extend() implies conf_values["options"] is list-like
    # here — confirm against parse_config(); an earlier variant of this code
    # used a set with .update().
    conf_values["options"].extend(options)
    log.notice('conf_values[options] = %s', conf_values['options'])

    # initialize our contents generator
    contents_map = ContentsMap(CONTENTS_DEFINITIONS,
                               comp_prog=conf_values['comp_prog'],
                               decomp_opt=conf_values['decomp_opt'],
                               list_xattrs_opt=conf_values['list_xattrs_opt'])
    conf_values["contents_map"] = contents_map

    # initialize our (de)compression definitions
    conf_values['decompress_definitions'] = DECOMPRESS_DEFINITIONS
    conf_values['compress_definitions'] = COMPRESS_DEFINITIONS
    # TODO add capability to config/spec new definitions

    if "digests" in conf_values:
        valid_digests = hashlib.algorithms_available
        # NOTE(review): set() over conf_values['digests'] assumes the config
        # parser already produced an iterable of digest names; if the value
        # were a plain string this would split it into characters — verify
        # upstream parsing.
        digests = set(conf_values['digests'])
        conf_values['digests'] = digests

        # First validate all the requested digests are valid keys.
        if digests - valid_digests:
            raise CatalystError('These are not valid digest entries:\n%s\n'
                                'Valid digest entries:\n%s' %
                                (', '.join(sorted(digests - valid_digests)),
                                 ', '.join(sorted(valid_digests))))

    # Accumulated target arguments from spec file and/or command line.
    addlargs = {}

    if myspecfile:
        log.notice("Processing spec file: %s", myspecfile)
        spec = catalyst.config.SpecParser(myspecfile)
        addlargs.update(spec.get_values())

    if mycmdline:
        try:
            cmdline = catalyst.config.SpecParser()
            cmdline.parse_lines(mycmdline)
            addlargs.update(cmdline.get_values())
        except CatalystError:
            log.critical('Could not parse commandline')

    if "target" not in addlargs:
        raise CatalystError("Required value \"target\" not specified.")

    if os.getuid() != 0:
        # catalyst cannot be run as a normal user due to chroots, mounts, etc
        log.critical('This script requires root privileges to operate')

    # Start off by creating unique namespaces to run in. Would be nice to
    # use pid & user namespaces, but snakeoil's namespace module has signal
    # transfer issues (CTRL+C doesn't propagate), and user namespaces need
    # more work due to Gentoo build process (uses sudo/root/portage).
    namespaces.simple_unshare(
        mount=True, uts=True, ipc=True, pid=False, net=False, user=False,
        hostname='catalyst')

    # everything is setup, so the build is a go
    try:
        success = build_target(addlargs)
    except KeyboardInterrupt:
        # NOTE(review): `success` stays unbound on this path — presumably
        # log.critical() exits the process; confirm, otherwise the
        # `if not success` below would raise UnboundLocalError.
        log.critical('Catalyst build aborted due to user interrupt (Ctrl-C)')

    if not success:
        sys.exit(2)
    sys.exit(0)