def copy(gentoo_dir, upper_dir, files):
    """Copy the listed files/dirs from gentoo_dir into upper_dir via rsync.

    Entries ending in "/." are deep-copied (recursive); everything else is
    shallow-copied.  For shallow-copied symlinks whose target exists under
    gentoo_dir, the target is fed to rsync too so the link is not dangling.

    Raises RuntimeError when either rsync invocation exits non-zero.
    """
    if not gentoo_dir.endswith('/'):
        gentoo_dir += '/'  # rsync treats "dir" and "dir/" differently

    # files / dirs to shallow copy
    rsync = subprocess.Popen(
        sudo(["rsync", "-lptgoD", "--keep-dirlinks", "--files-from=-",
              gentoo_dir, upper_dir]),
        stdin=subprocess.PIPE)
    for f in files:
        if f.endswith("/."):
            continue  # deep-copy entries are handled by the second pass
        f_wo_leading_slash = re.sub(r'^/', "", f)
        rsync.stdin.write(encode_utf8(f_wo_leading_slash + '\n'))
        src_path = os.path.join(gentoo_dir, f_wo_leading_slash)
        if os.path.islink(src_path):
            link = os.readlink(src_path)
            # Resolve the link target to a path relative to gentoo_dir.
            target = link[1:] if link.startswith('/') else os.path.join(
                os.path.dirname(f_wo_leading_slash), link)
            if os.path.exists(os.path.join(gentoo_dir, target)):
                rsync.stdin.write(encode_utf8(target + '\n'))
    rsync.stdin.close()
    if rsync.wait() != 0:
        # RuntimeError instead of BaseException: still caught by any caller
        # that catches BaseException, but catchable sanely as well.
        raise RuntimeError("rsync returned error code.")

    # dirs to deep copy (the original also computed an unused src_path here;
    # removed)
    rsync = subprocess.Popen(
        sudo(["rsync", "-ar", "--keep-dirlinks", "--files-from=-",
              gentoo_dir, upper_dir]),
        stdin=subprocess.PIPE)
    for f in files:
        if not f.endswith("/."):
            continue
        rsync.stdin.write(encode_utf8(re.sub(r'^/', "", f) + '\n'))
    rsync.stdin.close()
    if rsync.wait() != 0:
        raise RuntimeError("rsync returned error code.")
def cleanup(REDIR_APP, PROXY_PORT):
    ''' Called on unhandled exception or keyboard interrupt '''
    # NOTE(review): Python 2 print-statement syntax; this function (and its
    # duplicate elsewhere in the file) predates Python 3.
    print "\n*************** FATAL:\n"
    traceback.print_exc()
    # Stop redirecting traffic to this app
    # (-D deletes the REDIRECT rule added at startup for the given uid/port).
    sudo.sudo('iptables -t nat -D OUTPUT -p tcp --dport 80 -m owner --uid-owner %s -j REDIRECT --to-ports %s'%(REDIR_APP, PROXY_PORT))
    print "\n***Removed iptables proxy settings"
    print "DONE"
def pack(upper_dir, outfile, compression="gzip"):
    """Pack upper_dir into a squashfs image at outfile.

    compression: "gzip" (default; level 1), "xz" (1M blocks, x86 BCJ filter)
    or "lzo".  Raises ValueError for any other value.

    mksquashfs runs as root, so the image is chown'ed back to the invoking
    user afterwards.
    """
    cmdline = ["mksquashfs", upper_dir, outfile, "-noappend", "-no-exports"]
    if compression == "xz":
        cmdline += ["-comp", "xz", "-b", "1M", "-Xbcj", "x86"]
    elif compression == "gzip":
        cmdline += ["-Xcompression-level", "1"]
    elif compression == "lzo":
        cmdline += ["-comp", "lzo"]
    else:
        # ValueError (was BaseException): still caught by any caller catching
        # BaseException, and now idiomatic for a bad argument.
        raise ValueError("Unknown compression type %s" % compression)
    subprocess.check_call(sudo(cmdline))
    subprocess.check_call(sudo(["chown", "%d:%d" % (os.getuid(), os.getgid()), outfile]))
def cleanup(REDIR_APP, PROXY_PORT):
    ''' Called on unhandled exception or keyboard interrupt '''
    # NOTE(review): Python 2 print-statement syntax; duplicate definition of
    # cleanup() also appears earlier in this file.
    print "\n*************** FATAL:\n"
    traceback.print_exc()
    # Stop redirecting traffic to this app
    # (-D deletes the REDIRECT rule added at startup for the given uid/port).
    sudo.sudo(
        'iptables -t nat -D OUTPUT -p tcp --dport 80 -m owner --uid-owner %s -j REDIRECT --to-ports %s' % (REDIR_APP, PROXY_PORT))
    print "\n***Removed iptables proxy settings"
    print "DONE"
def clean(workdir, arch, profile=None):
    """Remove the downloaded tarballs and build trees for the given arch.

    `profile` is accepted for interface compatibility but not used.
    """
    arch_dir = os.path.join(workdir, arch)
    targets = [
        os.path.join(workdir, "portage.tar.xz"),
        os.path.join(arch_dir, "stage3.tar.xz"),
        os.path.join(arch_dir, "profiles"),
        os.path.join(arch_dir, "artifacts"),
    ]
    # Root-owned files may be present, so delete via sudo.
    subprocess.check_call(sudo(["rm", "-rf"] + targets))
def lower_exec(lower_dir, cache_dir, portage_dir, cmdline, nspawn_opts=None):
    """Run cmdline inside the profile root (lower_dir) via systemd-nspawn.

    cache_dir is bind-mounted read-write at /var/cache; portage_dir is
    bind-mounted read-only at /var/db/repos/gentoo.  nspawn_opts: extra
    systemd-nspawn options (default none).  Raises CalledProcessError on a
    non-zero exit.
    """
    # Fixed: mutable default argument (nspawn_opts=[]) replaced with None.
    if nspawn_opts is None:
        nspawn_opts = []
    subprocess.check_call(sudo(
        ["systemd-nspawn", "-q", "-M", CONTAINER_NAME, "-D", lower_dir,
         "--bind=%s:/var/cache" % os.path.abspath(cache_dir),
         "--capability=CAP_MKNOD,CAP_SYS_ADMIN",
         "--bind-ro=%s:/var/db/repos/gentoo" % os.path.abspath(portage_dir)]
        + nspawn_opts + cmdline))
def sync_files(srcdir, dstdir, exclude=None):
    """rsync each file found under srcdir into dstdir, preserving paths.

    exclude: optional regex; relative paths matching it (re.match, anchored
    at the start) are skipped.  Returns the newest-file value reported by
    scan_files(srcdir).
    """
    files_to_sync, newest_file = scan_files(srcdir)
    for f in files_to_sync:
        if exclude is not None and re.match(exclude, f):
            continue
        # The "/./" component makes rsync -R reproduce the path relative to
        # it under dstdir.  (Removed two unused locals src/dst from the
        # original.)
        subprocess.check_call(sudo(["rsync", "-k", "-R", "--chown=root:root",
                                    os.path.join(srcdir, ".", f), dstdir]))
    return newest_file
def extract_portage(base, workdir):
    """Download (if stale/missing) and extract the latest portage snapshot
    into workdir/portage, marking completion with a .done file."""
    portage_tarball_url = base + "snapshots/portage-latest.tar.xz"
    portage_tarball = os.path.join(workdir, "portage.tar.xz")
    portage_dir = os.path.join(workdir, "portage")
    trash_dir = os.path.join(workdir, "trash")
    done_file = os.path.join(portage_dir, ".done")
    os.makedirs(workdir, exist_ok=True)
    set_gitignore(workdir)
    # Re-download when the local tarball is missing or its size differs from
    # the server's Content-Length (cheap staleness check).
    if not os.path.isfile(portage_tarball) or os.path.getsize(portage_tarball) != get_content_length(portage_tarball_url):
        subprocess.check_call(["wget", "-O", portage_tarball, portage_tarball_url])
        # Drop the marker so the fresh tarball is re-extracted below.
        if os.path.exists(done_file):
            os.remove(done_file)
    # A portage_dir without the .done marker is stale or partially extracted:
    # move it aside into trash (removed later) rather than deleting in place.
    if os.path.isdir(portage_dir) and not os.path.exists(done_file):
        os.makedirs(trash_dir, exist_ok=True)
        os.rename(portage_dir, os.path.join(trash_dir, str(uuid.uuid4())))
    if not os.path.isdir(portage_dir):
        os.makedirs(portage_dir, exist_ok=True)
        print("Extracting portage...")
        # tar runs via sudo to preserve ownership/permissions (-p).
        subprocess.check_call(sudo(["tar", "xpf", portage_tarball, "--strip-components=1", "-C", portage_dir]))
        with open(done_file, "w") as f:
            pass  # empty marker; its existence/mtime records a completed extract
def set_locale_to_envvar(root_dir):
    """Rewrite the `export LANG=...` line in etc/profile.env so that LANG is
    taken from /etc/locale.conf (when present) at login instead."""
    profile_env = os.path.join(root_dir, "etc/profile.env")
    sed_expr = r"s/^export LANG=.\+$/\[ -f \/etc\/locale\.conf \] \&\& . \/etc\/locale.conf \&\& export LANG/"
    subprocess.check_call(sudo(["sed", "-i", sed_expr, profile_env]))
def enable_services(root_dir, services):
    """systemctl-enable the given service (or list of services) inside the
    root at root_dir, using a throwaway systemd-nspawn container."""
    service_list = services if isinstance(services, list) else [services]
    nspawn_cmd = ["systemd-nspawn", "-q", "-M", CONTAINER_NAME, "-D", root_dir,
                  "systemctl", "enable"] + service_list
    subprocess.check_call(sudo(nspawn_cmd))
def make_ld_so_conf_latest(root_dir):
    """Bump the mtime of etc/ld.so.conf under root_dir (via sudo touch).

    Presumably so the file looks newer than the ld.so cache — TODO confirm
    against the consumer.
    """
    ld_so_conf = os.path.join(root_dir, "etc/ld.so.conf")
    subprocess.check_call(sudo(["touch", ld_so_conf]))
def create_default_iptables_rules(root_dir):
    """Create empty iptables/ip6tables rules-save files under root_dir so the
    firewall services have something to restore."""
    rules_files = [
        os.path.join(root_dir, "var/lib/iptables/rules-save"),
        os.path.join(root_dir, "var/lib/ip6tables/rules-save"),
    ]
    subprocess.check_call(sudo(["touch"] + rules_files))
def copyup_gcc_libs(gentoo_dir, upper_dir):
    """Touch gcc's runtime shared libraries through an overlay rooted at
    upper_dir so overlayfs copies them up, then refresh the ld cache inside
    the container."""
    overlay_opt = "--overlay=+/:%s:/" % os.path.abspath(upper_dir)
    shell_cmd = "touch -h `gcc --print-file-name=`/*.so.* && ldconfig"
    subprocess.check_call(sudo([
        "systemd-nspawn", "-q", "-M", CONTAINER_NAME, "-D", gentoo_dir,
        overlay_opt,
        "sh", "-c", shell_cmd,
    ]))
def remove_root_password(root_dir):
    """Blank root's password field in etc/shadow (root:*: becomes root::),
    allowing passwordless root login in the produced image."""
    shadow_file = os.path.join(root_dir, "etc/shadow")
    subprocess.check_call(sudo(["sed", "-i", r"s/^root:\*:/root::/", shadow_file]))
def __enter__(self):
    # Attach a loop device (-f: first free, -P: scan partitions) to the
    # backing file; --show prints the device path, which we keep for
    # detachment in __exit__.
    out = subprocess.check_output(
        sudo(["losetup", "-P", "-f", "--show", self.backing]))
    self.loop = out.decode("utf-8").strip()
    return self.loop
# NOTE(review): Python 2 fragment, kept byte-for-byte.  It begins mid-`try`
# (the `except` clauses have no visible matching `try` here), so the start of
# the enclosing block lies outside this view; the collapsed one-line form is
# not valid Python as-is and cannot be safely reformatted without guessing
# the original indentation.  A near-duplicate fragment appears later in the
# file.
if result == None: clientPipeline.close() except Exception as e: print "Error returning data to client:", e except Exception: print traceback.print_exc() print try: # TODO: single port may be fragile on some systems, provide several posssibilities PROXY_PORT = 13998 PROXY_ADDR = '' REDIR_APP = 'app_152' sudo.sudo('iptables -t nat -F OUTPUT') sudo.sudo( 'iptables -t nat -A OUTPUT -p tcp --dport 80 -m owner --uid-owner %s -j REDIRECT --to-ports %s' % (REDIR_APP, PROXY_PORT)) listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM) listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) listener.bind((PROXY_ADDR, PROXY_PORT)) listener.settimeout(None) listener.listen(1) print '\n>> Now listening on port: %s' % PROXY_PORT parentPid = os.getpid() print ">> Parent PID:", parentPid # forks: 1, 2, 3, 4, 5, 6, 7 # procs: 2, 4, 8, 16, 32, 64, 128 # User selectable; maximum nuimber of processes to use USER_MAX_PROCS = 6
def __exit__(self, exception_type, exception_value, traceback):
    # Detach the loop device that __enter__ attached.
    detach_cmd = ["losetup", "-d", self.loop]
    subprocess.check_call(sudo(detach_cmd))
def __enter__(self):
    # Mount self.device onto a fresh temporary directory and hand back the
    # mountpoint path; the TemporaryDirectory object is kept on self so
    # __exit__ can remove it after unmounting.
    self.tempdir = tempfile.TemporaryDirectory()
    mountpoint = self.tempdir.name
    subprocess.check_call(sudo(["mount", self.device, mountpoint]))
    return mountpoint
def __exit__(self, exception_type, exception_value, traceback):
    # Unmount first, then delete the temporary mountpoint directory.
    # (If umount fails, check_call raises and the tempdir is left behind —
    # same behavior as the original.)
    mountpoint = self.tempdir.name
    subprocess.check_call(sudo(["umount", mountpoint]))
    self.tempdir.cleanup()
parser.add_argument("artifact", default=[], nargs='*', help="Artifacts to build")
args = parser.parse_args()

# Decide which artifacts to build: explicit arguments win; otherwise every
# subdirectory of ./artifacts; otherwise fall back to "default".
artifacts = []
if len(args.artifact) == 0 and os.path.isdir("./artifacts"):
    for i in os.listdir("./artifacts"):
        if os.path.isdir(os.path.join("./artifacts", i)):
            artifacts.append(i)
else:
    artifacts += args.artifact
if len(artifacts) == 0:
    artifacts.append("default")

extract_portage(args.base, args.workdir)

for artifact in artifacts:
    if artifact != "default" and not os.path.isdir(os.path.join("./artifacts", artifact)):
        # RuntimeError (was BaseException): uncaught at top level either way.
        raise RuntimeError("No such artifact: %s" % artifact)
    print("Processing artifact %s..." % artifact)
    # BUG FIX: the original tested `args.artifact == "clean"`, comparing the
    # nargs='*' LIST to a string — always False, so clean() was unreachable.
    # Compare the current artifact name instead.
    # NOTE(review): "clean" still has to pass the no-such-artifact check
    # above unless ./artifacts/clean exists — confirm the intended ordering.
    if artifact == "clean":
        clean(args.workdir, arch, args.profile)
    else:
        outfile = main(args.base, args.workdir, arch, args.sync, args.bash,
                       artifact, args.outfile, args.profile)
        if outfile is not None and args.qemu:
            qemu.run(outfile, os.path.join(args.workdir, "qemu.img"),
                     args.drm, args.data_volume, args.system_ini)

print("Done.")

# Remove everything that was moved aside into the trash dir during the build.
trash_dir = os.path.join(args.workdir, "trash")
if os.path.isdir(trash_dir):
    print("Cleaning up...")
    subprocess.check_call(sudo(["rm", "-rf", trash_dir]))
def run(rootfs_file, disk_image, drm=False, data_volume=False, system_ini=None):
    """Build a bootable raw disk image around rootfs_file and boot it in QEMU.

    drm: add a virtio GUI display; data_volume: add a second btrfs partition;
    system_ini: optional config file copied to the boot partition.
    """
    # Sparse raw image: 8GiB with a data volume, 4GiB without.
    with open(disk_image, "w") as f:
        f.truncate(8 * 1024 * 1024 * 1024 if data_volume else 4 * 1024 * 1024 * 1024)
    parted_commands = ["mklabel msdos"]
    if data_volume:
        parted_commands += [
            "mkpart primary 1MiB 4GiB",
            "mkpart primary 4GiB -1"
        ]
    else:
        parted_commands += ["mkpart primary 1MiB -1"]
    parted_commands += ["set 1 boot on", "set 1 esp on"]
    subprocess.check_call(["parted", "--script", disk_image] + parted_commands)
    print("Run " + rootfs_file + " by qemu")
    with Loopback(disk_image) as loop:
        # Partition 1: FAT32 boot partition with GRUB + the system image.
        subprocess.check_call(sudo(["mkfs.vfat", "-F", "32", "%sp1" % loop]))
        with Tmpmount("%sp1" % loop) as mountpoint:
            grub_dir = os.path.join(mountpoint, "boot/grub")
            subprocess.check_call(sudo(["mkdir", "-p", grub_dir]))
            with Tee(os.path.join(grub_dir, "grub.cfg")) as f:
                f.write(grub_cfg.encode("utf-8"))
            subprocess.check_call(
                sudo([
                    "grub-install", "--target=i386-pc", "--skip-fs-probe",
                    "--boot-directory=%s" % os.path.join(mountpoint, "boot"),
                    "--modules=normal echo linux probe sleep test ls cat configfile cpuid minicmd vbe gfxterm_background png multiboot multiboot2 lvm xfs btrfs keystatus",
                    loop
                ]))
            subprocess.check_call(
                sudo([
                    "cp", rootfs_file,
                    os.path.join(mountpoint, "system.img")
                ]))
            if system_ini:
                subprocess.check_call(
                    sudo([
                        "cp", system_ini,
                        os.path.join(mountpoint, "system.ini")
                    ]))
            # Filesystem UUID of p1; used below to label the data volume.
            # (Shadows the `uuid` module name within this function.)
            uuid = subprocess.check_output(
                ["blkid", "-o", "value", "-s", "UUID", "%sp1" % loop]).decode("utf-8").strip()
        if data_volume:
            subprocess.check_call(
                sudo(["mkfs.btrfs", "-L", "data-%s" % uuid, "%sp2" % loop]))
    qemu_cmdline = [
        "qemu-system-x86_64", "-enable-kvm", "-M", "q35",
        "-drive", "file=%s,format=raw,index=0,media=disk,if=virtio" % disk_image,
        "-rtc", "base=utc,clock=rt", "-m", "4096", "-no-shutdown"
    ]
    if drm:
        qemu_cmdline += [
            "-display", "gtk,gl=on", "-vga", "virtio",
            "-usb", "-device", "usb-tablet", "-device", "virtio-keyboard"
        ]
    subprocess.check_call(qemu_cmdline)
def main(base, workdir, arch, sync, bash, artifact, outfile=None, profile=None):
    """Build (or refresh) the profile root for `arch`/`profile`, then build
    the named artifact and pack it into a squashfs image.

    Returns the output file path, or None when no image is produced
    (artifact "none"/"bash", or outfile "-" which boots the artifact
    instead).  NOTE(review): reconstructed from collapsed source — nesting of
    the profile-default lookup and the split print string were inferred;
    confirm against the upstream file.
    """
    artifact_dir = os.path.join(".", "artifacts", artifact)
    build_json = load_json_file(os.path.join(artifact_dir, "build.json"))
    # Profile precedence: explicit argument > build.json > "default".
    if profile is None:
        profile = "default"
        if build_json and "profile" in build_json:
            profile = build_json["profile"]
    stage3_tarball_url = get_latest_stage3_tarball_url(base, arch)
    arch_workdir = os.path.join(workdir, arch)
    os.makedirs(arch_workdir, exist_ok=True)
    set_gitignore(workdir)
    stage3_tarball = os.path.join(arch_workdir, "stage3.tar.xz")
    portage_dir = os.path.join(workdir, "portage")
    profile_workdir = os.path.join(arch_workdir, "profiles", profile)
    cache_dir = os.path.join(profile_workdir, "cache")
    gentoo_dir = os.path.join(profile_workdir, "root")
    repos_dir = os.path.join(gentoo_dir, "var/db/repos/gentoo")
    usr_local_dir = os.path.join(gentoo_dir, "usr/local")
    trash_dir = os.path.join(workdir, "trash")
    # Re-download stage3 when missing or its size differs from the server's
    # Content-Length (cheap staleness check).
    if not os.path.isfile(stage3_tarball) or os.path.getsize(stage3_tarball) != get_content_length(stage3_tarball_url):
        subprocess.check_call(["wget", "-O", stage3_tarball, stage3_tarball_url])
    stage3_done_file = os.path.join(gentoo_dir, ".stage3-done")
    stage3_done_file_time = os.stat(stage3_done_file).st_mtime if os.path.isfile(stage3_done_file) else None
    # Re-extract stage3 whenever the tarball is newer than the done marker;
    # the old root is moved into trash rather than deleted in place.
    if not stage3_done_file_time or stage3_done_file_time < os.stat(stage3_tarball).st_mtime:
        if os.path.isdir(gentoo_dir):
            os.makedirs(trash_dir, exist_ok=True)
            os.rename(gentoo_dir, os.path.join(trash_dir, str(uuid.uuid4())))
        os.makedirs(repos_dir, exist_ok=True)
        print("Extracting stage3...")
        subprocess.check_call(sudo(["tar", "xpf", stage3_tarball, "--strip-components=1", "-C", gentoo_dir]))
        kernel_config_dir = os.path.join(gentoo_dir, "etc/kernels")
        subprocess.check_call(sudo(["mkdir", "-p", kernel_config_dir]))
        # Open up dirs the unprivileged build steps need to write into.
        subprocess.check_call(sudo(["chmod", "-R", "o+rw",
                                    os.path.join(gentoo_dir, "etc/portage"),
                                    os.path.join(gentoo_dir, "usr/src"),
                                    os.path.join(gentoo_dir, "var/db/repos"),
                                    os.path.join(gentoo_dir, "var/cache"),
                                    kernel_config_dir, usr_local_dir]))
        # Sandboxing must be off because emerge runs inside nspawn already.
        with open(os.path.join(gentoo_dir, "etc/portage/make.conf"), "a") as f:
            f.write('FEATURES="-sandbox -usersandbox -network-sandbox"\n')
        with open(stage3_done_file, "w") as f:
            pass  # empty marker; mtime records when stage3 was extracted
    newest_file = link_files(os.path.join(".", "profiles", profile), gentoo_dir)
    # remove irrelevant arch dependent settings
    for i in glob.glob(os.path.join(gentoo_dir, "etc/portage/package.*/arch-*")):
        if not i.endswith("-" + arch):
            os.unlink(i)
    # move files under /var/cache out to the shared per-profile cache dir
    os.makedirs(cache_dir, exist_ok=True)
    subprocess.check_call(sudo(["rsync", "-a", "--remove-source-files", os.path.join(gentoo_dir, "var/cache/"), cache_dir]))
    # Install bundled sources and helper scripts into the root.
    put_resource_file(gentoo_dir, initlib, "initlib.cpp")
    put_resource_file(gentoo_dir, initlib, "initlib.h")
    put_resource_file(gentoo_dir, initlib, "fat.cpp")
    put_resource_file(gentoo_dir, initlib, "fat.h")
    put_resource_file(gentoo_dir, init, "init.cpp")
    put_resource_file(gentoo_dir, init, "init.h")
    put_resource_file(gentoo_dir, util, "build-kernel.py", "usr/local/sbin/build-kernel", True)
    put_resource_file(gentoo_dir, util, "with-mysql.py", "usr/local/sbin/with-mysql", True)
    put_resource_file(gentoo_dir, util, "download.py", "usr/local/bin/download", True)
    put_resource_file(gentoo_dir, util, "install-system-image", "usr/sbin/install-system-image", True)
    put_resource_file(gentoo_dir, util, "expand-rw-layer", "usr/sbin/expand-rw-layer", True)
    put_resource_file(gentoo_dir, util, "do-with-lvm-snapshot", "usr/sbin/do-with-lvm-snapshot", True)
    put_resource_file(gentoo_dir, util, "rpmbootstrap.py", "usr/sbin/rpmbootstrap", True)
    put_resource_file(gentoo_dir, util, "genbootstrap.py", "usr/sbin/genbootstrap", True)
    put_resource_file(gentoo_dir, util, "genpack-install.cpp", "usr/src/genpack-install.cpp", True)
    if sync:
        lower_exec(gentoo_dir, cache_dir, portage_dir, ["emerge", "--sync"])
    if bash:
        print("Entering shell... 'exit 1' to abort the process.")
        lower_exec(gentoo_dir, cache_dir, portage_dir, ["bash"])
    done_file = os.path.join(gentoo_dir, ".done")
    done_file_time = os.stat(done_file).st_mtime if os.path.isfile(done_file) else None
    portage_time = os.stat(os.path.join(portage_dir, "metadata/timestamp")).st_mtime
    newest_file = max(newest_file, portage_time)
    # Rebuild the profile root when inputs are newer than the last build,
    # after a sync, or when only the profile itself is wanted ("none").
    if (not done_file_time or newest_file > done_file_time or sync or artifact == "none"):
        lower_exec(gentoo_dir, cache_dir, portage_dir,
                   ["emerge", "-uDN", "-bk", "--binpkg-respect-use=y",
                    "system", "nano", "gentoolkit", "repoman", "strace", "vim", "tcpdump", "netkit-telnetd"])
        if os.path.isfile(os.path.join(gentoo_dir, "build.sh")):
            lower_exec(gentoo_dir, cache_dir, portage_dir, ["/build.sh"])
        # Post-build housekeeping inside the container.
        lower_exec(gentoo_dir, cache_dir, portage_dir,
                   ["sh", "-c", "emerge -bk --binpkg-respect-use=y @preserved-rebuild && emerge --depclean && etc-update --automode -5 && eclean-dist -d && eclean-pkg -d"])
        with open(done_file, "w") as f:
            pass  # marker: profile root finished building
    if artifact == "none":
        return None  # no build artifact
    elif artifact == "bash":
        lower_exec(gentoo_dir, cache_dir, portage_dir, ["bash"])
        return None
    #else
    ##### building profile done
    ##### build artifact if necessary
    upper_dir = os.path.join(arch_workdir, "artifacts", artifact)
    genpack_packages_file = os.path.join(upper_dir, ".genpack", "packages")  # use its timestamp as build date
    # Rebuild the artifact tree when missing or older than the profile root,
    # the artifact definition, or the shared packages dir.
    if not os.path.exists(genpack_packages_file) or os.stat(genpack_packages_file).st_mtime < max(os.stat(done_file).st_mtime, get_newest_mtime(artifact_dir), get_newest_mtime(os.path.join(".", "packages"))):
        if os.path.isdir(upper_dir):
            os.makedirs(trash_dir, exist_ok=True)
            subprocess.check_call(sudo(["mv", upper_dir, os.path.join(trash_dir, str(uuid.uuid4()))]))
        build_artifact(profile, artifact, gentoo_dir, cache_dir, upper_dir, build_json)
    # final output
    if outfile is None:
        if build_json and "outfile" in build_json:
            outfile = build_json["outfile"]
        else:
            outfile = "%s-%s.squashfs" % (artifact, arch)
    # outfile "-" means: don't pack, boot the artifact tree in nspawn instead.
    if outfile == "-":
        subprocess.check_call(sudo(["systemd-nspawn", "-M", CONTAINER_NAME, "-q", "-D", upper_dir, "--network-veth", "-b"]))
        return None
    #else
    if not os.path.isfile(outfile) or os.stat(genpack_packages_file).st_mtime > os.stat(outfile).st_mtime:
        compression = build_json["compression"] if build_json and "compression" in build_json else "gzip"
        pack(upper_dir, outfile, compression)
    return outfile
def build_artifact(profile, artifact, gentoo_dir, cache_dir, upper_dir, build_json):
    """Assemble the artifact tree under upper_dir from the built profile root.

    Copies the dependency closure of the artifact's packages plus a fixed set
    of files, runs per-package and per-artifact setup scripts inside nspawn,
    and writes .genpack metadata (profile, artifact, package list).
    """
    # Baseline package set every artifact receives; build.json may extend it.
    artifact_pkgs = ["gentoo-systemd-integration", "util-linux", "timezone-data", "bash", "openssh",
                     "coreutils", "procps", "net-tools", "iproute2", "iputils", "dbus", "python",
                     "rsync", "tcpdump", "ca-certificates", "e2fsprogs"]
    if build_json and "packages" in build_json:
        if not isinstance(build_json["packages"], list):
            raise Exception("packages must be list")
        #else
        artifact_pkgs += build_json["packages"]
    pkg_map = collect_packages(gentoo_dir)
    pkgs = scan_pkg_dep(gentoo_dir, pkg_map, artifact_pkgs)
    packages_dir = os.path.join(".", "packages")
    files = process_pkgs(gentoo_dir, packages_dir, pkgs)
    # Kernel / initramfs / modules only if the profile actually built them.
    if os.path.isfile(os.path.join(gentoo_dir, "boot/kernel")):
        files.append("/boot/kernel")
    if os.path.isfile(os.path.join(gentoo_dir, "boot/initramfs")):
        files.append("/boot/initramfs")
    if os.path.isdir(os.path.join(gentoo_dir, "lib/modules")):
        files.append("/lib/modules/.")
    # A trailing "/." requests a deep (recursive) copy — see copy().
    files += ["/dev/.", "/proc", "/sys", "/root", "/home", "/tmp", "/var/tmp", "/var/run", "/run", "/mnt"]
    files += ["/etc/passwd", "/etc/group", "/etc/shadow", "/etc/profile.env"]
    files += ["/etc/ld.so.conf", "/etc/ld.so.conf.d/."]
    files += ["/usr/lib/locale/locale-archive"]
    files += ["/bin/sh", "/bin/sed", "/usr/bin/awk", "/usr/bin/python", "/usr/bin/nano", "/bin/tar",
              "/usr/bin/unzip", "/usr/bin/wget", "/usr/bin/curl", "/usr/bin/telnet", "/usr/bin/make",
              "/usr/bin/diff", "/usr/bin/patch", "/usr/bin/strings", "/usr/bin/strace", "/usr/bin/find",
              "/usr/bin/xargs", "/usr/bin/less"]
    files += ["/sbin/iptables", "/sbin/ip6tables", "/sbin/iptables-restore", "/sbin/ip6tables-restore",
              "/sbin/iptables-save", "/sbin/ip6tables-save"]
    if build_json and "files" in build_json:
        if not isinstance(build_json["files"], list):
            raise Exception("files must be list")
        #else
        files += build_json["files"]
    os.makedirs(os.path.dirname(upper_dir), exist_ok=True)
    # upper_dir itself is created root-owned via sudo.
    subprocess.check_call(sudo(["mkdir", upper_dir]))
    print("Copying files to artifact dir...")
    copy(gentoo_dir, upper_dir, files)
    copyup_gcc_libs(gentoo_dir, upper_dir)
    remove_root_password(upper_dir)
    make_ld_so_conf_latest(upper_dir)
    create_default_iptables_rules(upper_dir)
    set_locale_to_envvar(upper_dir)
    # per-package setup
    newest_pkg_file = 0
    for pkg in pkgs:
        pkg_wo_ver = strip_ver(pkg)
        package_dir = os.path.join(packages_dir, pkg_wo_ver)
        if not os.path.isdir(package_dir):
            continue
        #else
        print("Processing package %s..." % pkg_wo_ver)
        # Sync the package's overlay files (except CONTENTS*) into the tree.
        newest_pkg_file = max(newest_pkg_file, sync_files(package_dir, upper_dir, r"^CONTENTS(\.|$)"))
        # If the package shipped a /pkgbuild script, run it in the overlayed
        # container; the script removes itself when it succeeds.
        if os.path.isfile(os.path.join(upper_dir, "pkgbuild")):
            subprocess.check_call(sudo(["systemd-nspawn", "-q", "-M", CONTAINER_NAME, "-D", gentoo_dir,
                                        "--overlay=+/:%s:/" % os.path.abspath(upper_dir),
                                        "--bind=%s:/var/cache" % os.path.abspath(cache_dir),
                                        "-E", "PROFILE=%s" % profile,
                                        "-E", "ARTIFACT=%s" % artifact,
                                        "--capability=CAP_MKNOD",
                                        "sh", "-c", "/pkgbuild && rm -f /pkgbuild"]))
    # enable services (defaults plus any listed in build.json)
    services = ["sshd", "systemd-networkd", "systemd-resolved"]
    if build_json and "services" in build_json:
        if not isinstance(build_json["services"], list):
            raise Exception("services must be list")
        #else
        services += build_json["services"]
    enable_services(upper_dir, services)
    # artifact specific setup
    artifact_dir = os.path.join(".", "artifacts", artifact)
    # NOTE(review): newest_artifact_file is computed but not used afterwards
    # in the visible code.
    newest_artifact_file = max(newest_pkg_file, sync_files(artifact_dir, upper_dir))
    if os.path.isfile(os.path.join(upper_dir, "build")):
        print("Building artifact...")
        subprocess.check_call(sudo(["systemd-nspawn", "-q", "-M", CONTAINER_NAME, "-D", gentoo_dir,
                                    "--overlay=+/:%s:/" % os.path.abspath(upper_dir),
                                    "--bind=%s:/var/cache" % os.path.abspath(cache_dir),
                                    "/build"]))
    else:
        print("Artifact build script not found.")
    # Strip build-time-only files from the final tree.
    subprocess.check_call(sudo(["rm", "-rf",
                                os.path.join(upper_dir, "build"),
                                os.path.join(upper_dir, "build.json"),
                                os.path.join(upper_dir, "usr/src")]))
    # generate metadata
    genpack_metadata_dir = os.path.join(upper_dir, ".genpack")
    subprocess.check_call(sudo(["mkdir", "-p", genpack_metadata_dir]))
    # Temporarily world-writable so the unprivileged process can write files.
    subprocess.check_call(sudo(["chmod", "o+rwx", genpack_metadata_dir]))
    with open(os.path.join(genpack_metadata_dir, "profile"), "w") as f:
        f.write(profile)
    with open(os.path.join(genpack_metadata_dir, "artifact"), "w") as f:
        f.write(artifact)
    with open(os.path.join(genpack_metadata_dir, "packages"), "w") as f:
        for pkg in pkgs:
            f.write(pkg + '\n')
    # Hand ownership to root and restore sane permissions.
    subprocess.check_call(sudo(["chown", "-R", "root.root", genpack_metadata_dir]))
    subprocess.check_call(sudo(["chmod", "755", genpack_metadata_dir]))
def put_resource_file(gentoo_dir, module, filename, dst_filename=None, make_executable=False):
    """Install a resource file bundled in `module` into the root at gentoo_dir.

    dst_filename: destination path relative to gentoo_dir (defaults to
    filename).  make_executable: additionally chmod +x the installed file.
    """
    dst_path = os.path.join(gentoo_dir, dst_filename if dst_filename is not None else filename)
    # importlib.resources.read_binary() is deprecated since Python 3.11 and
    # removed in 3.13; prefer the files() API, falling back on interpreters
    # older than 3.9 where files() does not exist.
    try:
        data = importlib.resources.files(module).joinpath(filename).read_bytes()
    except AttributeError:  # Python < 3.9
        data = importlib.resources.read_binary(module, filename)
    with Tee(dst_path) as f:
        f.write(data)
    if make_executable:
        subprocess.check_output(sudo(["chmod", "+x", dst_path]))
# NOTE(review): Python 2 fragment, kept byte-for-byte.  In collapsed form a
# `try:` runs straight into its body and `except` clauses on one line, which
# is not valid Python as-is; the original indentation cannot be recovered
# without guessing.  Near-duplicate of the fragment earlier in the file, plus
# two trailing counter initialisations (forks/procs).
try: result = clientPipeline.sendall(data) if result == None: clientPipeline.close() except Exception as e: print "Error returning data to client:", e except Exception: print traceback.print_exc() print try: # TODO: single port may be fragile on some systems, provide several posssibilities PROXY_PORT = 13998 PROXY_ADDR = '' REDIR_APP = 'app_152' sudo.sudo('iptables -t nat -F OUTPUT') sudo.sudo('iptables -t nat -A OUTPUT -p tcp --dport 80 -m owner --uid-owner %s -j REDIRECT --to-ports %s'%(REDIR_APP, PROXY_PORT)) listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM) listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) listener.bind((PROXY_ADDR, PROXY_PORT)) listener.settimeout(None) listener.listen(1) print '\n>> Now listening on port: %s'%PROXY_PORT parentPid = os.getpid() print ">> Parent PID:", parentPid # forks: 1, 2, 3, 4, 5, 6, 7 # procs: 2, 4, 8, 16, 32, 64, 128 # User selectable; maximum nuimber of processes to use USER_MAX_PROCS = 6 forks=0 procs=1