def vagrant_destroy_dir(args):
    """Destroys the directory for a Vagrant-unpacked experiment.

    Loads the unpacker metadata first, so we never delete a directory
    that wasn't created by reprounzip.
    """
    target_dir = Path(args.target[0])
    # Fails early if this isn't a reprounzip-created directory
    read_dict(target_dir / '.reprounzip')

    signals.pre_destroy(target=target_dir)
    target_dir.rmtree()
    signals.post_destroy(target=target_dir)
def vagrant_destroy_dir(args):
    """Destroys the directory for a Vagrant-unpacked experiment.

    Loads the unpacker metadata first, so we never delete a directory
    that wasn't created by reprounzip.
    """
    target_dir = Path(args.target[0])
    # Fails early if this isn't a reprounzip-created directory
    read_dict(target_dir)

    signals.pre_destroy(target=target_dir)
    target_dir.rmtree()
    signals.post_destroy(target=target_dir)
def docker_destroy_dir(args):
    """Destroys the directory for a Docker-unpacked experiment.

    Loads the unpacker metadata first, so we never delete a directory
    that wasn't created by reprounzip.
    """
    target_dir = Path(args.target[0])
    # Fails early if this isn't a reprounzip-created directory
    read_dict(target_dir)

    logging.info("Removing directory %s...", target_dir)
    signals.pre_destroy(target=target_dir)
    target_dir.rmtree()
    signals.post_destroy(target=target_dir)
def docker_destroy_dir(args):
    """Destroys the directory for a Docker-unpacked experiment.

    Loads the unpacker metadata first, so we never delete a directory
    that wasn't created by reprounzip.
    """
    target_dir = Path(args.target[0])
    # Fails early if this isn't a reprounzip-created directory
    read_dict(target_dir / '.reprounzip')

    logging.info("Removing directory %s...", target_dir)
    signals.pre_destroy(target=target_dir)
    target_dir.rmtree()
    signals.post_destroy(target=target_dir)
def docker_setup_create(args):
    """Sets up the experiment to be run in a Docker-built container.

    Extracts the configuration from the pack, selects a base image,
    downloads busybox and rpzsudo, and writes a Dockerfile plus the
    null-separated list of files that the Dockerfile's RUN step will
    extract from the data tarball.  On any failure the partially-built
    target directory is removed before re-raising.
    """
    pack = Path(args.pack[0])
    target = Path(args.target[0])
    if target.exists():
        logging.critical("Target directory exists")
        sys.exit(1)

    signals.pre_setup(target=target, pack=pack)

    target.mkdir()

    try:
        # Unpacks configuration file
        rpz_pack = RPZPack(pack)
        rpz_pack.extract_config(target / 'config.yml')

        # Loads config
        runs, packages, other_files = config = load_config(
            target / 'config.yml', True)

        # Base image: explicit from the command line, otherwise derived
        # from the runs recorded in the pack
        if args.base_image:
            record_usage(docker_explicit_base=True)
            base_image = args.base_image[0]
            if args.distribution:
                target_distribution = args.distribution[0]
            else:
                target_distribution = None
        else:
            target_distribution, base_image = select_image(runs)
        logging.info("Using base image %s", base_image)
        logging.debug("Distribution: %s", target_distribution or "unknown")

        rpz_pack.copy_data_tar(target / 'data.tgz')

        arch = runs[0]['architecture']

        # Writes Dockerfile
        logging.info("Writing %s...", target / 'Dockerfile')
        with (target / 'Dockerfile').open('w', encoding='utf-8',
                                          newline='\n') as fp:
            fp.write('FROM %s\n\n' % base_image)

            # Installs busybox
            download_file(busybox_url(arch),
                          target / 'busybox',
                          'busybox-%s' % arch)
            fp.write('COPY busybox /busybox\n')

            # Installs rpzsudo
            download_file(sudo_url(arch),
                          target / 'rpzsudo',
                          'rpzsudo-%s' % arch)
            fp.write('COPY rpzsudo /rpzsudo\n\n')

            fp.write('COPY data.tgz /reprozip_data.tgz\n\n')
            fp.write('COPY rpz-files.list /rpz-files.list\n')
            # Single RUN step; the install/untar commands below are
            # appended as '&&'-chained continuations
            fp.write('RUN \\\n'
                     ' chmod +x /busybox /rpzsudo && \\\n')

            if args.install_pkgs:
                # Install every package through package manager
                missing_packages = []
            else:
                # Only install packages that were not packed
                missing_packages = [pkg for pkg in packages if pkg.packfiles]
                packages = [pkg for pkg in packages if not pkg.packfiles]
            if packages:
                record_usage(docker_install_pkgs=True)
                try:
                    installer = select_installer(pack, runs,
                                                 target_distribution)
                except CantFindInstaller as e:
                    logging.error(
                        "Need to install %d packages but couldn't "
                        "select a package installer: %s",
                        len(packages), e)
                    sys.exit(1)
                # Updates package sources
                update_script = installer.update_script()
                if update_script:
                    fp.write(' %s && \\\n' % update_script)
                # Installs necessary packages
                fp.write(' %s && \\\n' % installer.install_script(packages))
                logging.info(
                    "Dockerfile will install the %d software "
                    "packages that were not packed",
                    len(packages))
            else:
                record_usage(docker_install_pkgs=False)

            # Untar
            paths = set()
            pathlist = []
            # Add intermediate directories, and check for existence in the tar
            missing_files = chain.from_iterable(pkg.files
                                                for pkg in missing_packages)
            data_files = rpz_pack.data_filenames()
            listoffiles = list(chain(other_files, missing_files))
            for f in listoffiles:
                path = PosixPath('/')
                for c in rpz_pack.remove_data_prefix(f.path).components:
                    path = path / c
                    if path in paths:
                        continue
                    paths.add(path)
                    if path in data_files:
                        pathlist.append(path)
                    else:
                        logging.info("Missing file %s", path)
            rpz_pack.close()
            # FIXME : for some reason we need reversed() here, I'm not sure why
            # Need to read more of tar's docs.
            # TAR bug: --no-overwrite-dir removes --keep-old-files
            with (target / 'rpz-files.list').open('wb') as lfp:
                for p in reversed(pathlist):
                    # Null-separated entries, consumed by tar's --null -T
                    lfp.write(join_root(rpz_pack.data_prefix, p).path)
                    lfp.write(b'\0')
            fp.write(' cd / && '
                     '(tar zpxf /reprozip_data.tgz -U --recursive-unlink '
                     '--numeric-owner --strip=1 --null -T /rpz-files.list || '
                     '/busybox echo "TAR reports errors, this might or might '
                     'not prevent the execution to run")\n')

        # Meta-data for reprounzip
        write_dict(target, metadata_initial_iofiles(config))

        signals.post_setup(target=target, pack=pack)
    except Exception:
        # Don't leave a half-built unpacking directory behind
        target.rmtree(ignore_errors=True)
        raise
def vagrant_setup_create(args):
    """Sets up the experiment to be run in a Vagrant-built virtual machine.

    This can either build a chroot or not.

    If building a chroot, we do just like without Vagrant: we copy all the
    files and only get what's missing from the host. But we do install
    automatically the packages whose files are required.

    If not building a chroot, we install all the packages, and only unpack
    files that don't come from packages.

    In short: files from packages with packfiles=True will only be used if
    building a chroot.
    """
    if not args.pack:
        logging.critical("setup/create needs the pack filename")
        sys.exit(1)

    pack = Path(args.pack[0])
    target = Path(args.target[0])
    if target.exists():
        logging.critical("Target directory exists")
        sys.exit(1)
    use_chroot = args.use_chroot
    mount_bind = args.bind_magic_dirs
    record_usage(use_chroot=use_chroot,
                 mount_bind=mount_bind)

    signals.pre_setup(target=target, pack=pack)

    # Unpacks configuration file
    rpz_pack = RPZPack(pack)
    rpz_pack.extract_config(target / 'config.yml')

    # Loads config
    runs, packages, other_files = config = load_config(target / 'config.yml',
                                                       True)

    if not args.memory:
        memory = None
    else:
        try:
            memory = int(args.memory[-1])
        except ValueError:
            logging.critical("Invalid value for memory size: %r", args.memory)
            sys.exit(1)

    # Box: explicit from the command line, otherwise derived from the runs
    if args.base_image and args.base_image[0]:
        record_usage(vagrant_explicit_image=True)
        box = args.base_image[0]
        if args.distribution:
            target_distribution = args.distribution[0]
        else:
            target_distribution = None
    else:
        target_distribution, box = select_box(runs)
    logging.info("Using box %s", box)
    logging.debug("Distribution: %s", target_distribution or "unknown")

    # If using chroot, we might still need to install packages to get missing
    # (not packed) files
    if use_chroot:
        packages = [pkg for pkg in packages if not pkg.packfiles]
        if packages:
            record_usage(vagrant_install_pkgs=True)
            logging.info("Some packages were not packed, so we'll install and "
                         "copy their files\n"
                         "Packages that are missing:\n%s",
                         ' '.join(pkg.name for pkg in packages))

    if packages:
        try:
            installer = select_installer(pack, runs, target_distribution)
        except CantFindInstaller as e:
            logging.error("Need to install %d packages but couldn't select a "
                          "package installer: %s",
                          len(packages), e)
            # BUGFIX: without this exit, execution would continue with
            # 'installer' unbound and crash with a NameError below
            sys.exit(1)

    target.mkdir(parents=True)
    try:
        # Writes setup script
        logging.info("Writing setup script %s...", target / 'setup.sh')
        with (target / 'setup.sh').open('w', encoding='utf-8',
                                        newline='\n') as fp:
            fp.write('#!/bin/sh\n\nset -e\n\n')
            if packages:
                # Updates package sources
                fp.write(installer.update_script())
                fp.write('\n')
                # Installs necessary packages
                fp.write(installer.install_script(packages))
                fp.write('\n')
            # TODO : Compare package versions (painful because of sh)

            # Untar
            if use_chroot:
                fp.write('\n'
                         'mkdir /experimentroot; cd /experimentroot\n')
                fp.write('tar zpxf /vagrant/data.tgz --numeric-owner '
                         '--strip=1 %s\n' % rpz_pack.data_prefix)
                if mount_bind:
                    fp.write('\n'
                             'mkdir -p /experimentroot/dev\n'
                             'mkdir -p /experimentroot/proc\n')

                # Copies files from packages that were not packed, from the
                # host system into the chroot
                for pkg in packages:
                    fp.write('\n# Copies files from package %s\n' % pkg.name)
                    for f in pkg.files:
                        f = f.path
                        dest = join_root(PosixPath('/experimentroot'), f)
                        fp.write('mkdir -p %s\n' %
                                 shell_escape(unicode_(f.parent)))
                        fp.write('cp -L %s %s\n' % (
                                 shell_escape(unicode_(f)),
                                 shell_escape(unicode_(dest))))
            else:
                fp.write('\ncd /\n')
                paths = set()
                pathlist = []
                # Adds intermediate directories, and checks for existence in
                # the tar
                for f in other_files:
                    path = PosixPath('/')
                    for c in rpz_pack.remove_data_prefix(f.path).components:
                        path = path / c
                        if path in paths:
                            continue
                        paths.add(path)
                        try:
                            rpz_pack.get_data(path)
                        except KeyError:
                            logging.info("Missing file %s", path)
                        else:
                            pathlist.append(path)
                # FIXME : for some reason we need reversed() here, I'm not sure
                # why. Need to read more of tar's docs.
                # TAR bug: --no-overwrite-dir removes --keep-old-files
                # TAR bug: there is no way to make --keep-old-files not report
                # an error if an existing file is encountered. --skip-old-files
                # was introduced too recently. Instead, we just ignore the exit
                # status
                with (target / 'rpz-files.list').open('wb') as lfp:
                    for p in reversed(pathlist):
                        lfp.write(join_root(rpz_pack.data_prefix, p).path)
                        lfp.write(b'\0')
                fp.write('tar zpxf /vagrant/data.tgz --keep-old-files '
                         '--numeric-owner --strip=1 '
                         '--null -T /vagrant/rpz-files.list || /bin/true\n')

            # Copies busybox
            if use_chroot:
                arch = runs[0]['architecture']
                download_file(busybox_url(arch),
                              target / 'busybox',
                              'busybox-%s' % arch)
                fp.write(r'''
cp /vagrant/busybox /experimentroot/busybox
chmod +x /experimentroot/busybox
mkdir -p /experimentroot/bin
[ -e /experimentroot/bin/sh ] || \
    ln -s /busybox /experimentroot/bin/sh
''')

        # Copies pack
        logging.info("Copying pack file...")
        rpz_pack.copy_data_tar(target / 'data.tgz')

        rpz_pack.close()

        # Writes Vagrant file
        logging.info("Writing %s...", target / 'Vagrantfile')
        with (target / 'Vagrantfile').open('w', encoding='utf-8',
                                           newline='\n') as fp:
            # Vagrant header and version
            fp.write(
                '# -*- mode: ruby -*-\n'
                '# vi: set ft=ruby\n\n'
                'VAGRANTFILE_API_VERSION = "2"\n\n'
                'Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|\n')
            # Selects which box to install
            fp.write(' config.vm.box = "%s"\n' % box)
            # Run the setup script on the virtual machine
            fp.write(' config.vm.provision "shell", path: "setup.sh"\n')
            # Memory size
            if memory is not None:
                fp.write(' config.vm.provider "virtualbox" do |v|\n'
                         ' v.memory = %d\n'
                         ' end\n' % memory)
            fp.write('end\n')

        # Meta-data for reprounzip
        write_dict(target,
                   metadata_initial_iofiles(config,
                                            {'use_chroot': use_chroot}))

        signals.post_setup(target=target, pack=pack)
    except Exception:
        # Don't leave a half-built unpacking directory behind
        target.rmtree(ignore_errors=True)
        raise
def docker_setup_create(args):
    """Sets up the experiment to be run in a Docker-built container.

    Extracts the configuration from the pack, selects a base image, writes
    a Dockerfile (busybox + rpzsudo + data tarball + package installation
    commands), the null-separated file list that the Dockerfile extracts,
    and an rpz_entrypoint.sh script that dispatches the recorded runs.  On
    any failure the partially-built target directory is removed.
    """
    pack = Path(args.pack[0])
    target = Path(args.target[0])
    if target.exists():
        logging.critical("Target directory exists")
        sys.exit(1)

    signals.pre_setup(target=target, pack=pack)

    target.mkdir()

    try:
        # Unpacks configuration file
        rpz_pack = RPZPack(pack)
        rpz_pack.extract_config(target / 'config.yml')

        # Loads config
        runs, packages, other_files = config = load_config(
            target / 'config.yml', True)

        # Base image: explicit from the command line, otherwise derived
        # from the runs recorded in the pack
        if args.base_image:
            record_usage(docker_explicit_base=True)
            base_image = args.base_image[0]
            if args.distribution:
                target_distribution = args.distribution[0]
            else:
                target_distribution = None
        else:
            target_distribution, base_image = select_image(runs)
        logging.info("Using base image %s", base_image)
        logging.debug("Distribution: %s", target_distribution or "unknown")

        rpz_pack.copy_data_tar(target / 'data.tgz')

        arch = runs[0]['architecture']

        # Writes Dockerfile
        logging.info("Writing %s...", target / 'Dockerfile')
        with (target / 'Dockerfile').open('w', encoding='utf-8',
                                          newline='\n') as fp:
            fp.write('FROM %s\n\n' % base_image)

            # Installs busybox
            download_file(busybox_url(arch),
                          target / 'busybox',
                          'busybox-%s' % arch)
            fp.write('COPY busybox /busybox\n')

            # Installs rpzsudo
            download_file(sudo_url(arch),
                          target / 'rpzsudo',
                          'rpzsudo-%s' % arch)
            fp.write('COPY rpzsudo /rpzsudo\n\n')

            fp.write('COPY data.tgz /reprozip_data.tgz\n\n')
            fp.write('COPY rpz-files.list /rpz-files.list\n')
            # Single RUN step; install/untar commands are appended below as
            # '&&'-chained continuations
            fp.write('RUN \\\n'
                     ' chmod +x /busybox /rpzsudo && \\\n')

            if args.install_pkgs:
                # Install every package through package manager
                missing_packages = []
            else:
                # Only install packages that were not packed
                missing_packages = [pkg for pkg in packages if pkg.packfiles]
                packages = [pkg for pkg in packages if not pkg.packfiles]
            if packages:
                record_usage(docker_install_pkgs=True)
                try:
                    installer = select_installer(pack, runs,
                                                 target_distribution)
                except CantFindInstaller as e:
                    logging.error("Need to install %d packages but couldn't "
                                  "select a package installer: %s",
                                  len(packages), e)
                    sys.exit(1)
                # Updates package sources
                update_script = installer.update_script()
                if update_script:
                    fp.write(' %s && \\\n' % update_script)
                # Installs necessary packages
                fp.write(' %s && \\\n' % installer.install_script(packages))
                logging.info("Dockerfile will install the %d software "
                             "packages that were not packed",
                             len(packages))
            else:
                record_usage(docker_install_pkgs=False)

            # Untar
            paths = set()
            pathlist = []
            # Add intermediate directories, and check for existence in the tar
            logging.info("Generating file list...")
            missing_files = chain.from_iterable(pkg.files
                                                for pkg in missing_packages)
            data_files = rpz_pack.data_filenames()
            listoffiles = list(chain(other_files, missing_files))
            for f in listoffiles:
                # Skip packed resolv.conf copies: name resolution inside the
                # container should use Docker's own resolv.conf
                if f.path.name == 'resolv.conf' and (
                        f.path.lies_under('/etc') or
                        f.path.lies_under('/run') or
                        f.path.lies_under('/var')):
                    continue
                path = PosixPath('/')
                for c in rpz_pack.remove_data_prefix(f.path).components:
                    path = path / c
                    if path in paths:
                        continue
                    paths.add(path)
                    if path in data_files:
                        pathlist.append(path)
                    else:
                        logging.info("Missing file %s", path)
            rpz_pack.close()
            # FIXME : for some reason we need reversed() here, I'm not sure why
            # Need to read more of tar's docs.
            # TAR bug: --no-overwrite-dir removes --keep-old-files
            with (target / 'rpz-files.list').open('wb') as lfp:
                for p in reversed(pathlist):
                    # Null-separated entries, consumed by tar's --null -T
                    lfp.write(join_root(rpz_pack.data_prefix, p).path)
                    lfp.write(b'\0')
            fp.write(' cd / && '
                     '(tar zpxf /reprozip_data.tgz -U --recursive-unlink '
                     '--numeric-owner --strip=1 --null -T /rpz-files.list || '
                     '/busybox echo "TAR reports errors, this might or might '
                     'not prevent the execution to run")\n')

            # Setup entry point
            fp.write('COPY rpz_entrypoint.sh /rpz_entrypoint.sh\n'
                     'ENTRYPOINT ["/busybox", "sh", "/rpz_entrypoint.sh"]\n')

        # Write entry point script
        logging.info("Writing %s...", target / 'rpz_entrypoint.sh')
        with (target / 'rpz_entrypoint.sh').open('w', encoding='utf-8',
                                                 newline='\n') as fp:
            # The entrypoint gets some arguments from the run command
            # By default, it just does all the runs
            # "run N" executes the run with that number
            # "cmd STR" sets a replacement command-line for the next run
            # "do STR" executes a command as-is
            fp.write(
                '#!/bin/sh\n'
                '\n'
                'COMMAND=\n'
                'ENVVARS=\n'
                '\n'
                'if [ $# = 0 ]; then\n'
                ' exec /busybox sh /rpz_entrypoint.sh')
            # No arguments: re-exec ourselves with one "run N" per recorded run
            for nb in irange(len(runs)):
                fp.write(' run %d' % nb)
            fp.write(
                '\n'
                'fi\n'
                '\n'
                'while [ $# != 0 ]; do\n'
                ' case "$1" in\n'
                ' help)\n'
                ' echo "Image built from reprounzip-docker" >&2\n'
                ' echo "Usage: docker run <image> [cmd word [word '
                '...]] [run <R>]" >&2\n'
                ' echo " \\`cmd ...\\` changes the command for '
                'the next \\`run\\` option" >&2\n'
                ' echo " \\`run <name|number>\\` runs the '
                'specified run" >&2\n'
                ' echo "By default, all the runs are executed." '
                '>&2\n'
                ' echo "The runs in this image are:" >&2\n')
            for run in runs:
                fp.write(
                    ' echo " {name}: {cmdline}" >&2\n'.format(
                        name=run['id'],
                        cmdline=' '.join(shell_escape(a)
                                         for a in run['argv'])))
            fp.write(
                ' exit 0\n'
                ' ;;\n'
                ' do)\n'
                ' shift\n'
                ' $1\n'
                ' ;;\n'
                ' env)\n'
                ' shift\n'
                ' ENVVARS="$1"\n'
                ' ;;\n'
                ' cmd)\n'
                ' shift\n'
                ' COMMAND="$1"\n'
                ' ;;\n'
                ' run)\n'
                ' shift\n'
                ' case "$1" in\n')
            # One case entry per run, matched by name or index
            for i, run in enumerate(runs):
                cmdline = ' '.join([run['binary']] + run['argv'][1:])
                fp.write(
                    ' {name})\n'
                    ' RUNCOMMAND={cmd}\n'
                    ' RUNWD={wd}\n'
                    ' RUNENV={env}\n'
                    ' RUNUID={uid}\n'
                    ' RUNGID={gid}\n'
                    ' ;;\n'.format(
                        name='%s|%d' % (run['id'], i),
                        cmd=shell_escape(cmdline),
                        wd=shell_escape(run['workingdir']),
                        env=shell_escape(' '.join(
                            '%s=%s' % (shell_escape(k), shell_escape(v))
                            for k, v in iteritems(run['environ']))),
                        uid=run.get('uid', 1000),
                        gid=run.get('gid', 1000)))
            fp.write(
                ' *)\n'
                ' echo "RPZ: Unknown run $1" >&2\n'
                ' exit 1\n'
                ' ;;\n'
                ' esac\n'
                ' if [ -n "$COMMAND" ]; then\n'
                ' RUNCOMMAND="$COMMAND"\n'
                ' COMMAND=\n'
                ' fi\n'
                ' export RUNWD; export RUNENV; export ENVVARS; '
                'export RUNCOMMAND\n'
                ' /rpzsudo "#$RUNUID" "#$RUNGID" /busybox sh -c '
                '"cd \\"\\$RUNWD\\" && /busybox env -i $RUNENV $ENVVARS '
                '$RUNCOMMAND"\n'
                ' ENVVARS=\n'
                ' ;;\n'
                ' *)\n'
                ' echo "RPZ: Unknown option $1" >&2\n'
                ' exit 1\n'
                ' ;;\n'
                ' esac\n'
                ' shift\n'
                'done\n')

        # Meta-data for reprounzip
        write_dict(target, metadata_initial_iofiles(config))

        signals.post_setup(target=target, pack=pack)
    except Exception:
        # Don't leave a half-built unpacking directory behind
        target.rmtree(ignore_errors=True)
        raise
def trace(binary, argv, directory, append, verbosity='unset'):
    """Main function for the trace subcommand.

    Runs `binary` with `argv` under the tracer, writing the trace database
    into `directory`.  `append` controls what happens if a trace already
    exists: None prompts interactively, False overwrites, True appends.
    Returns the traced program's exit status from _pytracer.execute().
    """
    if verbosity != 'unset':
        # Kept for backward compatibility only; logging level should be set
        # on the 'reprozip' logger instead
        warnings.warn(
            "The 'verbosity' parameter for trace() is deprecated. "
            "Please set a level on the 'reprozip' logger instead.",
            DeprecationWarning)

    # Normalize arguments: directory as Path, binary as a plain string
    if not isinstance(directory, Path):
        directory = Path(directory)
    if isinstance(binary, Path):
        binary = binary.path

    cwd = Path.cwd()
    if (any(cwd.lies_under(c) for c in magic_dirs + system_dirs) and
            not cwd.lies_under('/usr/local')):
        logger.warning(
            "You are running this experiment from a system directory! "
            "Autodetection of non-system files will probably not work as "
            "intended")

    # Trace directory
    if directory.exists():
        if append is None:
            # No flag given: ask the user (if on a TTY)
            r = tty_prompt(
                "Trace directory %s exists\n"
                "(a)ppend run to the trace, (d)elete it or (s)top? [a/d/s] " %
                directory,
                'aAdDsS')
            if r is None:
                # Not a TTY; can't prompt, so bail out
                logger.critical(
                    "Trace directory %s exists\n"
                    "Please use either --continue or --overwrite\n",
                    directory)
                sys.exit(125)
            elif r in 'sS':
                sys.exit(125)
            elif r in 'dD':
                directory.rmtree()
                directory.mkdir()
            # Tell the user how to skip the prompt next time
            logger.warning(
                "You can use --overwrite to replace the existing trace "
                "(or --continue to append\nwithout prompt)")
        elif append is False:
            logger.info("Removing existing trace directory %s", directory)
            directory.rmtree()
            directory.mkdir(parents=True)
    else:
        if append is True:
            logger.warning("--continue was set but trace doesn't exist yet")
        directory.mkdir()

    # Runs the trace
    database = directory / 'trace.sqlite3'
    logger.info("Running program")
    # Might raise _pytracer.Error
    c = _pytracer.execute(binary, argv, database.path)
    if c != 0:
        if c & 0x0100:
            # High byte set: terminated by a signal, low byte is the number
            logger.warning(
                "Program appears to have been terminated by "
                "signal %d", c & 0xFF)
        else:
            logger.warning("Program exited with non-zero code %d", c)
    logger.info("Program completed")
    return c
def docker_setup_create(args):
    """Sets up the experiment to be run in a Docker-built container.

    Extracts the configuration from the pack, selects a base image, writes
    a Dockerfile (busybox + rpzsudo + data tarball + package installation
    commands), the null-separated file list that the Dockerfile extracts,
    and an rpz_entrypoint.sh script that dispatches the recorded runs.  On
    any failure the partially-built target directory is removed.
    """
    pack = Path(args.pack[0])
    target = Path(args.target[0])
    if target.exists():
        logger.critical("Target directory exists")
        sys.exit(1)

    signals.pre_setup(target=target, pack=pack)

    target.mkdir()

    try:
        # Unpacks configuration file
        rpz_pack = RPZPack(pack)
        rpz_pack.extract_config(target / 'config.yml')

        # Loads config
        runs, packages, other_files = config = load_config(
            target / 'config.yml', True)

        # Base image: explicit from the command line, otherwise derived
        # from the runs recorded in the pack
        if args.base_image:
            record_usage(docker_explicit_base=True)
            base_image = args.base_image[0]
            if args.distribution:
                target_distribution = args.distribution[0]
            else:
                target_distribution = None
        else:
            target_distribution, base_image = select_image(runs)
        logger.info("Using base image %s", base_image)
        logger.debug("Distribution: %s", target_distribution or "unknown")

        rpz_pack.copy_data_tar(target / 'data.tgz')

        arch = runs[0]['architecture']

        # Writes Dockerfile
        logger.info("Writing %s...", target / 'Dockerfile')
        with (target / 'Dockerfile').open('w', encoding='utf-8',
                                          newline='\n') as fp:
            fp.write('FROM %s\n\n' % base_image)

            # Installs busybox
            download_file(busybox_url(arch),
                          target / 'busybox',
                          'busybox-%s' % arch)
            fp.write('COPY busybox /busybox\n')

            # Installs rpzsudo
            download_file(sudo_url(arch),
                          target / 'rpzsudo',
                          'rpzsudo-%s' % arch)
            fp.write('COPY rpzsudo /rpzsudo\n\n')

            fp.write('COPY data.tgz /reprozip_data.tgz\n\n')
            fp.write('COPY rpz-files.list /rpz-files.list\n')
            # Single RUN step; install/untar commands are appended below as
            # '&&'-chained continuations
            fp.write('RUN \\\n'
                     ' chmod +x /busybox /rpzsudo && \\\n')

            if args.install_pkgs:
                # Install every package through package manager
                missing_packages = []
            else:
                # Only install packages that were not packed
                missing_packages = [pkg for pkg in packages if pkg.packfiles]
                packages = [pkg for pkg in packages if not pkg.packfiles]
            if packages:
                record_usage(docker_install_pkgs=True)
                try:
                    installer = select_installer(pack, runs,
                                                 target_distribution)
                except CantFindInstaller as e:
                    logger.error("Need to install %d packages but couldn't "
                                 "select a package installer: %s",
                                 len(packages), e)
                    sys.exit(1)
                # Updates package sources
                update_script = installer.update_script()
                if update_script:
                    fp.write(' %s && \\\n' % update_script)
                # Installs necessary packages
                fp.write(' %s && \\\n' % installer.install_script(packages))
                logger.info("Dockerfile will install the %d software "
                            "packages that were not packed",
                            len(packages))
            else:
                record_usage(docker_install_pkgs=False)

            # Untar
            paths = set()
            pathlist = []
            # Add intermediate directories, and check for existence in the tar
            logger.info("Generating file list...")
            missing_files = chain.from_iterable(pkg.files
                                                for pkg in missing_packages)
            data_files = rpz_pack.data_filenames()
            listoffiles = list(chain(other_files, missing_files))
            for f in listoffiles:
                # Skip packed resolv.conf copies: name resolution inside the
                # container should use Docker's own resolv.conf
                if f.path.name == 'resolv.conf' and (
                        f.path.lies_under('/etc') or
                        f.path.lies_under('/run') or
                        f.path.lies_under('/var')):
                    continue
                path = PosixPath('/')
                for c in rpz_pack.remove_data_prefix(f.path).components:
                    path = path / c
                    if path in paths:
                        continue
                    paths.add(path)
                    if path in data_files:
                        pathlist.append(path)
                    else:
                        logger.info("Missing file %s", path)
            rpz_pack.close()
            # FIXME : for some reason we need reversed() here, I'm not sure why
            # Need to read more of tar's docs.
            # TAR bug: --no-overwrite-dir removes --keep-old-files
            with (target / 'rpz-files.list').open('wb') as lfp:
                for p in reversed(pathlist):
                    # Null-separated entries, consumed by tar's --null -T
                    lfp.write(join_root(rpz_pack.data_prefix, p).path)
                    lfp.write(b'\0')
            fp.write(' cd / && '
                     '(tar zpxf /reprozip_data.tgz -U --recursive-unlink '
                     '--numeric-owner --strip=1 --null -T /rpz-files.list || '
                     '/busybox echo "TAR reports errors, this might or might '
                     'not prevent the execution to run")\n')

            # Setup entry point
            fp.write('COPY rpz_entrypoint.sh /rpz_entrypoint.sh\n'
                     'ENTRYPOINT ["/busybox", "sh", "/rpz_entrypoint.sh"]\n')

        # Write entry point script
        logger.info("Writing %s...", target / 'rpz_entrypoint.sh')
        with (target / 'rpz_entrypoint.sh').open('w', encoding='utf-8',
                                                 newline='\n') as fp:
            # The entrypoint gets some arguments from the run command
            # By default, it just does all the runs
            # "run N" executes the run with that number
            # "cmd STR" sets a replacement command-line for the next run
            # "do STR" executes a command as-is
            fp.write(
                '#!/bin/sh\n'
                '\n'
                'COMMAND=\n'
                'ENVVARS=\n'
                '\n'
                'if [ $# = 0 ]; then\n'
                ' exec /busybox sh /rpz_entrypoint.sh')
            # No arguments: re-exec ourselves with one "run N" per recorded run
            for nb in irange(len(runs)):
                fp.write(' run %d' % nb)
            fp.write(
                '\n'
                'fi\n'
                '\n'
                'while [ $# != 0 ]; do\n'
                ' case "$1" in\n'
                ' help)\n'
                ' echo "Image built from reprounzip-docker" >&2\n'
                ' echo "Usage: docker run <image> [cmd "word [word '
                '...]"] [run <R>]" >&2\n'
                ' echo " \\`cmd ...\\` changes the command for '
                'the next \\`run\\` option" >&2\n'
                ' echo " \\`run <name|number>\\` runs the '
                'specified run" >&2\n'
                ' echo "By default, all the runs are executed." '
                '>&2\n'
                ' echo "The runs in this image are:" >&2\n')
            for run in runs:
                fp.write(
                    ' echo " {name}: {cmdline}" >&2\n'.format(
                        name=run['id'],
                        cmdline=' '.join(shell_escape(a)
                                         for a in run['argv'])))
            fp.write(
                ' exit 0\n'
                ' ;;\n'
                ' do)\n'
                ' shift\n'
                ' $1\n'
                ' ;;\n'
                ' env)\n'
                ' shift\n'
                ' ENVVARS="$1"\n'
                ' ;;\n'
                ' cmd)\n'
                ' shift\n'
                ' COMMAND="$1"\n'
                ' ;;\n'
                ' run)\n'
                ' shift\n'
                ' case "$1" in\n')
            # One case entry per run, matched by name or index
            for i, run in enumerate(runs):
                cmdline = ' '.join([run['binary']] + run['argv'][1:])
                fp.write(
                    ' {name})\n'
                    ' RUNCOMMAND={cmd}\n'
                    ' RUNWD={wd}\n'
                    ' RUNENV={env}\n'
                    ' RUNUID={uid}\n'
                    ' RUNGID={gid}\n'
                    ' ;;\n'.format(
                        name='%s|%d' % (run['id'], i),
                        cmd=shell_escape(cmdline),
                        wd=shell_escape(run['workingdir']),
                        env=shell_escape(' '.join(
                            '%s=%s' % (shell_escape(k), shell_escape(v))
                            for k, v in iteritems(run['environ']))),
                        uid=run.get('uid', 1000),
                        gid=run.get('gid', 1000)))
            fp.write(
                ' *)\n'
                ' echo "RPZ: Unknown run $1" >&2\n'
                ' exit 1\n'
                ' ;;\n'
                ' esac\n'
                ' if [ -n "$COMMAND" ]; then\n'
                ' RUNCOMMAND="$COMMAND"\n'
                ' COMMAND=\n'
                ' fi\n'
                ' export RUNWD; export RUNENV; export ENVVARS; '
                'export RUNCOMMAND\n'
                ' /rpzsudo "#$RUNUID" "#$RUNGID" /busybox sh -c '
                '"cd \\"\\$RUNWD\\" && /busybox env -i $RUNENV $ENVVARS '
                '$RUNCOMMAND; echo \\"*** Command finished, status: \\$?\\""\n'
                ' ENVVARS=\n'
                ' ;;\n'
                ' *)\n'
                ' echo "RPZ: Unknown option $1" >&2\n'
                ' exit 1\n'
                ' ;;\n'
                ' esac\n'
                ' shift\n'
                'done\n')

        # Meta-data for reprounzip
        write_dict(target, metadata_initial_iofiles(config))

        signals.post_setup(target=target, pack=pack)
    except Exception:
        # Don't leave a half-built unpacking directory behind
        target.rmtree(ignore_errors=True)
        raise
def vagrant_setup_create(args):
    """Sets up the experiment to be run in a Vagrant-built virtual machine.

    This can either build a chroot or not.

    If building a chroot, we do just like without Vagrant: we copy all the
    files and only get what's missing from the host. But we do install
    automatically the packages whose files are required.

    If not building a chroot, we install all the packages, and only unpack
    files that don't come from packages.

    In short: files from packages with packfiles=True will only be used if
    building a chroot.
    """
    if not args.pack:
        logging.critical("setup/create needs the pack filename")
        sys.exit(1)

    pack = Path(args.pack[0])
    target = Path(args.target[0])
    if target.exists():
        logging.critical("Target directory exists")
        sys.exit(1)
    use_chroot = args.use_chroot
    mount_bind = args.bind_magic_dirs
    record_usage(use_chroot=use_chroot,
                 mount_bind=mount_bind)

    signals.pre_setup(target=target, pack=pack)

    # Unpacks configuration file
    rpz_pack = RPZPack(pack)
    rpz_pack.extract_config(target / 'config.yml')

    # Loads config
    runs, packages, other_files = config = load_config(target / 'config.yml',
                                                       True)

    if not args.memory:
        memory = None
    else:
        try:
            memory = int(args.memory[-1])
        except ValueError:
            logging.critical("Invalid value for memory size: %r", args.memory)
            sys.exit(1)

    # Box: explicit from the command line, otherwise derived from the runs
    if args.base_image and args.base_image[0]:
        record_usage(vagrant_explicit_image=True)
        box = args.base_image[0]
        if args.distribution:
            target_distribution = args.distribution[0]
        else:
            target_distribution = None
    else:
        target_distribution, box = select_box(runs, gui=args.gui)
    logging.info("Using box %s", box)
    logging.debug("Distribution: %s", target_distribution or "unknown")

    # If using chroot, we might still need to install packages to get missing
    # (not packed) files
    if use_chroot:
        packages = [pkg for pkg in packages if not pkg.packfiles]
        if packages:
            record_usage(vagrant_install_pkgs=True)
            logging.info("Some packages were not packed, so we'll install and "
                         "copy their files\n"
                         "Packages that are missing:\n%s",
                         ' '.join(pkg.name for pkg in packages))

    if packages:
        try:
            installer = select_installer(pack, runs, target_distribution)
        except CantFindInstaller as e:
            logging.error("Need to install %d packages but couldn't select a "
                          "package installer: %s",
                          len(packages), e)
            # BUGFIX: without this exit, execution would continue with
            # 'installer' unbound and crash with a NameError below
            sys.exit(1)

    target.mkdir(parents=True)
    try:
        # Writes setup script
        logging.info("Writing setup script %s...", target / 'setup.sh')
        with (target / 'setup.sh').open('w', encoding='utf-8',
                                        newline='\n') as fp:
            fp.write('#!/bin/sh\n\nset -e\n\n')
            if packages:
                # Updates package sources
                update_script = installer.update_script()
                if update_script:
                    fp.write(update_script)
                fp.write('\n')
                # Installs necessary packages
                fp.write(installer.install_script(packages))
                fp.write('\n')
            # TODO : Compare package versions (painful because of sh)

            # Untar
            if use_chroot:
                fp.write('\n'
                         'mkdir /experimentroot; cd /experimentroot\n')
                fp.write('tar zpxf /vagrant/data.tgz --numeric-owner '
                         '--strip=1 %s\n' % rpz_pack.data_prefix)
                if mount_bind:
                    fp.write('\n'
                             'mkdir -p /experimentroot/dev\n'
                             'mkdir -p /experimentroot/proc\n')

                # Copies files from packages that were not packed, from the
                # host system into the chroot
                for pkg in packages:
                    fp.write('\n# Copies files from package %s\n' % pkg.name)
                    for f in pkg.files:
                        f = f.path
                        dest = join_root(PosixPath('/experimentroot'), f)
                        fp.write('mkdir -p %s\n' %
                                 shell_escape(unicode_(f.parent)))
                        fp.write('cp -L %s %s\n' % (
                                 shell_escape(unicode_(f)),
                                 shell_escape(unicode_(dest))))

                # Name resolution inside the chroot should use the VM's
                # resolv.conf, not the packed one
                fp.write(
                    '\n'
                    'cp /etc/resolv.conf /experimentroot/etc/resolv.conf\n')
            else:
                fp.write('\ncd /\n')
                paths = set()
                pathlist = []
                # Adds intermediate directories, and checks for existence in
                # the tar
                logging.info("Generating file list...")
                data_files = rpz_pack.data_filenames()
                for f in other_files:
                    if f.path.name == 'resolv.conf' and (
                            f.path.lies_under('/etc') or
                            f.path.lies_under('/run') or
                            f.path.lies_under('/var')):
                        continue
                    path = PosixPath('/')
                    for c in rpz_pack.remove_data_prefix(f.path).components:
                        path = path / c
                        if path in paths:
                            continue
                        paths.add(path)
                        if path in data_files:
                            pathlist.append(path)
                        else:
                            logging.info("Missing file %s", path)
                # FIXME : for some reason we need reversed() here, I'm not sure
                # why. Need to read more of tar's docs.
                # TAR bug: --no-overwrite-dir removes --keep-old-files
                # TAR bug: there is no way to make --keep-old-files not report
                # an error if an existing file is encountered. --skip-old-files
                # was introduced too recently. Instead, we just ignore the exit
                # status
                with (target / 'rpz-files.list').open('wb') as lfp:
                    for p in reversed(pathlist):
                        lfp.write(join_root(rpz_pack.data_prefix, p).path)
                        lfp.write(b'\0')
                fp.write('tar zpxf /vagrant/data.tgz --keep-old-files '
                         '--numeric-owner --strip=1 '
                         '--null -T /vagrant/rpz-files.list || /bin/true\n')

            # Copies busybox
            if use_chroot:
                arch = runs[0]['architecture']
                download_file(busybox_url(arch),
                              target / 'busybox',
                              'busybox-%s' % arch)
                fp.write(r'''
cp /vagrant/busybox /experimentroot/busybox
chmod +x /experimentroot/busybox
mkdir -p /experimentroot/bin
[ -e /experimentroot/bin/sh ] || \
    ln -s /busybox /experimentroot/bin/sh
''')

        # Copies pack
        logging.info("Copying pack file...")
        rpz_pack.copy_data_tar(target / 'data.tgz')

        rpz_pack.close()

        # Writes Vagrant file
        logging.info("Writing %s...", target / 'Vagrantfile')
        with (target / 'Vagrantfile').open('w', encoding='utf-8',
                                           newline='\n') as fp:
            # Vagrant header and version
            fp.write(
                '# -*- mode: ruby -*-\n'
                '# vi: set ft=ruby\n\n'
                'VAGRANTFILE_API_VERSION = "2"\n\n'
                'Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|\n')
            # Selects which box to install
            fp.write(' config.vm.box = "%s"\n' % box)
            # Run the setup script on the virtual machine
            fp.write(' config.vm.provision "shell", path: "setup.sh"\n')
            # Memory size
            if memory is not None or args.gui:
                fp.write(' config.vm.provider "virtualbox" do |v|\n')
                if memory is not None:
                    fp.write(' v.memory = %d\n' % memory)
                if args.gui:
                    fp.write(' v.gui = true\n')
                fp.write(' end\n')
            fp.write('end\n')

        # Meta-data for reprounzip
        write_dict(target,
                   metadata_initial_iofiles(config,
                                            {'use_chroot': use_chroot,
                                             'gui': args.gui}))

        signals.post_setup(target=target, pack=pack)
    except Exception:
        # Don't leave a half-built unpacking directory behind
        target.rmtree(ignore_errors=True)
        raise
def functional_tests():
    """End-to-end test of the ``tej`` command-line tool against a live server.

    The server to use is read from the TEJ_DESTINATION environment variable
    (raises KeyError if unset).  Exercises queue setup (default queue,
    explicit queue, links), job submission, status polling, result download,
    and deletion -- asserting on the exact output of each command.
    """
    destination = os.environ['TEJ_DESTINATION']
    logging.info("Using TEJ_DESTINATION %s" % destination)

    # Clean up leftovers from a previous run
    for path in ('~/.tej', '~/tej2'):
        path = Path(path).expand_user()
        try:
            path.remove()
        except OSError:
            # Not a plain file/link (or removal failed): try as a directory
            path.rmtree(ignore_errors=True)
    Path('~/tej2').expand_user().mkdir()

    logging.info("Creating default queue")
    check_call(['tej', 'setup', destination])
    assert Path('~/.tej').expand_user().is_dir()
    Path('~/.tej').expand_user().rmtree()

    logging.info("Creating a queue with a link")
    check_call(['tej', 'setup', destination,
                '--queue', '~/tej2/queue',
                '--make-link', '~/tej2/link'])
    assert Path('~/tej2/queue').expand_user().is_dir()
    # A link is a small text file recording the real queue location
    with Path('~/tej2/link').expand_user().open('r') as fp:
        assert fp.read() == 'tejdir: %s\n' % Path('~/tej2/queue').expand_user()

    logging.info("Adding links")
    # --only-links: point more links at the existing queue without re-setup
    check_call(['tej', 'setup', destination, '--only-links',
                '--queue', '~/tej2/queue',
                '--make-link', '~/tej2/link2'])
    with Path('~/tej2/link2').expand_user().open('r') as fp:
        assert fp.read() == 'tejdir: %s\n' % Path('~/tej2/queue').expand_user()
    assert not Path('~/.tej').expand_user().exists()
    check_call(['tej', 'setup', destination, '--only-links',
                '--queue', '~/tej2/queue',
                '--make-default-link'])
    # The default link lives at ~/.tej
    with Path('~/.tej').expand_user().open('r') as fp:
        assert fp.read() == 'tejdir: %s\n' % Path('~/tej2/queue').expand_user()

    logging.info("Calling status for non-existent job")
    output = check_output(['tej', 'status', destination,
                           '--id', 'nonexistent'])
    assert output == b'not found\n'

    logging.info("Submitting a job")
    jobdir = Path.tempdir(prefix='tej-tests-')
    try:
        # The job script checks that the data files were uploaded, prints to
        # stdout, then blocks until the 'job1done' marker file appears
        with jobdir.open('w', 'start.sh', newline='\n') as fp:
            fp.write('#!/bin/sh\n'
                     '[ -f dir1/data1 ] || exit 1\n'
                     '[ "$(cat dir2/dir3/data2)" = data2 ] || exit 2\n'
                     'echo "stdout here"\n'
                     'while ! [ -e ~/tej2/job1done ]; do\n'
                     ' sleep 1\n'
                     'done\n'
                     'echo "job output" > job1results\n')
        with jobdir.mkdir('dir1').open('wb', 'data1') as fp:
            fp.write(b'data1\n')
        with jobdir.mkdir('dir2').mkdir('dir3').open('w', 'data2') as fp:
            fp.write('data2\n')
        # submit prints the generated job identifier on stdout
        job_id = check_output(['tej', 'submit', destination, jobdir.path])
        job_id = job_id.rstrip().decode('ascii')
    finally:
        jobdir.rmtree()

    logging.info("Check status of running job")
    output = check_output(['tej', 'status', destination, '--id', job_id])
    assert output == b'running\n'

    logging.info("Finish job")
    # Create the marker file the job script is polling for; give it a couple
    # of seconds to notice and finish
    Path('~/tej2/job1done').expand_user().open('w').close()
    time.sleep(2)

    logging.info("Check status of finished job")
    output = check_output(['tej', 'status', destination, '--id', job_id])
    assert output == b'finished 0\n'

    logging.info("Download job results")
    destdir = Path.tempdir(prefix='tej-tests-')
    try:
        check_call(['tej', 'download', destination, '--id', job_id,
                    'job1results', '_stdout'],
                   cwd=destdir.path)
        with destdir.open('r', 'job1results') as fp:
            assert fp.read() == 'job output\n'
        # '_stdout' is the captured standard output of the job
        with destdir.open('r', '_stdout') as fp:
            assert fp.read() == 'stdout here\n'
    finally:
        destdir.rmtree()

    logging.info("List jobs")
    output = check_output(['tej', 'list', destination])
    assert output == ('%s finished\n' % job_id).encode('ascii')

    logging.info("Remove finished job")
    check_call(['tej', 'delete', destination, '--id', job_id])
    output = check_output(['tej', 'status', destination, '--id', job_id])
    assert output == b'not found\n'
def functional_tests():
    """End-to-end test of the ``tej`` command-line tool against a live server.

    The server to use is read from the TEJ_DESTINATION environment variable
    (raises KeyError if unset).  If COVER is set, it is used as a command
    prefix so tej's ``__main__`` runs under a coverage tool instead of the
    installed ``tej`` binary.

    Exercises: queue setup (default queue, explicit queue, links), job
    submission, status polling, killing, result download, and deletion --
    asserting on the exact output of each command.
    """
    destination = os.environ['TEJ_DESTINATION']
    logging.info("Using TEJ_DESTINATION %s" % destination)
    # Base command: either the coverage wrapper around tej's entry point, or
    # the plain 'tej' executable; '-v -v' for maximum verbosity
    if 'COVER' in os.environ:
        tej = os.environ['COVER'].split(' ') + [
            bytes(Path.cwd() / 'tej/__main__.py'), '-v', '-v']
    else:
        tej = ['tej', '-v', '-v']

    # Clean up leftovers from a previous run; the directory name contains a
    # space on purpose, to exercise quoting on the remote side
    for path in ('~/.tej', '~/tej 2'):
        path = Path(path).expand_user()
        try:
            path.remove()
        except OSError:
            # Not a plain file/link (or removal failed): try as a directory
            path.rmtree(ignore_errors=True)
    Path('~/tej 2').expand_user().mkdir()

    logging.info("Creating default queue")
    check_call(tej + ['setup', destination])
    assert Path('~/.tej').expand_user().is_dir()
    # cleanup() removes the queue it is pointed at
    RemoteQueue(destination, '~/.tej').cleanup()
    assert not Path('~/.tej').expand_user().exists()

    logging.info("Creating a queue with a link")
    # Note: relative paths here, resolved against the remote home directory
    check_call(tej + ['setup', destination,
                      '--queue', 'tej 2/queue',
                      '--make-link', 'tej 2/link'])
    assert Path('~/tej 2/queue').expand_user().is_dir()
    # A link is a small text file recording the real queue location
    with Path('~/tej 2/link').expand_user().open('r') as fp:
        assert fp.read() == ('tejdir: %s\n' %
                             Path('~/tej 2/queue').expand_user())

    logging.info("Adding links")
    # --only-links: point more links at the existing queue without re-setup
    check_call(tej + ['setup', destination, '--only-links',
                      '--queue', '~/tej 2/queue',
                      '--make-link', '~/tej 2/link2'])
    with Path('~/tej 2/link2').expand_user().open('r') as fp:
        assert fp.read() == ('tejdir: %s\n' %
                             Path('~/tej 2/queue').expand_user())
    assert not Path('~/.tej').expand_user().exists()
    check_call(tej + ['setup', destination, '--only-links',
                      '--queue', '~/tej 2/queue',
                      '--make-default-link'])
    # The default link lives at ~/.tej
    with Path('~/.tej').expand_user().open('r') as fp:
        assert fp.read() == ('tejdir: %s\n' %
                             Path('~/tej 2/queue').expand_user())

    logging.info("Calling status for non-existent job")
    output = check_output(tej + ['status', destination,
                                 '--id', 'nonexistent'])
    assert output == b'not found\n'

    logging.info("Submitting a job")
    jobdir = Path.tempdir(prefix='tej-tests-')
    try:
        # The job script checks that the data files were uploaded, prints to
        # stdout, then blocks until the 'job1done' marker file appears
        with jobdir.open('w', 'start.sh', newline='\n') as fp:
            fp.write('#!/bin/sh\n'
                     '[ -f dir1/data1 ] || exit 1\n'
                     '[ "$(cat dir2/dir3/data2)" = data2 ] || exit 2\n'
                     'echo "stdout here"\n'
                     'while ! [ -e ~/"tej 2/job1done" ]; do\n'
                     ' sleep 1\n'
                     'done\n'
                     'echo "job output" > job1results\n')
        with jobdir.mkdir('dir1').open('wb', 'data1') as fp:
            fp.write(b'data1\n')
        with jobdir.mkdir('dir2').mkdir('dir3').open('w', 'data2') as fp:
            fp.write('data2\n')
        # submit prints the generated job identifier on stdout
        job_id = check_output(tej + ['submit', destination, jobdir.path])
        job_id = job_id.rstrip().decode('ascii')
    finally:
        jobdir.rmtree()

    logging.info("Check status while forgetting job id")
    # status without --id must fail with a non-zero exit code
    assert call(tej + ['status', destination]) != 0

    logging.info("Check status of running job")
    output = check_output(tej + ['status', destination, '--id', job_id])
    assert output == b'running\n'

    logging.info("Finish job")
    # Create the marker file the job script is polling for; give it a couple
    # of seconds to notice and finish
    Path('~/tej 2/job1done').expand_user().open('w').close()
    time.sleep(2)

    logging.info("Check status of finished job")
    output = check_output(tej + ['status', destination, '--id', job_id])
    assert output == b'finished 0\n'

    logging.info("Download job results")
    destdir = Path.tempdir(prefix='tej-tests-')
    try:
        check_call(tej + ['download', destination, '--id', job_id,
                          'job1results', '_stdout'],
                   cwd=destdir.path)
        with destdir.open('r', 'job1results') as fp:
            assert fp.read() == 'job output\n'
        # '_stdout' is the captured standard output of the job
        with destdir.open('r', '_stdout') as fp:
            assert fp.read() == 'stdout here\n'
    finally:
        destdir.rmtree()

    logging.info("List jobs")
    output = check_output(tej + ['list', destination])
    assert output == ('%s finished\n' % job_id).encode('ascii')

    logging.info("Kill already finished job")
    # Killing a finished job is a silent no-op, not an error
    output = check_output(tej + ['kill', destination, '--id', job_id])
    assert output == b''

    logging.info("Remove finished job")
    check_call(tej + ['delete', destination, '--id', job_id])
    output = check_output(tej + ['status', destination, '--id', job_id])
    assert output == b'not found\n'

    logging.info("Submit another job")
    jobdir = Path.tempdir(prefix='tej-tests-')
    try:
        # Long-running job so kill/delete can be tested on a running job
        with jobdir.open('w', 'start.sh', newline='\n') as fp:
            fp.write('#!/bin/sh\n'
                     'sleep 20\n')
        # This time the job id is chosen client-side and passed via --id
        job_id = make_unique_name()
        check_call(tej + ['submit', destination, '--id', job_id,
                          jobdir.path])
    finally:
        jobdir.rmtree()
    output = check_output(tej + ['status', destination, '--id', job_id])
    assert output == b'running\n'

    logging.info("Remove still running job")
    # Deleting a running job must be refused (non-zero exit code)
    assert call(tej + ['delete', destination, '--id', job_id]) != 0

    logging.info("Kill running job")
    output = check_output(tej + ['kill', destination, '--id', job_id])
    output = check_output(tej + ['status', destination, '--id', job_id])
    # The exit status of a killed job is system-dependent; only check format
    assert re.match(b'finished [0-9]+\n', output)

    logging.info("Remove killed job")
    check_call(tej + ['delete', destination, '--id', job_id])
    output = check_output(tej + ['status', destination, '--id', job_id])
    assert output == b'not found\n'

    jobdir = Path.tempdir(prefix='tej-tests-')
    try:
        logging.info("Start remote command job")
        # --script: run an inline shell command instead of a start.sh
        job_id = check_output(tej + ['submit', destination,
                                     '--script', 'echo "hi"',
                                     jobdir.path])
        job_id = job_id.rstrip().decode('ascii')
    finally:
        jobdir.rmtree()
    time.sleep(2)

    logging.info("Download remote command job output")
    destdir = Path.tempdir(prefix='tej-tests-')
    try:
        check_call(tej + ['download', destination, '--id', job_id,
                          '_stdout'],
                   cwd=destdir.path)
        with destdir.open('r', '_stdout') as fp:
            assert fp.read() == 'hi\n'
    finally:
        destdir.rmtree()

    logging.info("Remove finished job")
    output = check_output(tej + ['delete', destination, '--id', job_id])
    assert output == b''
    output = check_output(tej + ['status', destination, '--id', job_id])
    assert output == b'not found\n'

    # Final cleanup through the link; both link and queue must disappear
    RemoteQueue(destination, 'tej 2/link').cleanup()
    assert not Path('~/tej 2/link').expand_user().exists()
    assert not Path('~/tej 2/queue').expand_user().exists()
def docker_setup_create(args):
    """Sets up the experiment to be run in a Docker-built container.

    Unpacks the .rpz pack's configuration into the target directory, picks
    (or accepts via --base-image) a Docker base image, downloads the busybox
    and rpzsudo helper binaries, and writes a Dockerfile that installs
    non-packed packages and untars the experiment's files into the image.
    On any failure the target directory is removed before re-raising.
    """
    pack = Path(args.pack[0])
    target = Path(args.target[0])
    if target.exists():
        logging.critical("Target directory exists")
        sys.exit(1)
    signals.pre_setup(target=target, pack=pack)
    target.mkdir()
    try:
        # Unpacks configuration file
        rpz_pack = RPZPack(pack)
        rpz_pack.extract_config(target / 'config.yml')

        # Loads config; 'config' keeps the whole tuple for write_dict() below
        runs, packages, other_files = config = load_config(
            target / 'config.yml', True)

        # Base image: either given explicitly on the command line, or
        # selected from the packed runs
        if args.base_image:
            record_usage(docker_explicit_base=True)
            base_image = args.base_image[0]
            if args.distribution:
                target_distribution = args.distribution[0]
            else:
                target_distribution = None
        else:
            target_distribution, base_image = select_image(runs)
        logging.info("Using base image %s", base_image)
        logging.debug("Distribution: %s", target_distribution or "unknown")

        rpz_pack.copy_data_tar(target / 'data.tgz')

        arch = runs[0]['architecture']

        # Writes Dockerfile
        logging.info("Writing %s...", target / 'Dockerfile')
        with (target / 'Dockerfile').open('w', encoding='utf-8',
                                          newline='\n') as fp:
            fp.write('FROM %s\n\n' % base_image)

            # Installs busybox, matching the experiment's architecture
            download_file(busybox_url(arch),
                          target / 'busybox',
                          'busybox-%s' % arch)
            fp.write('COPY busybox /busybox\n')

            # Installs rpzsudo
            download_file(sudo_url(arch),
                          target / 'rpzsudo',
                          'rpzsudo-%s' % arch)
            fp.write('COPY rpzsudo /rpzsudo\n\n')

            fp.write('COPY data.tgz /reprozip_data.tgz\n\n')
            fp.write('COPY rpz-files.list /rpz-files.list\n')
            # Single RUN layer: chmod, then package install, then untar
            fp.write('RUN \\\n'
                     '    chmod +x /busybox /rpzsudo && \\\n')

            if args.install_pkgs:
                # Install every package through package manager
                missing_packages = []
            else:
                # Only install packages that were not packed
                missing_packages = [pkg for pkg in packages if pkg.packfiles]
                packages = [pkg for pkg in packages if not pkg.packfiles]
            if packages:
                record_usage(docker_install_pkgs=True)
                try:
                    installer = select_installer(pack, runs,
                                                 target_distribution)
                except CantFindInstaller as e:
                    logging.error("Need to install %d packages but couldn't "
                                  "select a package installer: %s",
                                  len(packages), e)
                    sys.exit(1)
                # Updates package sources
                update_script = installer.update_script()
                if update_script:
                    fp.write('    %s && \\\n' % update_script)
                # Installs necessary packages
                fp.write('    %s && \\\n' % installer.install_script(packages))
                logging.info("Dockerfile will install the %d software "
                             "packages that were not packed", len(packages))
            else:
                record_usage(docker_install_pkgs=False)

            # Untar
            paths = set()
            pathlist = []
            # Add intermediate directories, and check for existence in the
            # tar; files belonging to packages installed above are expected
            # to be missing from the data archive
            missing_files = chain.from_iterable(pkg.files
                                                for pkg in missing_packages)
            data_files = rpz_pack.data_filenames()
            listoffiles = list(chain(other_files, missing_files))
            for f in listoffiles:
                path = PosixPath('/')
                for c in rpz_pack.remove_data_prefix(f.path).components:
                    path = path / c
                    # 'paths' de-duplicates: each prefix is handled once
                    if path in paths:
                        continue
                    paths.add(path)
                    if path in data_files:
                        pathlist.append(path)
                    else:
                        logging.info("Missing file %s", path)
            rpz_pack.close()
            # FIXME : for some reason we need reversed() here, I'm not sure
            # why. Need to read more of tar's docs.
            # TAR bug: --no-overwrite-dir removes --keep-old-files
            with (target / 'rpz-files.list').open('wb') as lfp:
                for p in reversed(pathlist):
                    # NUL-separated list, consumed by tar's '--null -T'
                    lfp.write(join_root(rpz_pack.data_prefix, p).path)
                    lfp.write(b'\0')
            # The '|| /busybox echo ...' keeps the image build going even if
            # tar reports (possibly harmless) errors
            fp.write('    cd / && '
                     '(tar zpxf /reprozip_data.tgz -U --recursive-unlink '
                     '--numeric-owner --strip=1 --null -T /rpz-files.list || '
                     '/busybox echo "TAR reports errors, this might or might '
                     'not prevent the execution to run")\n')

        # Meta-data for reprounzip
        write_dict(target, metadata_initial_iofiles(config))

        signals.post_setup(target=target, pack=pack)
    except Exception:
        # Don't leave a half-built unpacking directory behind
        target.rmtree(ignore_errors=True)
        raise
def trace(binary, argv, directory, append, verbosity='unset'):
    """Main function for the trace subcommand.

    Runs *binary* with arguments *argv* under the tracer, writing the trace
    database to ``trace.sqlite3`` inside *directory*.

    :param append: ``True`` to append to an existing trace (--continue),
        ``False`` to replace it (--overwrite), ``None`` to prompt the user
        interactively when the directory already exists.
    :param verbosity: deprecated; set a level on the 'reprozip' logger.
    :return: the raw status from the tracer (0 on clean exit).
    """
    if verbosity != 'unset':
        warnings.warn("The 'verbosity' parameter for trace() is deprecated. "
                      "Please set a level on the 'reprozip' logger instead.",
                      DeprecationWarning)

    # Normalize argument types
    if not isinstance(directory, Path):
        directory = Path(directory)
    if isinstance(binary, Path):
        binary = binary.path

    # Warn when tracing from inside a system location (except /usr/local),
    # since system paths are filtered out of the automatic file detection
    current_dir = Path.cwd()
    under_system = any(current_dir.lies_under(d)
                       for d in magic_dirs + system_dirs)
    if under_system and not current_dir.lies_under('/usr/local'):
        logger.warning(
            "You are running this experiment from a system directory! "
            "Autodetection of non-system files will probably not work as "
            "intended")

    # Prepare the trace directory, honoring --continue/--overwrite
    if not directory.exists():
        if append is True:
            logger.warning("--continue was set but trace doesn't exist yet")
        directory.mkdir()
    elif append is None:
        # No flag given: ask interactively (tty_prompt returns None when no
        # terminal is available)
        choice = tty_prompt(
            "Trace directory %s exists\n"
            "(a)ppend run to the trace, (d)elete it or (s)top? [a/d/s] " %
            directory,
            'aAdDsS')
        if choice is None:
            logger.critical(
                "Trace directory %s exists\n"
                "Please use either --continue or --overwrite\n",
                directory)
            sys.exit(125)
        if choice in 'sS':
            sys.exit(125)
        if choice in 'dD':
            directory.rmtree()
            directory.mkdir()
            logger.warning(
                "You can use --overwrite to replace the existing trace "
                "(or --continue to append\nwithout prompt)")
        # 'a'/'A' falls through: keep the directory, append the new run
    elif append is False:
        logger.info("Removing existing trace directory %s", directory)
        directory.rmtree()
        directory.mkdir(parents=True)
    # append is True with an existing directory: keep it as-is

    # Runs the trace; might raise _pytracer.Error
    database = directory / 'trace.sqlite3'
    logger.info("Running program")
    exit_status = _pytracer.execute(binary, argv, database.path)
    if exit_status != 0:
        if exit_status & 0x0100:
            # High byte set: the traced program was killed by a signal
            logger.warning("Program appears to have been terminated by "
                           "signal %d", exit_status & 0xFF)
        else:
            logger.warning("Program exited with non-zero code %d",
                           exit_status)
    logger.info("Program completed")
    return exit_status
def functional_tests():
    """End-to-end test of the ``tej`` command-line tool against a live server.

    The server to use is read from the TEJ_DESTINATION environment variable
    (raises KeyError if unset).  If COVER is set, it is used as a command
    prefix so tej's ``__main__`` runs under a coverage tool instead of the
    installed ``tej`` binary.

    Exercises: queue setup (default queue, explicit queue, links), job
    submission, status polling, killing, result download, and deletion --
    asserting on the exact output of each command.
    """
    destination = os.environ['TEJ_DESTINATION']
    logging.info("Using TEJ_DESTINATION %s" % destination)
    # Base command: either the coverage wrapper around tej's entry point, or
    # the plain 'tej' executable; '-v -v' for maximum verbosity
    if 'COVER' in os.environ:
        tej = os.environ['COVER'].split(' ') + [
            bytes(Path.cwd() / 'tej/__main__.py'), '-v', '-v']
    else:
        tej = ['tej', '-v', '-v']

    # Clean up leftovers from a previous run; the directory name contains a
    # space on purpose, to exercise quoting on the remote side
    for path in ('~/.tej', '~/tej 2'):
        path = Path(path).expand_user()
        try:
            path.remove()
        except OSError:
            # Not a plain file/link (or removal failed): try as a directory
            path.rmtree(ignore_errors=True)
    Path('~/tej 2').expand_user().mkdir()

    logging.info("Creating default queue")
    check_call(tej + ['setup', destination])
    assert Path('~/.tej').expand_user().is_dir()
    # cleanup() removes the queue it is pointed at
    RemoteQueue(destination, '~/.tej').cleanup()
    assert not Path('~/.tej').expand_user().exists()

    logging.info("Creating a queue with a link")
    # Note: relative paths here, resolved against the remote home directory
    check_call(tej + ['setup', destination,
                      '--queue', 'tej 2/queue',
                      '--make-link', 'tej 2/link'])
    assert Path('~/tej 2/queue').expand_user().is_dir()
    # A link is a small text file recording the real queue location
    with Path('~/tej 2/link').expand_user().open('r') as fp:
        assert fp.read() == ('tejdir: %s\n' %
                             Path('~/tej 2/queue').expand_user())

    logging.info("Adding links")
    # --only-links: point more links at the existing queue without re-setup
    check_call(tej + ['setup', destination, '--only-links',
                      '--queue', '~/tej 2/queue',
                      '--make-link', '~/tej 2/link2'])
    with Path('~/tej 2/link2').expand_user().open('r') as fp:
        assert fp.read() == ('tejdir: %s\n' %
                             Path('~/tej 2/queue').expand_user())
    assert not Path('~/.tej').expand_user().exists()
    check_call(tej + ['setup', destination, '--only-links',
                      '--queue', '~/tej 2/queue',
                      '--make-default-link'])
    # The default link lives at ~/.tej
    with Path('~/.tej').expand_user().open('r') as fp:
        assert fp.read() == ('tejdir: %s\n' %
                             Path('~/tej 2/queue').expand_user())

    logging.info("Calling status for non-existent job")
    output = check_output(tej + ['status', destination,
                                 '--id', 'nonexistent'])
    assert output == b'not found\n'

    logging.info("Submitting a job")
    jobdir = Path.tempdir(prefix='tej-tests-')
    try:
        # The job script checks that the data files were uploaded, prints to
        # stdout, then blocks until the 'job1done' marker file appears
        with jobdir.open('w', 'start.sh', newline='\n') as fp:
            fp.write('#!/bin/sh\n'
                     '[ -f dir1/data1 ] || exit 1\n'
                     '[ "$(cat dir2/dir3/data2)" = data2 ] || exit 2\n'
                     'echo "stdout here"\n'
                     'while ! [ -e ~/"tej 2/job1done" ]; do\n'
                     ' sleep 1\n'
                     'done\n'
                     'echo "job output" > job1results\n')
        with jobdir.mkdir('dir1').open('wb', 'data1') as fp:
            fp.write(b'data1\n')
        with jobdir.mkdir('dir2').mkdir('dir3').open('w', 'data2') as fp:
            fp.write('data2\n')
        # submit prints the generated job identifier on stdout
        job_id = check_output(tej + ['submit', destination, jobdir.path])
        job_id = job_id.rstrip().decode('ascii')
    finally:
        jobdir.rmtree()

    logging.info("Check status while forgetting job id")
    # status without --id must fail with a non-zero exit code
    assert call(tej + ['status', destination]) != 0

    logging.info("Check status of running job")
    output = check_output(tej + ['status', destination, '--id', job_id])
    assert output == b'running\n'

    logging.info("Finish job")
    # Create the marker file the job script is polling for; give it a couple
    # of seconds to notice and finish
    Path('~/tej 2/job1done').expand_user().open('w').close()
    time.sleep(2)

    logging.info("Check status of finished job")
    output = check_output(tej + ['status', destination, '--id', job_id])
    assert output == b'finished 0\n'

    logging.info("Download job results")
    destdir = Path.tempdir(prefix='tej-tests-')
    try:
        check_call(tej + ['download', destination, '--id', job_id,
                          'job1results', '_stdout'],
                   cwd=destdir.path)
        with destdir.open('r', 'job1results') as fp:
            assert fp.read() == 'job output\n'
        # '_stdout' is the captured standard output of the job
        with destdir.open('r', '_stdout') as fp:
            assert fp.read() == 'stdout here\n'
    finally:
        destdir.rmtree()

    logging.info("List jobs")
    output = check_output(tej + ['list', destination])
    assert output == ('%s finished\n' % job_id).encode('ascii')

    logging.info("Kill already finished job")
    # Killing a finished job is a silent no-op, not an error
    output = check_output(tej + ['kill', destination, '--id', job_id])
    assert output == b''

    logging.info("Remove finished job")
    check_call(tej + ['delete', destination, '--id', job_id])
    output = check_output(tej + ['status', destination, '--id', job_id])
    assert output == b'not found\n'

    logging.info("Submit another job")
    jobdir = Path.tempdir(prefix='tej-tests-')
    try:
        # Long-running job so kill/delete can be tested on a running job
        with jobdir.open('w', 'start.sh', newline='\n') as fp:
            fp.write('#!/bin/sh\n'
                     'sleep 20\n')
        # This time the job id is chosen client-side and passed via --id
        job_id = make_unique_name()
        check_call(tej + ['submit', destination, '--id', job_id,
                          jobdir.path])
    finally:
        jobdir.rmtree()
    output = check_output(tej + ['status', destination, '--id', job_id])
    assert output == b'running\n'

    logging.info("Remove still running job")
    # Deleting a running job must be refused (non-zero exit code)
    assert call(tej + ['delete', destination, '--id', job_id]) != 0

    logging.info("Kill running job")
    output = check_output(tej + ['kill', destination, '--id', job_id])
    output = check_output(tej + ['status', destination, '--id', job_id])
    # The exit status of a killed job is system-dependent; only check format
    assert re.match(b'finished [0-9]+\n', output)

    logging.info("Remove killed job")
    check_call(tej + ['delete', destination, '--id', job_id])
    output = check_output(tej + ['status', destination, '--id', job_id])
    assert output == b'not found\n'

    jobdir = Path.tempdir(prefix='tej-tests-')
    try:
        logging.info("Start remote command job")
        # --script: run an inline shell command instead of a start.sh
        job_id = check_output(tej + ['submit', destination,
                                     '--script', 'echo "hi"',
                                     jobdir.path])
        job_id = job_id.rstrip().decode('ascii')
    finally:
        jobdir.rmtree()
    time.sleep(2)

    logging.info("Download remote command job output")
    destdir = Path.tempdir(prefix='tej-tests-')
    try:
        check_call(tej + ['download', destination, '--id', job_id,
                          '_stdout'],
                   cwd=destdir.path)
        with destdir.open('r', '_stdout') as fp:
            assert fp.read() == 'hi\n'
    finally:
        destdir.rmtree()

    logging.info("Remove finished job")
    output = check_output(tej + ['delete', destination, '--id', job_id])
    assert output == b''
    output = check_output(tej + ['status', destination, '--id', job_id])
    assert output == b'not found\n'

    # Final cleanup through the link; both link and queue must disappear
    RemoteQueue(destination, 'tej 2/link').cleanup()
    assert not Path('~/tej 2/link').expand_user().exists()
    assert not Path('~/tej 2/queue').expand_user().exists()