def download_to_file(self, remote_filename, local_filename):
  'Download filename to local_filename.'
  normalized = vfs_path_util.normalize(remote_filename)
  source_path = self._make_local_file_path(normalized)
  # Validate the source before copying: it must exist and be a regular file.
  if not path.exists(source_path):
    raise vfs_error('file not found: {}'.format(normalized))
  if not path.isfile(source_path):
    raise vfs_error('not a file: {}'.format(normalized))
  file_util.copy(source_path, local_filename)
def _tmp_example_dmg(self, filename):
  'Hard-link a data file into a fresh temp dir and return the new path.'
  # Keep the temp dir around when debugging so it can be inspected.
  work_dir = temp_file.make_temp_dir(delete=not self.DEBUG)
  example_path = path.join(work_dir, filename)
  if self.DEBUG:
    print('tmp_example: %s' % (example_path))
  file_util.copy(self.data_path(filename), example_path, use_hard_link=True)
  return example_path
def build(self, script_filename, output_filename):
  'Build script_filename with pyinstaller and copy the resulting exe to output_filename.'
  file_check.check_file(script_filename)
  check.check_string(output_filename)
  # pyinstaller works with absolute paths regardless of the caller's cwd.
  build_result = pyinstaller_build.build(path.abspath(script_filename))
  file_util.copy(build_result.output_exe, path.abspath(output_filename))
  return 0
def _apply_operation_copy(self, try_git):
  'Copy self.src to self.dst (content and mode bits), optionally staging the result in git.'
  file_util.copy(self.src, self.dst)
  file_util.copy_mode(self.src, self.dst)
  if not try_git:
    return
  repo_root = git.find_root_dir(start_dir=path.dirname(self.dst))
  # Skip git add when the source is ignored by the repo.
  if git.check_ignore(repo_root, self.src):
    return
  try:
    git.add(repo_root, [self.dst])
  except git_error as ex:
    print(f'caught: {ex}')
def test_get_mime_type_cached(self):
  # Verify that get_mime_type(cached=True) re-detects the mime type after the
  # file content is replaced.
  tmp_file = self.make_temp_file(suffix='.png')
  file_util.copy(self.png_file, tmp_file)
  # First read populates the cache.
  self.assertEqual(
    'image/png',
    file_attributes_metadata.get_mime_type(tmp_file, cached=True))
  # Replace the content in place; the path (and its '.png' suffix) stay the same.
  file_util.copy(self.jpg_file, tmp_file)
  # On some linuxes copying over the file does not bump the modification date,
  # which the cache presumably keys on, so force it — TODO confirm the cache
  # invalidation mechanism.
  if host.is_linux():
    file_util.set_modification_date(tmp_file, datetime.now())
  self.assertEqual(
    'image/jpeg',
    file_attributes_metadata.get_mime_type(tmp_file, cached=True))
def download_to_file(clazz, url, filename, chunk_size=None, cookies=None, auth=None):
  'Download url to filename.'
  # Download to a temp file first, then copy into place.  The temp file is
  # removed even when the copy fails, so a failed copy no longer leaks it.
  tmp = clazz.download_to_temp_file(url, chunk_size=chunk_size, cookies=cookies, auth=auth)
  try:
    file_util.copy(tmp, filename)
  finally:
    file_util.remove(tmp)
def _save(self):
  'Write the properties to self.filename; return True if the content changed.'
  if path.exists(self.filename):
    old_checksum = file_util.checksum('sha256', self.filename)
  else:
    old_checksum = None
  # Render to a temp file first so we can compare checksums before touching
  # the real file.
  tmp_file = temp_file.make_temp_file()
  self._properties.save(tmp_file, self._formatter)
  new_checksum = file_util.checksum('sha256', tmp_file)
  if old_checksum == new_checksum:
    return False
  # Only back up a pre-existing, non-empty file.  The old_checksum guard
  # avoids calling is_empty() on a file that does not exist yet (it
  # presumably stats the file — TODO confirm file_util.is_empty semantics).
  if self._backup and old_checksum is not None and not file_util.is_empty(self.filename):
    file_util.backup(self.filename)
  file_util.copy(tmp_file, self.filename)
  return True
def test_get_mime_type_change(self):
  # Verify that get_mime_type() (uncached path) tracks content changes.
  tmp_file = self.make_temp_file(suffix='.png')
  file_util.copy(self.png_file, tmp_file)
  self.assertEqual('image/png', file_attributes_metadata.get_mime_type(tmp_file))
  # Clobber the png content with jpg content, byte for byte, keeping the
  # '.png' filename so detection must look at content, not extension.
  with open(tmp_file, 'wb') as to_file:
    with open(self.jpg_file, 'rb') as from_file:
      to_file.write(from_file.read())
      to_file.flush()
  # for some reason on some linuxes the modification date does not change
  # when we clobber the png file with jpg content
  if host.is_linux():
    file_util.set_modification_date(tmp_file, datetime.now())
  self.assertEqual('image/jpeg', file_attributes_metadata.get_mime_type(tmp_file))
def upload_file(self, local_filename, remote_filename):
  'Upload local_filename to remote_filename.'
  normalized = vfs_path_util.normalize(remote_filename)
  target_path = self._make_local_file_path(normalized)
  # The destination may not already exist as anything other than a regular file.
  if path.isdir(target_path):
    raise vfs_error('filename exists and is a dir: {}'.format(normalized))
  if path.exists(target_path) and not path.isfile(target_path):
    raise vfs_error('filename exists and is not a file: {}'.format(normalized))
  if not path.exists(local_filename):
    raise vfs_error('local_filename not found: {}'.format(local_filename))
  file_util.copy(local_filename, target_path)
  file_util.sync()
def _copy_context_file(clazz, filename, src_dir, dst_dir, substitutions=None):
  'Copy one context file from src_dir into dst_dir, optionally substituting text.'
  subs = substitutions or {}
  source = path.join(src_dir, filename)
  destination = clazz._make_dst_file(dst_dir, filename)
  if not path.isfile(source):
    raise docker_error('context file not found: "{}"'.format(source))
  # Substituted copies go through file_replace; plain copies go straight through.
  if subs:
    file_replace.copy_with_substitute(source, destination, subs, backup=False)
  else:
    file_util.copy(source, destination)
def xtest_find_duplicates_no_write_permission(self):
  # Disabled test (x prefix): duplicate detection should still report a dup
  # that lives in a directory we cannot write to (the system bin dir).
  if host.is_linux():
    shell = 'dash'
  else:
    shell = 'sh'
  sh_exe = which.which(shell)
  bin_dir = path.dirname(sh_exe)
  tmp_dir = self.make_temp_dir()
  # Make a duplicate of the shell executable in a writable temp dir.
  sh_exe_dup = path.join(tmp_dir, 'dupsh.exe')
  file_util.copy(sh_exe, sh_exe_dup)
  result = self._test(
    [], [],
    extra_dirs_before=[
      _file_duplicate_tester_object._extra_dir(bin_dir, '${_bin}'),
      _file_duplicate_tester_object._extra_dir(tmp_dir, '${_tmp}'),
    ])
  # Note: '${{_bin}}' uses doubled braces to escape the literal braces for
  # str.format(), yielding e.g. '${_bin}/dash'.
  self.assertTrue(
    file_duplicates._dup_item('${{_bin}}/{}'.format(shell), ['${_tmp}/dupsh.exe']) in result)
def test_register_getter(self):
  'A custom registered getter should be used by get_value().'

  class _kiwi_basename_getter(file_metadata_getter_base):

    @classmethod
    def name(clazz):
      return 'my_file_size'

    def get_value(self, manager, filename):
      return ('kiwi:' + path.basename(filename)).encode('utf-8')

    def decode_value(self, value):
      return value.decode('utf-8')

  file_attributes_metadata.register_getter(_kiwi_basename_getter)
  tmp_file = self.make_temp_file(suffix='.png')
  file_util.copy(self.png_file, tmp_file)
  expected = 'kiwi:' + path.basename(tmp_file)
  actual = file_attributes_metadata.get_value(tmp_file, 'my_file_size', fallback=False)
  self.assertEqual(expected, actual)
def test_find_archives(self):
  'find_archives() should return real archives and skip fakes that only have archive extensions.'
  tmp_dir = temp_file.make_temp_dir()
  contents = [temp_archive.item('foo.txt', content = 'foo.txt\n')]
  # Real archives of each supported flavor, placed under per-format subdirs.
  real_archives = (
    (archive_extension.ZIP, 'archives/zip/tmp_zip.zip'),
    (archive_extension.TAR, 'archives/tar/tmp_tar.tar'),
    (archive_extension.TGZ, 'archives/tgz/tmp_tgz.tgz'),
    (archive_extension.TAR_GZ, 'archives/tar_gz/tmp_tar_gz.tar.gz'),
  )
  for ext, rel_path in real_archives:
    archive_file = temp_archive.make_temp_archive(contents, ext)
    file_util.copy(archive_file, path.join(tmp_dir, rel_path))
  # Files with archive extensions but non-archive content must be ignored.
  fakes = (
    ('archives/zip/fake_zip.zip', 'not a zip\n'),
    ('archives/tar/fake_tar.tar', 'not a tar\n'),
    ('archives/tar_gz/fake_tar_gz.tar.gz', 'not a tar.gz\n'),
  )
  for rel_path, fake_content in fakes:
    file_util.save(path.join(tmp_dir, rel_path), content = fake_content)
  expected = self.native_filename_list([
    'archives/tar/tmp_tar.tar',
    'archives/tar_gz/tmp_tar_gz.tar.gz',
    'archives/tgz/tmp_tgz.tgz',
    'archives/zip/tmp_zip.zip',
  ])
  self.assertEqual(expected, archiver.find_archives(tmp_dir))
def run(clazz, image_id, command, run_files=None, run_files_substitutions=None,
        run_label=None, volumes=None, env=None, name=None, restart=False,
        detach=False, tty=False, interactive=False, expose=None, debug=False,
        non_blocking=True, remove=True, tmp_dir=None):
  '''Run `command` in a docker container made from `image_id`.

  run_files are copied (with optional text substitutions) into a temp
  "input" dir mounted at /input; an "output" dir is mounted at /output.
  Returns a _run_result(container_id, exit_code, stdout, input_dir, output_dir).
  Raises docker_error when a run file is missing.
  '''
  check.check_string(image_id)
  check.check_string(command)
  check.check_string_seq(run_files, allow_none=True)
  check.check_dict(run_files_substitutions, check.STRING_TYPES, check.STRING_TYPES, allow_none=True)
  check.check_string(run_label, allow_none=True)
  check.check_string(name, allow_none=True)
  check.check_dict(env, check.STRING_TYPES, check.STRING_TYPES, allow_none=True)
  check.check_dict(volumes, check.STRING_TYPES, check.STRING_TYPES, allow_none=True)
  check.check_bool(restart)
  check.check_bool(detach)
  check.check_bool(tty)
  check.check_bool(interactive)
  check.check_int(expose, allow_none=True)
  check.check_bool(debug)
  check.check_bool(non_blocking)
  check.check_bool(remove)
  check.check_string(tmp_dir, allow_none=True)
  cli = command_line.parse_args(command)
  env = env or {}
  volumes = volumes or {}
  run_files = run_files or []
  run_files_substitutions = run_files_substitutions or {}
  run_label = run_label or 'docker.run'
  tmp_dir = tmp_dir or os.getcwd()
  # Per-run scratch area; kept around when debug is True for inspection.
  tmp_run_dir = temp_file.make_temp_dir(suffix='-{}'.format(run_label), dir=tmp_dir, delete=not debug)
  input_dir = path.join(tmp_run_dir, 'input')
  output_dir = path.join(tmp_run_dir, 'output')
  # Stage the run files into the input dir, substituting text where asked
  # (binary files are always copied verbatim).
  for cf in run_files:
    src_file = path.join(os.getcwd(), cf)
    dst_file = path.join(input_dir, cf)
    if not path.isfile(src_file):
      raise docker_error('run file not found: "{}"'.format(src_file))
    if run_files_substitutions and file_mime.is_text(src_file):
      file_replace.copy_with_substitute(src_file, dst_file, run_files_substitutions, backup=False)
    else:
      file_util.copy(src_file, dst_file)
  # Assemble the `docker run` argument list.
  docker_run_args = ['run']
  if detach:
    docker_run_args.append('--detach')
  if tty:
    docker_run_args.append('--tty')
  if interactive:
    docker_run_args.append('--interactive')
  if remove:
    docker_run_args.append('--rm')
  docker_run_args.extend(clazz._make_env_args(env))
  volumes[input_dir] = '/input'
  volumes[output_dir] = '/output'
  docker_run_args.extend(clazz._make_volume_args(volumes))
  if expose:
    docker_run_args.extend(['--expose', str(expose)])
  if restart:
    # NOTE(review): restart is checked as a bool above but passed through as
    # the --restart value here; docker expects a policy string — confirm
    # callers' intent.
    docker_run_args.extend(['--restart', restart])
  if name:
    docker_run_args.extend(['--name', name])
  docker_run_args.append(image_id)
  docker_run_args.append(cli[0])
  docker_run_args.extend(cli[1:])
  clazz.log.log_d('running docker: {}'.format(' '.join(docker_run_args)))
  rv = docker_exe.call_docker(docker_run_args, non_blocking=non_blocking)
  container_id = docker_container.last_container()
  return clazz._run_result(container_id, rv.exit_code, rv.stdout, input_dir, output_dir)
def execute(self, temp_dir):
  'Execute this operation in a temp_dir of the unpacked archive.'
  destination = path.join(temp_dir, self._arcname)
  file_util.copy(self._replacement, destination)
def main():
  '''Entry point for the bes test runner.

  Parses the command line, resolves test files/descriptions, sets up a clean
  environment (plus temp HOME/CWD/TMP dirs to catch side effects), runs the
  tests, prints a summary and timings, and returns a process exit code.

  Fixes vs the previous revision:
  - the snakefood/dependency guard used `a or b and not c`, which made
    --print-deps always error out and left its handler unreachable; it is now
    `(a or b) and not c`.
  - the --iterations help string was a copy-paste of the --python help.
  '''
  DEBUG = os.environ.get('DEBUG', False)
  import bes
  vcli = version_cli(bes)
  # ---- command line definition -------------------------------------------
  parser = argparse.ArgumentParser()
  parser.add_argument('files', action='store', nargs='*', help='Files or directories to rename')
  vcli.version_add_arguments(parser)
  parser.add_argument('--dry-run', '-n', action='store_true', default=False, help='Only print what files will get tests [ False ]')
  parser.add_argument('--timing', '-t', action='store_true', default=False, help='Show the amount of time it takes to run tests [ False ]')
  parser.add_argument('--verbose', '-v', action='store_true', default=False, help='Verbose debug output [ False ]')
  parser.add_argument('--stop', '-s', action='store_true', default=False, help='Stop right after the first failure. [ False ]')
  parser.add_argument('--randomize', action='store_true', default=False, help='Randomize the order in which unit tests run. [ False ]')
  parser.add_argument('--python', action='append', default=[], help='Python executable) to use. Multiple flags can be used for running with mutiple times with different python versions [ python ]')
  parser.add_argument('--page', '-p', action='store_true', default=False, help='Page output with $PAGER [ False ]')
  parser.add_argument('--profile', action='store', default=None, help='Profile the code with cProfile and store the output in the given argument [ None ]')
  parser.add_argument('--coverage', action='store', default=None, help='Run coverage on the code and store the output in the given argument [ None ]')
  parser.add_argument('--pager', action='store', default=os.environ.get('PAGER', 'more'), help='Pager to use when paging [ %s ]' % (os.environ.get('PAGER', 'more')))
  parser.add_argument('--iterations', '-i', action='store', default=1, type=int, help='Number of times to run each test [ 1 ]')
  parser.add_argument('--git', '-g', action='store_true', default=False, help='Use git status to figure out what has changed to test [ False ]')
  parser.add_argument('--commit', '-c', action='store', type=str, default=None, help='Test only the files affected by the given git commit [ None ]')
  parser.add_argument('--pre-commit', action='store_true', default=False, help='Run pre commit checks [ False ]')
  parser.add_argument('--print-tests', action='store_true', default=False, help='Print the list of unit tests [ False ]')
  parser.add_argument('--print-python', action='store_true', default=False, help='Print the detected python executable [ False ]')
  parser.add_argument('--print-files', action='store_true', default=False, help='Print the list of unit files [ False ]')
  parser.add_argument('--egg', action='store_true', default=False, help='Make an egg of the package and run the tests against that instead the live files. [ False ]')
  parser.add_argument('--save-egg', action='store_true', default=False, help='Save the egg in the current directory. [ False ]')
  parser.add_argument('--ignore', action='append', default=[], help='Patterns of filenames to ignore []')
  parser.add_argument('--root-dir', action='store', default=None, help='The root directory for all your projets. By default its computed from your git struture. [ None ]')
  parser.add_argument('--dont-hack-env', action='store_true', default=False, help='Dont hack PATH and PYTHONPATH. [ False ]')
  parser.add_argument('--compile-only', action='store_true', default=False, help='Just compile the files to verify syntax [ False ]')
  parser.add_argument('--print-deps', action='store_true', default=False, help='Print python dependencies for test files [ False ]')
  parser.add_argument('--print-configs', action='store_true', default=False, help='Print testing configs found [ False ]')
  parser.add_argument('--print-root-dir', action='store_true', default=False, help='Print the root dir [ False ]')
  parser.add_argument('--print-path', action='store_true', default=False, help='Print sys.path [ False ]')
  parser.add_argument('--file-ignore-file', action='append', default=[], help='List of file ignore files. [ .bes_test_ignore .bes_test_internal_ignore ]')
  parser.add_argument('--env', action='append', default=[], help='Environment variables to set [ None ]')
  parser.add_argument('--no-env-deps', action='store_true', default=False, help='Dont use env deps. [ False ]')
  parser.add_argument('--temp-dir', action='store', default=None, help='The directory to use for tmp files overriding the system default. [ None ]')
  parser.add_argument('--keep-side-effects', action='store_true', default=DEBUG, help='Dont delete side effects - for debugging. [ False ]')
  parser.add_argument('--ignore-side-effects', action='store_true', default=DEBUG, help='Dont delete side effects - for debugging. [ False ]')
  found_git_exe = git_exe.find_git_exe()
  if not found_git_exe:
    printer.writeln_name('ERROR: No git found. Git is needed to run bes_test.')
    return 1
  # Sort flags alphabetically in --help output.
  for g in parser._action_groups:
    g._group_actions.sort(key=lambda x: x.dest)
  args = parser.parse_args()
  # ---- resolve interpreters ----------------------------------------------
  args.python = _resolve_python_exe_list(args.python)
  if not args.python:
    python_exe = python.find_python_exe()
    if python_exe:
      args.python = [python_exe]
  if not args.python:
    printer.writeln_name('ERROR: No python found. Python is needed to run bes_test.')
    return 1
  _LOG.log_d('using python={}'.format(args.python))
  if args.git and args.commit:
    printer.writeln_name('ERROR: Only one of --git or --commit can be given.')
    return 1
  if args.temp_dir:
    file_util.mkdir(args.temp_dir)
    tempfile.tempdir = args.temp_dir
  if DEBUG:
    args.verbose = True
  cwd = os.getcwd()
  if args.version:
    vcli.version_print_version()
    return 0
  args.env = _parse_args_env(args.env)
  if not args.files:
    args.files = [cwd]
  if not args.file_ignore_file:
    args.file_ignore_file = ['.bes_test_ignore', '.bes_test_internal_ignore']
  if args.commit:
    if args.commit in ['HEAD', 'last']:
      args.commit = git.last_commit_hash('.')
  # ---- resolve test files and descriptions --------------------------------
  ar = argument_resolver(cwd, args.files, root_dir=args.root_dir,
                         file_ignore_filename=args.file_ignore_file,
                         check_git=args.git, git_commit=args.commit,
                         use_env_deps=not args.no_env_deps)
  ar.num_iterations = args.iterations
  ar.randomize = args.randomize
  ar.ignore_with_patterns(args.ignore)
  if args.compile_only:
    total_files = len(ar.all_files)
    for i, f in enumerate(ar.all_files):
      tmp = temp_file.make_temp_file()
      filename_count_blurb = ' ' + _make_count_blurb(i + 1, total_files)
      short_filename = file_util.remove_head(f, cwd)
      blurb = '%7s:%s %s ' % ('compile', filename_count_blurb, short_filename)
      printer.writeln_name(blurb)
      py_compile.compile(f, cfile=tmp, doraise=True)
    return 0
  if not ar.test_descriptions:
    return 1
  # ---- informational --print-* modes --------------------------------------
  if args.print_python:
    for python_exe in args.python:
      print(python_exe)
    return 0
  if args.print_path:
    for p in sys.path:
      print(p)
    return 0
  if args.print_configs:
    ar.print_configs()
    return 0
  if args.print_root_dir:
    print(ar.root_dir)
    return 0
  if args.print_files:
    ar.print_files()
    return 0
  if args.print_tests:
    ar.print_tests()
    return 0
  # BUGFIX: parenthesize the `or`.  The old `a or b and not c` parsed as
  # `a or (b and not c)`, so --print-deps always hit this error and the
  # --print-deps handler below was unreachable.
  if (args.print_deps or args.pre_commit) and not ar.supports_test_dependency_files():
    printer.writeln_name('ERROR: Cannot figure out dependencies. snakefood missing.')
    return 1
  if args.print_deps:
    dep_files = ar.test_dependency_files()
    for filename in sorted(dep_files.keys()):
      print(filename)
      for dep_file in dep_files[filename]:
        print(' %s' % (dep_file.filename))
    return 0
  # Read ~/.bes_test/bes_test.config (or use a default config)
  bes_test_config = _read_config_file()
  keep_patterns = bes_test_config.get_value_string_list('environment', 'keep_patterns')
  # Start with a clean environment so unit testing can be deterministic and not subject
  # to whatever the user happened to have exported. PYTHONPATH and PATH for dependencies
  # are set below by iterating the configs
  keep_keys = bes_test_config.get_value_string_list('environment', 'keep_keys')
  if args.dont_hack_env:
    keep_keys.extend(['PATH', 'PYTHONPATH'])
  keep_keys.extend(['TMPDIR', 'TEMP', 'TMP'])
  env = os_env.make_clean_env(keep_keys=keep_keys,
                              keep_func=lambda key: _env_var_should_keep(key, keep_patterns))
  env_var(env, 'PATH').prepend(path.dirname(found_git_exe))
  for python_exe in args.python:
    env_var(env, 'PATH').prepend(path.dirname(python_exe))
  env['PYTHONDONTWRITEBYTECODE'] = 'x'
  variables = {
    'rebuild_dir': path.expanduser('~/.rebuild'),
    'system': host.SYSTEM,
  }
  if not args.dont_hack_env:
    for var in ar.env_dependencies_variables():
      ov = os_env_var(var)
      if ov.is_set:
        value = ov.value
      else:
        value = ''
      variables[var] = value
  ar.update_environment(env, variables)
  # Update env with whatever was given in --env
  env.update(args.env)
  # Use a custom TMP dir so that we can catch temporary side effects and flag them
  tmp_tmp = temp_file.make_temp_dir(prefix='bes_test_', suffix='.tmp.tmp.dir', delete=False)
  env.update({
    'TMPDIR': tmp_tmp,
    'TEMP': tmp_tmp,
    'TMP': tmp_tmp,
  })
  side_effects = {}
  num_passed = 0
  num_failed = 0
  num_executed = 0
  num_tests = len(ar.test_descriptions)
  failed_tests = []
  # Remove current dir from sys.path to avoid side effects
  if cwd in sys.path:
    sys.path.remove(cwd)
  # ---- optional egg build --------------------------------------------------
  if args.egg:
    pythonpath = env_var(env, 'PYTHONPATH')
    pythonpath.remove(cwd)
    for config in ar.env_dependencies_configs:
      setup_dot_py = path.join(config.root_dir, 'setup.py')
      if not path.isfile(setup_dot_py):
        raise RuntimeError('No setup.py found in %s to make the egg.' % (cwd))
      egg_zip = egg.make(config.root_dir, 'master', setup_dot_py, untracked=False)
      pythonpath.prepend(egg_zip)
      printer.writeln_name('using tmp egg: %s' % (egg_zip))
      if args.save_egg:
        file_util.copy(egg_zip, path.join(cwd, path.basename(egg_zip)))
  # ---- pre-commit check ----------------------------------------------------
  if args.pre_commit:
    missing_from_git = []
    for filename, dep_files in ar.test_dependency_files().items():
      for dep_file in dep_files:
        if dep_file.config and not dep_file.git_tracked:
          missing_from_git.append(dep_file.filename)
    if missing_from_git:
      for f in missing_from_git:
        printer.writeln_name('PRE_COMMIT: missing from git: %s' % (path.relpath(f)))
      return 1
    return 0
  ar.cleanup_python_compiled_files()
  # Do all our work with a temporary working directory to be able to check for side effects
  tmp_cwd = temp_file.make_temp_dir(prefix='bes_test_', suffix='.tmp.cwd.dir', delete=False)
  tmp_home = temp_file.make_temp_dir(prefix='bes_test_', suffix='.tmp.home.dir', delete=False)
  os.environ['HOME'] = tmp_home
  os.chdir(tmp_cwd)
  # Use what the OS thinks the path is (to deal with symlinks and virtual tmpfs things)
  tmp_cwd = os.getcwd()
  if not args.dry_run and args.page:
    printer.OUTPUT = tempfile.NamedTemporaryFile(prefix='bes_test', delete=True, mode='w')
  total_tests = _count_tests(ar.inspect_map, ar.test_descriptions)
  total_files = len(ar.test_descriptions)
  total_num_tests = 0
  if args.profile:
    args.profile = path.abspath(args.profile)
    if not _check_program('cprofilev'):
      return 1
  if args.coverage:
    args.coverage = path.abspath(args.coverage)
    coverage_exe = _check_program('coverage')
    if not coverage_exe:
      return 1
    args.python = [coverage_exe]
  if args.profile and args.coverage:
    printer.writeln_name('ERROR: --profile and --coverage are mutually exclusive.')
    return 1
  options = test_options(args.dry_run, args.verbose, args.stop, args.timing,
                         args.profile, args.coverage, args.python, args.temp_dir, tmp_home)
  # ---- run the tests -------------------------------------------------------
  timings = {}
  total_time_start = time.time()
  stopped = False
  for i, test_desc in enumerate(ar.test_descriptions):
    file_info = test_desc.file_info
    filename = file_info.filename
    if not filename in timings:
      timings[filename] = []
    for python_exe in args.python:
      result = _test_execute(python_exe, ar.inspect_map, filename, test_desc.tests,
                             options, i + 1, total_files, cwd, env)
      _collect_side_effects(side_effects, filename, tmp_home, 'home', args.keep_side_effects)
      _collect_side_effects(side_effects, filename, tmp_tmp, 'tmp', args.keep_side_effects)
      _collect_side_effects(side_effects, filename, os.getcwd(), 'cwd', args.keep_side_effects)
      timings[filename].append(result.elapsed_time)
      total_num_tests += result.num_tests_run
      num_executed += 1
      if result.success:
        num_passed += 1
      else:
        num_failed += 1
        failed_tests.append((python_exe, filename, result))
      if args.stop and not result.success:
        stopped = True
    if stopped:
      break
  total_elapsed_time = 1000 * (time.time() - total_time_start)
  if args.dry_run:
    return 0
  # ---- summary -------------------------------------------------------------
  num_skipped = num_tests - num_executed
  summary_parts = []
  if total_num_tests == total_tests:
    function_summary = '(%d %s)' % (total_tests, _make_test_string(total_tests))
  else:
    function_summary = '(%d of %d %s)' % (total_num_tests, total_tests, _make_test_string(total_tests))
  if num_failed > 0:
    summary_parts.append('%d of %d fixtures FAILED' % (num_failed, num_tests))
  summary_parts.append('%d of %d passed %s' % (num_passed, num_tests, function_summary))
  if num_skipped > 0:
    summary_parts.append('%d of %d skipped' % (num_skipped, num_tests))
  summary = '; '.join(summary_parts)
  printer.writeln_name('%s' % (summary))
  if failed_tests:
    longest_python_exe = max([len(path.basename(p)) for p in options.interpreters])
    for python_exe, filename, result in failed_tests:
      if len(options.interpreters) > 1:
        python_exe_blurb = path.basename(python_exe).rjust(longest_python_exe)
      else:
        python_exe_blurb = ''
      error_status = unit_test_output.error_status(result.output)
      for error in error_status.errors:
        printer.writeln_name('%5s: %s %s :%s.%s' % (error.error_type, python_exe_blurb,
                                                    file_util.remove_head(filename, cwd),
                                                    error.fixture, error.function))
  if num_failed > 0:
    rv = 1
  else:
    rv = 0
  # ---- timings -------------------------------------------------------------
  if args.timing:
    filenames = sorted(timings.keys())
    num_filenames = len(filenames)
    for i, filename in zip(range(0, num_filenames), filenames):
      short_filename = file_util.remove_head(filename, cwd)
      all_timings = timings[filename]
      num_timings = len(all_timings)
      avg_ms = _timing_average(all_timings) * 1000.0
      if num_timings > 1:
        run_blurb = '(average of %d runs)' % (num_timings)
      else:
        run_blurb = ''
      if num_filenames > 1:
        count_blurb = '[%s of %s] ' % (i + 1, num_filenames)
      else:
        count_blurb = ''
      printer.writeln_name('timing: %s%s - %2.2f ms %s' % (count_blurb, short_filename, avg_ms, run_blurb))
    if total_elapsed_time >= 1000.0:
      printer.writeln_name('total time: %2.2f s' % (total_elapsed_time / 1000.0))
    else:
      printer.writeln_name('total time: %2.2f ms' % (total_elapsed_time))
  if args.page:
    subprocess.call([args.pager, printer.OUTPUT.name])
  # ---- side effect reporting and cleanup ------------------------------------
  current_cwd = os.getcwd()
  if current_cwd != tmp_cwd:
    rv = 1
    printer.writeln_name('SIDE EFFECT: working directory was changed from %s to %s' % (tmp_cwd, current_cwd))
  if not args.ignore_side_effects:
    for test, items in sorted(side_effects.items()):
      for item in items:
        rv = 1
        filename = item.filename
        print('SIDE EFFECT [{}] {} {}'.format(item.label, test.replace(cwd + os.sep, ''), filename))
  os.chdir('/tmp')
  if not args.keep_side_effects:
    file_util.remove(tmp_cwd)
    file_util.remove(tmp_home)
    file_util.remove(tmp_tmp)
  return rv