def test_move_files(self):
  'move_files() relocates the whole tree to dst and leaves src empty.'
  tmp_dir = self.make_temp_dir()
  src_dir = path.join(tmp_dir, 'src')
  dst_dir = path.join(tmp_dir, 'dst')
  file_util.mkdir(dst_dir)
  items = [
    'file foo.txt "This is foo.txt\n" 644',
    'file bar.txt "This is bar.txt\n" 644',
    'file sub1/sub2/baz.txt "This is baz.txt\n" 644',
    'file yyy/zzz/vvv.txt "This is vvv.txt\n" 644',
    'file .hidden "this is .hidden\n" 644',
    'file script.sh "#!/bin/bash\necho script.sh\nexit 0\n" 755',
    'file .hushlogin "" 644',
  ]
  temp_content.write_items(items, src_dir)
  expected = [ self.native_filename(f) for f in (
    '.hidden',
    '.hushlogin',
    'bar.txt',
    'foo.txt',
    'script.sh',
    'sub1/sub2/baz.txt',
    'yyy/zzz/vvv.txt',
  ) ]
  self.assertEqual(expected, file_find.find(src_dir, relative = True))
  file_copy.move_files(src_dir, dst_dir)
  # everything arrived at dst and nothing remains at src
  self.assertEqual(expected, file_find.find(dst_dir, relative = True))
  self.assertEqual([], file_find.find(src_dir, relative = True))
def extract(clazz, dmg, dst_dir):
  'Extract the contents of a dmg into dst_dir, always ejecting the mount.'
  file_check.check_file(dmg)
  file_util.mkdir(dst_dir)
  mnt = clazz._mount_at_temp_dir(dmg)
  try:
    #clazz._fix_extracted_dir_permissions(mnt.mount_point)
    file_copy.copy_tree(mnt.mount_point, dst_dir)
  finally:
    # FIX: eject even when copy_tree raises, otherwise the dmg stays mounted
    clazz._eject(mnt.mount_point)
def call_pyinstaller(clazz, args, build_dir = None, replace_env = None):
  'Invoke PyInstaller with args, routing its output under build_dir if given.'
  check.check_string_seq(args)
  check.check_string(build_dir, allow_none = True)
  # FIX: replace_env defaults to None, so the check needs allow_none like
  # the build_dir check above it.
  check.check_dict(replace_env, check.STRING_TYPES, check.STRING_TYPES, allow_none = True)

  cmd = command_line.parse_args(args)
  replace_env = replace_env or {}
  env = os_env.clone_current_env(d = {})
  env.update(replace_env)
  clazz._log.log_d('using env: {}'.format(pprint.pformat(env)))
  clazz._log.log_d('calling pyinstaller: {}'.format(' '.join(cmd)))
  if build_dir:
    # keep all pyinstaller droppings (dist, work, spec) under build_dir
    file_util.mkdir(build_dir)
    dist_dir = path.join(build_dir, 'dist')
    work_dir = path.join(build_dir, 'work')
    spec_dir = path.join(build_dir, 'spec')
    args = args[:]
    args.extend([ '--distpath', dist_dir ])
    args.extend([ '--workpath', work_dir ])
    args.extend([ '--specpath', spec_dir ])
  try:
    with env_override(env = env) as _:
      PyInstaller_run(pyi_args = args, pyi_config = None)
  except Exception as ex:
    # chain the original exception so the real traceback is not lost
    raise pyinstaller_error(str(ex)) from ex
def _move_files_test(self, extra_content_items = None, recursive = False,
                     dry_run = False, dup_file_timestamp = None, dup_file_count = None):
  'Drive the "files move" command with the given flags and return the tester.'
  with dir_operation_tester(extra_content_items = extra_content_items) as test:
    file_util.mkdir(test.dst_dir)
    args = [ 'files', 'move', test.src_dir, test.dst_dir ]
    if recursive:
      args.append('--recursive')
    if dry_run:
      args.append('--dry-run')
    if dup_file_timestamp:
      args.extend([ '--dup-file-timestamp', dup_file_timestamp ])
    if dup_file_count:
      args.extend([ '--dup-file-count', str(dup_file_count) ])
    test.result = self.run_program(self._program, args)
    return test
def rename_dirs(clazz, dirs, src_pattern, dst_pattern, options = None):
  'Rename directories matching src_pattern to dst_pattern, handling empty dirs too.'
  check.check_string(src_pattern)
  check.check_string(dst_pattern)
  check.check_refactor_options(options, allow_none = True)

  options = options or refactor_options()
  clazz._log.log_method_d()

  # _do_operation only deals with files, so empty directories that match the
  # pattern have to be found and renamed by hand here.
  empty_dirs = file_resolver.resolve_empty_dirs(dirs, recursive = True)
  empty_items, empty_affected = clazz._make_operation_items(
    refactor_operation_type.RENAME_DIRS,
    empty_dirs,
    src_pattern,
    dst_pattern,
    False,
    options.word_boundary,
    options.word_boundary_chars)
  result = clazz._do_operation(refactor_operation_type.RENAME_DIRS,
                               dirs,
                               src_pattern,
                               dst_pattern,
                               False,
                               options)
  for empty_item in empty_items:
    file_util.mkdir(empty_item.dst)
    assert dir_util.is_empty(empty_item.src)
    dir_util.remove(empty_item.src)
  for affected in empty_affected:
    if path.exists(affected) and dir_util.is_empty(affected):
      dir_util.remove(affected)
  return result
def archive(clazz, address, revision, base_name, output_filename,
            untracked = False, override_gitignore = None, debug = False):
  'git archive with additional support to include untracked files for local repos.'
  tmp_repo_dir = temp_file.make_temp_dir(delete = not debug)
  if path.isdir(address):
    excludes = git_ignore.read_gitignore_file(address)
    file_copy.copy_tree(address, tmp_repo_dir, excludes = excludes)
    if override_gitignore:
      # FIX: write the override into the *temporary* copy.  The original wrote
      # it into address, destructively modifying the caller's source repo.
      file_util.save(path.join(tmp_repo_dir, '.gitignore'), content = override_gitignore)
    if untracked:
      git_exe.call_git(tmp_repo_dir, [ 'add', '-A' ])
      git_exe.call_git(tmp_repo_dir, [ 'commit', '-m', '"add untracked files just for tmp repo"' ])
  else:
    if untracked:
      raise git_error('untracked can only be True for local repos.')
    clazz.clone(address, tmp_repo_dir)
  output_filename = path.abspath(output_filename)
  file_util.mkdir(path.dirname(output_filename))
  args = [
    'archive',
    '--format=tgz',
    '--prefix=%s-%s/' % (base_name, revision),
    '-o',
    output_filename,
    revision,
  ]
  rv = git_exe.call_git(tmp_repo_dir, args)
  return rv
def _determine_dest_dir(clazz, dest_dir, base_dir):
  'Return dest_dir (joined with base_dir when given), creating it on disk.'
  # the original had a no-op "else: dest_dir = dest_dir" branch; removed
  if base_dir:
    dest_dir = path.join(dest_dir, base_dir)
  file_util.mkdir(dest_dir)
  return dest_dir
def mkdir(self, remote_dir):
  'Create a remote dir. Returns the fs specific directory id if appropriate or None'
  normalized = vfs_path_util.normalize(remote_dir)
  local_path = self._make_local_file_path(normalized)
  if not path.exists(local_path):
    file_util.mkdir(local_path)
    return None
  # the path exists: a directory is fine, a regular file is an error
  if path.isdir(local_path):
    return None
  raise vfs_error('already a file: {}'.format(normalized))
def test_find_root_dir(self):
  'find_root_dir() locates the repo root from the root itself and from deep inside it.'
  tmp_repo = self._create_tmp_repo()
  self.assertEqual(tmp_repo, git.find_root_dir(start_dir = tmp_repo))
  nested = path.join(tmp_repo, 'foo', 'bar', 'baz')
  file_util.mkdir(nested)
  self.assertEqual(tmp_repo, git.find_root_dir(start_dir = nested))
  # a directory that is not inside any git repo yields None
  self.assertEqual(None, git.find_root_dir(self.make_temp_dir()))
def extract_egg_test_files(clazz, egg, dst):
  'Extract only the test files from an egg into dst, making scripts executable.'
  with zipfile.ZipFile(file = egg, mode = 'r') as zf:
    test_members = clazz.filter_test_file_members(zf.infolist())
    file_util.mkdir(dst)
    for next_member in test_members:
      zf.extract(next_member, path = dst)
      extracted = path.join(dst, next_member.filename)
      # scripts need their executable bit restored after extraction
      if path.splitext(extracted)[1] in ( '.py', '.sh' ):
        os.chmod(extracted, 0o755)
def _move_dir(clazz, from_dir, dest_dir):
  'Move from_dir into dest_dir by copying the tree then removing the source.'
  file_util.mkdir(dest_dir)
  # copy-then-remove works across filesystems where a plain rename would fail
  file_copy.copy_tree(from_dir, dest_dir)
  file_util.remove(from_dir)
def test_resolve(self):
  'resolve() passes through full addresses and expands ~ for local paths.'
  for address in ( 'https://github.com/git/git.git', '[email protected]/git/git.git' ):
    self.assertEqual(address, git_address_util.resolve(address))
  # NOTE(review): this creates ~/minerepo in the real home directory and never
  # removes it - a test side effect worth cleaning up separately.
  tmp_repo = path.expanduser('~/minerepo')
  file_util.mkdir(tmp_repo)
  git.init(tmp_repo)
  self.assertEqual(tmp_repo, git_address_util.resolve('~/minerepo'))
def archive_to_dir(clazz, root, revision, output_dir):
  'git archive to a dir.'
  file_util.mkdir(output_dir)
  tar_tmp = temp_file.make_temp_file(suffix = '.tar')
  # archive to a temporary tarball, then unpack it into output_dir
  git_exe.call_git(root, [ 'archive', '--format=tar', '-o', tar_tmp, revision ])
  archiver.extract_all(tar_tmp, output_dir)
def make_dst_vmx_filename(clazz, src_vmx_filename, clone_name, where):
  'Compute the vmx filename for a clone, creating the target dir on disk.'
  check.check_string(src_vmx_filename)
  check.check_string(clone_name)
  check.check_string(where, allow_none = True)

  if not where:
    # default to a sibling "<clone_name>.vmwarevm" dir next to the source vm dir
    vms_root_dir = path.normpath(path.join(path.dirname(src_vmx_filename), path.pardir))
    where = path.join(vms_root_dir, '{}.vmwarevm'.format(clone_name))
  file_util.mkdir(where)
  return path.join(where, '{}.vmx'.format(clone_name))
def clone(clazz, address, root_dir, options = None):
  'Clone address into root_dir, optionally initializing submodules. Returns (clone_rv, sub_rv).'
  check.check_git_clone_options(options, allow_none = True)

  # local paths and abbreviations are resolved into a usable git address
  address = git_address_util.resolve(address)
  options = options or git_clone_options()
  clazz.log.log_d('clone: address={} root_dir={} options={}'.format(address, root_dir, options))
  if path.exists(root_dir):
    if not path.isdir(root_dir):
      raise git_error('root_dir "{}" is not a directory.'.format(root_dir))
    if options.enforce_empty_dir:
      # refuse to clone into a non-empty dir, listing its contents for the user
      if not dir_util.is_empty(root_dir):
        files = dir_util.list(root_dir, relative = True)
        sorted_files = sorted(files, key = lambda f: f.lower())
        printed_files = '\n '.join(sorted_files).strip()
        raise git_error('root_dir "{}" is not empty:\n {}\n'.format(root_dir, printed_files))
  else:
    file_util.mkdir(root_dir)
  # translate options into git clone command line flags
  args = [ 'clone' ]
  if options.depth:
    args.extend([ '--depth', str(options.depth) ])
  if options.jobs:
    args.extend([ '--jobs', str(options.jobs) ])
  if options.branch:
    args.extend([ '--branch', options.branch ])
  if options.submodules_recursive:
    args.extend([ '--recursive' ])
  if options.shallow_submodules:
    args.extend([ '--shallow-submodules' ])
  args.append(address)
  args.append(root_dir)
  # git-lfs behavior is controlled through environment variables
  extra_env = git_lfs.lfs_make_env(options.lfs)
  clazz.log.log_d('clone: args="{}" extra_env={}'.format(' '.join(args), extra_env))
  clone_rv = git_exe.call_git(os.getcwd(), args,
                              extra_env = extra_env,
                              num_tries = options.num_tries,
                              retry_wait_seconds = options.retry_wait_seconds)
  clazz.log.log_d('clone: clone_rv="{}"'.format(str(clone_rv)))
  sub_rv = None
  if options.branch:
    # NOTE(review): clone already passed --branch; this extra checkout looks
    # redundant but is preserved as-is - confirm intent before removing.
    git.checkout(root_dir, options.branch)
  if options.submodules or options.submodule_list:
    sub_rv = clazz._submodule_init(root_dir, options)
  return clone_rv, sub_rv
def make_cloned_vm_names(clazz, src_vmx_filename, clone_name, where):
  'Determine the names (vmx paths, nickname) to use when cloning a vm.'
  check.check_string(src_vmx_filename)
  check.check_string(clone_name)
  check.check_string(where)

  vms_root_dir = path.normpath(path.join(path.dirname(src_vmx_filename), path.pardir))
  src_nickname = vmware_vmx_file(src_vmx_filename).nickname
  tmp_part = clazz._tmp_nickname_part()
  if not clone_name:
    # synthesize a unique clone name from the source nickname
    clone_name = '{}_clone_{}'.format(src_nickname, tmp_part)
  if not where:
    # default to a sibling "<clone_name>.vmwarevm" dir next to the source vm dir
    where = path.join(vms_root_dir, '{}.vmwarevm'.format(clone_name))
  file_util.mkdir(where)
  dst_vmx_filename = path.join(where, '{}.vmx'.format(clone_name))
  return clazz._cloned_vm_names(src_vmx_filename, dst_vmx_filename, clone_name)
def __init__(self, options = None):
  'Create a pip project rooted at options.resolve_root_dir().'
  # options is effectively required: the original fallback called the
  # *checker* (check_pip_project_options()) with no args - a typo that could
  # never work.  The check below enforces a valid options object.
  check.check_pip_project_options(options)

  self._extra_env = {}
  self._options = options
  self._root_dir = self._options.resolve_root_dir()
  self._pip_cache_dir = path.join(self.droppings_dir, 'pip-cache')
  self._fake_home_dir = path.join(self.droppings_dir, 'fake-home')
  file_util.mkdir(self._fake_home_dir)
  self._fake_tmp_dir = path.join(self.droppings_dir, 'fake-tmp')
  file_util.mkdir(self._fake_tmp_dir)
  self._common_pip_args = [
    '--cache-dir', self._pip_cache_dir,
  ]
  try:
    self._do_init(1)
  except python_error as ex:
    if 'version mismatch' in ex.message:
      self._options.blurber.blurb('{} - Fixing automagically.'.format(ex.message))
      self._options.blurber.blurb('removing {}'.format(self._root_dir))
      file_util.remove(self._root_dir)
      self._do_init(2)
    else:
      # FIX: the original silently swallowed any other python_error, leaving
      # a half-constructed object; propagate it instead.
      raise
def _do_operation(clazz, operation, dirs, src_pattern, dst_pattern, copy_dirs, options):
  'Apply a refactor operation (rename/copy) to all files found under dirs.'
  assert options

  # deepest paths first so children get processed before their parents
  resolver_options = file_resolver_options(recursive = True,
                                           sort_order = 'depth',
                                           sort_reverse = True)
  resolved = file_resolver.resolve_files(dirs, options = resolver_options)
  items, affected_dirs = clazz._make_operation_items(operation,
                                                     resolved,
                                                     src_pattern,
                                                     dst_pattern,
                                                     copy_dirs,
                                                     options.word_boundary,
                                                     options.word_boundary_chars)
  # refuse unsafe items, or only warn about them when options.unsafe is set
  for item in items:
    safety = item.is_safe(operation)
    if safety.safe:
      continue
    if not options.unsafe:
      raise RuntimeError(safety.reason)
    options.blurber.blurb(f'UNSAFE: {safety.reason}')

  # create any destination directories that do not exist yet
  dst_dirs = algorithm.unique([ path.dirname(item.dst) for item in items ])
  for dst_dir in dst_dirs:
    if dst_dir and not path.exists(dst_dir):
      file_util.mkdir(dst_dir)

  for item in items:
    item.apply_operation(operation, options.try_git)

  # renames can leave empty source dirs behind; prune them (copies keep src)
  if operation != refactor_operation_type.COPY_FILES:
    for d in affected_dirs:
      if path.exists(d) and dir_util.is_empty(d):
        dir_util.remove(d)
  return items
def main():
  'Entry point for the bes test runner; returns a process exit code.'
  # DEBUG env var turns on verbose output and keeps side-effect dirs around
  DEBUG = os.environ.get('DEBUG', False)

  import bes
  vcli = version_cli(bes)
  parser = argparse.ArgumentParser()
  parser.add_argument('files', action='store', nargs='*', help='Files or directories to rename')
  vcli.version_add_arguments(parser)
  parser.add_argument('--dry-run', '-n', action='store_true', default=False,
                      help='Only print what files will get tests [ False ]')
  parser.add_argument('--timing', '-t', action='store_true', default=False,
                      help='Show the amount of time it takes to run tests [ False ]')
  parser.add_argument('--verbose', '-v', action='store_true', default=False,
                      help='Verbose debug output [ False ]')
  parser.add_argument('--stop', '-s', action='store_true', default=False,
                      help='Stop right after the first failure. [ False ]')
  parser.add_argument('--randomize', action='store_true', default=False,
                      help='Randomize the order in which unit tests run. [ False ]')
  parser.add_argument('--python', action='append', default=[],
                      help='Python executable) to use. Multiple flags can be used for running with mutiple times with different python versions [ python ]')
  parser.add_argument('--page', '-p', action='store_true', default=False,
                      help='Page output with $PAGER [ False ]')
  parser.add_argument('--profile', action='store', default=None,
                      help='Profile the code with cProfile and store the output in the given argument [ None ]')
  parser.add_argument('--coverage', action='store', default=None,
                      help='Run coverage on the code and store the output in the given argument [ None ]')
  parser.add_argument('--pager', action='store', default=os.environ.get('PAGER', 'more'),
                      help='Pager to use when paging [ %s ]' % (os.environ.get('PAGER', 'more')))
  parser.add_argument('--iterations', '-i', action='store', default=1, type=int,
                      help='Python executable to use [ python ]')
  parser.add_argument('--git', '-g', action='store_true', default=False,
                      help='Use git status to figure out what has changed to test [ False ]')
  parser.add_argument('--commit', '-c', action='store', type=str, default=None,
                      help='Test only the files affected by the given git commit [ None ]')
  parser.add_argument('--pre-commit', action='store_true', default=False,
                      help='Run pre commit checks [ False ]')
  parser.add_argument('--print-tests', action='store_true', default=False,
                      help='Print the list of unit tests [ False ]')
  parser.add_argument('--print-python', action='store_true', default=False,
                      help='Print the detected python executable [ False ]')
  parser.add_argument('--print-files', action='store_true', default=False,
                      help='Print the list of unit files [ False ]')
  parser.add_argument('--egg', action='store_true', default=False,
                      help='Make an egg of the package and run the tests against that instead the live files. [ False ]')
  parser.add_argument('--save-egg', action='store_true', default=False,
                      help='Save the egg in the current directory. [ False ]')
  parser.add_argument('--ignore', action='append', default=[],
                      help='Patterns of filenames to ignore []')
  parser.add_argument('--root-dir', action='store', default=None,
                      help='The root directory for all your projets. By default its computed from your git struture. [ None ]')
  parser.add_argument('--dont-hack-env', action='store_true', default=False,
                      help='Dont hack PATH and PYTHONPATH. [ False ]')
  parser.add_argument('--compile-only', action='store_true', default=False,
                      help='Just compile the files to verify syntax [ False ]')
  parser.add_argument('--print-deps', action='store_true', default=False,
                      help='Print python dependencies for test files [ False ]')
  parser.add_argument('--print-configs', action='store_true', default=False,
                      help='Print testing configs found [ False ]')
  parser.add_argument('--print-root-dir', action='store_true', default=False,
                      help='Print the root dir [ False ]')
  parser.add_argument('--print-path', action='store_true', default=False,
                      help='Print sys.path [ False ]')
  parser.add_argument('--file-ignore-file', action='append', default=[],
                      help='List of file ignore files. [ .bes_test_ignore .bes_test_internal_ignore ]')
  parser.add_argument('--env', action='append', default=[],
                      help='Environment variables to set [ None ]')
  parser.add_argument('--no-env-deps', action='store_true', default=False,
                      help='Dont use env deps. [ False ]')
  parser.add_argument('--temp-dir', action='store', default=None,
                      help='The directory to use for tmp files overriding the system default. [ None ]')
  parser.add_argument('--keep-side-effects', action='store_true', default=DEBUG,
                      help='Dont delete side effects - for debugging. [ False ]')
  parser.add_argument('--ignore-side-effects', action='store_true', default=DEBUG,
                      help='Dont delete side effects - for debugging. [ False ]')

  found_git_exe = git_exe.find_git_exe()
  if not found_git_exe:
    printer.writeln_name('ERROR: No git found. Git is needed to run bes_test.')
    return 1

  # sort options alphabetically so --help output is stable and readable
  for g in parser._action_groups:
    g._group_actions.sort(key=lambda x: x.dest)

  args = parser.parse_args()

  args.python = _resolve_python_exe_list(args.python)
  if not args.python:
    python_exe = python.find_python_exe()
    if python_exe:
      args.python = [python_exe]
  if not args.python:
    printer.writeln_name('ERROR: No python found. Python is needed to run bes_test.')
    return 1
  _LOG.log_d('using python={}'.format(args.python))

  if args.git and args.commit:
    printer.writeln_name('ERROR: Only one of --git or --commit can be given.')
    return 1

  if args.temp_dir:
    file_util.mkdir(args.temp_dir)
    tempfile.tempdir = args.temp_dir

  if DEBUG:
    args.verbose = True

  cwd = os.getcwd()

  if args.version:
    vcli.version_print_version()
    return 0

  args.env = _parse_args_env(args.env)

  if not args.files:
    args.files = [cwd]

  if not args.file_ignore_file:
    args.file_ignore_file = ['.bes_test_ignore', '.bes_test_internal_ignore']

  if args.commit:
    if args.commit in ['HEAD', 'last']:
      args.commit = git.last_commit_hash('.')

  ar = argument_resolver(cwd, args.files,
                         root_dir=args.root_dir,
                         file_ignore_filename=args.file_ignore_file,
                         check_git=args.git,
                         git_commit=args.commit,
                         use_env_deps=not args.no_env_deps)
  ar.num_iterations = args.iterations
  ar.randomize = args.randomize
  ar.ignore_with_patterns(args.ignore)

  if args.compile_only:
    # syntax-check every file without running any tests
    total_files = len(ar.all_files)
    for i, f in enumerate(ar.all_files):
      tmp = temp_file.make_temp_file()
      filename_count_blurb = ' ' + _make_count_blurb(i + 1, total_files)
      short_filename = file_util.remove_head(f, cwd)
      blurb = '%7s:%s %s ' % ('compile', filename_count_blurb, short_filename)
      printer.writeln_name(blurb)
      py_compile.compile(f, cfile=tmp, doraise=True)
    return 0

  if not ar.test_descriptions:
    return 1

  if args.print_python:
    for python_exe in args.python:
      print(python_exe)
    return 0

  if args.print_path:
    for p in sys.path:
      print(p)
    return 0

  if args.print_configs:
    ar.print_configs()
    return 0

  if args.print_root_dir:
    print(ar.root_dir)
    return 0

  if args.print_files:
    ar.print_files()
    return 0

  if args.print_tests:
    ar.print_tests()
    return 0

  # FIX: the original condition was "print_deps or pre_commit and not supports"
  # which parses as "print_deps or (pre_commit and not supports)" - so plain
  # --print-deps always errored out and the --print-deps block below was
  # unreachable.  Parenthesized to match the clear intent.
  if (args.print_deps or args.pre_commit) and not ar.supports_test_dependency_files():
    printer.writeln_name('ERROR: Cannot figure out dependencies. snakefood missing.')
    return 1

  if args.print_deps:
    dep_files = ar.test_dependency_files()
    for filename in sorted(dep_files.keys()):
      print(filename)
      for dep_file in dep_files[filename]:
        print(' %s' % (dep_file.filename))
    return 0

  # Read ~/.bes_test/bes_test.config (or use a default config)
  bes_test_config = _read_config_file()
  keep_patterns = bes_test_config.get_value_string_list('environment', 'keep_patterns')

  # Start with a clean environment so unit testing can be deterministic and not subject
  # to whatever the user happened to have exported.  PYTHONPATH and PATH for dependencies
  # are set below by iterating the configs
  keep_keys = bes_test_config.get_value_string_list('environment', 'keep_keys')
  if args.dont_hack_env:
    keep_keys.extend(['PATH', 'PYTHONPATH'])
  keep_keys.extend(['TMPDIR', 'TEMP', 'TMP'])
  env = os_env.make_clean_env(keep_keys=keep_keys,
                              keep_func=lambda key: _env_var_should_keep(key, keep_patterns))
  env_var(env, 'PATH').prepend(path.dirname(found_git_exe))
  for python_exe in args.python:
    env_var(env, 'PATH').prepend(path.dirname(python_exe))
  env['PYTHONDONTWRITEBYTECODE'] = 'x'
  variables = {
    'rebuild_dir': path.expanduser('~/.rebuild'),
    'system': host.SYSTEM,
  }
  if not args.dont_hack_env:
    for var in ar.env_dependencies_variables():
      ov = os_env_var(var)
      if ov.is_set:
        value = ov.value
      else:
        value = ''
      variables[var] = value
  ar.update_environment(env, variables)

  # Update env with whatever was given in --env
  env.update(args.env)

  # Use a custom TMP dir so that we can catch temporary side effects and flag them
  tmp_tmp = temp_file.make_temp_dir(prefix='bes_test_', suffix='.tmp.tmp.dir', delete=False)
  env.update({
    'TMPDIR': tmp_tmp,
    'TEMP': tmp_tmp,
    'TMP': tmp_tmp,
  })
  side_effects = {}

  num_passed = 0
  num_failed = 0
  num_executed = 0
  num_tests = len(ar.test_descriptions)
  failed_tests = []

  # Remove current dir from sys.path to avoid side effects
  if cwd in sys.path:
    sys.path.remove(cwd)

  if args.egg:
    # run the tests against a freshly built egg instead of the live files
    pythonpath = env_var(env, 'PYTHONPATH')
    pythonpath.remove(cwd)
    for config in ar.env_dependencies_configs:
      setup_dot_py = path.join(config.root_dir, 'setup.py')
      if not path.isfile(setup_dot_py):
        raise RuntimeError('No setup.py found in %s to make the egg.' % (cwd))
      egg_zip = egg.make(config.root_dir, 'master', setup_dot_py, untracked=False)
      pythonpath.prepend(egg_zip)
      printer.writeln_name('using tmp egg: %s' % (egg_zip))
      if args.save_egg:
        file_util.copy(egg_zip, path.join(cwd, path.basename(egg_zip)))

  if args.pre_commit:
    # verify every test dependency is tracked by git
    missing_from_git = []
    for filename, dep_files in ar.test_dependency_files().items():
      for dep_file in dep_files:
        if dep_file.config and not dep_file.git_tracked:
          missing_from_git.append(dep_file.filename)
    if missing_from_git:
      for f in missing_from_git:
        printer.writeln_name('PRE_COMMIT: missing from git: %s' % (path.relpath(f)))
      return 1
    return 0

  ar.cleanup_python_compiled_files()

  # Do all our work with a temporary working directory to be able to check for side effects
  tmp_cwd = temp_file.make_temp_dir(prefix='bes_test_', suffix='.tmp.cwd.dir', delete=False)
  tmp_home = temp_file.make_temp_dir(prefix='bes_test_', suffix='.tmp.home.dir', delete=False)
  os.environ['HOME'] = tmp_home
  os.chdir(tmp_cwd)

  # Use what the OS thinks the path is (to deal with symlinks and virtual tmpfs things)
  tmp_cwd = os.getcwd()

  if not args.dry_run and args.page:
    printer.OUTPUT = tempfile.NamedTemporaryFile(prefix='bes_test', delete=True, mode='w')

  total_tests = _count_tests(ar.inspect_map, ar.test_descriptions)
  total_files = len(ar.test_descriptions)

  total_num_tests = 0

  if args.profile:
    args.profile = path.abspath(args.profile)
    if not _check_program('cprofilev'):
      return 1

  if args.coverage:
    args.coverage = path.abspath(args.coverage)
    coverage_exe = _check_program('coverage')
    if not coverage_exe:
      return 1
    args.python = [coverage_exe]

  if args.profile and args.coverage:
    printer.writeln_name('ERROR: --profile and --coverage are mutually exclusive.')
    return 1

  options = test_options(args.dry_run, args.verbose, args.stop, args.timing,
                         args.profile, args.coverage, args.python, args.temp_dir, tmp_home)

  timings = {}

  total_time_start = time.time()

  # run every test file with every requested python, collecting side effects
  stopped = False
  for i, test_desc in enumerate(ar.test_descriptions):
    file_info = test_desc.file_info
    filename = file_info.filename
    if not filename in timings:
      timings[filename] = []
    for python_exe in args.python:
      result = _test_execute(python_exe, ar.inspect_map, filename, test_desc.tests,
                             options, i + 1, total_files, cwd, env)
      _collect_side_effects(side_effects, filename, tmp_home, 'home', args.keep_side_effects)
      _collect_side_effects(side_effects, filename, tmp_tmp, 'tmp', args.keep_side_effects)
      _collect_side_effects(side_effects, filename, os.getcwd(), 'cwd', args.keep_side_effects)
      timings[filename].append(result.elapsed_time)
      total_num_tests += result.num_tests_run
      num_executed += 1
      if result.success:
        num_passed += 1
      else:
        num_failed += 1
        failed_tests.append((python_exe, filename, result))
    if args.stop and not result.success:
      stopped = True
    if stopped:
      break
  total_elapsed_time = 1000 * (time.time() - total_time_start)

  if args.dry_run:
    return 0

  num_skipped = num_tests - num_executed
  summary_parts = []

  if total_num_tests == total_tests:
    function_summary = '(%d %s)' % (total_tests, _make_test_string(total_tests))
  else:
    function_summary = '(%d of %d %s)' % (total_num_tests, total_tests, _make_test_string(total_tests))
  if num_failed > 0:
    summary_parts.append('%d of %d fixtures FAILED' % (num_failed, num_tests))
  summary_parts.append('%d of %d passed %s' % (num_passed, num_tests, function_summary))
  if num_skipped > 0:
    summary_parts.append('%d of %d skipped' % (num_skipped, num_tests))

  summary = '; '.join(summary_parts)
  printer.writeln_name('%s' % (summary))
  if failed_tests:
    longest_python_exe = max([len(path.basename(p)) for p in options.interpreters])
    for python_exe, filename, result in failed_tests:
      if len(options.interpreters) > 1:
        python_exe_blurb = path.basename(python_exe).rjust(longest_python_exe)
      else:
        python_exe_blurb = ''
      error_status = unit_test_output.error_status(result.output)
      for error in error_status.errors:
        printer.writeln_name('%5s: %s %s :%s.%s' % (error.error_type,
                                                    python_exe_blurb,
                                                    file_util.remove_head(filename, cwd),
                                                    error.fixture,
                                                    error.function))
  if num_failed > 0:
    rv = 1
  else:
    rv = 0

  if args.timing:
    filenames = sorted(timings.keys())
    num_filenames = len(filenames)
    for i, filename in zip(range(0, num_filenames), filenames):
      short_filename = file_util.remove_head(filename, cwd)
      all_timings = timings[filename]
      num_timings = len(all_timings)
      avg_ms = _timing_average(all_timings) * 1000.0
      if num_timings > 1:
        run_blurb = '(average of %d runs)' % (num_timings)
      else:
        run_blurb = ''
      if num_filenames > 1:
        count_blurb = '[%s of %s] ' % (i + 1, num_filenames)
      else:
        count_blurb = ''
      printer.writeln_name('timing: %s%s - %2.2f ms %s' % (count_blurb, short_filename, avg_ms, run_blurb))

  if total_elapsed_time >= 1000.0:
    printer.writeln_name('total time: %2.2f s' % (total_elapsed_time / 1000.0))
  else:
    printer.writeln_name('total time: %2.2f ms' % (total_elapsed_time))

  if args.page:
    subprocess.call([args.pager, printer.OUTPUT.name])

  # any change of cwd or leftover files in the sandboxes counts as a failure
  current_cwd = os.getcwd()
  if current_cwd != tmp_cwd:
    rv = 1
    printer.writeln_name('SIDE EFFECT: working directory was changed from %s to %s' % (tmp_cwd, current_cwd))

  if not args.ignore_side_effects:
    for test, items in sorted(side_effects.items()):
      for item in items:
        rv = 1
        filename = item.filename
        print('SIDE EFFECT [{}] {} {}'.format(item.label, test.replace(cwd + os.sep, ''), filename))

  os.chdir('/tmp')

  if not args.keep_side_effects:
    file_util.remove(tmp_cwd)
    file_util.remove(tmp_home)
    file_util.remove(tmp_tmp)

  return rv
def _pre_create(self):
  'Setup some stuff before create() is called.'
  parent_dir = path.dirname(self.filename)
  # a bare filename has no parent dir component, so nothing to create
  if parent_dir:
    file_util.mkdir(parent_dir)