def _test_extract_with_include_exclude(self, items, include, exclude):
  'Extract a temp archive made from items honoring include/exclude; return the relative file list.'
  archive = self.make_temp_archive_for_reading(items)
  extract_dir = temp_file.make_temp_dir()
  archive.extract(extract_dir, include = include, exclude = exclude)
  found = file_find.find(extract_dir, relative = True)
  file_util.remove(extract_dir)
  return found
def extract(self, dest_dir, base_dir = None, strip_common_ancestor = False, strip_head = None, include = None, exclude = None):
  'Extract the dmg contents to dest_dir, optionally filtering with include/exclude.'
  dest_dir = self._determine_dest_dir(dest_dir, base_dir)
  filtered_members = self._filter_for_extract(self.members, include, exclude)
  # Fast path: nothing was filtered out, so a plain extract_all() is equivalent.
  # Bug fix: the old code unconditionally returned extract_all() a second time,
  # making the include/exclude filter dead code and leaving the partial-extract
  # path below unreachable.
  if filtered_members == self.members:
    return self.extract_all(dest_dir, base_dir = base_dir,
                            strip_common_ancestor = strip_common_ancestor,
                            strip_head = strip_head)
  # Cheat by using a temporary zip file to do the actual work.  Super inefficient but
  # easy since there's no library to extract just some stuff from dmg files.
  tmp_dir = temp_file.make_temp_dir()
  dmg.extract(self.filename, tmp_dir)
  tmp_zip = temp_file.make_temp_file(suffix = '.zip')
  az = archive_zip(tmp_zip)
  az.create(tmp_dir)
  az.extract(dest_dir, base_dir = base_dir,
             strip_common_ancestor = strip_common_ancestor,
             strip_head = strip_head,
             include = include, exclude = exclude)
  file_util.remove(tmp_zip)
  file_util.remove(tmp_dir)
def remove_checksums(self, packages, level):
  'Remove the checksum dirs for the given packages at the given level.'
  packages = object_util.listify(packages)
  check.check_package_descriptor_seq(packages)
  checksum_dirs = [ self._checksum_dir(pd, level) for pd in packages ]
  for d in checksum_dirs:
    self.blurb('removing checksums: %s' % (path.relpath(d)))
    # Bug fix: remove each dir exactly once.  The old code passed the whole
    # checksum_dirs list on every loop iteration, removing all dirs N times.
    file_util.remove(d)
def test_create_base_dir(self):
  'Creating an archive with base_dir prefixes every member with that dir.'
  self.maxDiff = None
  items = temp_archive.make_temp_item_list([
    ( 'base-1.2.3/foo.txt', 'foo.txt\n' ),
    ( 'base-1.2.3/bar.txt', 'bar.txt\n' ),
  ])
  src_dir = temp_archive.write_temp_items(items)
  base_dir = 'foo-666'
  archive = self.make_temp_archive_for_writing()
  archive.create(src_dir, base_dir = base_dir)
  self.assertTrue( path.isfile(archive.filename) )
  extract_dir = temp_file.make_temp_dir()
  archive.extract_all(extract_dir)
  def _remove_base_dir(f):
    return file_util.remove_head(f, base_dir)
  self._compare_dirs(src_dir, extract_dir, transform = _remove_base_dir)
  file_util.remove([ src_dir, extract_dir ])
def test_is_broken_link_false(self):
  'A symlink that points at an existing file is a link but not a broken one.'
  link = tempfile.NamedTemporaryFile()
  target = tempfile.NamedTemporaryFile()
  # Free the name so it can be reused for the symlink itself.
  file_util.remove(link.name)
  os.symlink(target.name, link.name)
  self.assertEqual( True, path.islink(link.name) )
  self.assertEqual( False, file_util.is_broken_link(link.name) )
def grep(clazz, tarball, pattern):
  'Return the output of ag (silver searcher) for an archive.'
  import shlex
  tmp_dir = temp_file.make_temp_dir()
  archiver.extract(tarball, tmp_dir, strip_common_ancestor = True)
  # Bug fix: quote the pattern; the old code interpolated it raw into a
  # shell=True command string, so spaces or shell metachars broke the command.
  result = execute.execute('ag %s .' % (shlex.quote(pattern)), cwd = tmp_dir, shell = True, raise_error = False).stdout
  file_util.remove(tmp_dir)
  return result
def execute(self, script, env, values, inputs):
  'Remove libtool *.la droppings from the staged lib dir.'
  staged_files_lib_dir = path.join(script.staged_files_dir, 'lib')
  if path.isdir(staged_files_lib_dir):
    # Fix: reuse the dir computed above; the old code re-joined the same path.
    droppings = file_find.find_fnmatch(staged_files_lib_dir, ['*.la'], relative=False)
    file_util.remove(droppings)
  return step_result(True, None)
def publish_artifacts(clazz, am):
  'Publish every *.tar.gz artifact found under the artifact manager root dir.'
  tarballs = file_find.find_fnmatch(am.root_dir, [ '*.tar.gz' ], relative = False)
  for tarball in tarballs:
    # Stage the artifact into a temp file, then drop the original before publishing.
    staged = temp_file.make_temp_file()
    file_util.copy(tarball, staged)
    file_util.remove(tarball)
    p = package(staged)
    am.publish(staged, p.metadata.build_target, False)
def remove_artifact(self, adesc):
  'Remove the artifact described by adesc; raises if read-only or not found.'
  check.check_artifact_descriptor(adesc)
  if self._read_only:
    raise RuntimeError('artifact_manager is read only.')
  md = self.find_by_artifact_descriptor(adesc, False)
  if not md:
    raise NotInstalledError('package \"%s\" not found' % (str(adesc)))
  file_util.remove(md.filename)
  self._db.remove_artifact(adesc)
def _make_temp_archive_dmg(clazz, items, filename, mode):
  'Create a dmg at filename containing the given temp archive items.'
  tmp_dir = temp_file.make_temp_dir()
  for item in items:
    assert item
    assert item.arcname
    file_util.save(path.join(tmp_dir, item.arcname), content = item.content)
  # Bug fix: the old code created an unused temp file (tmp_dmg) that leaked;
  # hdiutil writes the dmg directly to filename.
  cmd = 'hdiutil create -srcfolder %s -ov -format UDZO %s' % (tmp_dir, filename)
  execute.execute(cmd)
  file_util.remove(tmp_dir)
def _test_create_with_include_exclude(self, items, include, exclude):
  'Create an archive from items honoring include/exclude; return the extracted relative file list.'
  src_dir = temp_archive.write_temp_items(items)
  archive = self.make_temp_archive_for_writing()
  archive.create(src_dir, include = include, exclude = exclude)
  self.assertTrue( path.isfile(archive.filename) )
  extract_dir = temp_file.make_temp_dir()
  archive.extract_all(extract_dir)
  found = file_find.find(extract_dir, relative = True)
  file_util.remove([ src_dir, extract_dir ])
  return found
def autoconf_help(clazz, tarball):
  'Return the output of configure --help for an autoconf archive.'
  tmp_dir = temp_file.make_temp_dir()
  try:
    archiver.extract(tarball, tmp_dir, strip_common_ancestor = True)
    # Fix: variable was misspelled 'confiugure_path'.
    configure_path = path.join(tmp_dir, 'configure')
    if not path.exists(configure_path):
      raise RuntimeError('No configure script found in %s' % (tarball))
    # Fix: renamed from 'help', which shadowed the builtin.
    help_text = execute.execute('./configure --help', cwd = tmp_dir, shell = True, raise_error = False).stdout
    return help_text
  finally:
    # Fix: the temp dir used to leak when no configure script was found.
    file_util.remove(tmp_dir)
def _move_dir(clazz, from_dir, dest_dir):
  'Move the contents of from_dir into dest_dir (copy with tar, then remove the source).'
  file_util.mkdir(dest_dir)
  # Copy with tar instead of shutil.move() so moves across device boundaries work.
  # (Removed a block of commented-out shutil.move/debug code.)
  tar_util.copy_tree_with_tar(from_dir, dest_dir)
  file_util.remove(from_dir)
def execute(self, script, env, values, inputs):
  'Remove *.bak droppings from the staged files dir.'
  if not script.has_staged_files_dir():
    return step_result(True, script.format_message('No droppings to cleanup in {staged_files_dir}'))
  bak_files = file_find.find_fnmatch(script.staged_files_dir, ['*.bak'], relative=False)
  file_util.remove(bak_files)
  return step_result(True, None)
def create(self, root_dir, base_dir = None, extra_items = None, include = None, exclude = None):
  'Create a dmg at self.filename from the files found under root_dir.'
  self._pre_create()
  found = self._find(root_dir, base_dir, extra_items, include, exclude)
  staging_dir = temp_file.make_temp_dir()
  # Stage every item under its arcname, then let hdiutil image the staging dir.
  for it in found:
    file_util.copy(it.filename, path.join(staging_dir, it.arcname))
  execute.execute('hdiutil create -srcfolder %s -ov -format UDZO %s' % (staging_dir, self.filename))
  file_util.remove(staging_dir)
def execute(self, script, env, values, inputs):
  'Remove setuptools droppings from the staged files dir.'
  dropping_names = [
    'lib/python/easy-install.pth',
    'lib/python/site.py',
    'lib/python/site.pyc',
  ]
  file_util.remove([ path.join(script.staged_files_dir, name) for name in dropping_names ])
  return step_result(True, None)
def _handle_extract_strip_common_ancestor(self, members, strip_common_ancestor, strip_head, dest_dir):
  'After extraction, optionally flatten the common ancestor dir and/or a named head dir.'
  if strip_common_ancestor:
    ancestor = self._common_ancestor_for_members(members)
    if ancestor:
      ancestor_dir = path.join(dest_dir, ancestor)
      tar_util.copy_tree_with_tar(ancestor_dir, dest_dir)
      file_util.remove(ancestor_dir)
  if strip_head:
    head_dir = path.join(dest_dir, strip_head)
    if path.isdir(head_dir):
      tar_util.copy_tree_with_tar(head_dir, dest_dir)
      file_util.remove(head_dir)
def _make_temp_archive_xz(clazz, items, filename, mode):
  'Create an xz tarball at filename containing the given temp archive items.'
  tmp_dir = temp_file.make_temp_dir()
  for item in items:
    assert item
    assert item.arcname
    file_util.save(path.join(tmp_dir, item.arcname), content = item.content)
  # Bug fix: removed an unused temp file (tmp_xz) that leaked; tar writes
  # directly to filename.
  manifest_content = '\n'.join([ item.arcname for item in items ])
  manifest = temp_file.make_temp_file(content = manifest_content)
  cmd = 'tar Jcf %s -C %s -T %s' % (filename, tmp_dir, manifest)
  execute.execute(cmd)
  file_util.remove(tmp_dir)
def _make_temp_archive_tar(clazz, items, filename, mode):
  'Write items into a tar archive at filename using the given tarfile mode (e.g. "w:gz").'
  with open(filename, 'wb') as fp:
    archive = tarfile.open(fileobj = fp, mode = mode)
    for item in items:
      assert item
      assert item.arcname
      # Each item's content goes through a temp file because tarfile.add() takes a path.
      tmp_content = temp_file.make_temp_file(item.content)
      archive.add(tmp_content, arcname = item.arcname)
      file_util.remove(tmp_content)
    archive.close()
    # Fix: dropped the redundant fp.flush()/fp.close(); the with statement
    # closes (and flushes) fp, so the old explicit close was a double close.
def create(self, root_dir, base_dir = None, extra_items = None, include = None, exclude = None):
  'Create an xz tarball at self.filename from the files found under root_dir.'
  self._pre_create()
  found = self._find(root_dir, base_dir, extra_items, include, exclude)
  staging_dir = temp_file.make_temp_dir()
  arcnames = []
  # Stage every item under its arcname and record the names for the tar manifest.
  for it in found:
    file_util.copy(it.filename, path.join(staging_dir, it.arcname))
    arcnames.append(it.arcname)
  manifest = temp_file.make_temp_file(content = '\n'.join(arcnames))
  execute.execute('tar Jcf %s -C %s -T %s' % (self.filename, staging_dir, manifest))
  file_util.remove(staging_dir)
def execute(self, script, env, values, inputs):
  'Delete the files listed in delete_files from the staged files dir.'
  delete_files = values.get('delete_files')
  ignore_missing = values.get('delete_files_ignore_missing')
  if not delete_files:
    msg = 'No delete_files for %s' % (script.descriptor.full_name)
    self.log_d(msg)
    return step_result(True, msg)
  targets = [ path.join(script.staged_files_dir, f) for f in delete_files ]
  missing = [ f for f in targets if not path.exists(f) ]
  # Fail only when some targets are missing and the caller did not opt out.
  if missing and not ignore_missing:
    return step_result(False, 'File(s) to delete not found: %s' % (' '.join(missing)))
  file_util.remove(targets)
  return step_result(True, None)
def _test_extract_with_members(self, items, members, base_dir = None, strip_common_ancestor = False, strip_head = None):
  'Extract only the given members from a temp archive built from items; return the relative file list.'
  archive = self.make_temp_archive_for_reading(items)
  extract_dir = temp_file.make_temp_dir()
  archive.extract(extract_dir,
                  base_dir = base_dir,
                  strip_common_ancestor = strip_common_ancestor,
                  strip_head = strip_head,
                  include = members)
  found = file_find.find(extract_dir, relative = True)
  file_util.remove(extract_dir)
  return found
def create(self, root_dir, base_dir = None, extra_items = None, include = None, exclude = None):
  'Create an archive at self.filename from the files found under root_dir.'
  items = self._find(root_dir, base_dir, extra_items, include, exclude)
  ext = archive_extension.extension_for_filename(self.filename)
  mode = archive_extension.write_format_for_filename(self.filename)
  # NOTE(review): ext and mode are computed (and validate self.filename) but the
  # tar command below is hard coded to xz ('Jcf') regardless of mode -- confirm
  # whether other formats were meant to be honored here.
  # (Removed commented-out debug prints.)
  tmp_dir = temp_file.make_temp_dir()
  for item in items:
    file_util.copy(item.filename, path.join(tmp_dir, item.arcname))
  manifest_content = '\n'.join([ item.arcname for item in items ])
  manifest = temp_file.make_temp_file(content = manifest_content)
  cmd = 'tar Jcf %s -C %s -T %s' % (self.filename, tmp_dir, manifest)
  execute.execute(cmd)
  file_util.remove(tmp_dir)
def uninstall_package(self, pkg_name):
  'Uninstall the named package, removing its installed files and env files.'
  self.log_i('uninstalling package: %s' % (pkg_name))
  pkg = self.db.find_package(pkg_name)
  if not pkg:
    raise NotInstalledError('package %s not found' % (pkg_name))
  doomed = [ path.join(self._installation_dir, f) for f in pkg.files.files.filenames() ]
  doomed += [ path.join(self._env_dir, f) for f in pkg.files.env_files.filenames() ]
  file_util.remove(doomed)
  self.db.remove_package(pkg_name)
def extract_members(self, members, dest_dir, base_dir = None, strip_common_ancestor = False, strip_head = None, include = None, exclude = None):
  'Extract only the given members from the dmg into dest_dir.'
  # There is no library for partial dmg extraction, so cheat: unpack the whole
  # dmg, repack it as a temporary zip and delegate to archive_zip.  Super
  # inefficient but simple.
  work_dir = temp_file.make_temp_dir()
  dmg.extract(self.filename, work_dir)
  zip_path = temp_file.make_temp_file(suffix = '.zip')
  zip_archive = archive_zip(zip_path)
  zip_archive.create(work_dir)
  zip_archive.extract_members(members, dest_dir,
                              base_dir = base_dir,
                              strip_common_ancestor = strip_common_ancestor,
                              strip_head = strip_head,
                              include = include,
                              exclude = exclude)
  file_util.remove(zip_path)
  file_util.remove(work_dir)
def __command_create(self):
  'Create a jail at the configured location from the config file given in args.'
  if not path.isfile(self._args.config):
    raise RuntimeError('File not found: %s' % (self._args.config))
  variables = {
    'root': self._args.location,
    'source_dir': path.dirname(self._args.config),
    'username': user.USERNAME,
  }
  if host.SYSTEM == host.MACOS:
    variables['DARWIN_USER_CACHE_DIR'] = execute.execute('getconf DARWIN_USER_CACHE_DIR').stdout.strip()
  cf = config_file(self._args.config, variables)
  if self._args.wipe:
    file_util.remove(self._args.location)
  # Bug fix: pass the config_file just parsed (cf); the old code left cf unused
  # and passed self._config instead.
  jail.create(self._args.location, cf, self._args.no_filters)
  return 0
def extract(self, root_dir, stuff_dir_basename, env_dir_basename):
  'Extract the package tarball into root_dir, placing files under stuff_dir_basename and env files under env_dir_basename.'
  # Unpack into a temp dir created inside root_dir (same filesystem) so the
  # later moves are cheap renames.
  tmp_dir = temp_file.make_temp_dir(prefix='package.extract.', suffix='.dir', dir=root_dir)
  dst_stuff_dir = path.join(root_dir, stuff_dir_basename)
  dst_env_dir = path.join(root_dir, env_dir_basename)
  file_util.mkdir(dst_stuff_dir)
  file_util.mkdir(dst_env_dir)
  # tar cmd is 10x faster than archiver. need to fix archiver
  tar_cmd = ['tar', 'xf', self.tarball, '-C', tmp_dir]
  execute.execute(tar_cmd)
  #archiver.extract_all(self.tarball, tmp_dir)
  src_stuff_dir = path.join(tmp_dir, self.FILES_DIR)
  src_env_dir = path.join(tmp_dir, self.ENV_DIR)
  # NOTE(review): hook calls are paired with each move here; confirm against the
  # original whether they were meant to run unconditionally.
  if path.isdir(src_stuff_dir):
    dir_util.move_files(src_stuff_dir, dst_stuff_dir)
    self._post_install_hooks(dst_stuff_dir)
  if path.isdir(src_env_dir):
    dir_util.move_files(src_env_dir, dst_env_dir)
    self._variable_substitution_hook(dst_env_dir, dst_stuff_dir)
  file_util.remove(tmp_dir)
def instructions(self, env):
  'Source the env files in a bash subshell, diff the environment before/after, and return sorted change instructions.'
  # Build a bash script that dumps the environment, sources each env file,
  # then dumps the environment again, bracketed by sentinel markers.
  buf = StringIO()
  buf.write('#!/bin/bash\n')
  buf.write('echo "----1----"\n')
  buf.write('declare -px\n')
  buf.write('echo "----2----"\n')
  for f in self.files_abs:
    buf.write('source \"%s\"\n' % (f))
  buf.write('echo "----3----"\n')
  buf.write('declare -px\n')
  buf.write('echo "----4----"\n')
  # Keep the script around for inspection when DEBUG is on.
  script = temp_file.make_temp_file(content = buf.getvalue(), delete = not self.DEBUG)
  if self.DEBUG:
    print('env_dir: script=%s' % (script))
  os.chmod(script, 0o755)
  try:
    rv = execute.execute(script, raise_error = True, shell = True, env = env)
  finally:
    # Always clean up the script (unless debugging), even if execution raised.
    if not self.DEBUG:
      file_util.remove(script)
  parser = text_line_parser(rv.stdout)
  if rv.stderr:
    raise RuntimeError(rv.stderr)
  # env1 = environment before sourcing; env2 = environment after.
  env1 = self._parse_env_lines(parser.cut_lines('----1----', '----2----'))
  env2 = self._parse_env_lines(parser.cut_lines('----3----', '----4----'))
  delta = self._env_delta(env1, env2)
  instructions = []
  for key in delta.added:
    instructions.append(instruction(key, env2[key], action.SET))
  for key in delta.removed:
    instructions.append(instruction(key, None, action.UNSET))
  for key in delta.changed:
    value1 = env1[key]
    value2 = env2[key]
    for inst in self._determine_change_instructions(key, value1, value2):
      instructions.append(inst)
  # Deterministic ordering for callers and tests.
  return sorted(instructions, key = lambda x: ( x.key, x.value ) )
def test_update_site_dot_py_empty_dir(self):
  'update_site_dot_py on a non-existent dir should not create site.py.'
  missing_dir = temp_file.make_temp_dir()
  file_util.remove(missing_dir)
  setup_tools.update_site_dot_py(missing_dir)
  site_py = path.join(missing_dir, setup_tools.SITE_DOT_PY_FILENAME)
  self.assertFalse( path.exists(site_py) )
def execute(self, script, env, values, inputs):
  'Remove the share/info dir from the staged files.'
  file_util.remove(path.join(script.staged_files_dir, 'share/info'))
  return step_result(True, None)
def test_is_broken_link_true(self):
  'A symlink whose target does not exist is a broken link.'
  link = tempfile.NamedTemporaryFile()
  # Free the name so it can be reused for the symlink itself.
  file_util.remove(link.name)
  os.symlink('/somethingnotthere', link.name)
  self.assertEqual( True, path.islink(link.name) )
  self.assertEqual( True, file_util.is_broken_link(link.name) )
def extract_member_to_string(self, member):
  'Extract a single member and return its content as a string.'
  tmp = temp_file.make_temp_file()
  self.extract_member_to_file(member, tmp)
  content = file_util.read(tmp)
  file_util.remove(tmp)
  return content
def main():
  'Entry point for the bes test runner: resolve test files, run them per interpreter, and report results.'
  import bes
  vcli = version_cli(bes)
  # --- command line -----------------------------------------------------
  parser = argparse.ArgumentParser()
  parser.add_argument('files', action = 'store', nargs = '*', help = 'Files or directories to rename')
  vcli.version_add_arguments(parser)
  parser.add_argument('--dry-run', '-n', action = 'store_true', default = False, help = 'Only print what files will get tests [ False ]')
  parser.add_argument('--timing', '-t', action = 'store_true', default = False, help = 'Show the amount of time it takes to run tests [ False ]')
  parser.add_argument('--verbose', '-v', action = 'store_true', default = False, help = 'Verbose debug output [ False ]')
  parser.add_argument('--stop', '-s', action = 'store_true', default = False, help = 'Stop right after the first failure. [ False ]')
  parser.add_argument('--randomize', action = 'store_true', default = False, help = 'Randomize the order in which unit tests run. [ False ]')
  parser.add_argument('--python', action = 'append', default = [], help = 'Python executable) to use. Multiple flags can be used for running with mutiple times with different python versions [ python ]')
  parser.add_argument('--page', '-p', action = 'store_true', default = False, help = 'Page output with $PAGER [ False ]')
  parser.add_argument('--profile', action = 'store', default = None, help = 'Profile the code with cProfile and store the output in the given argument [ None ]')
  parser.add_argument('--coverage', action = 'store', default = None, help = 'Run coverage on the code and store the output in the given argument [ None ]')
  parser.add_argument('--pager', action = 'store', default = os.environ.get('PAGER', 'more'), help = 'Pager to use when paging [ %s ]' % (os.environ.get('PAGER', 'more')))
  parser.add_argument('--iterations', '-i', action = 'store', default = 1, type = int, help = 'Python executable to use [ python ]')
  parser.add_argument('--git', '-g', action = 'store_true', default = False, help = 'Use git status to figure out what has changed to test [ False ]')
  parser.add_argument('--pre-commit', action = 'store_true', default = False, help = 'Run pre commit checks [ False ]')
  parser.add_argument('--print-tests', action = 'store_true', default = False, help = 'Print the list of unit tests [ False ]')
  parser.add_argument('--print-files', action = 'store_true', default = False, help = 'Print the list of unit files [ False ]')
  parser.add_argument('--egg', action = 'store_true', default = False, help = 'Make an egg of the package and run the tests against that instead the live files. [ False ]')
  parser.add_argument('--save-egg', action = 'store_true', default = False, help = 'Save the egg in the current directory. [ False ]')
  parser.add_argument('--ignore', action = 'append', default = [], help = 'Patterns of filenames to ignore []')
  parser.add_argument('--root-dir', action = 'store', default = None, help = 'The root directory for all your projets. By default its computed from your git struture. [ None ]')
  parser.add_argument('--dont-hack-env', action = 'store_true', default = False, help = 'Dont hack PATH and PYTHONPATH. [ False ]')
  parser.add_argument('--compile-only', '-c', action = 'store_true', default = False, help = 'Just compile the files to verify syntax [ False ]')
  parser.add_argument('--print-deps', action = 'store_true', default = False, help = 'Print python dependencies for test files [ False ]')
  parser.add_argument('--print-configs', action = 'store_true', default = False, help = 'Print testing configs found [ False ]')
  parser.add_argument('--print-root-dir', action = 'store_true', default = False, help = 'Print the root dir [ False ]')
  parser.add_argument('--print-path', action = 'store_true', default = False, help = 'Print sys.path [ False ]')
  parser.add_argument('--file-ignore-file', action = 'append', default = [], help = 'List of file ignore files. [ .bes_test_ignore .bes_test_internal_ignore ]')
  parser.add_argument('--env', action = 'append', default = [], help = 'Environment variables to set [ None ]')
  parser.add_argument('--no-env-deps', action = 'store_true', default = False, help = 'Dont use env deps. [ False ]')
  parser.add_argument('--temp-dir', action = 'store', default = None, help = 'The directory to use for tmp files overriding the system default. [ None ]')
  # Sort options within each group for predictable --help output.
  for g in parser._action_groups:
    g._group_actions.sort(key = lambda x: x.dest)
  args = parser.parse_args()
  if args.temp_dir:
    file_util.mkdir(args.temp_dir)
    tempfile.tempdir = args.temp_dir
  if os.environ.get('DEBUG', False):
    args.verbose = True
  cwd = os.getcwd()
  if args.version:
    vcli.version_print_version()
    return 0
  args.env = _parse_args_env(args.env)
  if not args.files:
    args.files = [ cwd ]
  if not args.file_ignore_file:
    args.file_ignore_file = [ '.bes_test_ignore', '.bes_test_internal_ignore' ]
  # --- resolve which test files/tests to run -----------------------------
  ar = argument_resolver(cwd, args.files, root_dir = args.root_dir,
                         file_ignore_filename = args.file_ignore_file,
                         check_git = args.git,
                         use_env_deps = not args.no_env_deps)
  ar.num_iterations = args.iterations
  ar.randomize = args.randomize
  ar.ignore_with_patterns(args.ignore)
  if args.compile_only:
    total_files = len(ar.all_files)
    for i, f in enumerate(ar.all_files):
      tmp = temp_file.make_temp_file()
      filename_count_blurb = ' ' + _make_count_blurb(i + 1, total_files)
      short_filename = file_util.remove_head(f, cwd)
      blurb = '%7s:%s %s ' % ('compile', filename_count_blurb, short_filename)
      printer.writeln_name(blurb)
      py_compile.compile(f, cfile = tmp, doraise = True)
    return 0
  if not ar.test_descriptions:
    return 1
  # --- informational modes: print and exit --------------------------------
  if args.print_path:
    for p in sys.path:
      print(p)
    return 0
  if args.print_configs:
    ar.print_configs()
    return 0
  if args.print_root_dir:
    print(ar.root_dir)
    return 0
  if args.print_files:
    ar.print_files()
    return 0
  if args.print_tests:
    ar.print_tests()
    return 0
  # NOTE(review): precedence makes this 'print_deps or (pre_commit and not supports)';
  # likely intended '(print_deps or pre_commit) and not supports' -- confirm.
  if args.print_deps or args.pre_commit and not ar.supports_test_dependency_files():
    printer.writeln_name('ERROR: Cannot figure out dependencies. snakefood missing.')
    return 1
  if args.print_deps:
    dep_files = ar.test_dependency_files()
    for filename in sorted(dep_files.keys()):
      print(filename)
      for dep_file in dep_files[filename]:
        print(' %s' % (dep_file.filename))
    return 0
  # Start with a clean environment so unit testing can be deterministic and not subject
  # to whatever the user happened to have exported. PYTHONPATH and PATH for dependencies
  # are set below by iterating the configs
  keep_keys = [ 'BES_LOG', 'BES_VERBOSE', 'BESCFG_PATH', 'DEBUG', 'BES_TEMP_DIR' ]
  if args.dont_hack_env:
    keep_keys.extend([ 'PATH', 'PYTHONPATH'])
  env = os_env.make_clean_env(keep_keys = keep_keys)
  env['PYTHONDONTWRITEBYTECODE'] = 'x'
  variables = {
    'rebuild_dir': path.expanduser('~/.rebuild'),
    'system': host.SYSTEM,
  }
  if not args.dont_hack_env:
    ar.update_environment(env, variables)
  # Update env with whatever was given in --env
  env.update(args.env)
  num_passed = 0
  num_failed = 0
  num_executed = 0
  num_tests = len(ar.test_descriptions)
  failed_tests = []
  # Remove current dir from sys.path to avoid side effects
  if cwd in sys.path:
    sys.path.remove(cwd)
  if args.egg:
    pythonpath = env_var(env, 'PYTHONPATH')
    pythonpath.remove(cwd)
    for config in ar.env_dependencies_configs:
      setup_dot_py = path.join(config.root_dir, 'setup.py')
      if not path.isfile(setup_dot_py):
        raise RuntimeError('No setup.py found in %s to make the egg.' % (cwd))
      egg_zip = egg.make(setup_dot_py)
      pythonpath.prepend(egg_zip)
      printer.writeln_name('using tmp egg: %s' % (egg_zip))
      if args.save_egg:
        file_util.copy(egg_zip, path.join(cwd, path.basename(egg_zip)))
  if args.pre_commit:
    missing_from_git = []
    for filename, dep_files in ar.test_dependency_files().items():
      for dep_file in dep_files:
        if dep_file.config and not dep_file.git_tracked:
          missing_from_git.append(dep_file.filename)
    if missing_from_git:
      for f in missing_from_git:
        printer.writeln_name('PRE_COMMIT: missing from git: %s' % (path.relpath(f)))
      return 1
    return 0
  ar.cleanup_python_compiled_files()
  # Do all our work with a temporary working directory to be able to check for side effects
  tmp_cwd = temp_file.make_temp_dir(prefix = 'bes_test_', suffix = '.tmp.dir', delete = False)
  os.chdir(tmp_cwd)
  # Use what the OS thinks the path is (to deal with symlinks and virtual tmpfs things)
  tmp_cwd = os.getcwd()
  if not args.dry_run and args.page:
    printer.OUTPUT = tempfile.NamedTemporaryFile(prefix = 'bes_test', delete = True, mode = 'w')
  total_tests = _count_tests(ar.inspect_map, ar.test_descriptions)
  total_files = len(ar.test_descriptions)
  total_num_tests = 0
  if not args.python:
    args.python = [ 'python' ]
  if args.profile:
    args.profile = path.abspath(args.profile)
    if not _check_program('cprofilev'):
      return 1
  if args.coverage:
    args.coverage = path.abspath(args.coverage)
    coverage_exe = _check_program('coverage')
    if not coverage_exe:
      return 1
    args.python = [ coverage_exe ]
  if args.profile and args.coverage:
    printer.writeln_name('ERROR: --profile and --coverage are mutually exclusive.')
    return 1
  options = test_options(args.dry_run, args.verbose, args.stop, args.timing,
                         args.profile, args.coverage, args.python, args.temp_dir)
  timings = {}
  total_time_start = time.time()
  stopped = False
  # --- run every test file with every interpreter --------------------------
  for i, test_desc in enumerate(ar.test_descriptions):
    file_info = test_desc.file_info
    filename = file_info.filename
    if not filename in timings:
      timings[filename] = []
    for python_exe in args.python:
      result = _test_execute(python_exe, ar.inspect_map, filename, test_desc.tests, options, i + 1, total_files, cwd, env)
      timings[filename].append(result.elapsed_time)
      total_num_tests += result.num_tests_run
      num_executed += 1
      if result.success:
        num_passed += 1
      else:
        num_failed += 1
        failed_tests.append(( python_exe, filename, result ))
      if args.stop and not result.success:
        stopped = True
    if stopped:
      break
  total_elapsed_time = 1000 * (time.time() - total_time_start)
  if args.dry_run:
    return 0
  # --- summary and reporting ------------------------------------------------
  num_skipped = num_tests - num_executed
  summary_parts = []
  if total_num_tests == total_tests:
    function_summary = '(%d %s)' % (total_tests, _make_test_string(total_tests))
  else:
    function_summary = '(%d of %d %s)' % (total_num_tests, total_tests, _make_test_string(total_tests))
  if num_failed > 0:
    summary_parts.append('%d of %d fixtures FAILED' % (num_failed, num_tests))
  summary_parts.append('%d of %d passed %s' % (num_passed, num_tests, function_summary))
  if num_skipped > 0:
    summary_parts.append('%d of %d skipped' % (num_skipped, num_tests))
  summary = '; '.join(summary_parts)
  printer.writeln_name('%s' % (summary))
  if failed_tests:
    longest_python_exe = max([len(path.basename(p)) for p in options.interpreters])
    for python_exe, filename, result in failed_tests:
      if len(options.interpreters) > 1:
        python_exe_blurb = path.basename(python_exe).rjust(longest_python_exe)
      else:
        python_exe_blurb = ''
      error_status = unit_test_output.error_status(result.output)
      for error in error_status.errors:
        printer.writeln_name('%5s: %s %s :%s.%s' % (error.error_type,
                                                    python_exe_blurb,
                                                    file_util.remove_head(filename, cwd),
                                                    error.fixture,
                                                    error.function))
  if num_failed > 0:
    rv = 1
  else:
    rv = 0
  if args.timing:
    filenames = sorted(timings.keys())
    num_filenames = len(filenames)
    for i, filename in zip(range(0, num_filenames), filenames):
      short_filename = file_util.remove_head(filename, cwd)
      all_timings = timings[filename]
      num_timings = len(all_timings)
      avg_ms = _timing_average(all_timings) * 1000.0
      if num_timings > 1:
        run_blurb = '(average of %d runs)' % (num_timings)
      else:
        run_blurb = ''
      if num_filenames > 1:
        count_blurb = '[%s of %s] ' % (i + 1, num_filenames)
      else:
        count_blurb = ''
      printer.writeln_name('timing: %s%s - %2.2f ms %s' % (count_blurb, short_filename, avg_ms, run_blurb))
  if total_elapsed_time >= 1000.0:
    printer.writeln_name('total time: %2.2f s' % (total_elapsed_time / 1000.0))
  else:
    printer.writeln_name('total time: %2.2f ms' % (total_elapsed_time))
  if args.page:
    subprocess.call([ args.pager, printer.OUTPUT.name ])
  # --- side effect detection: cwd changes and file droppings ----------------
  current_cwd = os.getcwd()
  if current_cwd != tmp_cwd:
    printer.writeln_name('SIDE EFFECT: working directory was changed from %s to %s' % (tmp_cwd, current_cwd))
  droppings = file_find.find(current_cwd, relative = False, file_type = file_find.ANY)
  for dropping in droppings:
    printer.writeln_name('SIDE EFFECT: dropping found: %s' % (dropping))
  if not droppings:
    os.chdir('/tmp')
    file_util.remove(tmp_cwd)
  return rv
def cleanup_python_compiled_files(self):
  'Delete compiled python files under all env dependency config root dirs.'
  roots = [ c.root_dir for c in self._env_dependencies_configs ]
  file_util.remove(file_finder.find_python_compiled_files(roots))
def test_update_egg_directory_empty_dir(self):
  'update_egg_directory on a non-existent dir should not create easy-install.pth.'
  missing_dir = temp_file.make_temp_dir()
  file_util.remove(missing_dir)
  setup_tools.update_egg_directory(missing_dir)
  pth_path = path.join(missing_dir, setup_tools.EASY_INSTALL_DOT_PTH_FILENAME)
  self.assertFalse( path.exists(pth_path) )