def main() -> None: path = download_wiki("en-latest-all-titles-in") ensure_dependencies() own_sort_exe = find_executable("sort", project_path()) if own_sort_exe is None: warn(f"executable 'sort' not found in {project_path()}") sys.exit(1) with TemporaryDirectory() as tempdir: temp_path = Path(tempdir) coreutils_sort = temp_path.joinpath(path.name + ".coreutils-sort") own_sort = temp_path.joinpath(path.name + ".own-sort") with subtest("Run coreutils sort with 128MB limit"): with open(path) as stdin, open(coreutils_sort, "w") as stdout: run_with_ulimit("sort", stdin, stdout) with subtest("Run own sort with 128MB limit"): with open(path) as stdin, open(own_sort, "w") as stdout: run_with_ulimit(own_sort_exe, stdin, stdout) with subtest("Check if both results matches"): run(["cmp", str(coreutils_sort), str(own_sort)])
def main() -> None: path = download_wiki("scowiki-latest-all-titles-in") ensure_dependencies() with TemporaryDirectory() as tempdir: temp_path = Path(tempdir) coreutils_sort = temp_path.joinpath(path.name + ".coreutils-sort") own_sort = temp_path.joinpath(path.name + ".own-sort") with subtest("Run coreutils sort..."): with open(path) as stdin, open(coreutils_sort, "w") as stdout: run( ["sort", "-r"], stdin=stdin, stdout=stdout, extra_env=dict(LC_ALL="C"), ) with subtest("Run own sort..."): with open(path) as stdin, open(own_sort, "w") as stdout: run_project_executable("sort", ["-r"], stdin=stdin, stdout=stdout) with subtest("Check if both results matches"): run(["cmp", str(coreutils_sort), str(own_sort)])
def main() -> None:
    # Run the test program
    test_mutual_exclusion = test_root().joinpath("test_mutual_exclusion")
    if not test_mutual_exclusion.exists():
        run(["make", "-C", str(test_root()), str(test_mutual_exclusion)])
    with subtest("Checking mutual exclusion"):
        run_project_executable(str(test_mutual_exclusion))
def run_with_ulimit(exe: str, stdin: IO[Any], stdout: IO[Any]) -> None:
    # size is in kilobytes (128 * 1024 KiB == 128 MiB)
    size = 128 * 1024
    run(
        [f"ulimit -v {quote(str(size))}; {quote(str(exe))}"],
        stdin=stdin,
        stdout=stdout,
        extra_env=dict(LC_ALL="C"),
        shell=True,
    )
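# Illustrative alternative (an assumption, not the project's helper): the same
# 128 MiB cap can be applied without a shell by setting RLIMIT_AS in the child
# before exec. POSIX-only, since the resource module is unavailable on Windows.
import resource
import subprocess
from typing import IO, Any

def run_with_rlimit(exe: str, stdin: IO[Any], stdout: IO[Any],
                    limit_mb: int = 128) -> None:
    def _limit_memory() -> None:
        limit = limit_mb * 1024 * 1024  # RLIMIT_AS is expressed in bytes
        resource.setrlimit(resource.RLIMIT_AS, (limit, limit))

    subprocess.run(
        [exe],
        stdin=stdin,
        stdout=stdout,
        env={"LC_ALL": "C"},
        preexec_fn=_limit_memory,  # runs in the child process before exec
        check=True,
    )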
def fuse_check_mnt(tmpdir: str, mnt_path: str) -> None:
    with open(f'{tmpdir}/stdout', 'w+') as stdout:
        run(
            ["mount"],
            stdout=stdout,
        )
    with open(f'{tmpdir}/stdout') as stdin:
        try:
            run(
                ["grep", "memfs"],
                stdin=stdin,
            )
        except Exception as e:
            fuse_unmount(mnt_path)
            sys.exit(1)
def test_g(self):
    p4 = P4()
    top = os.getcwd()
    andrew = testsupport.users["andrew"]
    fname = "test_g.txt"
    try:
        os.chdir(andrew["home"])
        fout = open(fname, "w")
        for i in range(10):
            fout.write("line %d\n" % i)
        fout.close()
        p4.add(fname)

        argv = ["px", "-g", "opened", fname]
        output, error, retval = testsupport.run(argv)
        result = eval("".join(output))
        self.failUnless(type(result) == types.DictType)
        opened = p4.opened(fname)[0]
        self.failUnless(result["change"] == opened["change"])
        self.failUnless(result["depotFile"] == opened["depotFile"])
        self.failUnless(result["type"] == opened["type"])
        self.failUnless(int(result["rev"]) == int(opened["rev"]))

        # cleanup
        p4.revert(fname)
    finally:
        os.chdir(top)
def test_backout_files_already_open(self):
    p4 = P4()
    top = os.getcwd()
    andrew = testsupport.users['andrew']
    fname = 'test_backout_files_already_open.txt'
    try:
        os.chdir(andrew['home'])
        fout = open(fname, 'w')
        for i in range(10):
            fout.write('line %d\n' % i)
        fout.close()
        p4.add(fname)
        result = p4.submit(fname, 'first checkin of this file')
        self.failUnless(result['action'] == 'submitted')
        cnum = result['change']
        p4.edit(fname)

        argv = ['px', 'backout', str(cnum)]
        output, error, retval = testsupport.run(argv)
        self.failUnless(retval, "This call should have failed but did "
                        "not: argv=%s" % argv)

        # Cleanup
        p4.revert(fname)
    finally:
        os.chdir(top)
def test_added_binary_file(self):
    p4 = P4()
    top = os.getcwd()
    andrew = testsupport.users['andrew']
    fname = 'test_added_binary_file.txt'
    try:
        os.chdir(andrew['home'])

        # Make the first revision.
        fout = open(fname, 'w')
        for i in range(10):
            fout.write('line %d\n' % i)
        fout.close()
        p4.add(fname, filetype='binary')
        change = p4.submit(fname, "first submission")['change']

        # Generate a patch for the last change.
        argv = ['px', 'genpatch', str(change)]
        output, error, retval = testsupport.run(argv)
        self.failUnless(error[0].startswith('warn:'),
                        "Did not get expected warning for not being "
                        "able to inline the added binary file.")
        self.failIf(retval)
        patch = ''.join(output)

        # Sync back to before the change, apply the patch, and see
        # if the results are the same as the actual change.
        os.chmod(fname, 0777)
        os.remove(fname)
        self._applyPatch(patch)
        self.failIf(os.path.exists(fname),
                    "Did not expect patch to re-create the binary "
                    "file: %r" % fname)
    finally:
        os.chdir(top)
def test_added_file_pending(self):
    p4 = P4()
    top = os.getcwd()
    andrew = testsupport.users['andrew']
    fname = 'test_added_file_pending.txt'
    try:
        os.chdir(andrew['home'])

        # Make the first revision.
        fout = open(fname, 'w')
        for i in range(10):
            fout.write('line %d\n' % i)
        fout.close()
        p4.add(fname)

        # Generate a patch for the default pending changelist.
        argv = ['px', 'genpatch']
        output, error, retval = testsupport.run(argv)
        self.failIf(error, "Unexpected error output: %r" % error)
        self.failIf(retval)
        patch = ''.join(output)

        # Sync back to before the change, apply the patch, and see
        # if the results are the same as the actual change.
        before = open(fname, 'r').read()
        p4.revert(fname)
        os.chmod(fname, 0777)
        os.remove(fname)
        self._applyPatch(patch)
        after = open(fname, 'r').read()
        self.failUnless(before == after,
                        "Applying the generated patch did not work.")
    finally:
        os.chdir(top)
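# The genpatch tests above rely on a self._applyPatch() helper defined
# elsewhere in this suite. A standalone sketch of the idea, assuming GNU
# `patch` is available on PATH (the suite's real helper may differ):
import subprocess

def apply_patch_sketch(patch_text: str) -> None:
    """Feed generated patch text to `patch -p0` in the current directory."""
    subprocess.run(
        ["patch", "-p0"],
        input=patch_text.encode("utf-8"),
        check=True,  # raises CalledProcessError if the patch does not apply
    )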
def test_PATHEXT_failure(self):
    os.environ["PATH"] += os.pathsep + self.tmpdir
    output, error, retval = testsupport.run(self.which + ' whichtestapp3')
    self.failUnless(
        retval == 1,
        "'which ...' should have returned 1: retval=%d" % retval)
def test_opt_help(self):
    output, error, retval = testsupport.run(self.which + ' --help')
    token = 'Usage:'
    self.failUnless(output.find(token) != -1,
                    "'%s' was not found in 'which --help' output: '%s' "
                    % (token, output))
    self.failUnless(retval == 0,
                    "'which --help' did not return 0: retval=%d" % retval)
def test_opt_help(self):
    output, error, retval = testsupport.run(self.which+' --help')
    token = 'Usage:'.encode('ascii')
    self.failUnless(output.find(token) != -1,
                    "'%s' was not found in 'which --help' output: '%s' "
                    % (token, output))
    self.failUnless(retval == 0,
                    "'which --help' did not return 0: retval=%d" % retval)
def test_version(self):
    argv = ["px", "--version"]
    output, error, retval = testsupport.run(argv)
    landmarkRe = re.compile(r"px \d\.\d\.\d")
    match = landmarkRe.search(output[0])
    self.failUnless(
        match,
        "Could not find '%s' in first output line "
        "of %s: %r" % (landmarkRe.pattern, argv, output)
    )
def test_exts(self): os.environ["PATH"] += os.pathsep + self.tmpdir output, error, retval = testsupport.run(self.which+' -e .wta whichtestapp3') expectedOutput = os.path.join(self.tmpdir, "whichtestapp3") self.failUnless(self._match(output.strip(), expectedOutput), "Output, %r, and expected output, %r, do not match."\ % (output.strip(), expectedOutput)) self.failUnless(retval == 0, "'which ...' should have returned 0: retval=%d" % retval)
def test_help(self):
    argv = ["px", "--help"]
    output, error, retval = testsupport.run(argv)
    landmark = "px Options:"
    for line in output:
        if line.strip().startswith(landmark):
            break
    else:
        self.fail("No '%s' line in output of %s." % (landmark, argv))
def test_exts(self): os.environ["PATH"] += os.pathsep + self.tmpdir output, error, retval = testsupport.run(self.which + ' -e .wta whichtestapp3') expectedOutput = os.path.join(self.tmpdir, "whichtestapp3") self.failUnless(self._match(output.strip(), expectedOutput), "Output, %r, and expected output, %r, do not match."\ % (output.strip(), expectedOutput)) self.failUnless( retval == 0, "'which ...' should have returned 0: retval=%d" % retval)
def test_opt_version(self):
    output, error, retval = testsupport.run(self.which+' --version')
    versionRe = re.compile(r"^which \d+\.\d+\.\d+$".encode('ascii'))
    versionMatch = versionRe.search(output.strip())
    self.failUnless(versionMatch,
                    "Version, '%s', from 'which --version' does not "
                    "match pattern, '%s'."
                    % (output.strip(), versionRe.pattern))
    self.failUnless(retval == 0,
                    "'which --version' did not return 0: retval=%d"
                    % retval)
def test_opt_version(self):
    output, error, retval = testsupport.run(self.which + ' --version')
    versionRe = re.compile(r"^which \d+\.\d+\.\d+$")
    versionMatch = versionRe.search(output.strip())
    self.failUnless(versionMatch,
                    "Version, '%s', from 'which --version' does not "
                    "match pattern, '%s'."
                    % (output.strip(), versionRe.pattern))
    self.failUnless(retval == 0,
                    "'which --version' did not return 0: retval=%d"
                    % retval)
def test_same_as_p4_1(self):
    p4 = P4()
    top = os.getcwd()
    andrew = testsupport.users['andrew']
    fname = 'test_same_as_p4_1.txt'
    try:
        os.chdir(andrew['home'])

        # Make first version of a file.
        fout = open(fname, 'w')
        for i in range(10):
            fout.write('line %d\n' % i)
        fout.close()
        p4.add(fname)
        p4.submit(fname, "for test_same_as_p4_1")

        # Make an edit to be able to compare the diffs.
        p4.edit(fname)
        fout = open(fname, 'a')
        fout.write("another line\n")
        fout.close()

        pxArgv = ['px', 'diff', '-du', './...']
        pxOutput, pxError, pxRetval = testsupport.run(pxArgv)
        p4Argv = ['p4', 'diff', '-du', './...']
        p4Output, p4Error, p4Retval = testsupport.run(p4Argv)
        self.failUnless(pxOutput == p4Output,
                        "Output from running %s was not the same as %s. This %r "
                        "vs this %r." % (pxArgv, p4Argv, pxOutput, p4Output))
        self.failUnless(pxError == p4Error,
                        "Error from running %s was not the same as %s. This %r "
                        "vs this %r." % (pxArgv, p4Argv, pxError, p4Error))
        self.failUnless(pxRetval == p4Retval,
                        "Retval from running %s was not the same as %s. This %r "
                        "vs this %r." % (pxArgv, p4Argv, pxRetval, p4Retval))

        # cleanup
        p4.revert(fname)
    finally:
        os.chdir(top)
def main() -> None:
    with tempfile.TemporaryDirectory() as tmpdir:
        temp_path = Path(tmpdir)
        mnt_path = fuse_mount(temp_path, "memfs_mnt")
        fuse_check_mnt(tmpdir, mnt_path)
        with subtest(""):
            run([os.path.join("tests", "file_io.sh"), "-o",
                 os.path.join(mnt_path, "foo"), "hello"])
            proc = run([os.path.join("tests", "file_io.sh"), "-d",
                        os.path.join(mnt_path, "foo")],
                       stdout=subprocess.PIPE)
            try:
                if proc.stdout != "hello\n":
                    fuse_unmount(mnt_path)
                    print("Written and read data don't match")
                    sys.exit(1)
            except Exception as e:
                fuse_unmount(mnt_path)
                sys.exit(1)
        fuse_unmount(mnt_path)
        sys.exit(0)
def test_two_successes(self):
    os.environ["PATH"] += os.pathsep + self.tmpdir
    apps = ['whichtestapp1', 'whichtestapp2']
    output, error, retval = testsupport.run(
        self.which + ' -q ' + ' '.join(apps))
    lines = output.strip().split("\n".encode('ascii'))
    for app, line in zip(apps, lines):
        expected = os.path.join(self.tmpdir, app)
        self.failUnless(self._match(line, expected),
                        "Output, %r, and expected output, %r, do not match."
                        % (line, expected))
    self.failUnless(retval == 0,
                    "'which ...' should have returned 0: retval=%d" % retval)
def test_two_successes(self):
    os.environ["PATH"] += os.pathsep + self.tmpdir
    apps = ['whichtestapp1', 'whichtestapp2']
    output, error, retval = testsupport.run(self.which + ' -q ' + ' '.join(apps))
    lines = output.strip().split("\n")
    for app, line in zip(apps, lines):
        expected = os.path.join(self.tmpdir, app)
        self.failUnless(self._match(line, expected),
                        "Output, %r, and expected output, %r, do not match."
                        % (line, expected))
    self.failUnless(
        retval == 0,
        "'which ...' should have returned 0: retval=%d" % retval)
def test_no_newline_at_end_of_file(self):
    p4 = P4()
    top = os.getcwd()
    andrew = testsupport.users['andrew']
    fname = 'test_no_newline_at_end_of_file.txt'
    try:
        os.chdir(andrew['home'])

        # Make the first revision.
        fout = open(fname, 'w')
        for i in range(10):
            fout.write('line %d\n' % i)
        fout.close()
        p4.add(fname)
        p4.submit(fname, "first submission")

        # Make a second revision (with *no* trailing newline).
        p4.edit(fname)
        fout = open(fname, 'a')
        fout.write("another line")
        fout.close()
        change = p4.submit(fname, "add another line")['change']

        # Generate a patch for the last change.
        argv = ['px', 'genpatch', str(change)]
        output, error, retval = testsupport.run(argv)
        self.failIf(error)
        self.failIf(retval)
        patch = ''.join(output)
        self.failUnless(patch.find("No newline at end of file") != -1)

        # Sync back to before the change, apply the patch, and see
        # if the results are the same as the actual change.
        before = open(fname, 'r').read()
        p4.sync(fname + "#1")
        self._applyPatch(patch)
        after = open(fname, 'r').read()
        if after.endswith('\n'):
            # This is sort of cheating the test, but 'patch' ignores
            # the correctly placed '\ No newline at end of file'
            # that both "diff" and "px genpatch" produce. What are
            # you gonna do?
            after = after[:-1]
        self.failUnless(before == after,
                        "Applying the generated patch did not work.")
    finally:
        os.chdir(top)
def test_pending(self):
    p4 = P4()
    top = os.getcwd()
    andrew = testsupport.users['andrew']
    fname = 'test_pending.txt'
    try:
        os.chdir(andrew['home'])

        # Make the first revision.
        fout = open(fname, 'w')
        for i in range(10):
            fout.write('line %d\n' % i)
        fout.close()
        p4.add(fname)
        p4.submit(fname, "first submission")

        # Make a pending change with edits.
        p4.edit(fname)
        fout = open(fname, 'a')
        fout.write("another line\n")
        fout.close()
        change = p4.change(fname, "my pending edits")['change']

        # Generate a patch for the pending change.
        argv = ['px', 'genpatch', str(change)]
        output, error, retval = testsupport.run(argv)
        self.failIf(error)
        self.failIf(retval)
        patch = ''.join(output)

        # Sync back to before the change, apply the patch, and see
        # if the results are the same as the actual change.
        before = open(fname, 'r').read()
        p4.revert(fname)
        p4.sync(fname + "#1")
        self._applyPatch(patch)
        after = open(fname, 'r').read()
        self.failUnless(before == after,
                        "Applying the generated patch did not work.")
    finally:
        os.chdir(top)
def test_empty_added_file(self):
    p4 = P4()
    top = os.getcwd()
    andrew = testsupport.users['andrew']
    fname = 'test_empty_added_file.txt'
    try:
        os.chdir(andrew['home'])

        # "Add" a file that does not exist.
        p4.add(fname)

        # Generate a patch for the default pending change.
        argv = ['px', 'genpatch', 'default']
        output, error, retval = testsupport.run(argv)
        self.failIf(error, "This command, %r, failed with this error "
                    "output: %s" % (argv, error))
        self.failIf(retval)
        patch = ''.join(output)
    finally:
        p4.revert(fname)
        os.chdir(top)
def test_backout_add(self):
    p4 = P4()
    top = os.getcwd()
    andrew = testsupport.users['andrew']
    fname = 'test_backout_add.txt'
    try:
        os.chdir(andrew['home'])
        fout = open(fname, 'w')
        for i in range(10):
            fout.write('line %d\n' % i)
        fout.close()
        p4.add(fname)
        result = p4.submit(fname, 'first checkin of this file')
        self.failUnless(result['action'] == 'submitted')
        cnum = result['change']

        argv = ['px', 'backout', str(cnum)]
        output, error, retval = testsupport.run(argv)

        lineRe = re.compile(r'^Change (\d+) created to backout change '
                            r'(\d+)\.$')
        match = lineRe.match(output[0])
        self.failUnless(match, "Unknown 'px backout' first output "
                        "line: '%s'." % output[0])
        self.failUnless(int(match.group(2)) == cnum)
        pendingCnum = int(match.group(1))

        c = p4.change(change=pendingCnum)
        self.failUnless(c['description'].startswith("Backout change"))
        self.failUnless(len(c['files']) == 1)
        self.failUnless(c['files'][0]['action'] == 'delete')
        self.failUnless(c['files'][0]['depotFile']
                        == p4.where(fname)[0]['depotFile'])

        # cleanup
        p4.change([], change=pendingCnum)
        p4.change(change=pendingCnum, delete=1)
        p4.revert(fname)
    finally:
        os.chdir(top)
def _testOneInputFile(self, fname):
    import preprocess
    DEBUG = False  # Set to true to dump status info for each test run.

    # Determine input options to use, if any.
    optsfile = os.path.join('inputs', fname + '.opts')  # input options
    opts = []
    if os.path.exists(optsfile):
        for line in open(optsfile, 'r').readlines():
            if line[-1] == "\n":
                line = line[:-1]
            opts.append(line.strip())
        #print "options from '%s': %s" % (optsfile, pprint.pformat(opts))

    # Preprocess.
    infile = os.path.join('inputs', fname)   # input
    outfile = os.path.join('tmp', fname)     # actual output
    preprocess_py = join(dirname(dirname(abspath(__file__))), "lib", "preprocess.py")
    argv = [sys.executable, preprocess_py] + opts + ["-o", outfile, infile]
    dummy, err, retval = testsupport.run(argv)
    try:
        out = open(outfile, 'r').read()
    except IOError, ex:
        self.fail("unexpected error running '%s': '%s' was not generated:\n"
                  "\t%s" % (' '.join(argv), outfile, err))
def test_list_new_files(self):
    p4 = P4()
    top = os.getcwd()
    andrew = testsupport.users['andrew']
    fname = 'test_list_new_files.txt'
    try:
        os.chdir(andrew['home'])
        fout = open(fname, 'w')
        for i in range(10):
            fout.write('line %d\n' % i)
        fout.close()
        p4.add(fname)

        argv = ['px', 'diff', '-sn', './...']
        output, error, retval = testsupport.run(argv)
        files = [f[:-1] for f in output]  # drop newlines
        self.failUnless(os.path.abspath(fname) in files)
        self.failIf(error)
        self.failIf(retval)

        # cleanup
        p4.revert(fname)
    finally:
        os.chdir(top)
def test_no_args(self):
    output, error, retval = testsupport.run(self.which)
    self.failUnless(retval == -1,
                    "'which' with no args should return -1: retval=%d"
                    % retval)
def test_one_failure(self):
    output, error, retval = testsupport.run(
        self.which+' whichtestapp1')
    self.failUnless(retval == 1,
                    "One failure did not return 1: retval=%d" % retval)
def _testOneInputFile(self, fname):
    import preprocess
    DEBUG = False  # Set to true to dump status info for each test run.

    # Determine input options to use, if any.
    optsfile = os.path.join('inputs', fname + '.opts')  # input options
    opts = []
    if os.path.exists(optsfile):
        for line in open(optsfile, 'r').readlines():
            if line[-1] == "\n":
                line = line[:-1]
            opts.append(line.strip())
        #print "options from '%s': %s" % (optsfile, pprint.pformat(opts))

    # Preprocess.
    infile = os.path.join('inputs', fname)   # input
    outfile = os.path.join('tmp', fname)     # actual output
    preprocess_py = join(dirname(dirname(abspath(__file__))), "lib", "preprocess.py")
    argv = [sys.executable, preprocess_py] + opts + ["-o", outfile, infile]
    dummy, err, retval = testsupport.run(argv)
    try:
        out = open(outfile, 'r').read()
    except IOError as ex:
        self.fail("unexpected error running '%s': '%s' was not generated:\n"
                  "\t%s" % (' '.join(argv), outfile, err))
    if DEBUG:
        print()
        print("*" * 50, "cmd")
        print(' '.join(argv))
        print("*" * 50, "stdout")
        print(out)
        print("*" * 50, "stderr")
        print(err)
        print("*" * 50, "retval")
        print(str(retval))
        print("*" * 50)

    # Compare results with the expected.
    expoutfile = os.path.join('outputs', fname)           # expected stdout output
    experrfile = os.path.join('outputs', fname + '.err')  # expected error output
    if os.path.exists(expoutfile):
        expout = open(expoutfile, 'r').read()
        #print "expected stdout output: %r" % expout
        if not sys.platform.startswith("win"):
            expout = expout.replace('\\', '/')  # use Un*x paths
        if expout != out:
            diff = list(difflib.ndiff(expout.splitlines(1),
                                      out.splitlines(1)))
            self.fail("%r != %r:\n%s"
                      % (expoutfile, outfile, pprint.pformat(diff)))
    if os.path.exists(experrfile):
        experr = open(experrfile, 'r').read()
        #print "expected stderr output: %r" % experr
        massaged_experr = experr.replace("inputs/", "inputs" + os.sep)
        diff = list(
            difflib.ndiff(massaged_experr.strip().splitlines(1),
                          err.strip().splitlines(1)))
        self.assertEqual(massaged_experr.strip(), err.strip(),
                         "<expected error> != <actual error>:\n%s"
                         % pprint.pformat(diff))
    elif err:
        self.fail("there was error output when processing '%s', but no "
                  "expected stderr output file, '%s'" % (infile, experrfile))

    # Ensure next test file gets a clean preprocess.
    del sys.modules['preprocess']
def fuse_unmount(mnt_path: str) -> "subprocess.CompletedProcess[Text]":
    if os.path.isdir(mnt_path):
        if os.path.ismount(mnt_path):
            run(["fusermount", "-u", str(mnt_path)])
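# The memfs test above assumes a fuse_mount() counterpart to this helper. A
# hypothetical sketch of what it might do (the project's real helper, binary
# name, and mount behaviour may differ): create the mount point and start the
# FUSE file system on it, relying on the binary to daemonize once mounted.
import os
import subprocess
from pathlib import Path

def fuse_mount_sketch(temp_path: Path, name: str) -> str:
    mnt_path = str(temp_path.joinpath(name))
    os.makedirs(mnt_path, exist_ok=True)
    # Assumes a ./memfs binary built by the project; adjust the path as needed.
    subprocess.run(["./memfs", mnt_path], check=True)
    return mnt_path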
def test_PATHEXT_failure(self):
    os.environ["PATH"] += os.pathsep + self.tmpdir
    output, error, retval = testsupport.run(self.which+' whichtestapp3')
    self.failUnless(retval == 1,
                    "'which ...' should have returned 1: retval=%d" % retval)
def main() -> None:
    # Run the test program
    test_lock_hashmap = test_root().joinpath("lockfree_hashmap")
    if not test_lock_hashmap.exists():
        run(["make", "-C", str(test_root()), str(test_lock_hashmap)])

    times = []
    with tempfile.TemporaryDirectory() as tmpdir:
        with subtest("Checking correctness"):
            with open(f"{tmpdir}/stdout", "w+") as stdout:
                run_project_executable(
                    str(test_lock_hashmap),
                    args=["-d20000", "-i10000", "-n4", "-r10000", "-u100", "-b1"],
                    stdout=stdout,
                )
                output = open(f"{tmpdir}/stdout").readlines()
                sanity_check(output[1:], 1, 10000, 4)

        with subtest("Checking 1 thread time"):
            with open(f"{tmpdir}/stdout", "w+") as stdout:
                runtime = 0.0
                for i in range(0, 3):
                    run_project_executable(
                        str(test_lock_hashmap),
                        args=["-d20000", "-i10000", "-n1", "-r10000", "-u10", "-b1"],
                        stdout=stdout,
                    )
                    output = open(f"{tmpdir}/stdout").readlines()
                    runtime += float(output[0].strip())
                    sanity_check(output[1:], 1, 10000, 1)
                times.append(runtime / 3)

        with subtest("Checking 2 thread time"):
            with open(f"{tmpdir}/stdout", "w+") as stdout:
                runtime = 0.0
                for i in range(0, 3):
                    run_project_executable(
                        str(test_lock_hashmap),
                        args=["-d20000", "-i10000", "-n2", "-r10000", "-u10", "-b1"],
                        stdout=stdout,
                    )
                    output = open(f"{tmpdir}/stdout").readlines()
                    runtime += float(output[0].strip())
                    sanity_check(output[1:], 1, 10000, 2)
                times.append(runtime / 3)

        with subtest("Checking 4 thread time"):
            with open(f"{tmpdir}/stdout", "w+") as stdout:
                runtime = 0.0
                for i in range(0, 3):
                    run_project_executable(
                        str(test_lock_hashmap),
                        args=["-d20000", "-i10000", "-n4", "-r10000", "-u10", "-b1"],
                        stdout=stdout,
                    )
                    output = open(f"{tmpdir}/stdout").readlines()
                    runtime += float(output[0].strip())
                    sanity_check(output[1:], 1, 10000, 4)
                times.append(runtime / 3)

        f1 = times[0] / times[1]
        f2 = times[1] / times[2]
        if f1 < 1.4 or f2 < 1.4:
            warn("Hashmap is not scaling properly: " + str(times))
            exit(1)

        with subtest("Checking if hashmap cleans up items when removing"):
            test_cleanup_lockfree = test_root().joinpath("test_cleanup_lockfree")
            if not test_cleanup_lockfree.exists():
                run(["make", "-C", str(test_root()), str(test_cleanup_lockfree)])
            with open(f"{tmpdir}/stdout", "w+") as stdout:
                run_project_executable(str(test_cleanup_lockfree), stdout=stdout)
                stdout.seek(0)
                lines = stdout.readlines()
                first = float(lines[0])
                second = float(lines[1])
                if second / first > 1.5:
                    warn(f"Hashmap does not cleanup properly when removing items: "
                         f"{first}, {second}")
                    exit(1)
def test_two_failures(self):
    output, error, retval = testsupport.run(
        self.which+' whichtestapp1 whichtestapp2')
    self.failUnless(retval == 2,
                    "Two failures did not return 2: retval=%d" % retval)
def test_one_failure(self):
    output, error, retval = testsupport.run(self.which + ' whichtestapp1')
    self.failUnless(retval == 1,
                    "One failure did not return 1: retval=%d" % retval)
def test_backout_addeditdelete(self):
    p4 = P4()
    top = os.getcwd()
    andrew = testsupport.users['andrew']
    fnameAdd = 'test_backout_addeditdelete_add.txt'
    fnameEdit = 'test_backout_addeditdelete_edit.txt'
    fnameDelete = 'test_backout_addeditdelete_delete.txt'
    try:
        os.chdir(andrew['home'])

        # Make this change:
        # ... //depot/foo#1 add
        # ... //depot/bar#3 delete
        # ... //depot/ola#4 edit
        fout = open(fnameEdit, 'w')
        for i in range(10):
            fout.write('line %d\n' % i)
        fout.close()
        fout = open(fnameDelete, 'w')
        fout.write("Hello from the add file.")
        fout.close()
        p4.add([fnameEdit, fnameDelete])
        p4.submit([fnameEdit, fnameDelete],
                  'setup for test_backout_addeditdelete')

        p4.delete(fnameDelete)
        p4.edit(fnameEdit)
        contents = open(fnameEdit, 'r').readlines()
        contents[0] = contents[0][:-1] + " (hello again)\n"
        fout = open(fnameEdit, 'w')
        fout.write(''.join(contents))
        fout.close()
        fout = open(fnameAdd, 'w')
        fout.write("Hello from the add file.")
        fout.close()
        p4.add(fnameAdd)
        result = p4.submit([fnameAdd, fnameEdit, fnameDelete],
                           'test submission to backout')
        cnum = result['change']

        argv = ['px', 'backout', str(cnum)]
        output, error, retval = testsupport.run(argv)

        lineRe = re.compile(r'^Change (\d+) created to backout change '
                            r'(\d+)\.$')
        match = lineRe.match(output[0])
        self.failUnless(match, "Unknown 'px backout' first output "
                        "line: '%s'." % output[0])
        self.failUnless(int(match.group(2)) == cnum)
        pendingCnum = int(match.group(1))

        c = p4.change(change=pendingCnum)
        self.failUnless(c['description'].startswith("Backout change"))
        self.failUnless(len(c['files']) == 3)
        for file in c['files']:
            localFile = p4.where(file['depotFile'])[0]['localFile']
            basename = os.path.basename(localFile)
            if basename == fnameAdd:
                self.failUnless(file['action'] == 'delete')
            if basename == fnameDelete:
                self.failUnless(file['action'] == 'add')
            if basename == fnameEdit:
                self.failUnless(file['action'] == 'edit')

        # cleanup
        p4.change([], change=pendingCnum)
        p4.change(change=pendingCnum, delete=1)
        p4.revert([fnameAdd, fnameDelete, fnameEdit])
    finally:
        os.chdir(top)
def test_two_failures(self):
    output, error, retval = testsupport.run(self.which + ' whichtestapp1 whichtestapp2')
    self.failUnless(retval == 2,
                    "Two failures did not return 2: retval=%d" % retval)
def main() -> None:
    # Run the test program
    lib = ensure_library("liblockhashmap.so")
    extra_env = {"LD_LIBRARY_PATH": str(os.path.dirname(lib))}
    test_lock_hashmap = test_root().joinpath("lock_hashmap")
    if not test_lock_hashmap.exists():
        run(["make", "-C", str(test_root()), str(test_lock_hashmap)])

    times = []
    with tempfile.TemporaryDirectory() as tmpdir:
        with open(f"{tmpdir}/stdout", "w+") as stdout:
            run_project_executable(
                str(test_lock_hashmap),
                args=["-d20000", "-i10000", "-n4", "-r10000", "-u100", "-b1"],
                stdout=stdout,
                extra_env=extra_env,
            )
            output = open(f"{tmpdir}/stdout").readlines()
            sanity_check(output[1:], 1, 10000, 4)

        with subtest("Checking 1 thread time"):
            with open(f"{tmpdir}/stdout", "w+") as stdout:
                runtime = 0.0
                for i in range(0, 3):
                    run_project_executable(
                        str(test_lock_hashmap),
                        args=["-d2000000", "-i100000", "-n1", "-r100000", "-u10"],
                        stdout=stdout,
                        extra_env=extra_env,
                    )
                    output = open(f"{tmpdir}/stdout").readlines()
                    runtime += float(output[0].strip())
                    sanity_check(output[1:], 512, 100000, 1)
                times.append(runtime / 3)

        with subtest("Checking 2 thread time"):
            with open(f"{tmpdir}/stdout", "w+") as stdout:
                runtime = 0.0
                for i in range(0, 3):
                    run_project_executable(
                        str(test_lock_hashmap),
                        args=["-d2000000", "-i100000", "-n2", "-r100000", "-u10"],
                        stdout=stdout,
                        extra_env=extra_env,
                    )
                    output = open(f"{tmpdir}/stdout").readlines()
                    runtime += float(output[0].strip())
                    sanity_check(output[1:], 512, 100000, 2)
                times.append(runtime / 3)

        with subtest("Checking 4 thread time"):
            with open(f"{tmpdir}/stdout", "w+") as stdout:
                runtime = 0.0
                for i in range(0, 3):
                    run_project_executable(
                        str(test_lock_hashmap),
                        args=["-d2000000", "-i100000", "-n4", "-r100000", "-u10"],
                        stdout=stdout,
                        extra_env=extra_env,
                    )
                    output = open(f"{tmpdir}/stdout").readlines()
                    runtime += float(output[0].strip())
                    sanity_check(output[1:], 512, 100000, 4)
                times.append(runtime / 3)

        f1 = times[0] / times[1]
        f2 = times[1] / times[2]
        if f1 < 1.4 or f2 < 1.4:
            warn("Hashmap is not scaling properly: " + str(times))
            exit(1)