op.add_option("-f", "--failures", action="store_true", default=False, help="show only failures") op.add_option("-m", "--missing", action="store_true", default=False, help="show only missing tests") op.add_option("-t", "--tolerance", action="store", default=1e-13, help="tolerance used to compare numbers") op.add_option("-s", "--sgtolerance", action="store", default=1e-5, help="shogun tolerance used to compare numbers in shogun objects") op.set_usage("[<file1> <file2> ...]") (opts, args)=op.parse_args() if opts.debug: cmp_method=compare_dbg else: cmp_method=compare tests = setup_tests(args) failed = tester(tests, cmp_method, opts) if failed: print("The following tests failed!") for f in failed: print("\t" + f[0]) if is_python2(): print("Detailed failures:") for f in failed: print("\t" + f[0]) got=get_split_string(f[1]) expected=get_split_string(f[2]) #print "=== EXPECTED ==========" #import pdb #pdb.set_trace()
op = OptionParser() op.add_option("-d", "--debug", action="store_true", default=False, help="detailed debug output of objects that don't match") op.add_option("-f", "--failures", action="store_true", default=False, help="show only failures") op.add_option("-m", "--missing", action="store_true", default=False, help="show only missing tests") op.add_option("-t", "--tolerance", action="store", default=None, help="tolerance used to estimate accuracy") op.set_usage("[<file1> <file2> ...]") (opts, args) = op.parse_args() if opts.debug: cmp_method = compare_dbg else: cmp_method = compare tests = setup_tests(args) tester(tests, cmp_method, opts.tolerance, opts.failures, opts.missing)
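
# Example invocation (a hypothetical sketch: the script name and testcase file
# names below are assumptions, not taken from this file):
#
#   python tester.py -f -t 1e-10 -s 1e-4 kernel_gaussian.m distance_euclidean.m
#
# This would run only the listed testcase files, report failures only, and
# loosen both the numeric tolerance and the shogun object tolerance.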