def run_test(inp, regex):
    parser = argparse.ArgumentParser(description='Run cmark tests.')
    parser.add_argument('--program', dest='program', nargs='?', default=None,
                        help='program to test')
    parser.add_argument('--library-dir', dest='library_dir', nargs='?',
                        default=None, help='directory containing dynamic library')
    args = parser.parse_args(sys.argv[1:])
    cmark = CMark(prog=args.program, library_dir=args.library_dir)
    [rc, actual, err] = cmark.to_html(inp)
    if rc != 0:
        print('[ERRORED (return code %d)]' % rc)
        print(err)
        exit(1)
    elif regex.search(actual):
        print('[PASSED]')
    else:
        print('[FAILED (mismatch)]')
        print(repr(actual))
        exit(1)
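# Usage sketch (not part of the original script): run_test() takes an input
# string and a compiled regex that the rendered HTML must match, in the style
# of cmark's pathological-case tests. The nested-bracket input and the
# expected-output regex below are hypothetical examples; `re`, `sys`,
# `argparse`, and CMark are assumed to be imported at the top of the file.
if __name__ == "__main__":
    nested_brackets = '[' * 10000 + 'a' + ']' * 10000
    run_test(nested_brackets, re.compile(r'\[+a\]+'))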
def converter(md):
    cmark = CMark(prog=args.program, library_dir=args.library_dir)
    [ec, result, err] = cmark.to_commonmark(md)
    if ec == 0:
        [ec, html, err] = cmark.to_html(result)
        if ec == 0:
            # In the commonmark writer we insert dummy HTML
            # comments between lists, and between lists and code
            # blocks. Strip these out, since the spec uses
            # two blank lines instead:
            return [ec, re.sub('<!-- end list -->\n', '', html), '']
        else:
            return [ec, html, err]
    else:
        return [ec, result, err]
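# Sketch of the round-trip driver that converter() is meant to feed (assumed
# here; it mirrors the spec-test __main__ blocks shown further down): every
# spec example is rendered to CommonMark and back to HTML via converter(),
# and do_test() from the spec test harness tallies the outcome.
if __name__ == "__main__":
    tests = get_tests(args.spec)
    result_counts = {'pass': 0, 'fail': 0, 'error': 0, 'skip': 0}
    for test in tests:
        do_test(converter, test, args.normalize, result_counts)
    exit(result_counts['fail'] + result_counts['error'])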
if __name__ == "__main__": if args.debug_normalization: out(normalize_html(sys.stdin.read())) exit(0) all_tests = get_tests(args.spec) if args.pattern: pattern_re = re.compile(args.pattern, re.IGNORECASE) else: pattern_re = re.compile('.') tests = [ test for test in all_tests if re.search(pattern_re, test['section']) and ( not args.number or test['example'] == args.number) ] if args.dump_tests: out(json.dumps(tests, ensure_ascii=False, indent=2)) exit(0) else: skipped = len(all_tests) - len(tests) converter = CMark(prog=args.program, library_dir=args.library_dir).to_html result_counts = {'pass': 0, 'fail': 0, 'error': 0, 'skip': skipped} for test in tests: do_test(converter, test, args.normalize, result_counts) out("{pass} passed, {fail} failed, {error} errored, {skip} skipped\n". format(**result_counts)) exit(result_counts['fail'] + result_counts['error'])
        if re.search(pattern_re, test['section']):
            result = do_test(test, normalize)
            if result == 'pass':
                passed += 1
            elif result == 'fail':
                failed += 1
            else:
                errored += 1
        else:
            skipped += 1

    print("%d passed, %d failed, %d errored, %d skipped" %
          (passed, failed, errored, skipped))
    return (failed == 0 and errored == 0)

if __name__ == "__main__":
    if args.debug_normalization:
        print(normalize_html(sys.stdin.read()))
        exit(0)

    tests = get_tests(args.spec)
    if args.dump_tests:
        print(json.dumps(tests, ensure_ascii=False, indent=2))
        exit(0)
    else:
        cmark = CMark(prog=args.program, library_dir=args.library_dir)
        if do_tests(cmark, tests, args.pattern, args.normalize):
            exit(0)
        else:
            exit(1)
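# The loop above is the tail of the older do_tests() driver. A minimal sketch
# of the context it assumes (signature inferred from the __main__ block above,
# not taken verbatim from the original) would be:
#
#   def do_tests(cmark, tests, pattern, normalize):
#       passed = failed = errored = skipped = 0
#       pattern_re = re.compile(pattern, re.IGNORECASE) if pattern else re.compile('.')
#       for test in tests:
#           # per-section filtering and counting as shown above
#           ...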
                start_line = line_number - 1
            markdown_lines.append(line)
        elif state == 2:
            html_lines.append(line)
        elif state == 0 and re.match(header_re, line):
            headertext = header_re.sub('', line).strip()
    return tests

if __name__ == "__main__":
    if args.debug_normalization:
        out(normalize_html(sys.stdin.read()))
        exit(0)

    all_tests = get_tests(args.spec)
    if args.pattern:
        pattern_re = re.compile(args.pattern, re.IGNORECASE)
    else:
        pattern_re = re.compile('.')
    tests = [ test for test in all_tests
              if re.search(pattern_re, test['section']) and
                 (not args.number or test['example'] == args.number) ]
    if args.dump_tests:
        out(json.dumps(tests, indent=2))
        exit(0)
    else:
        skipped = len(all_tests) - len(tests)
        converter = CMark(prog=args.program, library_dir=args.library_dir,
                          extensions=args.extensions).to_html
        result_counts = {'pass': 0, 'fail': 0, 'error': 0, 'skip': skipped}
        for test in tests:
            do_test(converter, test, args.normalize, result_counts)
        out("{pass} passed, {fail} failed, {error} errored, {skip} skipped\n"
            .format(**result_counts))
        exit(result_counts['fail'] + result_counts['error'])
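# For context, the get_tests() fragment at the top of this block is the tail
# of a small state machine (state 0 = outside an example, 1 = reading the
# markdown half, 2 = reading the expected HTML) that walks spec.txt. Example
# blocks in the spec have this general form (contents illustrative):
#
#   ```````````````````````````````` example
#   *hello*
#   .
#   <p><em>hello</em></p>
#   ````````````````````````````````
#
# Section headings matched by header_re between example blocks supply the
# 'section' field attached to each test.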