def main():
    parser = argparse.ArgumentParser(description='_Functional_ tests runner. '
                                                 'For _unit_ tests -- run with nosetests3')
    parser.add_argument('--nonstop', action='store_true', required=False, default=False,
                        help="don't stop on error")

    args = parser.parse_args(sys.argv[1:])
    print(args)

    all_passed = True
    for bench_size in realizable + unrealizable + unknown:
        result = run_benchmark('elli.py',
                               bench_size[0],
                               {100: REALIZABLE_RC,
                                10: UNREALIZABLE_RC,
                                1: UNKNOWN_RC}[(bench_size in realizable) * 100 +
                                               (bench_size in unrealizable) * 10 +
                                               (bench_size in unknown)],
                               bench_size[1])
        all_passed &= result
        if not args.nonstop and result is False:
            exit(1)

    print('-' * 80)
    print(['SOME TESTS FAILED', 'ALL TESTS PASSED'][all_passed])
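# Note: the membership-encoding dict lookup above is compact but opaque. For items
# drawn from the three lists, and assuming the lists are disjoint (the lookup raises
# KeyError otherwise), it is equivalent to this plain conditional -- a readability
# sketch, not code from the repository:
def _expected_rc(bench_size):
    if bench_size in realizable:
        return REALIZABLE_RC
    if bench_size in unrealizable:
        return UNREALIZABLE_RC
    return UNKNOWN_RC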
def main():
    parser = argparse.ArgumentParser(
        description='_Functional_ tests runner for star.py. '
                    'For _unit_ tests -- run with nosetests3')
    parser.add_argument('--nonstop', action='store_true', required=False, default=False,
                        help="don't stop on error")

    args = parser.parse_args(sys.argv[1:])
    print(args)

    all_passed = True
    for benchmark in realizable + unknown:
        # TODO: split into two loops (see the sketch below this function):
        #       for realizable benchmarks, set size_expected;
        #       for unknown ones, use None
        print('testing ' + benchmark)
        result = run_benchmark('star.py', benchmark, _get_status(benchmark), None)
        all_passed &= result
        if not args.nonstop and result is False:
            exit(1)

    print('-' * 80)
    print(['SOME TESTS FAILED', 'ALL TESTS PASSED'][all_passed])
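# One possible shape for the TODO above -- a sketch only: `expected_sizes` is a
# hypothetical benchmark->size table that this excerpt does not define, and
# run_benchmark's last parameter is assumed to be the expected model size:
def _run_split(nonstop):
    all_passed = True
    for benchmark in realizable:
        print('testing ' + benchmark)
        result = run_benchmark('star.py', benchmark, _get_status(benchmark),
                               expected_sizes.get(benchmark))  # hypothetical table
        all_passed &= result
        if not nonstop and result is False:
            exit(1)
    for benchmark in unknown:
        print('testing ' + benchmark)
        result = run_benchmark('star.py', benchmark, _get_status(benchmark), None)
        all_passed &= result
        if not nonstop and result is False:
            exit(1)
    return all_passed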
def main():
    parser = argparse.ArgumentParser(description='_Functional_ tests runner. '
                                                 'For _unit_ tests -- run with nosetests')
    parser.add_argument('--nonstop', action='store_true', required=False, default=False,
                        help="don't stop on error")

    args = parser.parse_args(sys.argv[1:])
    print(args)

    all_passed = True
    for benchmark in realizable + unrealizable + unknown:
        result = run_benchmark('src/elli.py', benchmark, _get_status(benchmark))
        all_passed &= result
        if not args.nonstop and result is False:
            exit(1)

    print('-' * 80)
    print(['SOME TESTS FAILED', 'ALL TESTS PASSED'][all_passed])
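# `_get_status` is called by the runners above but not shown in this excerpt. A
# minimal sketch of what it plausibly computes, assuming the benchmark lists and
# the *_RC return-code constants defined alongside these runners:
def _get_status(benchmark):
    if benchmark in realizable:
        return REALIZABLE_RC
    if benchmark in unrealizable:
        return UNREALIZABLE_RC
    assert benchmark in unknown, benchmark
    return UNKNOWN_RC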
"non_parameterized/others/full_arbiter2.ltl --moore --bound 3 --incr", "non_parameterized/others/pnueli_arbiter2.ltl --moore --bound 2 --incr", ] if __name__ == '__main__': parser = argparse.ArgumentParser( description='Parametrized Synthesis Tool for token rings architecture') parser.add_argument('--nonstop', action='store_true', required=False, default=False, help="don't stop on error") args = parser.parse_args(sys.argv[1:]) print(args) realizable = _REALIZABLE unrealizable = _UNREALIZABLE all_passed = True for benchmark in realizable + unrealizable: result = run_benchmark('src/bosy.py', benchmark, benchmark in realizable) all_passed &= result if not args.nonstop and result is False: exit(1) print('-' * 80) print(['SOME TESTS FAILED', 'ALL TESTS PASSED'][all_passed])