def test_wheel_timer():
    """Exercise the fast-wheels booster timer against the validator.

    First replay the action string as-is: the game must NOT be finished
    (one move short).  Then replay it again with one extra 'D' move and
    check that the validator's score matches the game's own score.
    """
    task_data = Path(
        utils.project_root() / 'tasks' / 'part-0-mock' / 'wheels.desc'
    ).read_text()
    task = GridTask(Task.parse(task_data))
    # 'F' presumably activates wheels and 'Z' waits a turn -- TODO confirm.
    # Same string as the original literal: QQDD, F, 20 waits, F, 79 waits, D.
    actionlist = 'QQDD' + 'F' + 'Z' * 20 + 'F' + 'Z' * 79 + 'D'

    game = Game(task)
    for ch in actionlist:
        game.apply_action(Action.simple(ch))
    solution = compose_actions(game.get_actions())
    expected_score = game.finished()
    assert expected_score is None  # not done yet

    game = Game(task)
    for ch in actionlist:
        game.apply_action(Action.simple(ch))
    game.apply_action(Action.WSAD('D'))
    solution = compose_actions(game.get_actions())
    expected_score = game.finished()

    er = validate.run(task_data, solution)
    assert er.time is not None
    assert er.time == expected_score
def validate_replay(task_data, expected_score, actions):
    """Validate a manually-recorded replay and wrap the outcome in a Result.

    Composes *actions* into solution text, runs the external validator, and
    returns a solver_worker.Result with status DONE (validator accepted) or
    CHECK_FAIL (validator rejected).
    """
    import logging
    logging.basicConfig(
        level=logging.INFO,
        format='%(levelname).1s %(module)10.10s:%(lineno)-4d %(message)s')

    solution_data = compose_actions(actions)
    sr = interface.SolverResult(
        data=solution_data, expected_score=expected_score)
    scent = f'manual ({getpass.getuser()})'

    logging.info('Checking with validator...')
    er = validate.run(task_data, sr.data)
    # Both outcomes carry the same diagnostic payload.
    extra = dict(validator=er.extra, expected_score=sr.expected_score)
    if er.time is None:
        result = solver_worker.Result(
            status='CHECK_FAIL', scent=scent, score=None,
            solution=solution_data, extra=extra)
        logging.info(result)
    else:
        result = solver_worker.Result(
            status='DONE', scent=scent, score=er.time,
            solution=solution_data, extra=extra)
        logging.info(f'Validator score: {er.time}')
    return result
def test_invalid_solution():
    """A bogus solution string must be rejected (validator reports no time)."""
    task = (utils.project_root() / 'tasks' / 'part-1-examples'
            / 'example-01.desc').read_text()
    sol = 'WSAD'
    result = run(task, sol)
    print(result)
    assert result.time is None
def test_insect():
    """Smoke-test InsectSolver on the mock problem, then run the validator."""
    task_data = (utils.project_root() / 'tasks' / 'part-0-mock'
                 / 'prob-2003.desc').read_text()
    result = InsectSolver([]).solve(task_data)
    print(result)
    vr = validate.run(task_data, result.data)
    print(vr)
def test_example1(name, Solver):
    """Run *Solver* on problem 1 and cross-check its score with the validator."""
    raw = utils.get_problem_raw(1)
    result = Solver([]).solve(raw)
    if isinstance(result.data, interface.Pass):
        pytest.skip(f'solver {Solver} passed')
    vres = validate.run(raw, result.data)
    print(vres)
    assert vres.time == result.expected_score
def test_valid_solution():
    """The reference solution for example-01 must score exactly 48."""
    examples = utils.project_root() / 'tasks' / 'part-1-examples'
    task = (examples / 'example-01.desc').read_text()
    sol = (examples / 'example-01-1.sol').read_text()
    result = run(task, sol)
    print(result)
    assert result.time == 48
def main():
    """Run one solver over every task in the DB and validate its solutions.

    Usage: python -m production.solver_runner <solver> [<solver args>...]

    This is a dry run: nothing is written back to the database; scores and
    mismatches between the solver's expected score and the validator's
    verdict are only logged.
    """
    if len(sys.argv) < 2:
        print('Usage:')
        print(' python -m production.solver_runner <solver> [<solver args>...]')
        print(f'where <solver> is one of {ALL_SOLVERS.keys()}')
        sys.exit(1)

    conn = db.get_conn()
    cur = conn.cursor()

    solver = ALL_SOLVERS[sys.argv[1]](sys.argv[2:])
    logger.info(f'Solver scent: {solver.scent()!r}')

    cur.execute('''
    SELECT id FROM tasks
    ''')
    # Was "[id for [id] in cur]" -- avoid shadowing the builtin id().
    problem_ids = [task_id for [task_id] in cur]
    logger.info(f'Problems to solve: {problem_ids}')

    for problem_id in problem_ids:
        logger.info('-' * 50)
        cur.execute('SELECT name, data, extra FROM tasks WHERE id = %s',
                    [problem_id])
        [problem_name, task_data, extra] = cur.fetchone()
        logger.info(
            f'Solving task/{problem_id} ({problem_name}, {extra["legend"]})...')
        task_data = zlib.decompress(task_data).decode()

        sr = solver.solve(task_data)
        if sr.extra:
            # Use the module logger consistently (was a mix of logging.* and
            # logger.* calls in the same function).
            logger.info(f'extra = {sr.extra}')
        if isinstance(sr.data, interface.Pass):
            logger.info('Solver passed')
        elif isinstance(sr.data, interface.Fail):
            logger.warning('Solver failed')
        else:
            logger.info(f'Expected score = {sr.expected_score}, checking ...')
            res = validate.run(task_data, sr.data)
            assert res.time is not None, res
            if sr.expected_score is None:
                logger.info(f'Actual score = {res.time}')
            elif sr.expected_score != res.time:
                logger.error(
                    f'Actual score = {res.time}, the solver was wrong!!!')
            else:
                logger.info('Validation ok')

    logger.info('All done')
def main():
    """Solve the mock problem with InsectSolver and run the validator on it."""
    # Alternatively, fetch via utils.get_problem_raw(1).
    task_data = Path(
        utils.project_root() / 'tasks' / 'part-0-mock' / 'prob-2003.desc'
    ).read_text()
    result = InsectSolver([]).solve(task_data)
    logging.info(result)

    logging.info('validating...')
    vr = validate.run(task_data, result.data)
    logging.info(vr)
def test_greedy_with_extensions():
    """The greedy solver on problem 2 must pick up manipulator extensions."""
    raw = utils.get_problem_raw(2)
    expected_score, actions, extra = solve(Task.parse(raw))
    # More than the 4 initial manipulators means extensions were attached.
    assert extra['final_manipulators'] > 4
    res = validate.run(raw, compose_actions(actions))
    print(res)
    assert res.time is not None
    assert res.time == expected_score
def run_one_bot_game(actions: List[Action]):
    """Replay *actions* in a single-bot game and cross-check with the validator.

    Asserts that the validator accepts the composed solution and that its
    score equals the game's own score.
    """
    task_data = Path(
        utils.project_root() / 'tasks' / 'part-0-mock' / 'prob-2003.desc'
    ).read_text()
    game = Game(GridTask(Task.parse(task_data)))
    for action in actions:
        game.apply_action(action)

    solution = compose_actions(game.get_actions())
    expected_score = game.finished()
    er = validate.run(task_data, solution)
    assert er.time is not None
    assert er.time == expected_score
def run_cloned_game(actions: List[List[Action]]):
    """Replay per-bot action streams round-robin and cross-check the score.

    ``actions[i]`` is the action stream for bot ``i``.  The live-bot count
    is re-read at the start of each round so freshly cloned bots join the
    rotation on the next round -- presumably matching the game's cloning
    rules (TODO confirm).
    """
    task_data = Path(
        utils.project_root() / 'tasks' / 'part-0-mock' / 'prob-2003.desc'
    ).read_text()
    game = Game(GridTask(Task.parse(task_data)))

    cursors = [0] * len(actions)  # next action index per bot
    bot = 0
    while not game.finished():
        if bot == 0:
            # Refresh once per round; clones may have appeared mid-round.
            botcount = len(game.bots)
        game.apply_action(actions[bot][cursors[bot]], bot)
        cursors[bot] += 1
        bot = (bot + 1) % botcount

    solution = compose_actions(game.get_actions())
    expected_score = game.finished()
    er = validate.run(task_data, solution)
    assert er.time is not None
    assert er.time == expected_score
def solve(solver: interface.Solver, task_data: str) -> Result:
    """Run *solver* on *task_data*, validate the output, and wrap it in a Result.

    Statuses:
      PASS       -- solver declined the task
      FAIL       -- solver raised or returned interface.Fail
      CHECK_FAIL -- solver produced a solution the validator rejected
      DONE       -- solution verified; score is the validator's time
    """
    logging.info('Solving...')
    start = time.time()
    try:
        sr = solver.solve(task_data)
    except KeyboardInterrupt:
        raise
    # Was a bare "except:", which would also trap SystemExit/GeneratorExit;
    # narrow to Exception and keep the traceback in the extra payload.
    except Exception:
        sr = interface.SolverResult(
            data=interface.Fail(),
            expected_score=None,
            extra=dict(tb=traceback.format_exc()))
    solver_time = time.time() - start
    logging.info(f'It took {solver_time}')

    if isinstance(sr.data, interface.Pass):
        logging.info(f'Solver passed: {sr.extra}')
        return Result(
            scent=solver.scent(), status='PASS', score=None, solution=None,
            extra=dict(solver=sr.extra, solver_time=solver_time))

    if isinstance(sr.data, interface.Fail):
        logging.info(f'Solver failed: {sr.extra}')
        return Result(
            scent=solver.scent(), status='FAIL', score=None, solution=None,
            extra=dict(solver=sr.extra, solver_time=solver_time))

    if isinstance(sr.data, str):
        logging.info('Checking with validator...')
        start = time.time()
        er = validate.run(task_data, sr.data)
        validator_time = time.time() - start
        logging.info(f'It took {validator_time}')
        # Shared diagnostic payload for both validator outcomes.
        extra = dict(solver=sr.extra, validator=er.extra,
                     expected_score=sr.expected_score,
                     solver_time=solver_time,
                     validator_time=validator_time)
        if er.time is None:
            logging.info(f'Check failed: {er.extra}')
            return Result(
                scent=solver.scent(), status='CHECK_FAIL', score=None,
                solution=sr.data, extra=extra)
        logging.info(f'Solution verified, score={er.time}')
        # TODO: warn if score != expected_score
        return Result(
            scent=solver.scent(), status='DONE', score=er.time,
            solution=sr.data, extra=extra)

    assert False, sr.data