Example #1
def test_example1(name, Solver):
    s = utils.get_problem_raw(1)
    result = Solver([]).solve(s)
    if isinstance(result.data, interface.Pass):
        pytest.skip(f'solver {Solver} passed')
    vres = validate.run(s, result.data)
    print(vres)
    assert vres.time == result.expected_score
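The (name, Solver) arguments imply this test is parametrized over the available solvers. A hedged sketch of such wiring -- the registry below is hypothetical, and the real project may collect its solvers differently:

import pytest

# Hypothetical registry; GreedyBeamSolver and RotatorSolver appear in
# Examples #2 and #4 below.
ALL_SOLVERS = {
    'greedy_beam': GreedyBeamSolver,
    'rotator': RotatorSolver,
}

@pytest.mark.parametrize(
    'name,Solver', list(ALL_SOLVERS.items()), ids=list(ALL_SOLVERS))
def test_example1(name, Solver):
    ...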
Example #2
def main():
    problem = 11
    sol = GreedyBeamSolver([]).solve(utils.get_problem_raw(problem))
    print(sol.extra)
    sol = sol.data
    print(len(sol), 'time units')
    # utils.project_root() already returns a Path, so no Path() wrapper is needed.
    sol_path = utils.project_root() / 'outputs' / f'beam-{problem}.sol'
    sol_path.write_text(sol)
    print('result saved to', sol_path)
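A possible follow-up inside main(), mirroring the check in Example #1 above (assuming validate.run takes the raw task text and the solution string, as it does there):

    vres = validate.run(utils.get_problem_raw(problem), sol)
    print(vres)
    assert vres.time is not None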
Example #3
def test_greedy_with_extensions():
    s = utils.get_problem_raw(2)
    task = Task.parse(s)
    expected_score, actions, extra = solve(task)

    assert extra['final_manipulators'] > 4

    res = validate.run(s, compose_actions(actions))
    print(res)
    assert res.time is not None
    assert res.time == expected_score
Example #4
import logging

# Configure logging before the heavier imports below, so any import-time
# log output from the production modules already uses this format.
if __name__ == '__main__':
    logging.basicConfig(
        level=logging.INFO,
        format='%(levelname).1s %(module)10.10s:%(lineno)-4d %(message)s')
logger = logging.getLogger(__name__)

import time
import cProfile
import subprocess

from production import utils
from production.data_formats import *

from production.solvers.rotator import RotatorSolver

if __name__ == '__main__':
    profile_path = utils.project_root() / 'outputs' / 'profile'
    callgraph_path = utils.project_root() / 'outputs' / 'callgraph.png'

    task = utils.get_problem_raw(100)
    solver = RotatorSolver([])

    start = time.time()
    cProfile.run('solver.solve(task)', profile_path)
    logger.info(f'it took {time.time() - start:.2f}s')

    subprocess.check_call(
        f'gprof2dot -f pstats -n 2 {profile_path} | dot -Tpng -o {callgraph_path}',
        shell=True)
    print(f'see {callgraph_path}')
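If gprof2dot or Graphviz is unavailable, the same profile dump can be inspected with the standard library's pstats module instead; a minimal sketch:

import pstats

# Show the ten most expensive call sites by cumulative time.
stats = pstats.Stats(str(profile_path))
stats.sort_stats('cumulative').print_stats(10)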
Example #5
def from_problem(n):
    s = utils.get_problem_raw(n)
    return GridTask(Task.parse(s))
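Hypothetical usage, assuming from_problem is exposed as a staticmethod on GridTask:

task = GridTask.from_problem(2)
game = Game(task)  # Game consumes a GridTask, as in Example #7 below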
Example #6
def test_parse_and_serialize_all_problems():
    # Spot-check a spread of problems (every 17th) rather than all 300.
    for n in range(1, 300 + 1, 17):
        s = utils.get_problem_raw(n)
        t = Task.parse(s)
        assert s == str(t)
Example #7
def interactive(task_number):
    task_data = utils.get_problem_raw(task_number)
    # Task numbers above 999 are treated as local mock tasks with no DB record.
    use_db = task_number <= 999

    if use_db:
        conn = db.get_conn()
        cur = conn.cursor()
        cur.execute(
            'SELECT id, data FROM tasks WHERE name = %s',
            [f'prob-{task_number:03d}'])
        [task_id, task_data_db] = cur.fetchone()
        task_data_db = zlib.decompress(task_data_db).decode()
        assert task_data_db == task_data

    task = GridTask(Task.parse(task_data))
    game = Game(task)
    score = None

    with contextlib.closing(Display(game)) as display:
        code, c = '', ''
        display.draw_initial(game)

        while not score:
            display.draw(game, f'lastchar = {code} {c!r}')

            code = display.stdscr.getch()

            action = None

            c = chr(code).upper()

            if c == '\x1B':  # Esc exits the interactive loop
                break

            if c in Action.SIMPLE:
                action = Action.simple(c)

            # To perform a parameterized action, type it without spaces: B(1,-1)
            elif c in Action.PARAM:
                while c[-1] != ')':
                    code = display.stdscr.getch()
                    c = c + chr(code).upper()
                action = Action.parameterized(c)

            # Snapshot the bot count when the turn wraps back to bot 0,
            # so bots cloned mid-round only start acting on the next round.
            if display.current == 0:
                botcount = len(game.bots)
            if action:
                try:
                    game.apply_action(action, display.current)
                except InvalidActionException as exc:
                    display.draw_error(str(exc))
                else:
                    display.current = (display.current + 1) % botcount

            score = game.finished()

    if score is not None:
        print(f'Score: {score}')
        result = validate_replay(task_data, score, game.get_actions())
        print(result)
        if use_db:
            submit_replay(conn, task_id, result)
        else:
            mock_solutions = utils.project_root() / 'outputs' / 'mock_solutions'
            mock_solutions.mkdir(parents=True, exist_ok=True)
            # Write the solution directly; a Path is not a file handle,
            # so the original `with ... as fin` idiom is dropped.
            sol_path = mock_solutions / f'prob-{task_number}.sol'
            sol_path.write_text(result.solution)
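The same Game/Action API can also be driven non-interactively; a minimal sketch with a made-up move sequence (the letters are hypothetical -- valid ones are whatever Action.SIMPLE contains):

task = GridTask(Task.parse(utils.get_problem_raw(1)))
game = Game(task)
for ch in 'WWAD':  # hypothetical simple actions
    game.apply_action(Action.simple(ch), 0)  # bot index 0, as above
print(game.finished())  # final score once solved; falsy while unfinished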