import ast
import os

from src.runner.runner import Runner


def test_log():
    with open(os.path.join(os.path.dirname(__file__), "log.txt")) as f:
        flag = False
        file_kbn = None
        for row in f:
            if row.startswith("train") or row.startswith("eval"):
                file_kbn = row.split()
            elif row.startswith("Failed"):
                flag = False
            elif row.startswith("Solved"):
                flag = True
            if flag and row[0] == "[":
                # the operation list is logged as a Python literal;
                # ast.literal_eval is a safer drop-in for eval here
                op_list = ast.literal_eval(row.strip())
                p = Runner(int(file_kbn[1]), file_kbn[0], verbose=True)
                for op in op_list:
                    p.run(op)
                ac, wa = p.eval_test(verbose=True)
                if wa > 0:
                    print(op_list)
                    print(ac, wa)
                assert p.eval_distance() == 0
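For reference, a minimal log.txt excerpt that the parser above would accept; the exact layout of the real log is an assumption inferred from the branch conditions:

# hypothetical log.txt excerpt: a "train"/"eval" header with the task index,
# a Solved/Failed status line, then the operation list as a Python literal
SAMPLE_LOG = """\
eval 88
Solved
['connect', 'count_hole', 'freq']
train 12
Failed
"""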
import json
import os

import pandas as pd

from src.runner.runner import Runner


def main(dir_path, time_limit):
    output_id_list = []
    output_list = []

    for file_name in sorted(os.listdir(dir_path)):
        if file_name.endswith(".json"):
            p = Runner()
            with open(os.path.join(dir_path, file_name), "r") as file:
                data = json.load(file)
            p.initialize_from_data(data)

            try:
                p.auto_run(time_limit=time_limit)
                solved_dict = p.output()
                for j in solved_dict.keys():
                    # at most three candidate answers per test case
                    assert len(solved_dict[j]) <= 3
                    output_id_list.append(f'{file_name[:-5]}_{j}')
                    output_list.append(" ".join(solved_dict[j]))
            except Exception:
                # fall back to empty predictions for every test case
                for j in range(len(data["test"])):
                    output_id_list.append(f'{file_name[:-5]}_{j}')
                    output_list.append("")

    res_df = pd.DataFrame({"output_id": output_id_list, "output": output_list})
    res_df.to_csv("submission.csv", index=False)
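A minimal invocation sketch for main above; the directory path and time limit are illustrative values, not taken from the source:

if __name__ == "__main__":
    # reads every *.json task in the directory and writes submission.csv
    main("../input/test/", time_limit=0.2)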
Example No. 3
def test_488():
    p = Runner(88, "eval", verbose=True)
    p.run("connect")
    p.run("count_hole")
    p.run("freq")
    p.run("sort_ascending")
    p.run("trim_background")
def main():
    runner = None

    try:
        config = Config.load()

        init_logging(config)

        runner = Runner()
        runner.open(config)
        runner.run()

        return 0

    except KeyboardInterrupt:
        return 0

    except Exception as ex:
        _logger.exception(ex)
        return 1

    finally:
        if runner is not None:
            runner.close()
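The open/run/close lifecycle above could also be wrapped in a context manager so that close() cannot be skipped; a minimal sketch, assuming the same Runner.open()/close() interface used in main():

import contextlib

@contextlib.contextmanager
def managed_runner(config):
    # hypothetical helper around the lifecycle shown in main() above
    runner = Runner()
    runner.open(config)
    try:
        yield runner
    finally:
        runner.close()

# usage sketch:
# with managed_runner(config) as runner:
#     runner.run()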
Example No. 5
def test_58():
    p = Runner(58, verbose=True)
    p.run("mesh_split")
    p.run("n_cell")
    p.run("keep_max")
    p.run("max_color")
    p.run("paste_color_full")
from omegaconf import OmegaConf
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import LearningRateLogger


# note: written against the older pytorch-lightning API
# (LearningRateLogger, distributed_backend, amp_level, row_log_interval)
def test_train_pipeline(fix_seed, config, gpus):
    config = OmegaConf.create(config)

    train_dataloader, test_dataloader = get_data_loaders(config=config)
    lr_logger = LearningRateLogger()
    model = build_model(model_conf=config.model)
    runner = Runner(model=model, config=config.runner)

    trainer = Trainer(
        distributed_backend=config.runner.trainer.distributed_backend,
        fast_dev_run=True,
        gpus=gpus,
        amp_level="O2",
        row_log_interval=10,
        callbacks=[lr_logger],
        max_epochs=1,
        weights_summary="top",
        reload_dataloaders_every_epoch=False,
        resume_from_checkpoint=None,
        benchmark=False,
        deterministic=True,
        num_sanity_val_steps=5,
        overfit_batches=0.0,
        precision=32,
        profiler=True,
    )

    trainer.fit(model=runner,
                train_dataloader=train_dataloader,
                val_dataloaders=test_dataloader)
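fix_seed, config, and gpus are pytest fixtures defined outside this snippet; a minimal conftest.py sketch of what fix_seed and gpus might provide (the bodies are assumptions, only the names come from the test signature; config is project-specific and omitted):

import random

import numpy as np
import pytest
import torch

@pytest.fixture
def fix_seed():
    # hypothetical: pin all RNGs so the fast_dev_run pass is reproducible
    random.seed(0)
    np.random.seed(0)
    torch.manual_seed(0)

@pytest.fixture
def gpus():
    # hypothetical: one GPU when available, otherwise CPU
    return 1 if torch.cuda.is_available() else None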
Example No. 7
import numpy as np

from src.runner.runner import Runner


def run_local(kbn="training", time_limit=0.2, verbose=True):
    assert kbn in ("training", "evaluation")

    total_ac = 0
    total_wa = 0
    res_list = []
    p_list = []
    ind_list = []

    file_list = "train" if kbn == "training" else "eval"

    for ind in range(400):
        p = Runner(ind, file_list, verbose=verbose)
        p.auto_run(time_limit=time_limit)
        solved_dict = p.output()

        for j in solved_dict.keys():
            p_list.append(ind)
            ind_list.append(j)
            assert len(solved_dict[j]) <= 3
            answer_str = p.answer_list[j]
            if answer_str in solved_dict[j]:
                total_ac += 1
                res_list.append(1)
            else:
                total_wa += 1
                res_list.append(0)

    # error rate over all test cases
    pct = 1 - total_ac / (total_ac + total_wa)
    print(f'{kbn} done, AC: {total_ac}, total: {total_ac + total_wa}, {pct}')
    # columns: result (1 = AC), task index, test-case index
    res_arr = np.column_stack([res_list, p_list, ind_list])
    np.save(f'../local_eval_log/{kbn}-{time_limit}-{pct}', res_arr)
    return None
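A short sketch of reading the saved log back; the file name is illustrative, and the column order (result, task index, test-case index) follows the column_stack call above:

import numpy as np

res_arr = np.load("../local_eval_log/training-0.2-0.85.npy")  # illustrative name
res, task_id, test_idx = res_arr[:, 0], res_arr[:, 1], res_arr[:, 2]
print(f"solved {int(res.sum())} of {len(res)} test cases")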
Example No. 8
def test_440():
    p = Runner(40, "eval", verbose=True)
    p.run("connect")
Example No. 9
def test_221():
    p = Runner(221, verbose=True)
Example No. 10
def test_261():
    p = Runner(261, verbose=True)
Example No. 11
def test_654():
    p = Runner(254, "eval", verbose=True)
    p.run("connect")
    p.run("auto_keep")
Example No. 12
def test_217():
    p = Runner(217, verbose=True)
Example No. 13
def test_555():
    p = Runner(155, "eval", verbose=True)
    p.run("trim_background")
Example No. 14
def test_578():
    p = Runner(178, "eval", verbose=True)
    p.run("trim_background")
Example No. 15
def test_586():
    p = Runner(186, "eval", verbose=True)
    p.run("trim_background")
Example No. 16
def test_64():
    p = Runner(64, verbose=True)
Example No. 17
def test_607():
    p = Runner(207, "eval", verbose=True)
    p.run("mesh_split")
    p.run("collect_max")
Example No. 18
def test_61():
    p = Runner(61, verbose=True)
Example No. 19
def test_756():
    p = Runner(356, "eval", verbose=True)
    p.run("color")
Example No. 20
def test_738():
    p = Runner(338, "eval", verbose=True)
    p.run("change_background")
    p.run("connect4")
    p.run("auto_paste_a")
Example No. 21
def test_719():
    p = Runner(319, "eval", verbose=True)
Example No. 22
from src.runner.runner import Runner
from src.adityaork.tree import predict_from_json

if __name__ == "__main__":
    p = Runner(14, file_list="train", verbose=True)
    s = predict_from_json(p.task_json)
    print(s)
    p = Runner(15, file_list="train", verbose=True)
    s = predict_from_json(p.task_json)
    print(s)
Example No. 23
def test_85():
    p = Runner(85, verbose=True)
Example No. 24
def test_551():
    p = Runner(151, "eval", verbose=True)
Example No. 25
def test_134():
    p = Runner(134, verbose=True)
    p.run("divide")
Example No. 26
def test_561():
    p = Runner(161, "eval", verbose=True)
    p.run("trim_background")
Example No. 27
def test_140():
    p = Runner(140, verbose=True)
Example No. 28
def test_124():
    p = Runner(124, verbose=True)
    p.run("change_background")
Example No. 29
def test_652():
    p = Runner(252, "eval", verbose=True)
    p.run("color")
    p.run("auto_keep")
    p.run("align")