Example #1
File: run.py Project: inyukwo1/rat-sql
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('mode',
                        help="preprocess/train/eval",
                        choices=["preprocess", "train", "eval"])
    parser.add_argument('exp_config_file', help="jsonnet file for experiments")
    parser.add_argument('--model_config_args',
                        help="optional overrides for model config args")
    parser.add_argument('--logdir', help="optional override for logdir")
    parser.add_argument('--ray', action='store_true')
    args = parser.parse_args()

    exp_config = json.loads(_jsonnet.evaluate_file(args.exp_config_file))
    model_config_file = exp_config["model_config"]
    if "model_config_args" in exp_config:
        model_config_args = exp_config["model_config_args"]
        if args.model_config_args is not None:
            model_config_args_json = _jsonnet.evaluate_snippet(
                "", args.model_config_args)
            model_config_args.update(json.loads(model_config_args_json))
        model_config_args = json.dumps(model_config_args)
    elif args.model_config_args is not None:
        model_config_args = _jsonnet.evaluate_snippet("",
                                                      args.model_config_args)
    else:
        model_config_args = None

    logdir = args.logdir or exp_config["logdir"]

    if args.mode == "preprocess":
        preprocess_config = PreprocessConfig(model_config_file,
                                             model_config_args)
        preprocess.main(preprocess_config)
    elif args.mode == "train":
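        # mp.spawn is presumably torch.multiprocessing.spawn: it launches
        # nprocs=8 workers and calls train_model(rank, *args), so each
        # worker receives its rank as the first argument.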
        mp.spawn(train_model,
                 nprocs=8,
                 args=(model_config_file, model_config_args, logdir, args.ray))

    elif args.mode == "eval":
        for step in exp_config["eval_steps"]:
            infer_output_path = f"{exp_config['eval_output']}/{exp_config['eval_name']}-step{step}.infer"
            infer_config = InferConfig(
                model_config_file,
                model_config_args,
                logdir,
                exp_config["eval_section"],
                exp_config["eval_beam_size"],
                infer_output_path,
                step,
                use_heuristic=exp_config["eval_use_heuristic"])
            infer.main(infer_config)

            eval_output_path = f"{exp_config['eval_output']}/{exp_config['eval_name']}-step{step}.eval"
            eval_config = EvalConfig(model_config_file, model_config_args,
                                     logdir, exp_config["eval_section"],
                                     infer_output_path, eval_output_path)
            eval.main(eval_config)

            with open(eval_output_path) as f:
                res_json = json.load(f)
            print(step, res_json['total_scores']['all']['exact'])
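These run.py variants are driven entirely by an experiment file evaluated with _jsonnet.evaluate_file (the snippets also assume run.py's module-level imports: argparse, json, _jsonnet, and the project's preprocess/train/infer/eval command modules). Going by the keys read above, a minimal experiment config for this example might look like the sketch below; the file name and all values are illustrative, not taken from the repo.

// hypothetical experiments/my-run.jsonnet
{
    model_config: "configs/spider/nl2code-bert.jsonnet",
    model_config_args: { bs: 6, num_batch_accumulated: 4 },
    logdir: "logdir/my_run",
    eval_name: "my_run",
    eval_output: "ie_dirs",
    eval_section: "val",
    eval_beam_size: 1,
    eval_use_heuristic: true,
    eval_steps: [10000, 20000, 30000],
}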
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('mode',
                        help="preprocess/train/eval",
                        choices=["preprocess", "train", "eval"])
    parser.add_argument('exp_config_file', help="jsonnet file for experiments")
    parser.add_argument('--model_config_args',
                        help="optional overrides for model config args")
    parser.add_argument('--logdir', help="optional override for logdir")
    parser.add_argument('--model_name',
                        help="optional override for model_name")
    parser.add_argument('--skip_infer', action='store_true', default=False)
    parser.add_argument('--skip_eval', action='store_true', default=False)
    parser.add_argument('--eval_name', help="optional override for eval name")
    args = parser.parse_args()

    exp_config = json.loads(_jsonnet.evaluate_file(args.exp_config_file))
    model_config_file = exp_config["model_config"]
    if "model_config_args" in exp_config:
        model_config_args = exp_config["model_config_args"]
        if args.model_config_args is not None:
            model_config_args_json = _jsonnet.evaluate_snippet(
                "", args.model_config_args)
            model_config_args.update(json.loads(model_config_args_json))
        model_config_args = json.dumps(model_config_args)
    elif args.model_config_args is not None:
        model_config_args = _jsonnet.evaluate_snippet("",
                                                      args.model_config_args)
    else:
        model_config_args = None

    logdir = args.logdir or exp_config["logdir"]
    if args.eval_name:
        exp_config["eval_name"] = args.eval_name

    if args.mode == "preprocess":
        preprocess_config = PreprocessConfig(model_config_file,
                                             model_config_args)
        preprocess.main(preprocess_config)
    elif args.mode == "train":
        train_config = TrainConfig(model_config_file, model_config_args,
                                   logdir)
        train.main(train_config)
    elif args.mode == "eval":
        print(exp_config["eval_name"])
        for step in exp_config["eval_steps"]:
            print(step)
            model = None
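            # `model` is threaded through infer.main and reused across
            # sections, so the checkpoint for this step is loaded only once.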
            for section in exp_config["eval_sections"]:
                infer_output_path = "{}/{}-step{}/{}.infer".format(
                    exp_config["eval_output"], exp_config["eval_name"], step,
                    section)
                infer_config = InferConfig(
                    model_config_file,
                    model_config_args,
                    logdir,
                    section,
                    exp_config["eval_beam_size"],
                    infer_output_path,
                    step,
                    args.model_name,
                    use_heuristic=exp_config["eval_use_heuristic"],
                    output_history=True)
                if not args.skip_infer:
                    model = infer.main(infer_config, model)
                if not args.skip_eval:
                    eval_output_path = "{}/{}-step{}/{}.eval".format(
                        exp_config["eval_output"], exp_config["eval_name"],
                        step, section)
                    eval_config = EvalConfig(model_config_file,
                                             model_config_args, logdir,
                                             section, infer_output_path,
                                             eval_output_path)
                    eval.main(eval_config)
                    print(section)
                    for infer_type in ['inferred_code']:
                        print(infer_type)
                        result_path = eval_output_path.replace(
                            '.eval', '_{}.eval'.format(infer_type))
                        with open(result_path) as f:
                            res_json = json.load(f)
                        print('exact match: %.4f exec acc: %.4f' %
                              (res_json['total_scores']['all']['exact'],
                               res_json['total_scores']['all']['exec']))
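Compared with example #1, this variant evaluates several sections per step (note the plural eval_sections key in its config), groups outputs as {eval_output}/{eval_name}-step{step}/{section}.infer, and can skip either phase. A hypothetical invocation that only re-scores existing inference output (the experiment file name is illustrative):

python run.py eval experiments/my-run.jsonnet --skip_infer --eval_name rescore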
Example #3
File: run.py Project: kmzjy110/rat-sql
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('mode',
                        help="preprocess/train/eval",
                        choices=["preprocess", "train", "eval", "finetune"])
    parser.add_argument('exp_config_file', help="jsonnet file for experiments")
    parser.add_argument('--model_config_args',
                        help="optional overrides for model config args")
    parser.add_argument('--logdir', help="optional override for logdir")
    args = parser.parse_args()

    exp_config = json.loads(_jsonnet.evaluate_file(args.exp_config_file))
    model_config_file = exp_config["model_config"]
    if "model_config_args" in exp_config:
        model_config_args = exp_config["model_config_args"]
        if args.model_config_args is not None:
            model_config_args_json = _jsonnet.evaluate_snippet(
                "", args.model_config_args)
            model_config_args.update(json.loads(model_config_args_json))
        model_config_args = json.dumps(model_config_args)
    elif args.model_config_args is not None:
        model_config_args = _jsonnet.evaluate_snippet("",
                                                      args.model_config_args)
    else:
        model_config_args = None

    logdir = args.logdir or exp_config["logdir"]

    if args.mode == "preprocess":
        preprocess_config = PreprocessConfig(model_config_file,
                                             model_config_args)
        preprocess.main(preprocess_config)
    elif args.mode == "train":
        train_config = TrainConfig(model_config_file, model_config_args,
                                   logdir)
        train.main(train_config)
    elif args.mode == "finetune":
        finetunedir = exp_config["finetunedir"]
        # TODO: make the fine-tuning step configurable via the config file
        # instead of hard-coding the last eval step; note `step` is computed
        # here but never passed to FineTuneConfig below.
        step = exp_config['eval_steps'][-1]
        infer_output_path = f"{exp_config['finetunedir']}/infer/"
        fine_tune_config = FineTuneConfig(
            model_config_file,
            model_config_args,
            logdir,
            finetunedir,
            infer_output_path,
            exp_config["eval_beam_size"],
            use_heuristic=exp_config["eval_use_heuristic"])
        finetune.main(fine_tune_config)

    elif args.mode == "eval":
        for step in exp_config["eval_steps"]:
            infer_output_path = f"{exp_config['eval_output']}/{exp_config['eval_name']}-step{step}.infer"
            infer_config = InferConfig(
                model_config_file,
                model_config_args,
                logdir,
                exp_config["eval_section"],
                exp_config["eval_beam_size"],
                infer_output_path,
                step,
                use_heuristic=exp_config["eval_use_heuristic"])
            infer.main(infer_config)

            eval_output_path = f"{exp_config['eval_output']}/{exp_config['eval_name']}-step{step}.eval"
            eval_config = EvalConfig(model_config_file, model_config_args,
                                     logdir, exp_config["eval_section"],
                                     infer_output_path, eval_output_path)
            eval.main(eval_config)

            with open(eval_output_path) as f:
                res_json = json.load(f)
            print(step, res_json['total_scores']['all']['exact'])
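The finetune branch above reads one extra key from the experiment file. Extending the config sketch from example #1, the addition might look like this (the key name comes from the code, the value is illustrative):

// extra key consumed by finetune mode
finetunedir: "logdir/my_run_finetune",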
Example #4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('mode', help="preprocess/train/eval", choices=["preprocess", "train", "eval"])
    parser.add_argument('exp_config_file', help="jsonnet file for experiments")
    parser.add_argument('--use_scheduler', help="whether to use the lr scheduler for training: 'True' (also the default when unset) or 'False'")
    parser.add_argument('--model_config_args', help="optional overrides for model config args")
    parser.add_argument('--logdir', help="optional override for logdir")
    args = parser.parse_args()

    exp_config = json.loads(_jsonnet.evaluate_file(args.exp_config_file))
    model_config_file = exp_config["model_config"]
    if "model_config_args" in exp_config:
        model_config_args = exp_config["model_config_args"]
        if args.model_config_args is not None:
            model_config_args_json = _jsonnet.evaluate_snippet("", args.model_config_args)
            model_config_args.update(json.loads(model_config_args_json))
        model_config_args = json.dumps(model_config_args)
    elif args.model_config_args is not None:
        model_config_args = _jsonnet.evaluate_snippet("", args.model_config_args)
    else:
        model_config_args = None

    logdir = args.logdir or exp_config["logdir"]

    trainset = exp_config["trainset"]
    valset = exp_config["valset"]

    if args.mode == "preprocess":
        preprocess_config = PreprocessConfig(model_config_file, model_config_args)
        preprocess.main(preprocess_config)
    elif args.mode == "train":
        train_config = TrainConfig(model_config_file,
                                   model_config_args, logdir, trainset, valset)
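        # --use_scheduler is a plain string flag: unset or 'True' trains with
        # the lr scheduler, 'False' dispatches to train_noscheduler, and any
        # other value raises below.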
        if args.use_scheduler is None or args.use_scheduler == 'True':
            train.main(train_config)
        elif args.use_scheduler == 'False':
            train_noscheduler.main(train_config)
        else:
            raise AssertionError(
                "--use_scheduler must be 'True' or 'False', got %r" % args.use_scheduler)
    elif args.mode == "eval":
        for step in exp_config["eval_steps"]:
            infer_output_path = f"{exp_config['eval_output']}/{exp_config['eval_name']}-step{step}.infer"
            infer_config = InferConfig(
                model_config_file,
                model_config_args,
                logdir,
                exp_config["eval_section"],
                exp_config["eval_beam_size"],
                infer_output_path,
                step,
                use_heuristic=exp_config["eval_use_heuristic"]
            )
            infer.main(infer_config)

            eval_output_path = f"{exp_config['eval_output']}/{exp_config['eval_name']}-step{step}.eval"
            eval_config = EvalConfig(
                model_config_file,
                model_config_args,
                logdir,
                exp_config["eval_section"],
                infer_output_path,
                eval_output_path
            )
            eval.main(eval_config)

            with open(eval_output_path) as f:
                res_json = json.load(f)
            print(step, res_json['total_scores']['all']['exact'])
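All four variants share the same precedence rule for model config overrides: defaults from the experiment file are updated by the --model_config_args snippet, and the result is re-serialized to JSON. Below is a standalone sketch of just that merge; the function name is mine, and the real code additionally runs the CLI value through _jsonnet.evaluate_snippet before json.loads, which this sketch skips.

import json

def merge_config_args(exp_defaults, cli_override_json):
    """Mirror the merge shared by all four main() variants.

    exp_defaults: dict parsed from the experiment file's "model_config_args",
        or None when the key is absent.
    cli_override_json: JSON string from --model_config_args, or None.
    Returns a JSON string (or None); CLI values win on key conflicts.
    """
    if exp_defaults is not None:
        merged = dict(exp_defaults)
        if cli_override_json is not None:
            merged.update(json.loads(cli_override_json))
        return json.dumps(merged)
    return cli_override_json  # may be None

# Example: merge_config_args({"bs": 6, "lr": 1e-4}, '{"bs": 12}')
# returns '{"bs": 12, "lr": 0.0001}'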