Example #1
        if not os.path.exists(model_path):
            os.mkdir(model_path)

        shutil.copyfile(conf_path, os.path.join(model_path, "model.conf"))
        for pyfile in ["featurizer.py"]:
            shutil.copyfile(pyfile, os.path.join(model_path, pyfile))
        if config["model_type"] == "pytorch":
            shutil.copyfile("modeling/torch_model.py",
                            os.path.join(model_path, "torch_model.py"))
        elif config["model_type"] == "tf":
            shutil.copyfile("modeling/tf_model.py",
                            os.path.join(model_path, "tf_model.py"))
        else:
            raise Exception("model_type is not supported")

    featurizer = HydraFeaturizer(config)
    model = create_model(config, is_train=True)
    evaluator = HydraEvaluator(model_path, config, featurizer, model, note)

    is_meta = "meta_train" in config.keys() and config["meta_train"] == "True"
    if "use_content" in config.keys() and config["use_content"] == "True":
        processed_data_path = config["train_data_path"] +\
            "_{}_{}_{}".format(
                config["base_class"],
                config["base_name"],
                "filtered" if "filter_content" in config.keys() and config["filter_content"] == "True" else "unfilt"
        )
    else:
        processed_data_path = config["train_data_path"] +\
            "_{}_{}".format(config["base_class"], config["base_name"])
    if is_meta:
        processed_data_path += "_meta_train.pickle"
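
The snippet above only constructs the cache path. Below is a minimal sketch of how such a pickle cache is typically consumed; it assumes the SQLDataset loader seen in Example #3 and is not necessarily the original training code.

import os
import pickle

# Sketch only (not the original training loop): reuse the featurized training
# set from the pickle cache if it exists, otherwise build and cache it.
# SQLDataset's (path, config, featurizer, last_flag) signature is assumed from
# Example #3; the meaning of the last flag is an assumption.
if os.path.exists(processed_data_path):
    with open(processed_data_path, "rb") as f:
        train_data = pickle.load(f)
else:
    train_data = SQLDataset(config["train_data_path"], config, featurizer, True)
    with open(processed_data_path, "wb") as f:
        pickle.dump(train_data, f)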
Example #2
            print(eval_file + ": " + result_str)

            if "DEBUG" in self.config:
                for text in sq:
                    print(text[0] + ":" + text[1] + "\t" + text[2])
            else:
                with open(self.eval_history_file, "a+", encoding="utf8") as f:
                    f.write("[{0}, epoch {1}] ".format(eval_file, epochs) +
                            result_str + "\n")

                bad_case_file = os.path.join(
                    self.bad_case_dir,
                    "{0}_epoch_{1}.log".format(eval_file, epochs))
                with open(bad_case_file, "w", encoding="utf8") as f:
                    for text in sq:
                        f.write(text[0] + ":" + text[1] + "\t" + text[2] +
                                "\n")


if __name__ == "__main__":
    os.environ["CUDA_VISIBLE_DEVICES"] = "3"
    config = utils.read_conf(os.path.join("conf", "wikisql.conf"))
    config["DEBUG"] = 1
    config["num_train_steps"] = 1000
    config["num_warmup_steps"] = 100

    featurizer = HydraFeaturizer(config)
    model = create_model(config, is_train=True, num_gpu=1)
    evaluator = HydraEvaluator("output", config, featurizer, model,
                               "debug evaluator")
    evaluator.eval(0)
Example #3
    ###================================================================================================###

    # All Best
    model_path = "output/20210505_235209"
    epoch = 4

    engine = DBEngine(db_file)
    config = utils.read_conf(os.path.join(model_path, "model.conf"))
    # config["DEBUG"] = 1
    featurizer = HydraFeaturizer(config)
    pred_data = SQLDataset(in_file, config, featurizer, False)
    print("num of samples: {0}".format(len(pred_data.input_features)))

    ##======================EG + TOP_k=============================##

    model = create_model(config, is_train=False)
    model.load(model_path, epoch)

    if "DEBUG" in config:
        model_out_file = model_out_file + ".partial"

    if os.path.exists(model_out_file):
        with open(model_out_file, "rb") as f:
            model_outputs = pickle.load(f)
    else:
        model_outputs = model.dataset_inference(pred_data)
        with open(model_out_file, "wb") as f:
            pickle.dump(model_outputs, f)

    beam_size = args.beam_size
    top_k = args.topk

    print("===HydraNet+EG===")
def execute_one_test(dataset, shot, model_moment, epoch):
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
    model_path = "output/" + model_moment

    in_file = "data/wiki{}_content.jsonl".format(
        dataset) if shot == "orig" else "data/wiki{}_{}_content.jsonl".format(
            dataset, shot)
    db_file = "WikiSQL/data/{}.db".format(dataset)
    label_file = "WikiSQL/data/{}.jsonl".format(
        dataset) if shot == "orig" else "WikiSQL/data_{}/{}.jsonl".format(
            shot, dataset)
    out_path = "predictions/{}_{}_{}_{}".format(model_moment, epoch, dataset,
                                                shot)
    if not os.path.exists(out_path):
        os.mkdir(out_path)
    out_file = os.path.join(out_path, "out.jsonl")
    eg_out_file = os.path.join(out_path, "out_eg.jsonl")
    model_out_file = os.path.join(out_path, "model_out.pkl")
    test_result_file = os.path.join(out_path, "result.txt")

    engine = DBEngine(db_file)
    config = utils.read_conf(os.path.join(model_path, "model.conf"))
    # config["DEBUG"] = 1
    featurizer = HydraFeaturizer(config)
    pred_data = SQLDataset(in_file, config, featurizer, False)
    print("num of samples: {0}".format(len(pred_data.input_features)))

    model = create_model(config, is_train=False)
    model.load(model_path, epoch)

    if "DEBUG" in config:
        model_out_file = model_out_file + ".partial"
    model_outputs = model.dataset_inference(pred_data)
    with open(model_out_file, "wb") as f:
        pickle.dump(model_outputs, f)
    # model_outputs = pickle.load(open(model_out_file, "rb"))

    print("===HydraNet===")
    pred_sqls = model.predict_SQL(pred_data, model_outputs=model_outputs)
    with open(out_file, "w") as g:
        for pred_sql in pred_sqls:
            # print(pred_sql)
            result = {"query": {}}
            result["query"]["agg"] = int(pred_sql[0])
            result["query"]["sel"] = int(pred_sql[1])
            result["query"]["conds"] = [(int(cond[0]), int(cond[1]),
                                         str(cond[2])) for cond in pred_sql[2]]
            g.write(json.dumps(result) + "\n")
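    # Each line written above follows WikiSQL's query format, e.g. (illustrative values):
    # {"query": {"agg": 0, "sel": 3, "conds": [[1, 0, "some value"]]}}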
    normal_res = print_metric(label_file, out_file, db_file)

    print("===HydraNet+EG===")
    pred_sqls = model.predict_SQL_with_EG(engine,
                                          pred_data,
                                          model_outputs=model_outputs)
    with open(eg_out_file, "w") as g:
        for pred_sql in pred_sqls:
            # print(pred_sql)
            result = {"query": {}}
            result["query"]["agg"] = int(pred_sql[0])
            result["query"]["sel"] = int(pred_sql[1])
            result["query"]["conds"] = [(int(cond[0]), int(cond[1]),
                                         str(cond[2])) for cond in pred_sql[2]]
            g.write(json.dumps(result) + "\n")
    eg_res = print_metric(label_file, eg_out_file, db_file)

    with open(test_result_file, "w") as g:
        g.write("normal results:\n" + normal_res + "eg results:\n" + eg_res)