Example #1
0
File: run.py Project: yhcc/fastNLP
def predict():
    """Evaluate the trained POS-tagging model on the pickled dev set."""
    # Read the "POS_test" section of the configuration file.
    test_args = ConfigSection()
    ConfigLoader().load_config(cfgfile, {"POS_test": test_args})

    # Model dimensions come from the vocabularies pickled at training time.
    word2index = load_pickle(pickle_path, "word2id.pkl")
    index2label = load_pickle(pickle_path, "label2id.pkl")
    test_args["vocab_size"] = len(word2index)
    test_args["num_classes"] = len(index2label)

    # Dev data was pickled during preprocessing.
    dev_data = load_pickle(pickle_path, "data_dev.pkl")

    # Rebuild the same architecture, then restore the trained weights.
    model = AdvSeqLabel(test_args)
    ModelLoader.load_pytorch(model, "./save/trained_model.pkl")
    print("model loaded!")

    # Run the evaluation loop over the dev data.
    test_args["evaluator"] = SeqLabelEvaluator()
    tester = SeqLabelTester(**test_args.data)
    tester.test(model, dev_data)
Example #2
0
File: run.py Project: yhcc/fastNLP
def infer():
    """Run the trained POS model over raw input lines and print the results."""
    # Inference reuses the "POS_test" configuration section.
    test_args = ConfigSection()
    ConfigLoader().load_config(cfgfile, {"POS_test": test_args})

    # Model dimensions come from the pickled vocabularies.
    word2index = load_pickle(pickle_path, "word2id.pkl")
    index2label = load_pickle(pickle_path, "label2id.pkl")
    test_args["vocab_size"] = len(word2index)
    test_args["num_classes"] = len(index2label)

    model = AdvSeqLabel(test_args)

    # Restore trained weights; print a clear message before re-raising
    # so the failure is visible even when the traceback is swallowed upstream.
    try:
        ModelLoader.load_pytorch(model, "./save/trained_model.pkl")
        print('model loaded!')
    except Exception:
        print('cannot load model!')
        raise

    # Build the inference data set from raw text lines.
    infer_data = SeqLabelDataSet(load_func=BaseLoader.load_lines)
    infer_data.load(data_infer_path,
                    vocabs={"word_vocab": word2index},
                    infer=True)
    print('data loaded')

    # Predict and report.
    predictor = SeqLabelInfer(pickle_path)
    results = predictor.predict(model, infer_data)
    print(results)
    print("Inference finished!")
Example #3
0
def infer():
    """Infer POS tags for raw sentences with a previously trained model."""
    # The inference configuration mirrors the test configuration.
    test_args = ConfigSection()
    ConfigLoader("config.cfg").load_config(config_dir,
                                           {"POS_infer": test_args})

    # Model dimensions come from the pickled dictionaries.
    word2index = load_pickle(pickle_path, "word2id.pkl")
    index2label = load_pickle(pickle_path, "id2class.pkl")
    test_args["vocab_size"] = len(word2index)
    test_args["num_classes"] = len(index2label)

    # Rebuild the architecture and restore its trained weights.
    model = SeqLabeling(test_args)
    ModelLoader.load_pytorch(model, os.path.join(pickle_path, model_name))
    print("model loaded!")

    # Raw sentences, one per line in the input file.
    infer_data = BaseLoader(data_infer_path).load_lines()

    # Predict and print every result.
    predictor = SeqLabelInfer(pickle_path)
    for res in predictor.predict(model, infer_data):
        print(res)
    print("Inference finished!")
Example #4
0
def infer():
    """Classify the training data with a saved CNN text classifier."""
    # Vocabularies pickled at preprocessing time.
    print("Loading data...")
    word_vocab = load_pickle(save_dir, "word2id.pkl")
    label_vocab = load_pickle(save_dir, "label2id.pkl")
    print("vocabulary size:", len(word_vocab))
    print("number of classes:", len(label_vocab))

    # Data set to run through the classifier.
    infer_data = TextClassifyDataSet(load_func=ClassDataSetLoader.load)
    infer_data.load(train_data_dir,
                    vocabs={"word_vocab": word_vocab, "label_vocab": label_vocab})

    # Hyper-parameters: sizes first, then the config-file section on top.
    model_args = ConfigSection()
    model_args["vocab_size"] = len(word_vocab)
    model_args["num_classes"] = len(label_vocab)
    ConfigLoader.load_config(config_dir, {"text_class_model": model_args})

    # Build the network and restore the trained weights.
    print("Building model...")
    cnn = CNNText(model_args)
    ModelLoader.load_pytorch(cnn, os.path.join(save_dir, model_name))
    print("model loaded!")

    # Predict and report.
    predictor = ClassificationInfer(pickle_path=save_dir)
    results = predictor.predict(cnn, infer_data)
    print(results)
Example #5
0
def infer():
    """Tag raw input sentences with a trained sequence-labelling model."""
    # Inference uses the same configuration section as testing.
    test_args = ConfigSection()
    ConfigLoader().load_config(config_path, {"POS_infer": test_args})

    # Model dimensions come from the pickled vocabularies.
    word2index = load_pickle(pickle_path, "word2id.pkl")
    index2label = load_pickle(pickle_path, "label2id.pkl")
    test_args["vocab_size"] = len(word2index)
    test_args["num_classes"] = len(index2label)

    # Same architecture as training; weights restored from disk.
    model = SeqLabeling(test_args)
    ModelLoader.load_pytorch(model, "./save/saved_model.pkl")
    print("model loaded!")

    # Build the data set to be tagged.
    infer_data = SeqLabelDataSet(load_func=BaseLoader.load)
    infer_data.load(data_infer_path,
                    vocabs={"word_vocab": word2index},
                    infer=True)

    # Predict and report.
    predictor = SeqLabelInfer(pickle_path)
    results = predictor.predict(model, infer_data)
    print(results)
Example #6
0
 def __init__(self, pickle_path):
     """Cache the pickle directory and load the saved vocabularies.

     :param pickle_path: str, the directory holding the pickle files.
     """
     self.pickle_path = pickle_path
     self.batch_size = 1
     self.batch_output = []
     self.iterator = None  # initialised elsewhere before iteration
     # Dictionaries saved during preprocessing.
     self.index2label = load_pickle(self.pickle_path, "id2class.pkl")
     self.word2index = load_pickle(self.pickle_path, "word2id.pkl")
Example #7
0
    def __init__(self, pickle_path, post_processor):
        """Set up batch bookkeeping and load the pickled vocabularies.

        :param pickle_path: str, the path to the pickle files.
        :param post_processor: a function or callable object that takes a list
            of batch outputs as input.
        """
        self.pickle_path = pickle_path
        self._post_processor = post_processor
        self.batch_size = 1
        self.batch_output = []
        # Vocabularies produced during preprocessing.
        self.word_vocab = load_pickle(self.pickle_path, "word2id.pkl")
        self.label_vocab = load_pickle(self.pickle_path, "label2id.pkl")
Example #8
0
    def __init__(self, pickle_path, task):
        """Record the task type and load the pickled vocabularies.

        :param pickle_path: str, the path to the pickle files.
        :param task: str, which task the predictor performs; one of
            ("seq_label", "text_classify").
        """
        self.pickle_path = pickle_path
        self._task = task  # one of ("seq_label", "text_classify")
        self.batch_size = 1
        self.batch_output = []
        # Vocabularies produced during preprocessing.
        self.word_vocab = load_pickle(self.pickle_path, "word2id.pkl")
        self.label_vocab = load_pickle(self.pickle_path, "class2id.pkl")
Example #9
0
    def load(self, model_name, config_file="config", section_name="model"):
        """
        Load a pre-trained FastNLP model together with additional data.

        :param model_name: str, the name of a FastNLP model.
        :param config_file: str, the name of the config file which stores the
                initialization information of the model. (default: "config")
        :param section_name: str, the name of the corresponding section in the
                config file. (default: "model")
        :raises TypeError: if model_name is not a str.
        :raises ValueError: if model_name is not a known FastNLP model.
        """
        # `assert` is stripped under `python -O`; validate explicitly instead.
        if not isinstance(model_name, str):
            raise TypeError("model_name must be a str, not {}".format(type(model_name).__name__))
        if model_name not in FastNLP_MODEL_COLLECTION:
            raise ValueError("No FastNLP model named {}.".format(model_name))

        # All metadata for this model (url, class, pickle file, type) — look
        # it up once instead of re-indexing the collection four times.
        model_info = FastNLP_MODEL_COLLECTION[model_name]

        # Fetch the model files if they are not on disk yet.
        if not self.model_exist(model_dir=self.model_dir):
            self._download(model_name, model_info["url"])

        model_class = self._get_model_class(model_info["class"])
        print("Restore model class {}".format(str(model_class)))

        model_args = ConfigSection()
        ConfigLoader.load_config(os.path.join(self.model_dir, config_file), {section_name: model_args})
        print("Restore model hyper-parameters {}".format(str(model_args.data)))

        # fetch dictionary size and number of labels from pickle files
        self.word_vocab = load_pickle(self.model_dir, "word2id.pkl")
        model_args["vocab_size"] = len(self.word_vocab)
        self.label_vocab = load_pickle(self.model_dir, "label2id.pkl")
        model_args["num_classes"] = len(self.label_vocab)

        # Construct the model with the restored hyper-parameters.
        model = model_class(model_args)
        print("Model constructed.")

        # TODO: make weight loading framework-independent (PyTorch only now).
        ModelLoader.load_pytorch(model, os.path.join(self.model_dir, model_info["pickle"]))
        print("Model weights loaded.")

        self.model = model
        self.infer_type = model_info["type"]

        print("Inference ready.")
Example #10
0
def infer():
    """Run POS inference with a model saved under ./data_for_tests."""
    # Inference shares its configuration section with testing.
    test_args = ConfigSection()
    ConfigLoader("config.cfg").load_config("./data_for_tests/config",
                                           {"POS_test": test_args})

    # Model dimensions come from the pickled dictionaries.
    word2index = load_pickle(pickle_path, "word2id.pkl")
    index2label = load_pickle(pickle_path, "id2class.pkl")
    test_args["vocab_size"] = len(word2index)
    test_args["num_classes"] = len(index2label)

    # Same architecture as training; restore trained weights.
    model = SeqLabeling(test_args)
    ModelLoader.load_pytorch(model, "./data_for_tests/saved_model.pkl")
    print("model loaded!")

    # load_lines() transforms the raw file into a list of token lists:
    #   [[word_11, word_12, ...],
    #    [word_21, word_22, ...],
    #    ...]
    # Each line in "people_infer.txt" is already one sentence, so
    # load_lines() only needs to split them.
    infer_data = BaseLoader(data_infer_path).load_lines()

    # Predict and report.
    predictor = Predictor(pickle_path)
    results = predictor.predict(model, infer_data)
    print(results)
    print("Inference finished!")
Example #11
0
def infer():
    """Tag sentences from the inference file, printing each prediction."""
    # The inference configuration mirrors the test configuration.
    test_args = ConfigSection()
    ConfigLoader().load_config(config_dir, {"POS_infer": test_args})

    # Model dimensions come from the pickled vocabularies.
    word_vocab = load_pickle(pickle_path, "word2id.pkl")
    label_vocab = load_pickle(pickle_path, "label2id.pkl")
    test_args["vocab_size"] = len(word_vocab)
    test_args["num_classes"] = len(label_vocab)
    print("vocabularies loaded")

    # Same architecture as training.
    model = SeqLabeling(test_args)
    print("model defined")

    # Restore the trained weights.
    ModelLoader.load_pytorch(model, os.path.join(pickle_path, model_name))
    print("model loaded!")

    # Build the data set to be tagged.
    infer_data = SeqLabelDataSet(load_func=BaseLoader.load)
    infer_data.load(data_infer_path,
                    vocabs={"word_vocab": word_vocab,
                            "label_vocab": label_vocab},
                    infer=True)
    print("data set prepared")

    # Predict and print every result.
    predictor = SeqLabelInfer(pickle_path)
    for res in predictor.predict(model, infer_data):
        print(res)
    print("Inference finished!")