Example 1
def main():
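    # Load the data, model, and preprocessor configurations from YAML files.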
    config_data = yaml.safe_load(open("config_data.yml", "r"))
    config_model = yaml.safe_load(open("config_model.yml", "r"))
    config_preprocess = yaml.safe_load(open("config_preprocessor.yml", "r"))

    config = HParams({}, default_hparams=None)
    config.add_hparam('config_data', config_data)
    config.add_hparam('config_model', config_model)
    config.add_hparam('preprocessor', config_preprocess)

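    # CoNLL03Reader reads CoNLL 2003 formatted data into DataPacks.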
    reader = CoNLL03Reader()

    # Keep the vocabulary processor as a simple counter
    vocab_processor = CoNLL03VocabularyProcessor()

    ner_trainer = CoNLLNERTrainer()
    ner_predictor = CoNLLNERPredictor()
    ner_evaluator = CoNLLNEREvaluator()

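    # Wire the reader, preprocessor, trainer, predictor, and evaluator
    # into a single training pipeline and run it.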
    train_pipe = TrainPipeline(train_reader=reader,
                               trainer=ner_trainer,
                               dev_reader=reader,
                               configs=config,
                               preprocessors=[vocab_processor],
                               predictor=ner_predictor,
                               evaluator=ner_evaluator)
    train_pipe.run()
Example 2
    def setUp(self):
        root_path = os.path.abspath(
            os.path.join(
                os.path.dirname(os.path.abspath(__file__)),
                os.pardir,
                os.pardir,
                os.pardir,
            ))

        file_path: str = os.path.join(root_path,
                                      "data_samples/data_pack_dataset_test")
        reader = CoNLL03Reader()
        context_type = Sentence
        request = {Sentence: []}
        skip_k = 0

        self.input_files = ["conll03_1.conll", "conll03_2.conll"]
        self.feature_schemes = {}

        train_pl: Pipeline = Pipeline()
        train_pl.set_reader(reader)
        train_pl.initialize()
        pack_iterator: Iterator[PackType] = train_pl.process_dataset(file_path)

        self.data_source: DataPackIterator = DataPackIterator(
            pack_iterator, context_type, request, skip_k)
Example 3
    def setUp(self):
        # Define and configure the Pipeline
        self.dataset_path = "data_samples/conll03"

        self.nlp = Pipeline()

        self.nlp.set_reader(CoNLL03Reader())
        self.nlp.add_processor(DummyPackProcessor())
        self.nlp.add_processor(DummyPackProcessor())

        self.nlp.initialize()
Example 4
    def test_CharExtractor(self):
        pipeline = Pipeline[DataPack]()
        reader = CoNLL03Reader()
        pipeline.set_reader(reader)
        pipeline.initialize()

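        # Two extractor configurations: one unbounded, one capping each token
        # at four characters via "max_char_length".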
        config1 = {
            "entry_type": "ft.onto.base_ontology.Token",
            "need_pad": True,
            "vocab_use_unk": True,
        }

        config2 = {
            "entry_type": "ft.onto.base_ontology.Token",
            "need_pad": True,
            "vocab_use_unk": True,
            "max_char_length": 4,
        }

        for config in [config1, config2]:
            extractor = CharExtractor()
            extractor.initialize(config=config)

            sentence = (
                "The European Commission said on Thursday it disagreed "
                "with German advice to consumers to shun British lamb "
                "until scientists determine whether mad cow disease "
                "can be transmitted to sheep .")

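            # First pass over the dataset builds the character vocabulary.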
            for pack in pipeline.process_dataset(self.dataset_path):
                for instance in pack.get(Sentence):
                    extractor.update_vocab(pack, instance)

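            # Second pass extracts character-id features for each sentence.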
            features = []
            for pack in pipeline.process_dataset(self.dataset_path):
                for instance in pack.get(Sentence):
                    features.append(extractor.extract(pack, instance))

            for feat in features:
                recovered = [[extractor.id2element(idx) for idx in sent]
                             for sent in feat.data[0]]

                recovered = ["".join(chars) for chars in recovered]
                recovered = " ".join(recovered)
                if "max_char_length" not in config:
                    self.assertEqual(recovered, sentence)
                else:
                    truncated_sent = [
                        token[:config["max_char_length"]]
                        for token in sentence.split(" ")
                    ]
                    truncated_sent = " ".join(truncated_sent)
                    self.assertEqual(recovered, truncated_sent)
Example 5
    def setUp(self):
        root_path = os.path.abspath(
            os.path.join(
                os.path.dirname(os.path.abspath(__file__)),
                os.pardir,
                os.pardir,
                os.pardir,
                os.pardir,
            ))
        # Define and configure the Pipeline
        self.dataset_path = os.path.join(root_path, "data_samples/conll03")
        self.nlp = Pipeline[DataPack]()
        self.nlp.set_reader(CoNLL03Reader())
        self.nlp.initialize()
Example 6
    def setUp(self):
        file_path: str = "data_samples/data_pack_dataset_test"
        reader = CoNLL03Reader()
        context_type = Sentence
        request = {Sentence: []}
        skip_k = 0

        self.input_files = ["conll03_1.conll", "conll03_2.conll"]
        self.feature_schemes = {}

        train_pl: Pipeline = Pipeline()
        train_pl.set_reader(reader)
        train_pl.initialize()
        pack_iterator: Iterator[PackType] = \
            train_pl.process_dataset(file_path)

        self.data_source: DataPackIterator = DataPackIterator(
            pack_iterator, context_type, request, skip_k)
Example 7
    def test_FixedSizeDataPackBatcherWithExtractor(self):
        r"""This funciton tests the corectness of cross_pack."""
        pipeline = Pipeline[DataPack]()
        pipeline.set_reader(CoNLL03Reader())
        pipeline.initialize()

        text_extractor = AttributeExtractor()
        text_extractor.initialize({
            "need_pad": True,
            "entry_type": "ft.onto.base_ontology.Token",
            "attribute": "text",
        })

        pack_num = 0
        for pack in pipeline.process_dataset(self.dataset_path):
            pack_num += 1
            for instance in pack.get(Sentence):
                text_extractor.update_vocab(pack, instance)
        self.assertEqual(pack_num, 2)

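        # Group two sentences per batch, converting the extracted text
        # features with a Converter.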
        batch_size = 2
        batcher = FixedSizeDataPackBatcherWithExtractor()
        batcher.initialize({
            "context_type": Sentence,
            "batch_size": batch_size,
            "feature_scheme": {
                "text_tag": {
                    "extractor": text_extractor,
                    "converter": Converter(),
                    "type": TrainPreprocessor.DATA_INPUT,
                }
            },
        })

        batch_num = 0
        for pack in pipeline.process_dataset(self.dataset_path):
            for batch in batcher.get_batch(pack):
                batch_num += 1
                self.assertEqual(len(batch[0]), batch_size)
        for _ in batcher.flush():
            batch_num += 1
        self.assertEqual(batch_num, 1)
Example 8
    def test_Predictor(self):
        pipeline = Pipeline[DataPack]()
        pipeline.set_reader(CoNLL03Reader())
        pipeline.initialize()

        text_extractor = AttributeExtractor({
            "need_pad": True,
            "entry_type": Token,
            "attribute": "text",
        })
        for pack in pipeline.process_dataset(self.dataset_path):
            for instance in pack.get(Sentence):
                text_extractor.update_vocab(pack, instance)

        ner_extractor = BioSeqTaggingExtractor({
            "entry_type": EntityMention,
            "need_pad": True,
            "attribute": "ner_type",
            "tagging_unit": Token,
        })
        for pack in pipeline.process_dataset(self.dataset_path):
            for instance in pack.get(Sentence):
                ner_extractor.update_vocab(pack, instance)

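        # The fake model below returns the same tag id (FAKEOUTPUT) for every
        # token, so every predicted NER label should be identical.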
        FAKEOUTPUT = 2
        expected_ners = [
            ner_extractor.id2element(FAKEOUTPUT)[0] for _ in range(30)
        ]

        class Model:
            def __call__(self, batch):
                text_feature = batch["text_tag"]["data"]
                return {
                    "ner_tag":
                    [[FAKEOUTPUT for j in range(len(text_feature[0]))]
                     for i in range(len(text_feature))]
                }

        model = Model()

        class NERPredictor(Predictor):
            def predict(self, batch):
                return self.model(batch)

        predictor = NERPredictor()

        predictor_pipeline = Pipeline[DataPack]()
        predictor_pipeline.set_reader(CoNLL03Reader())

        predictor_config = {
            "scope": Sentence,
            "batch_size": 2,
            "feature_scheme": {
                "text_tag": {
                    "extractor": text_extractor,
                    "converter": Converter({}),
                    "type": TrainPreprocessor.DATA_INPUT
                },
                "ner_tag": {
                    "extractor": ner_extractor,
                    "converter": Converter({}),
                    "type": TrainPreprocessor.DATA_OUTPUT
                },
            },
        }
        predictor.load(model)
        predictor_pipeline.add(predictor, predictor_config)
        predictor_pipeline.initialize()

        for pack in predictor_pipeline.process_dataset(self.dataset_path):
            for instance in pack.get(Sentence):
                ners = [
                    e.ner_type for e in list(pack.get(EntityMention, instance))
                ]
                self.assertListEqual(ners, expected_ners)
Example 9
import yaml

from forte.common.configuration import Config
from forte.data.data_pack import DataPack
from forte.pipeline import Pipeline
from forte.data.readers.conll03_reader import CoNLL03Reader
from forte.processors.ner_predictor import CoNLLNERPredictor
from ft.onto.base_ontology import Token, Sentence, EntityMention

config_data = yaml.safe_load(open("config_data.yml", "r"))
config_model = yaml.safe_load(open("config_model.yml", "r"))

config = Config({}, default_hparams=None)
config.add_hparam('config_data', config_data)
config.add_hparam('config_model', config_model)

pl = Pipeline[DataPack]()
pl.set_reader(CoNLL03Reader())
pl.add(CoNLLNERPredictor(), config=config)

pl.initialize()

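# Run the pipeline over the test set, requesting Token NER fields,
# sentence spans, and entity mentions for each sentence.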
for pack in pl.process_dataset(config.config_data.test_path):
    for pred_sentence in pack.get_data(
            context_type=Sentence,
            request={
                Token: {
                    "fields": ["ner"]
                },
                Sentence: [],  # span by default
                EntityMention: {}
            }):
        print("============================")
Example 10
    def setUp(self):
        self.config = {
            "max_char_length": 45,
            "train_path": "data_samples/train_pipeline_test",
            "val_path": "data_samples/train_pipeline_test",
            "num_epochs": 1,
            "batch_size_tokens": 5,
            "learning_rate": 0.01,
            "momentum": 0.9,
            "nesterov": True
        }

        text_extractor: AttributeExtractor = \
            AttributeExtractor(config={"entry_type": Token,
                                       "vocab_method": "indexing",
                                       "attribute": "text"})

        char_extractor: CharExtractor = \
            CharExtractor(
                config={"entry_type": Token,
                        "vocab_method": "indexing",
                        "max_char_length": self.config["max_char_length"]})

        # Add the output part of the request based on the task type
        ner_extractor: BioSeqTaggingExtractor = \
            BioSeqTaggingExtractor(config={"entry_type": EntityMention,
                                           "attribute": "ner_type",
                                           "tagging_unit": Token,
                                           "vocab_method": "indexing"})

        self.tp_request = {
            "scope": Sentence,
            "schemes": {
                "text_tag": {
                    "type": TrainPreprocessor.DATA_INPUT,
                    "extractor": text_extractor
                },
                "char_tag": {
                    "type": TrainPreprocessor.DATA_INPUT,
                    "extractor": char_extractor
                },
                "ner_tag": {
                    "type": TrainPreprocessor.DATA_OUTPUT,
                    "extractor": ner_extractor
                }
            }
        }

        self.reader = CoNLL03Reader()

        self.evaluator = CoNLLNEREvaluator()

        self.tp_config = {
            "dataset": {
                "batch_size": self.config["batch_size_tokens"]
            }
        }

        train_pl: Pipeline = Pipeline()
        train_pl.set_reader(self.reader)
        train_pl.initialize()
        pack_iterator: Iterator[PackType] = \
            train_pl.process_dataset(self.config["train_path"])

        self.train_preprocessor = \
            TrainPreprocessor(pack_iterator=pack_iterator,
                              request=self.tp_request,
                              config=self.tp_config)
Example 11
    def setUp(self):
        root_path = os.path.abspath(
            os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir,
                         os.pardir))

        self.config = {
            "max_char_length": 45,
            "train_path": os.path.join(
                root_path, "data_samples/train_pipeline_test"),
            "val_path": os.path.join(
                root_path, "data_samples/train_pipeline_test"),
            "num_epochs": 1,
            "batch_size_tokens": 5,
            "learning_rate": 0.01,
            "momentum": 0.9,
            "nesterov": True,
        }

        text_extractor = (
            "forte.data.extractors.attribute_extractor.AttributeExtractor")
        text_extractor_config = {
            "entry_type": "ft.onto.base_ontology.Token",
            "vocab_method": "indexing",
            "attribute": "text",
        }

        char_extractor = "forte.data.extractors.char_extractor.CharExtractor"
        char_extractor_config = {
            "entry_type": "ft.onto.base_ontology.Token",
            "vocab_method": "indexing",
            "max_char_length": self.config["max_char_length"],
        }

        # Add the output part of the request based on the task type
        ner_extractor = "forte.data.extractors.seqtagging_extractor.BioSeqTaggingExtractor"  # pylint: disable=line-too-long
        ner_extractor_config = {
            "entry_type": "ft.onto.base_ontology.EntityMention",
            "attribute": "ner_type",
            "tagging_unit": "ft.onto.base_ontology.Token",
            "vocab_method": "indexing",
        }

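        # Each feature tag maps to an extractor class path and its
        # configuration; the extractors are built when the preprocessor
        # is initialized with this request.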
        self.tp_request = {
            "scope": "ft.onto.base_ontology.Sentence",
            "feature_scheme": {
                "text_tag": {
                    "type": "data_input",
                    "extractor": {
                        "class_name": text_extractor,
                        "config": text_extractor_config,
                    },
                },
                "char_tag": {
                    "type": "data_input",
                    "extractor": {
                        "class_name": char_extractor,
                        "config": char_extractor_config,
                    },
                },
                "ner_tag": {
                    "type": "data_output",
                    "extractor": {
                        "class_name": ner_extractor,
                        "config": ner_extractor_config,
                    },
                },
            },
        }

        self.tp_config = {
            "request": self.tp_request,
            "dataset": {
                "batch_size": self.config["batch_size_tokens"]
            },
        }

        self.reader = CoNLL03Reader()

        self.evaluator = CoNLLNEREvaluator()

        train_pl: Pipeline = Pipeline()
        train_pl.set_reader(self.reader)
        train_pl.initialize()
        pack_iterator: Iterator[PackType] = train_pl.process_dataset(
            self.config["train_path"])

        self.train_preprocessor = TrainPreprocessor(
            pack_iterator=pack_iterator)
        self.train_preprocessor.initialize(config=self.tp_config)
Example 12
    def test_Predictor(self):
        pipeline = Pipeline[DataPack]()
        pipeline.set_reader(CoNLL03Reader())
        pipeline.initialize()

        text_extractor_name = (
            "forte.data.extractors.attribute_extractor.AttributeExtractor")
        text_extractor_config = {
            "need_pad": True,
            "entry_type": "ft.onto.base_ontology.Token",
            "attribute": "text",
        }

        ner_extractor_name = "forte.data.extractors.seqtagging_extractor.BioSeqTaggingExtractor"  # pylint: disable=line-too-long
        ner_extractor_config = {
            "entry_type": "ft.onto.base_ontology.EntityMention",
            "need_pad": True,
            "attribute": "ner_type",
            "tagging_unit": "ft.onto.base_ontology.Token",
        }

        model = DummyModel()

        predictor_pipeline = Pipeline[DataPack]()
        predictor_pipeline.set_reader(CoNLL03Reader())

        predictor_config = {
            "scope": "ft.onto.base_ontology.Sentence",
            "batch_size": 2,
            "feature_scheme": {
                "text_tag": {
                    "type": "data_input",
                    "extractor": {
                        "class_name": text_extractor_name,
                        "config": text_extractor_config,
                    },
                },
                "ner_tag": {
                    "type": "data_output",
                    "extractor": {
                        "class_name": ner_extractor_name,
                        "config": ner_extractor_config,
                    },
                },
            },
            "do_eval": True,
        }
        # dummy = DummyRelationExtractor()
        # config = {"batcher": {"batch_size": 5}}
        evaluator_config = {
            "entry_type": "ft.onto.base_ontology.EntityMention",
            "attribute": "ner_type",
            "tagging_unit": "ft.onto.base_ontology.Token",
        }
        predictor = NERPredictor()
        predictor.load(model)
        predictor_pipeline.add(predictor, predictor_config)
        # predictor_pipeline.add(dummy, config)
        predictor_pipeline.add(CoNLLNEREvaluator(), evaluator_config)
        predictor_pipeline.initialize()

        text_extractor = predictor.configs.feature_scheme.text_tag.extractor
        ner_extractor = predictor.configs.feature_scheme.ner_tag.extractor

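        # Build the text and NER vocabularies by scanning the dataset with
        # the extractors created inside the predictor.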
        for pack in pipeline.process_dataset(self.dataset_path):
            for instance in pack.get(Sentence):
                text_extractor.update_vocab(pack, instance)

        for pack in pipeline.process_dataset(self.dataset_path):
            for instance in pack.get(Sentence):
                ner_extractor.update_vocab(pack, instance)
        expected_ners = [
            ner_extractor.id2element(FAKEOUTPUT)[0] for _ in range(30)
        ]

        for pack in predictor_pipeline.process_dataset(self.dataset_path):
            for instance in pack.get(Sentence):
                ners = [
                    e.ner_type for e in list(pack.get(EntityMention, instance))
                ]
                self.assertListEqual(ners, expected_ners)
Example 13
    def test_Predictor(self):
        pipeline = Pipeline[DataPack]()
        pipeline.set_reader(CoNLL03Reader())
        pipeline.initialize()

        text_extractor = AttributeExtractor({
            "need_pad": True,
            "entry_type": Token,
            "attribute": "text",
        })
        for pack in pipeline.process_dataset(self.dataset_path):
            for instance in pack.get(Sentence):
                text_extractor.update_vocab(pack, instance)

        ner_extractor = BioSeqTaggingExtractor({
            "entry_type": EntityMention,
            "need_pad": True,
            "attribute": "ner_type",
            "tagging_unit": Token,
        })
        for pack in pipeline.process_dataset(self.dataset_path):
            for instance in pack.get(Sentence):
                ner_extractor.update_vocab(pack, instance)

        expected_ners = [
            ner_extractor.id2element(FAKEOUTPUT)[0] for _ in range(30)]

        model = DummyModel()

        predictor_pipeline = Pipeline[DataPack]()
        predictor_pipeline.set_reader(CoNLL03Reader())

        predictor_config = {
            "scope": Sentence,
            "batch_size": 2,
            "feature_scheme": {
                "text_tag": {
                    "extractor": text_extractor,
                    "converter": Converter(),
                    "type": TrainPreprocessor.DATA_INPUT
                },
                "ner_tag": {
                    "extractor": ner_extractor,
                    "converter": Converter(),
                    "type": TrainPreprocessor.DATA_OUTPUT
                },
            },
        }

        # dummy = DummyRelationExtractor()
        # config = {"batcher": {"batch_size": 5}}

        predictor = NERPredictor()
        predictor.load(model)
        predictor_pipeline.add(predictor, predictor_config)
        # predictor_pipeline.add(dummy, config)

        predictor_pipeline.add(CoNLLNEREvaluator())

        predictor_pipeline.initialize()
        for pack in predictor_pipeline.process_dataset(self.dataset_path):
            for instance in pack.get(Sentence):
                ners = [e.ner_type for e in
                        list(pack.get(EntityMention, instance))]
                self.assertListEqual(ners, expected_ners)
Example 14
    def setUp(self):
        # Define and configure the Pipeline
        self.dataset_path = "data_samples/conll03"
        self.nlp = Pipeline[DataPack]()
        self.nlp.set_reader(CoNLL03Reader())
        self.nlp.initialize()
Example 15
    def test_BioSeqTaggingExtractor(self):
        pipeline = Pipeline[DataPack]()
        reader = CoNLL03Reader()
        pipeline.set_reader(reader)
        pipeline.initialize()

        config = {
            "entry_type": EntityMention,
            "need_pad": True,
            "attribute": "ner_type",
            "tagging_unit": Token,
        }

        expected = [(None, 'O'), ('ORG', 'B'), ('ORG', 'I'), (None, 'O'),
                    (None, 'O'), (None, 'O'), (None, 'O'), (None, 'O'),
                    (None, 'O'), ('MISC', 'B'), (None, 'O'), (None, 'O'),
                    (None, 'O'), (None, 'O'), (None, 'O'), ('MISC', 'B'),
                    (None, 'O'), (None, 'O'), (None, 'O'), (None, 'O'),
                    (None, 'O'), (None, 'O'), (None, 'O'), (None, 'O'),
                    (None, 'O'), (None, 'O'), (None, 'O'), (None, 'O'),
                    (None, 'O'), (None, 'O')]

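        # An ill-formed BIO sequence: one I-tag follows a B-tag of a
        # different type, and another I-tag follows an O tag.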
        invalid = [(None, 'O'), ('MISC', 'B'), ('ORG', 'I'), (None, 'O'),
                   (None, 'O'), (None, 'O'), (None, 'O'), (None, 'O'),
                   (None, 'O'), ('MISC', 'B'), (None, 'O'), (None, 'O'),
                   (None, 'O'), (None, 'O'), (None, 'O'), ('MISC', 'I'),
                   (None, 'O'), (None, 'O'), (None, 'O'), (None, 'O'),
                   (None, 'O'), (None, 'O'), (None, 'O'), (None, 'O'),
                   (None, 'O'), (None, 'O'), (None, 'O'), (None, 'O'),
                   (None, 'O'), (None, 'O')]

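        # After writing the invalid tags into the pack, re-extraction should
        # yield this repaired sequence, where each stray I-tag becomes a B-tag.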
        corrected = [(None, 'O'), ('MISC', 'B'), ('ORG', 'B'), (None, 'O'),
                     (None, 'O'), (None, 'O'), (None, 'O'), (None, 'O'),
                     (None, 'O'), ('MISC', 'B'), (None, 'O'), (None, 'O'),
                     (None, 'O'), (None, 'O'), (None, 'O'), ('MISC', 'B'),
                     (None, 'O'), (None, 'O'), (None, 'O'), (None, 'O'),
                     (None, 'O'), (None, 'O'), (None, 'O'), (None, 'O'),
                     (None, 'O'), (None, 'O'), (None, 'O'), (None, 'O'),
                     (None, 'O'), (None, 'O')]

        extractor = BioSeqTaggingExtractor(config)

        for pack in pipeline.process_dataset(self.dataset_path):
            for instance in pack.get(Sentence):
                extractor.update_vocab(pack, instance)

        extractor.predefined_vocab({"MISC", "ORG"})
        invalid = [extractor.element2repr(ele) for ele in invalid]

        for pack in pipeline.process_dataset(self.dataset_path):
            for instance in pack.get(Sentence):
                feature = extractor.extract(pack, instance)
                recovered = [
                    extractor.id2element(idx) for idx in feature._data
                ]
                self.assertListEqual(expected, recovered)
                extractor.pre_evaluation_action(pack, instance)
                extractor.add_to_pack(pack, instance, feature._data)
            pack.add_all_remaining_entries()

            for instance in pack.get(Sentence):
                extractor.pre_evaluation_action(pack, instance)
                extractor.add_to_pack(pack, instance, invalid)
            pack.add_all_remaining_entries()

            for instance in pack.get(Sentence):
                feature = extractor.extract(pack, instance)
                recovered = [
                    extractor.id2element(idx) for idx in feature._data
                ]
                self.assertListEqual(corrected, recovered)
Example 16
    def test_AttributeExtractor(self):
        pipeline = Pipeline[DataPack]()
        reader = CoNLL03Reader()
        pipeline.set_reader(reader)
        pipeline.initialize()

        config = {
            "need_pad": True,
            "entry_type": Token,
            "attribute": "text",
        }

        extractor = AttributeExtractor(config)

        sentence = "The European Commission said on Thursday it disagreed "\
                    "with German advice to consumers to shun British lamb "\
                    "until scientists determine whether mad cow disease "\
                    "can be transmitted to sheep ."

        # Check update_vocab.
        for pack in pipeline.process_dataset(self.dataset_path):
            for instance in pack.get(Sentence):
                extractor.update_vocab(pack, instance)

        # Check extract
        for pack in pipeline.process_dataset(self.dataset_path):
            features = []
            for instance in pack.get(Sentence):
                features.append(extractor.extract(pack, instance))

            for feat in features:
                recovered = [extractor.id2element(idx) for idx in feat.data[0]]
                self.assertEqual(" ".join(recovered), sentence)

        # Check add_to_pack and pre_evaluation_action.
        # Vocab_method is indexing, so the id of an element
        # is the same as its repr.
        extractor.config.attribute = "pos"
        extractor.add("TMP")
        fake_pos_ids = [
            extractor.element2repr("TMP")
            for _ in range(len(sentence.split(" ")))
        ]
        # After pre_evaluation_action, the attribute value will
        # become None. Since vocab_use_unk is true, None will be
        # mapped to <UNK>.
        unk_pos_ids = [
            extractor.element2repr(None)
            for _ in range(len(sentence.split(" ")))
        ]

        for pack in pipeline.process_dataset(self.dataset_path):
            for instance in pack.get(Sentence):
                extractor.add_to_pack(pack, instance, fake_pos_ids)

            for instance in pack.get(Sentence):
                feat = extractor.extract(pack, instance)
                self.assertEqual(feat.data[0], fake_pos_ids)

            for instance in pack.get(Sentence):
                extractor.pre_evaluation_action(pack, instance)
                feat = extractor.extract(pack, instance)
                self.assertEqual(feat.data[0], unk_pos_ids)

        # Check state and from_state.
        new_extractor = pkl.loads(pkl.dumps(extractor))
        self.assertEqual(new_extractor.config.attribute,
                         extractor.config.attribute)
Example 17
    word = batch["text_tag"]["data"]
    char = batch["char_tag"]["data"]
    word_masks = batch["text_tag"]["masks"][0]
    output = model.decode(input_word=word, input_char=char, mask=word_masks)
    output = output.numpy()
    return {"output_tag": output}


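# Pick the tagging task from the command line; only "ner" and "pos" are supported.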
task = sys.argv[1]
assert task in ["ner", "pos"], "Unsupported NLP task type: {}".format(task)

config_predict = yaml.safe_load(open("configs/config_predict.yml", "r"))
saved_model = torch.load(config_predict["model_path"])
train_state = torch.load(config_predict["train_state_path"])

reader = CoNLL03Reader()
predictor = TaggingPredictor()
evaluator = CoNLLNEREvaluator()

pl = Pipeline()
pl.set_reader(reader)
pl.add(predictor)
pl.add(evaluator)
pl.initialize()

for pack in pl.process_dataset(config_predict["test_path"]):
    print("---- pack ----")
    for instance in pack.get(Sentence):
        sent = instance.text
        output_tags = []
        if task == "ner":