Example #1
    def ignore_test_train_with_empty_data(self):
        """
        test training with empty training data
        :return:
        """
        test_config = "tests/data/test_config/test_config.json"
        config = AnnotatorConfig(test_config)

        trainer = Trainer(config)
        assert len(trainer.pipeline) > 0
        # train with an empty training set
        train_data = TrainingData([])
        trainer.train(train_data)
        # test persist and load
        persisted_path = trainer.persist(config['path'], config['project'],
                                         config['fixed_model_name'])

        interpreter_loaded = Interpreter.load(persisted_path, config)

        assert interpreter_loaded.pipeline
        assert interpreter_loaded.parse("hello") is not None
        assert interpreter_loaded.parse(
            "Hello today is Monday, again!") is not None

        # remove tmp models
        shutil.rmtree(config['path'], ignore_errors=False)
Example #2
    def ignor_test_load_default_config(self):
        """
        test load default config
        :return:
        """
        config = AnnotatorConfig()
        assert config["config"] == "config.json"
Example #3
    def ignore_test_load_and_persist_without_train(self):
        """
        test save and load of a model without training
        :return:
        """
        test_config = "tests/data/test_config/test_config.json"
        config = AnnotatorConfig(test_config)

        trainer = Trainer(config)
        assert len(trainer.pipeline) > 0
        # create tmp train set
        tmp_path = create_tmp_test_jsonfile("tmp.json")
        train_data = load_local_data(tmp_path)
        # rm tmp train set
        rm_tmp_file("tmp.json")

        # interpreter = trainer.train(train_data)
        # test persist and load
        persisted_path = trainer.persist(config['path'], config['project'],
                                         config['fixed_model_name'])

        interpreter_loaded = Interpreter.load(persisted_path, config)
        assert interpreter_loaded.pipeline
        assert interpreter_loaded.parse("hello") is not None
        assert interpreter_loaded.parse(
            "Hello today is Monday, again!") is not None
        # remove tmp models
        shutil.rmtree(config['path'], ignore_errors=False)
Example #4
    def ignore_test_trainer_persist(self):
        """
        test pipeline persist, metadata will be saved
        :return:
        """
        test_config = "tests/data/test_config/test_config.json"
        config = AnnotatorConfig(test_config)

        trainer = Trainer(config)
        assert len(trainer.pipeline) > 0
        # char_tokenizer component should have been created
        assert trainer.pipeline[0] is not None
        # create tmp train set
        tmp_path = create_tmp_test_jsonfile("tmp.json")
        train_data = load_local_data(tmp_path)
        # rm tmp train set
        rm_tmp_file("tmp.json")

        trainer.train(train_data)
        persisted_path = trainer.persist(config['path'], config['project'],
                                         config['fixed_model_name'])
        # load persisted metadata
        metadata_path = os.path.join(persisted_path, 'metadata.json')
        with io.open(metadata_path) as f:
            metadata = json.load(f)
        assert 'trained_at' in metadata
        # rm tmp files and dirs
        shutil.rmtree(config['path'], ignore_errors=False)
Example #5
    def ignore_test_pipeline_flow(self):
        """
        test trainer's train func for pipeline
        :return:
        """
        test_config = "tests/data/test_config/test_config.json"
        config = AnnotatorConfig(test_config)

        trainer = Trainer(config)
        assert len(trainer.pipeline) > 0
        # create tmp train set
        tmp_path = create_tmp_test_jsonfile("tmp.json")
        train_data = load_local_data(tmp_path)
        # rm tmp train set
        rm_tmp_file("tmp.json")

        interpreter = trainer.train(train_data)
        assert interpreter is not None
        out1 = interpreter.parse("点连接拿红包啦")

        # test persist and load
        persisted_path = trainer.persist(config['path'], config['project'],
                                         config['fixed_model_name'])

        interpreter_loaded = Interpreter.load(persisted_path, config)
        out2 = interpreter_loaded.parse("点连接拿红包啦")
        assert out1.get("classifylabel").get("name") == out2.get(
            "classifylabel").get("name")

        # remove tmp models
        shutil.rmtree(config['path'], ignore_errors=True)
Example #6
    def test_active_leaner_process_texts(self):
        """
        test that the active learner processes raw texts
        :return:
        """
        test_config = "tests/data/test_config.json"
        config = AnnotatorConfig(test_config)

        # load all data for the test; in practice it comes from user labeling
        with io.open(config["org_data"], encoding="utf-8-sig") as f:
            data = simplejson.loads(f.read())
        validate_local_data(data)

        data_set = data.get("data_set", list())

        # fake user-labeled data: pretend the user has labeled 50 texts
        faker_user_labeled_data = data_set[:50]

        # texts to be predicted
        texts = [{"uuid": 1, "text": "我是测试"}, {"uuid": 2, "text": "我是测试2"}]

        active_learner = ActiveLearner(config)
        active_learner.train(faker_user_labeled_data)
        predicted = active_learner.process_texts(texts)
        assert len(predicted) == 2
        assert "classifylabel" in predicted[0]
Example #7
    def test_kmeans(self):
        from chi_annotator.algo_factory.components import ComponentBuilder
        from chi_annotator.algo_factory.common import Message
        from chi_annotator.task_center.config import AnnotatorConfig
        from chi_annotator.algo_factory.common import TrainingData
        task_config = dict(config.CLASSIFY_TASK_CONFIG)
        dir_name = os.path.dirname(os.path.abspath(__file__))
        task_config[
            "embedding_path"] = dir_name + "/../data/test_embedding/vec.txt"
        task_config["embedding_type"] = "w2v"
        cfg = AnnotatorConfig(task_config)
        pos_msg1 = Message(u"你好,我是一个demo!!!!")
        pos_msg2 = Message(u"你好,你好,你好")
        neg_msg1 = Message(u"如果发现有文件漏提或注释有误")
        neg_msg2 = Message(u"增加一个需要上传的文件")

        train_data = TrainingData([neg_msg1, neg_msg2, pos_msg1, pos_msg2])
        cb = ComponentBuilder()
        char_tokenize = cb.create_component("char_tokenizer", cfg)
        sent_embedding = cb.create_component("sentence_embedding_extractor",
                                             cfg)
        cluster = cb.create_component("cluster_sklearn", cfg)
        char_tokenize.train(train_data, cfg)
        sent_embedding.train(train_data, cfg)
        cluster.train(train_data, cfg)
        # test
        test_msg = Message(u"增加一个需要上传的文件")
        char_tokenize.process(test_msg, **{})
        sent_embedding.process(test_msg, **{})
        cluster.process(test_msg, **{})
        assert test_msg.get("cluster_center").get("center") is not None
Example #8
    def test_randomforest_classify(self):
        task_config = dict(config.CLASSIFY_TASK_CONFIG)
        dir_name = os.path.dirname(os.path.abspath(__file__))
        task_config[
            "embedding_path"] = dir_name + "/../data/test_embedding/vec.txt"
        task_config["embedding_type"] = "w2v"
        cfg = AnnotatorConfig(task_config)
        train_data = TrainingData(
            [self.neg_msg1, self.neg_msg2, self.pos_msg1, self.pos_msg2])
        cb = ComponentBuilder()
        char_tokenize = cb.create_component("char_tokenizer", cfg)
        sent_embedding = cb.create_component("sentence_embedding_extractor",
                                             cfg)
        RandomForest_Classifier = cb.create_component(
            "RandomForest_Classifier", cfg)
        char_tokenize.train(train_data, cfg)
        sent_embedding.train(train_data, cfg)
        RandomForest_Classifier.train(train_data, cfg)
        # test
        test_msg = Message(u"增加一个需要上传的文件")
        char_tokenize.process(test_msg, **{})
        sent_embedding.process(test_msg, **{})
        RandomForest_Classifier.process(test_msg, **{})

        assert test_msg.get("classifylabel").get("name") == "bad"
Example #9
    def test_kmeans(self):
        from chi_annotator.algo_factory.components import ComponentBuilder
        from chi_annotator.algo_factory.common import Message
        from chi_annotator.task_center.config import AnnotatorConfig
        from chi_annotator.algo_factory.common import TrainingData
        cfg = AnnotatorConfig()
        pos_msg1 = Message(u"你好,我是一个demo!!!!")
        pos_msg2 = Message(u"你好,你好,你好")
        neg_msg1 = Message(u"如果发现有文件漏提或注释有误")
        neg_msg2 = Message(u"增加一个需要上传的文件")

        train_data = TrainingData([neg_msg1, neg_msg2, pos_msg1, pos_msg2])
        cb = ComponentBuilder()
        char_tokenize = cb.create_component("char_tokenizer", cfg)
        sent_embedding = cb.create_component("sentence_embedding_extractor",
                                             cfg)
        cluster = cb.create_component("cluster_sklearn", cfg)
        char_tokenize.train(train_data, cfg)
        sent_embedding.train(train_data, cfg)
        cluster.train(train_data, cfg)
        # test
        test_msg = Message(u"增加一个需要上传的文件")
        char_tokenize.process(test_msg, **{})
        sent_embedding.process(test_msg, **{})
        cluster.process(test_msg, **{})
        assert test_msg.get("cluster_center").get("center") is not None
Example #10
def init():  # pragma: no cover
    # type: () -> AnnotatorConfig
    """Combines passed arguments to create Annotator config."""

    parser = create_argparser()
    args = parser.parse_args()
    config = AnnotatorConfig(args.config, os.environ, vars(args))
    return config
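
`create_argparser` itself is not shown in these examples. A minimal sketch of what it might look like, given only that `init()` reads `args.config` and forwards the remaining arguments via `vars(args)`; the flag names below are assumptions, not the project's actual CLI:

import argparse

def create_argparser():
    # hypothetical sketch: init() only requires that parsing yields an
    # `args.config` attribute, so a single --config option is the
    # minimal interface
    parser = argparse.ArgumentParser(
        description="parse configuration for the annotator")
    parser.add_argument("-c", "--config",
                        help="path to the annotator config file (JSON)")
    return parser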
Example #11
    def ignore_test_load_config(self):
        """
        test load config
        :return:
        """
        config = AnnotatorConfig(
            filename="chi_annotator/user_instance/examples/classify/spam_email_classify_config.json")
        assert config["name"] == "email_spam_classification"
Example #12
    def test_online_training(self):
        """
        test online training.
        :return:
        """
        test_config = "tests/data/test_config.json"
        config = AnnotatorConfig(test_config)
        # init trainer first
        trainer = Trainer(config)

        # load all data for the test; in practice it comes from user labeling
        with io.open(config["org_data"], encoding="utf-8-sig") as f:
            data = simplejson.loads(f.read())
        validate_local_data(data)

        data_set = data.get("data_set", list())

        # fake user-labeled data: pretend the user has labeled 50 texts
        faker_user_labeled_data = data_set[:50]
        # the remaining 950 texts to predict and rank
        unlabeled_data = data_set[50:]

        # now test online training
        examples = []
        for e in faker_user_labeled_data:
            data = e.copy()
            if "text" in data:
                del data["text"]
            examples.append(Message(e["text"], data))

        new_labeled_data = TrainingData(examples)

        # full training run, then persist the model
        interpreter = trainer.train(new_labeled_data)
        trainer.persist(config['path'], config['project'],
                        config['fixed_model_name'])

        # predict the unlabeled dataset for ranking
        predicted_results = []
        for item in unlabeled_data:
            predict = interpreter.parse(item["text"])
            predicted_results.append(predict)

        # filter low-confidence predictions as ranking candidates
        # a predicted result is formatted as
        # {
        #   'classifylabel': {'name': 'spam', 'confidence': 0.5701943777626447},
        #   'classifylabel_ranking': [{'name': 'spam', 'confidence': 0.5701943777626447},
        #                             {'name': 'notspam', 'confidence': 0.42980562223735524}],
        #   'text': '我是一个垃圾邮件'
        # }
        confidence_threshold = config["confidence_threshold"]
        ranking_candidates = [text for text in predicted_results
                              if text.get("classifylabel").get("confidence") < confidence_threshold]
        for candidate in ranking_candidates:
            assert candidate.get("classifylabel").get(
                "confidence") < confidence_threshold
Example #13
    def ignore_test_trainer_init(self):
        """
        test trainer
        :return:
        """
        test_config = "tests/data/test_config/test_config.json"
        config = AnnotatorConfig(test_config)

        trainer = Trainer(config)
        assert len(trainer.pipeline) > 0
Example #14
    def teardown_class(cls):
        """ teardown any state that was previously setup with a call to
        setup_class.
        """
        # remove tmp files and dirs created in test case
        test_config = "tests/data/test_config.json"
        config = AnnotatorConfig(test_config)

        rm_tmp_file("test_data.json")
        shutil.rmtree(config['path'], ignore_errors=True)
Example #15
    def test_char_tokenizer(self):
        from chi_annotator.algo_factory.components import ComponentBuilder
        from chi_annotator.algo_factory.common import Message
        from chi_annotator.task_center.config import AnnotatorConfig
        msg = Message(u"你好,我是一个demo!!!!")
        cb = ComponentBuilder()
        cfg = AnnotatorConfig(config.CLASSIFY_TASK_CONFIG)
        ct = cb.create_component("char_tokenizer", cfg)
        assert ct is not None
        ct.process(msg, **{})
        assert len(msg.get("tokens")) > 0
Example #16
def test_tokenizer_main():
    from chi_annotator.algo_factory.components import ComponentBuilder
    from chi_annotator.algo_factory.common import Message
    from chi_annotator.task_center.config import AnnotatorConfig
    msg = Message(u"你好,我是一个demo!!!!")
    cb = ComponentBuilder()
    config = AnnotatorConfig()
    ct = cb.create_component("char_tokenizer", config)
    if ct is not None:
        ct.process(msg, **{})
        print(msg.get("tokens"))
Example #17
    def test_trainer_init(self):
        """
        test trainer
        :return:
        """
        test_config = "tests/data/test_config.json"
        config = AnnotatorConfig(test_config)

        trainer = Trainer(config)
        assert len(trainer.pipeline) == 1
        # char_tokenizer component should have been created
        assert trainer.pipeline[0] is not None
Example #18
    def ignor_test_embedding(self):
        from chi_annotator.algo_factory.components import ComponentBuilder
        from chi_annotator.algo_factory.common import Message
        from chi_annotator.task_center.config import AnnotatorConfig
        from chi_annotator.algo_factory.common import TrainingData
        from gensim.models.word2vec import LineSentence
        text_dir = create_tmp_test_textfile("spam_email_text_1000")

        # put the data into TrainingData
        with open(text_dir, 'r') as f:
            res = []
            for line in f.readlines():
                # strip all whitespace, including the trailing newline
                msg = Message(re.sub(r'\s', '', line))
                res.append(msg)
        res = TrainingData(res)

        cfg = AnnotatorConfig(
            filename="tests/data/test_config/test_config_embedding.json")
        cb = ComponentBuilder()

        # char_tokenizer is not needed for the embedding training for now
        char_tokenize = cb.create_component("char_tokenizer", cfg)
        char_tokenize.train(res, cfg)

        # load the embedding component and train the model; the input is LineSentence(data_path)
        embedding = cb.create_component("embedding", cfg)
        embedding.train(LineSentence(text_dir), cfg)
        embedding.persist(cfg.wv_model_path)

        # load sent_embedding and get the sentence vector from the trained embedding model
        sent_embedding = cb.create_component("embedding_extractor", cfg)
        msg = Message("你好,我是一个demo!!!!")
        char_tokenize.process(msg)
        sent_embedding.sentence_process(msg, **{})
        assert msg.get("sentence_embedding").sum() != 0

        # load the base model, add a new corpus, and train incrementally on top of the base model
        embedding = embedding.load(model_metadata=cfg)
        embedding.train(LineSentence(text_dir), cfg)
        embedding.persist(cfg.wv_model_path)

        # test the EmbeddingExtractor with the new model produced by incremental training
        sent_embedding = cb.create_component("embedding_extractor", cfg)
        msg = Message("你好,我是一个demo!!!!")
        char_tokenize.process(msg)
        sent_embedding.sentence_process(msg, **{})
        assert msg.get("sentence_embedding").sum() != 0

        rm_tmp_file("word2vec.model")
        rm_tmp_file("word2vec.model.vector")
        rm_tmp_file("spam_email_text_1000")
Example #19
    def ignor_test_senten_embedding_extractor(self):
        from chi_annotator.algo_factory.components import ComponentBuilder
        from chi_annotator.algo_factory.common import Message
        from chi_annotator.task_center.config import AnnotatorConfig
        cfg = AnnotatorConfig()
        msg = Message("你好,我是一个demo!!!!")
        cb = ComponentBuilder()
        char_tokenize = cb.create_component("char_tokenizer", cfg)
        sent_embedding = cb.create_component("sentence_embedding_extractor",
                                             cfg)
        char_tokenize.process(msg)
        sent_embedding.process(msg, **{})
        # the sentence embedding should sum to approximately -7.30032945834
        assert abs(msg.get("sentence_embedding").sum() + 7.30032945834) < 1e-6
Example #20
    def test_words_jieba_tokenizer(self):
        """
        test word tokenizer using jieba
        :return:
        """
        from chi_annotator.algo_factory.components import ComponentBuilder
        from chi_annotator.algo_factory.common import Message
        from chi_annotator.task_center.config import AnnotatorConfig
        msg = Message(u"你好,我是一个demo!!!!")
        cb = ComponentBuilder()
        config = AnnotatorConfig()
        ct = cb.create_component("tokenizer_jieba", config)
        assert ct is not None
        ct.process(msg, **{})
        assert len(msg.get("tokens")) > 0
Example #21
    def ignor_test_words_jieba_tokenizer(self):
        """
        # TODO: jieba will be added later
        test word tokenizer using jieba
        :return:
        """
        from chi_annotator.algo_factory.components import ComponentBuilder
        from chi_annotator.algo_factory.common import Message
        from chi_annotator.task_center.config import AnnotatorConfig
        msg = Message(u"你好,我是一个demo!!!!")
        cb = ComponentBuilder()
        cfg = AnnotatorConfig(config.CLASSIFY_TASK_CONFIG)
        ct = cb.create_component("tokenizer_jieba", cfg)
        assert ct is not None
        ct.process(msg, **{})
        assert len(msg.get("tokens")) > 0
Example #22
    def ignore_test_train_model_empty_pipeline(self):
        """
        train a model with an empty pipeline
        :return:
        """
        test_config = "tests/data/test_config/test_config.json"
        config = AnnotatorConfig(test_config)
        config['pipeline'] = []

        tmp_path = create_tmp_test_jsonfile("tmp.json")
        train_data = load_local_data(tmp_path)
        rm_tmp_file("tmp.json")

        with pytest.raises(ValueError):
            trainer = Trainer(config)
            trainer.train(train_data)
Example #23
    def ignore_test_handles_pipeline_with_non_existing_component(self):
        """
        handle a non-existent component in the pipeline
        :return:
        """
        test_config = "tests/data/test_config/test_config.json"
        config = AnnotatorConfig(test_config)
        config['pipeline'].append("unknown_component")

        tmp_path = create_tmp_test_jsonfile("tmp.json")
        train_data = load_local_data(tmp_path)
        rm_tmp_file("tmp.json")

        with pytest.raises(Exception) as execinfo:
            trainer = Trainer(config)
            trainer.train(train_data)
        assert "Failed to find component" in str(execinfo.value)
Example #24
    def load(model_dir,
             config=AnnotatorConfig(),
             component_builder=None,
             skip_valdation=False):
        """Creates an interpreter based on a persisted model."""

        if isinstance(model_dir, Metadata):
            # this is for backwards compatibilities (metadata passed as a dict)
            model_metadata = model_dir
            logger.warning(
                "Deprecated use of `Interpreter.load` with a metadata "
                "object. If you want to directly pass the metadata, "
                "use `Interpreter.create(metadata, ...)`. If you want "
                "to load the metadata from file, use "
                "`Interpreter.load(model_dir, ...)")
        else:
            model_metadata = Metadata.load(model_dir)
        return Interpreter.create(model_metadata, config, component_builder,
                                  skip_valdation)
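
For reference, a minimal round trip through this load path, assembled from the persist/load tests above (no calls beyond what those tests already make):

config = AnnotatorConfig("tests/data/test_config/test_config.json")
trainer = Trainer(config)
persisted_path = trainer.persist(config['path'], config['project'],
                                 config['fixed_model_name'])
interpreter = Interpreter.load(persisted_path, config)
assert interpreter.parse("hello") is not None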
Example #25
    def test_sgd_classify(self):
        cfg = AnnotatorConfig()
        train_data = TrainingData(
            [self.neg_msg1, self.neg_msg2, self.pos_msg1, self.pos_msg2])
        cb = ComponentBuilder()
        char_tokenize = cb.create_component("char_tokenizer", cfg)
        sent_embedding = cb.create_component("sentence_embedding_extractor",
                                             cfg)
        SGD_Classifier = cb.create_component("SGD_Classifier", cfg)
        char_tokenize.train(train_data, cfg)
        sent_embedding.train(train_data, cfg)
        SGD_Classifier.train(train_data, cfg)
        # test
        test_msg = Message(u"增加一个需要上传的文件")
        char_tokenize.process(test_msg, **{})
        sent_embedding.process(test_msg, **{})
        SGD_Classifier.process(test_msg, **{})

        assert test_msg.get("classifylabel").get("name") == "bad"
Example #26
    def test_pipeline_flow(self):
        """
        test trainer's train func for pipeline
        :return:
        """
        test_config = "tests/data/test_config.json"
        config = AnnotatorConfig(test_config)

        trainer = Trainer(config)
        assert len(trainer.pipeline) == 1
        # char_tokenizer component should have been created
        assert trainer.pipeline[0] is not None
        # create tmp train set
        tmp_path = create_tmp_test_file("tmp.json")
        train_data = load_local_data(tmp_path)
        # rm tmp train set
        rm_tmp_file("tmp.json")

        interpreter = trainer.train(train_data)
        assert interpreter is not None