Example #1
    def test_kmeans(self):
        from chi_annotator.algo_factory.components import ComponentBuilder
        from chi_annotator.algo_factory.common import Message
        from chi_annotator.task_center.config import AnnotatorConfig
        from chi_annotator.algo_factory.common import TrainingData
        cfg = AnnotatorConfig()
        pos_msg1 = Message(u"你好,我是一个demo!!!!")
        pos_msg2 = Message(u"你好,你好,你好")
        neg_msg1 = Message(u"如果发现有文件漏提或注释有误")
        neg_msg2 = Message(u"增加一个需要上传的文件")

        train_data = TrainingData([neg_msg1, neg_msg2, pos_msg1, pos_msg2])
        cb = ComponentBuilder()
        char_tokenize = cb.create_component("char_tokenizer", cfg)
        sent_embedding = cb.create_component("sentence_embedding_extractor",
                                             cfg)
        kmeans_cluster = cb.create_component("cluster_sklearn", cfg)
        char_tokenize.train(train_data, cfg)
        sent_embedding.train(train_data, cfg)
        kmeans_cluster.train(train_data, cfg)
        # test
        test_msg = Message(u"增加一个需要上传的文件")
        char_tokenize.process(test_msg, **{})
        sent_embedding.process(test_msg, **{})
        kmeans_cluster.process(test_msg, **{})
        assert test_msg.get("cluster_center").get("center") is not None
Example #2
    def test_kmeans(self):
        from chi_annotator.algo_factory.components import ComponentBuilder
        from chi_annotator.algo_factory.common import Message
        from chi_annotator.task_center.config import AnnotatorConfig
        from chi_annotator.algo_factory.common import TrainingData
        task_config = dict(config.CLASSIFY_TASK_CONFIG)
        dir_name = os.path.dirname(os.path.abspath(__file__))
        task_config["embedding_path"] = dir_name + "/../data/test_embedding/vec.txt"
        task_config["embedding_type"] = "w2v"
        cfg = AnnotatorConfig(task_config)
        pos_msg1 = Message(u"你好,我是一个demo!!!!")
        pos_msg2 = Message(u"你好,你好,你好")
        neg_msg1 = Message(u"如果发现有文件漏提或注释有误")
        neg_msg2 = Message(u"增加一个需要上传的文件")

        train_data = TrainingData([neg_msg1, neg_msg2, pos_msg1, pos_msg2])
        cb = ComponentBuilder()
        char_tokenize = cb.create_component("char_tokenizer", cfg)
        sent_embedding = cb.create_component("sentence_embedding_extractor",
                                             cfg)
        kmeans_cluster = cb.create_component("cluster_sklearn", cfg)
        char_tokenize.train(train_data, cfg)
        sent_embedding.train(train_data, cfg)
        kmeans_cluster.train(train_data, cfg)
        # test
        test_msg = Message(u"增加一个需要上传的文件")
        char_tokenize.process(test_msg, **{})
        sent_embedding.process(test_msg, **{})
        kmeans_cluster.process(test_msg, **{})
        assert test_msg.get("cluster_center").get("center") is not None
Example #3
    def test_randomforest_classify(self):
        task_config = dict(config.CLASSIFY_TASK_CONFIG)
        dir_name = os.path.dirname(os.path.abspath(__file__))
        task_config["embedding_path"] = dir_name + "/../data/test_embedding/vec.txt"
        task_config["embedding_type"] = "w2v"
        cfg = AnnotatorConfig(task_config)
        train_data = TrainingData(
            [self.neg_msg1, self.neg_msg2, self.pos_msg1, self.pos_msg2])
        cb = ComponentBuilder()
        char_tokenize = cb.create_component("char_tokenizer", cfg)
        sent_embedding = cb.create_component("sentence_embedding_extractor",
                                             cfg)
        RandomForest_Classifier = cb.create_component(
            "RandomForest_Classifier", cfg)
        char_tokenize.train(train_data, cfg)
        sent_embedding.train(train_data, cfg)
        RandomForest_Classifier.train(train_data, cfg)
        # test
        test_msg = Message(u"增加一个需要上传的文件")
        char_tokenize.process(test_msg, **{})
        sent_embedding.process(test_msg, **{})
        RandomForest_Classifier.process(test_msg, **{})

        assert test_msg.get("classifylabel").get("name") == "bad"
Example #5
    def ignor_test_senten_embedding_extractor(self):
        from chi_annotator.algo_factory.components import ComponentBuilder
        from chi_annotator.algo_factory.common import Message
        from chi_annotator.task_center.config import AnnotatorConfig
        cfg = AnnotatorConfig()
        msg = Message("你好,我是一个demo!!!!")
        cb = ComponentBuilder()
        char_tokenize = cb.create_component("char_tokenizer", cfg)
        sent_embedding = cb.create_component("sentence_embedding_extractor", cfg)
        char_tokenize.process(msg)
        sent_embedding.process(msg, **{})
        assert abs(msg.get("sentence_embedding").sum() + 7.30032945834) < 1e-6
Example #6
    def ignor_test_embedding(self):
        from chi_annotator.algo_factory.components import ComponentBuilder
        from chi_annotator.algo_factory.common import Message
        from chi_annotator.task_center.config import AnnotatorConfig
        from chi_annotator.algo_factory.common import TrainingData
        from gensim.models.word2vec import LineSentence
        text_dir = create_tmp_test_textfile("spam_email_text_1000")

        # Put the data into TrainingData
        with open(text_dir, 'r') as f:
            res = []
            for line in f.readlines():
                # re.sub removes all whitespace, including the trailing newline
                line = Message(re.sub(r'\s', '', line))
                res.append(line)
        res = TrainingData(res)

        cfg = AnnotatorConfig(
            filename="tests/data/test_config/test_config_embedding.json")
        cb = ComponentBuilder()

        # char_tokenizer: the embedding training does not actually need it for now
        char_tokenize = cb.create_component("char_tokenizer", cfg)
        char_tokenize.train(res, cfg)

        # Load the embedding component and train the model; the input is LineSentence(data_path)
        embedding = cb.create_component("embedding", cfg)
        embedding.train(LineSentence(text_dir), cfg)
        embedding.persist(cfg.wv_model_path)

        # Load sent_embedding and obtain sentence_vec from the model trained by the embedding step
        sent_embedding = cb.create_component("embedding_extractor", cfg)
        msg = Message("你好,我是一个demo!!!!")
        char_tokenize.process(msg)
        sent_embedding.sentence_process(msg, **{})
        assert msg.get("sentence_embedding").sum() != 0

        # Load the base model, add a new corpus, and continue training incrementally on top of it
        embedding = embedding.load(model_metadata=cfg)
        embedding.train(LineSentence(text_dir), cfg)
        embedding.persist(cfg.wv_model_path)

        # Run the EmbeddingExtractor check against the new model produced by incremental training
        sent_embedding = cb.create_component("embedding_extractor", cfg)
        msg = Message("你好,我是一个demo!!!!")
        char_tokenize.process(msg)
        sent_embedding.sentence_process(msg, **{})
        assert msg.get("sentence_embedding").sum() != 0

        rm_tmp_file("word2vec.model")
        rm_tmp_file("word2vec.model.vector")
        rm_tmp_file("spam_email_text_1000")
Example #9
    def test_sgd_classify(self):
        cfg = AnnotatorConfig()
        train_data = TrainingData(
            [self.neg_msg1, self.neg_msg2, self.pos_msg1, self.pos_msg2])
        cb = ComponentBuilder()
        char_tokenize = cb.create_component("char_tokenizer", cfg)
        sent_embedding = cb.create_component("sentence_embedding_extractor",
                                             cfg)
        SGD_Classifier = cb.create_component("SGD_Classifier", cfg)
        char_tokenize.train(train_data, cfg)
        sent_embedding.train(train_data, cfg)
        SGD_Classifier.train(train_data, cfg)
        # test
        test_msg = Message(u"增加一个需要上传的文件")
        char_tokenize.process(test_msg, **{})
        sent_embedding.process(test_msg, **{})
        SGD_Classifier.process(test_msg, **{})

        assert test_msg.get("classifylabel").get("name") == "bad"
Example #10
def test_tokenizer_main():
    from chi_annotator.algo_factory.components import ComponentBuilder
    from chi_annotator.algo_factory.common import Message
    from chi_annotator.config import AnnotatorConfig
    msg = Message("你好,我是一个demo!!!!")
    cb = ComponentBuilder()
    config = AnnotatorConfig()
    ct = cb.create_component("char_tokenizer", config)
    if ct is not None:
        ct.process(msg, **{})
        print(msg.get("tokens"))
Example #11
    def test_char_tokenizer(self):
        from chi_annotator.algo_factory.components import ComponentBuilder
        from chi_annotator.algo_factory.common import Message
        from chi_annotator.task_center.config import AnnotatorConfig
        msg = Message(u"你好,我是一个demo!!!!")
        cb = ComponentBuilder()
        cfg = AnnotatorConfig(config.CLASSIFY_TASK_CONFIG)
        ct = cb.create_component("char_tokenizer", cfg)
        assert ct is not None
        ct.process(msg, **{})
        assert len(msg.get("tokens")) > 0
Example #12
    def test_randomforest_classify(self):
        task_config = dict(config.CLASSIFY_TASK_CONFIG)
        dir_name = os.path.dirname(os.path.abspath(__file__))
        task_config["embedding_path"] = dir_name + "/../data/test_embedding/vec.txt"
        task_config["embedding_type"] = "w2v"
        cfg = AnnotatorConfig(task_config)
        train_data = TrainingData([self.neg_msg1, self.neg_msg2, self.pos_msg1, self.pos_msg2])
        cb = ComponentBuilder()
        char_tokenize = cb.create_component("char_tokenizer", cfg)
        sent_embedding = cb.create_component("sentence_embedding_extractor", cfg)
        RandomForest_Classifier = cb.create_component("RandomForest_Classifier", cfg)
        char_tokenize.train(train_data, cfg)
        sent_embedding.train(train_data, cfg)
        RandomForest_Classifier.train(train_data, cfg)
        # test
        test_msg = Message(u"增加一个需要上传的文件")
        char_tokenize.process(test_msg, **{})
        sent_embedding.process(test_msg, **{})
        RandomForest_Classifier.process(test_msg, **{})

        assert test_msg.get("classifylabel").get("name") == "bad"
Example #13
    def test_words_jieba_tokenizer(self):
        """
        test word tokenizer using jieba
        :return:
        """
        from chi_annotator.algo_factory.components import ComponentBuilder
        from chi_annotator.algo_factory.common import Message
        from chi_annotator.task_center.config import AnnotatorConfig
        msg = Message(u"你好,我是一个demo!!!!")
        cb = ComponentBuilder()
        config = AnnotatorConfig()
        ct = cb.create_component("tokenizer_jieba", config)
        assert ct is not None
        ct.process(msg, **{})
        assert len(msg.get("tokens")) > 0
Example #14
    def ignor_test_words_jieba_tokenizer(self):
        """
        # TODO: jieba will be added later
        test word tokenizer using jieba
        :return:
        """
        from chi_annotator.algo_factory.components import ComponentBuilder
        from chi_annotator.algo_factory.common import Message
        from chi_annotator.task_center.config import AnnotatorConfig
        msg = Message(u"你好,我是一个demo!!!!")
        cb = ComponentBuilder()
        cfg = AnnotatorConfig(config.CLASSIFY_TASK_CONFIG)
        ct = cb.create_component("tokenizer_jieba", cfg)
        assert ct is not None
        ct.process(msg, **{})
        assert len(msg.get("tokens")) > 0