def test_stopwords(self):
    """This test case checks that stopwords are removed by juman_wrapper.filter."""
    stopwords = [u'AV', u'女優']
    print(u'Stopwords Filtering Test. Stopwords are {}'.format(u','.join(stopwords)))
    test_sentence = u"紗倉 まな(さくら まな、1993年3月23日 - )は、日本のAV女優。"
    juman_wrapper = JumanWrapper()
    token_objects = juman_wrapper.tokenize(sentence=test_sentence,
                                           return_list=False,
                                           is_feature=True)
    filtered_result = juman_wrapper.filter(parsed_sentence=token_objects,
                                           stopwords=stopwords)
    check_flag = True
    for stem_posTuple in filtered_result.convert_list_object():
        assert isinstance(stem_posTuple, tuple)
        word_stem = stem_posTuple[0]
        word_posTuple = stem_posTuple[1]
        assert isinstance(word_stem, string_types)
        assert isinstance(word_posTuple, tuple)
        print(u'word_stem:{} word_pos:{}'.format(word_stem, ' '.join(word_posTuple)))
        if word_stem in stopwords:
            # A stopword survived the filter; fail after the loop.
            check_flag = False
    assert check_flag
def test_tokenize(self): """This test case checks juman_wrapper.tokenize """ print(u'Tokenize Test') test_sentence = u"紗倉 まな(さくら まな、1993年3月23日 - )は、日本のAV女優。" juman_wrapper = JumanWrapper() token_objects = juman_wrapper.tokenize(sentence=test_sentence, return_list=False, is_feature=True) assert isinstance(token_objects, TokenizedSenetence) for t_obj in token_objects.tokenized_objects: assert isinstance(t_obj, TokenizedResult) print(u"word_surafce:{}, word_stem:{}, pos_tuple:{}, misc_info:{}". format(t_obj.word_surface, t_obj.word_stem, ' '.join(t_obj.tuple_pos), t_obj.misc_info)) assert isinstance(t_obj.word_surface, string_types) assert isinstance(t_obj.word_stem, string_types) assert isinstance(t_obj.tuple_pos, tuple) assert isinstance(t_obj.misc_info, dict) token_objects_list = token_objects.convert_list_object() assert isinstance(token_objects_list, list) print('-' * 30) for stem_posTuple in token_objects_list: assert isinstance(stem_posTuple, tuple) word_stem = stem_posTuple[0] word_posTuple = stem_posTuple[1] assert isinstance(word_stem, string_types) assert isinstance(word_posTuple, tuple) print(u'word_stem:{} word_pos:{}'.format(word_stem, ' '.join(word_posTuple)))
def test_juman_server_mode(self):
    """Test with JUMAN running in server mode.

    Attention: this test fails unless a JUMAN server is already running on localhost:32000.
    """
    test_sentence = u"紗倉 まな(さくら まな、1993年3月23日 - )は、日本のAV女優。"
    juman_wrapper = JumanWrapper(server='localhost', port=32000)
    token_objects = juman_wrapper.tokenize(sentence=test_sentence,
                                           return_list=False,
                                           is_feature=True)
    self.assertTrue(isinstance(token_objects, TokenizedSenetence))
    list_tokens = juman_wrapper.tokenize(sentence=test_sentence,
                                         return_list=True,
                                         is_feature=True)
    self.assertTrue(isinstance(list_tokens, list))
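# NOTE (sketch, not part of the original tests): one way to avoid a hard failure when no
# JUMAN server is running is to probe the port first and skip the test otherwise. This
# assumes the surrounding class is a unittest.TestCase subclass and that the server, if
# any, listens on localhost:32000 as in test_juman_server_mode above.
def test_juman_server_mode_if_available(self):
    import socket
    try:
        # Probe the JUMAN server port; a short timeout keeps the suite fast when it is down.
        sock = socket.create_connection(('localhost', 32000), timeout=1.0)
        sock.close()
    except socket.error:
        self.skipTest('No JUMAN server reachable on localhost:32000')
    juman_wrapper = JumanWrapper(server='localhost', port=32000)
    list_tokens = juman_wrapper.tokenize(sentence=u"日本のAV女優。",
                                         return_list=True,
                                         is_feature=True)
    self.assertTrue(isinstance(list_tokens, list))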
def test_filter_pos(self):
    """This test case checks POS filtering with juman_wrapper.filter."""
    print(u'Filtering Test. POS condition is only 名詞')
    test_sentence = u"紗倉 まな(さくら まな、1993年3月23日 - )は、日本のAV女優。"
    juman_wrapper = JumanWrapper()
    token_objects = juman_wrapper.tokenize(sentence=test_sentence,
                                           return_list=False,
                                           is_feature=True)
    pos_condition = [(u'名詞', )]
    filtered_result = juman_wrapper.filter(parsed_sentence=token_objects,
                                           pos_condition=pos_condition)
    assert isinstance(filtered_result, FilteredObject)
    for t_obj in filtered_result.tokenized_objects:
        assert isinstance(t_obj, TokenizedResult)
        print(u"word_surface:{}, word_stem:{}, pos_tuple:{}, misc_info:{}".format(
            t_obj.word_surface, t_obj.word_stem, ' '.join(t_obj.tuple_pos), t_obj.misc_info))
        assert isinstance(t_obj.word_surface, string_types)
        assert isinstance(t_obj.word_stem, string_types)
        assert isinstance(t_obj.tuple_pos, tuple)
        assert isinstance(t_obj.misc_info, dict)
        # Every surviving token must be a noun (名詞).
        assert t_obj.tuple_pos[0] == u'名詞'

    print('-' * 30)
    for stem_posTuple in filtered_result.convert_list_object():
        assert isinstance(stem_posTuple, tuple)
        word_stem = stem_posTuple[0]
        word_posTuple = stem_posTuple[1]
        assert isinstance(word_stem, string_types)
        assert isinstance(word_posTuple, tuple)
        print(u'word_stem:{} word_pos:{}'.format(word_stem, ' '.join(word_posTuple)))
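# NOTE (sketch, not part of the original tests): assuming the methods above live in a
# unittest.TestCase subclass and that JumanWrapper, TokenizedSenetence, TokenizedResult,
# FilteredObject and six's string_types are imported at module level, the file can be run
# directly with the standard unittest runner.
if __name__ == '__main__':
    import unittest
    unittest.main()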