def multiDocs2DocFreqInfo(labeled_documents: AvailableInputTypes,
                          n_jobs: int = 1,
                          path_working_dir: str = None,
                          is_use_cache: bool = True) -> SetDocumentInformation:
    """Generate the information needed to construct a document-frequency matrix.

    :param labeled_documents: mapping from label to its documents (dict or SqliteDict).
    :param n_jobs: number of parallel jobs handed to joblib.Parallel.
    :param path_working_dir: working directory for the cache object. When None
        (the default) a fresh temporary directory is created per call.
        NOTE: the previous ``tempfile.mkdtemp()`` default was evaluated once at
        import time, so all calls silently shared one directory — fixed here.
    :param is_use_cache: if True, results are stored in a disk-backed cache
        object created via ``init_cache_object``; otherwise a plain dict is used.
    :return: ``SetDocumentInformation`` wrapping 'matrix_object' (CSR matrix),
        'feature2id' and 'label2id' mappings.
    """
    assert isinstance(labeled_documents, (SqliteDict, dict))
    # All documents must share a single feature type; mixed types are rejected.
    type_flag = {judge_feature_type(docs) for docs in labeled_documents.values()}
    assert len(type_flag) == 1

    # Count per-label frequencies in parallel; sort by label first so that
    # label2id assignment below is deterministic across runs.
    counted_frequency = joblib.Parallel(n_jobs=n_jobs)(
        joblib.delayed(generate_document_dict)(key, docs)
        for key, docs in sorted(labeled_documents.items(),
                                key=lambda key_value_tuple: key_value_tuple[0]))

    # Lazy [{feature: freq}, ...] stream — the input shape DictVectorizer expects.
    seq_feature_documents = (dict(label_freqCounter_tuple[1])
                             for label_freqCounter_tuple in counted_frequency)

    if is_use_cache:
        # Create the temp dir only when the cache actually needs a directory.
        if path_working_dir is None:
            path_working_dir = tempfile.mkdtemp()
        dict_matrix_index = init_cache_object('matrix_element_object', path_working_dir)
    else:
        dict_matrix_index = {}

    # Use sklearn feature-extraction to vectorize the frequency dicts.
    vec = DictVectorizer()
    dict_matrix_index['matrix_object'] = vec.fit_transform(seq_feature_documents).tocsr()
    # NOTE(review): get_feature_names() is removed in sklearn >= 1.2
    # (get_feature_names_out is the replacement) — kept as-is to match the
    # sklearn version this project pins; verify before upgrading.
    dict_matrix_index['feature2id'] = {
        feat: feat_id for feat_id, feat in enumerate(vec.get_feature_names())}
    dict_matrix_index['label2id'] = {
        label_freqCounter_tuple[0]: label_id
        for label_id, label_freqCounter_tuple in enumerate(counted_frequency)}

    return SetDocumentInformation(dict_matrix_index)
def make_multi_docs2term_freq_info(labeled_documents: AvailableInputTypes,
                                   is_use_cache: bool = True,
                                   path_work_dir: str = None):
    """* What u can do
    - This function generates information to construct a term-frequency matrix.

    :param labeled_documents: mapping from label to its documents (dict or SqliteDict).
    :param is_use_cache: if True, results are stored in a disk-backed cache
        object created via ``init_cache_object``; otherwise a plain dict is used.
    :param path_work_dir: working directory for the cache object. When None
        (the default) a fresh temporary directory is created per call.
        NOTE: the previous ``tempfile.mkdtemp()`` default ran once at import
        time, so all calls silently shared one directory — fixed here.
    :return: ``SetDocumentInformation`` wrapping 'matrix_object' (CSR matrix),
        'feature2id' and 'label2id' mappings.
    """
    assert isinstance(labeled_documents, (SqliteDict, dict))

    # Per-label term frequencies: flatten each label's documents into one
    # token stream, then count occurrences.
    counted_frequency = [
        (label, Counter(itertools.chain.from_iterable(documents)))
        for label, documents in labeled_documents.items()
    ]
    # [{feature: freq}, ...] — the input shape DictVectorizer expects.
    feature_documents = [
        dict(label_freqCounter_tuple[1])
        for label_freqCounter_tuple in counted_frequency
    ]

    if is_use_cache:
        # Create the temp dir only when the cache actually needs a directory.
        if path_work_dir is None:
            path_work_dir = tempfile.mkdtemp()
        dict_matrix_index = init_cache_object('matrix_element_objects',
                                              path_work_dir=path_work_dir)
    else:
        dict_matrix_index = {}

    # Use sklearn feature-extraction to vectorize the frequency dicts.
    vec = DictVectorizer()
    dict_matrix_index['matrix_object'] = vec.fit_transform(feature_documents).tocsr()
    # NOTE(review): get_feature_names() is removed in sklearn >= 1.2
    # (get_feature_names_out is the replacement) — kept as-is to match the
    # sklearn version this project pins; verify before upgrading.
    dict_matrix_index['feature2id'] = {
        feat: feat_id for feat_id, feat in enumerate(vec.get_feature_names())
    }
    dict_matrix_index['label2id'] = {
        label_freqCounter_tuple[0]: label_id
        for label_id, label_freqCounter_tuple in enumerate(counted_frequency)
    }
    return SetDocumentInformation(dict_matrix_index)