def multiDocs2DocFreqInfo(labeled_documents: AvailableInputTypes,
                          n_jobs: int = 1,
                          path_working_dir: str = None,
                          is_use_cache: bool = True) -> SetDocumentInformation:
    """Generates the information needed to construct a document-frequency matrix.
    """
    assert isinstance(labeled_documents, (SqliteDict, dict))
    if path_working_dir is None:
        # Create the temporary directory lazily; a mkdtemp() default argument would be
        # evaluated once at import time and shared across all calls.
        path_working_dir = tempfile.mkdtemp()
    type_flag = {judge_feature_type(docs) for docs in labeled_documents.values()}
    assert len(type_flag) == 1, 'All documents must use the same feature type.'

    # Count feature frequencies per label in parallel; each worker returns a (label, Counter) tuple.
    counted_frequency = joblib.Parallel(n_jobs=n_jobs)(
        joblib.delayed(generate_document_dict)(key, docs)
        for key, docs in sorted(labeled_documents.items(), key=lambda key_value: key_value[0]))

    ### Construct a sequence of {feature: frequency} dicts as input for DictVectorizer() ###
    seq_feature_documents = (dict(freq_counter) for label, freq_counter in counted_frequency)

    ### Container for the matrix and index dictionaries: disk-backed cache or a plain dict ###
    if is_use_cache:
        dict_matrix_index = init_cache_object('matrix_element_object', path_working_dir)
    else:
        dict_matrix_index = {}

    # Use scikit-learn's DictVectorizer to build the sparse label-by-feature frequency matrix.
    vec = DictVectorizer()
    dict_matrix_index['matrix_object'] = vec.fit_transform(seq_feature_documents).tocsr()
    # vocabulary_ already holds the feature-name -> column-index mapping
    # (equivalent to enumerating get_feature_names(), which newer scikit-learn versions remove).
    dict_matrix_index['feature2id'] = dict(vec.vocabulary_)
    dict_matrix_index['label2id'] = {label: label_id for label_id, (label, _freq_counter) in enumerate(counted_frequency)}

    return SetDocumentInformation(dict_matrix_index)
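
### Usage sketch for the cache-backed variant above (illustrative values only) ###
# The working-directory path is an assumption: any writable directory works.
# doc_freq_info = multiDocs2DocFreqInfo(labeled_documents,
#                                       n_jobs=2,
#                                       path_working_dir='/tmp/doc_freq_cache',
#                                       is_use_cache=True)
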
def multiDocs2DocFreqInfo(labeled_documents: AvailableInputTypes,
                          n_jobs: int = 1) -> SetDocumentInformation:
    """Generates the information needed to construct a document-frequency matrix.
    """
    assert isinstance(labeled_documents, (SqliteDict, dict))
    type_flag = {judge_feature_type(docs) for docs in labeled_documents.values()}
    assert len(type_flag) == 1, 'All documents must use the same feature type.'

    counted_frequency = joblib.Parallel(n_jobs=n_jobs)(
        joblib.delayed(generate_document_dict)(key, docs)
        for key, docs in sorted(labeled_documents.items(), key=lambda key_value: key_value[0]))
    feature_documents = [dict(freq_counter) for label, freq_counter in counted_frequency]

    # Use scikit-learn's DictVectorizer to build the sparse label-by-feature frequency matrix.
    vec = DictVectorizer()
    matrix_object = vec.fit_transform(feature_documents).tocsr()
    # vocabulary_ is the feature-name -> column-index mapping (equivalent to enumerating
    # get_feature_names(), which newer scikit-learn versions remove).
    feature2id = dict(vec.vocabulary_)
    label2id = {label: label_id for label_id, (label, _freq_counter) in enumerate(counted_frequency)}

    return SetDocumentInformation(matrix_object, label2id, feature2id)
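
### Minimal usage sketch with hypothetical toy data ###
# Assumption: each label maps to a list of documents and each document is a list of
# string tokens, so that judge_feature_type() sees a single consistent feature type.
if __name__ == '__main__':
    example_labeled_documents = {
        'label_a': [['i', 'aa', 'aa', 'aa'], ['bb', 'aa']],
        'label_b': [['bb', 'bb', 'cc']]
    }
    # Rows of the resulting CSR matrix correspond to labels, columns to features.
    doc_freq_info = multiDocs2DocFreqInfo(example_labeled_documents, n_jobs=1)
    print(doc_freq_info)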