import pickle

import numpy as np
import pandas as pd
from nltk import word_tokenize
from nltk.corpus import stopwords

# Project-local helpers; these module paths are assumptions, adjust to the repo layout.
from quick_data_frame import QuickDataFrame
from progresser import Progresser
from tokenizer import tokenise


def create_w2v_vectors():
    # Load the Persian word2vec model (the commented path is the 300-d variant)
    # with open('./word2vec/IRBlog/w2v_per_300.pkl', 'rb') as infile:
    with open('./word2vec/Mixed/w2v_per.pkl', 'rb') as infile:
        w2v = pickle.load(infile)
    w2v_length = 100  # 300 for the commented-out IRBlog model

    stop_words = set(pd.read_csv('./Primary_data/PersianStopWordList.txt', header=None)[0])
    questions = pd.read_csv('./Primary_data/result_filtered.csv', delimiter=';')

    train = QuickDataFrame(['w' + str(i) for i in range(0, w2v_length)])
    prog = Progresser(questions.shape[0])

    # build the train data: one averaged word2vec vector per question
    for i, qrow in questions.iterrows():
        prog.count()
        sum_array = np.zeros(w2v_length)
        number_of_words = 0
        for word in tokenise(qrow['sentence']):
            if word not in stop_words and word in w2v:
                number_of_words += 1
                sum_array += w2v[word]
        # sanity check: the next row index should match the current question index
        if i != len(train):
            print('Unexpected row index at question', i)
        if number_of_words > 0:  # guard against division by zero for all-stopword sentences
            sum_array = sum_array / number_of_words
        train.append(list(sum_array))

    train.to_csv('./Primary_data/w2v-100_vector_Q.csv')
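
# A hedged usage sketch, not part of the original pipeline: loading the averaged
# sentence vectors exported above back into a numpy matrix for a downstream model.
# The function name `load_sentence_vectors` is hypothetical; the path is the one
# written by create_w2v_vectors().
def load_sentence_vectors(path='./Primary_data/w2v-100_vector_Q.csv'):
    df = pd.read_csv(path)
    return df.values  # shape: (n_questions, w2v_length)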

def build_w2v_vectors():
    # Load the English word2vec model
    with open('./word2vec/word2vec-En.pkl', 'rb') as infile:
        w2v = pickle.load(infile)
    w2v_length = 300

    stop_words = set(stopwords.words('english'))

    id_mappings = QuickDataFrame.read_csv('./EurLex_data/eurlex_ID_mappings.csv', sep='\t')

    # create DataFrame: one column per vector dimension, plus the document id
    cols_list = ['doc_id'] + ['w' + str(i) for i in range(0, w2v_length)]
    train = QuickDataFrame(columns=cols_list)

    prog = Progresser(len(id_mappings))
    for i in range(len(id_mappings)):
        prog.count()
        # read the lemmatised document text; skip documents that are missing
        try:
            with open('./EurLex_data/lem_txt/' + str(id_mappings['DocID'][i]) + '-lem.txt',
                      'r', encoding='utf8') as infile:
                doc_text = infile.read()
        except IOError:
            continue
        try:
            # average the word2vec vectors of all non-stopword, in-vocabulary tokens
            sum_array = np.zeros(w2v_length)
            number_of_words = 0
            for word in word_tokenize(doc_text):
                if word not in stop_words and word in w2v:
                    number_of_words += 1
                    sum_array += w2v[word]
            if number_of_words > 0:
                sum_array = sum_array / number_of_words
            train.append([id_mappings['DocID'][i]] + list(sum_array))
        except Exception as e:
            print(e)

    train.to_csv('./EurLex_data/w2v_vector_Q.csv')
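
# Both functions above inline the same averaging loop. A hedged refactor sketch of
# that shared step (the name `average_word_vectors` is hypothetical and is not
# called anywhere in this file): the mean of the word2vec vectors of all
# non-stopword, in-vocabulary tokens, or a zero vector if none match.
def average_word_vectors(tokens, w2v, w2v_length, stop_words):
    total = np.zeros(w2v_length)
    count = 0
    for word in tokens:
        if word not in stop_words and word in w2v:
            total += w2v[word]
            count += 1
    return total / count if count > 0 else total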

def build_all_vectors():
    id_mappings = QuickDataFrame.read_csv('./EurLex_data/eurlex_ID_mappings.csv', sep='\t')
    subject_data = QuickDataFrame.read_csv('./EurLex_data/eurlex_id2class/id2class_eurlex_subject_matter.qrels',
                                           header=False, columns=['sub', 'doc_id', 'col2'], sep=' ')
    words_vector = QuickDataFrame.read_csv('./EurLex_data/1000words.csv', header=False, columns=['term'])
    topics = QuickDataFrame.read_csv('./EurLex_data/tags.csv')
    # train = QuickDataFrame.read_csv('./EurLex_data/w2v_vector_Q.csv')
    # train.set_index(train['doc_id'], unique=True)

    # create DataFrame: one binary column per vocabulary term, plus the document id
    cols_list = ['doc_id'] + list(words_vector['term'])
    train = QuickDataFrame(columns=cols_list)

    # filling word columns with binary term-presence indicators
    prog = Progresser(len(id_mappings))
    for i in range(len(id_mappings)):
        prog.count()
        try:
            # read the lemmatised document text; skip documents that are missing
            try:
                with open('./EurLex_data/lem_txt/' + str(id_mappings['DocID'][i]) + '-lem.txt',
                          'r', encoding='utf8') as infile:
                    doc_text = infile.read()
            except IOError:
                continue
            # add a new zero-filled row, then complete the data in that row
            train.append(value=0)
            train['doc_id'][len(train) - 1] = id_mappings['DocID'][i]
            for word in word_tokenize(doc_text):
                if word in train.data:
                    train[word][len(train) - 1] = 1
        except Exception as e:
            print(e)

    # index rows by doc id
    train.set_index(train['doc_id'], unique=True)

    # rename word columns to wrd0..wrdN
    rename_dict = dict()
    for index, wrd in enumerate(list(words_vector['term'])):
        rename_dict[wrd] = 'wrd' + str(index)
    train.rename(columns=rename_dict)

    # add one zero-filled column per topic
    for col in list(topics['term']):
        train.add_column(name=col, value=0)

    # filling topic columns from the subject-matter qrels
    for i in range(len(subject_data)):
        try:
            sub = subject_data['sub'][i]
            doc_id = subject_data['doc_id'][i]
            train[sub, doc_id] = 1
        except Exception as e:
            print(e)

    # rename topic columns to tpc0..tpcN
    rename_dict = dict()
    for index, tpc in enumerate(list(topics['term'])):
        rename_dict[tpc] = 'tpc' + str(index)
    train.rename(columns=rename_dict)

    # write to file
    print('\nWriting to file...')
    # train.to_csv('./EurLex_data/eurlex_combined_vectors.csv')
    train.to_csv('./EurLex_data/eurlex_combined_vectors-w2v.csv')
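
# A hedged entry-point sketch (an assumption, not in the original file): running
# the EurLex steps in order builds the averaged-w2v features first, then the
# combined bag-of-words / topic-label matrix.
if __name__ == '__main__':
    build_w2v_vectors()   # writes ./EurLex_data/w2v_vector_Q.csv
    build_all_vectors()   # writes ./EurLex_data/eurlex_combined_vectors-w2v.csv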