# NOTE(review): whitespace-mangled fragment (statements fused onto one line) of a
# per-user like/dislike topic-profile build. What the code visibly does:
#   1. builds word graphs for 'like_texts.txt' / 'dislike_texts.txt' into
#      ../OK_recommend/user<user_id>/words_degrees_{like,dislike}.txt and trains
#      a small model for each polarity;
#   2. initializes per-polarity accumulators (W_like/W_dislike of length K,
#      score and topic-of-words dicts) and a bad_user flag;
#   3. for each polarity, extracts topics; if fewer than K topics could be
#      extracted (newK != K) the user is flagged bad and the loop breaks;
#   4. otherwise sums topic_score[0..newK-1] into topic_scores_summary.
# The fragment is TRUNCATED: the final `for i in range(newK):` has no body in
# the visible source, so it is left verbatim rather than reconstructed.
# TODO(review): `print("Too less words...")` — grammar should be "Too few
# words"; left unchanged here because it is a runtime string.
build_words_graph.build_graph('like_texts.txt', '../OK_recommend/user' + user_id + '/words_degrees_like.txt') training.train_small('like_texts.txt', 'like') build_words_graph.build_graph('dislike_texts.txt', '../OK_recommend/user' + user_id + '/words_degrees_dislike.txt') training.train_small('dislike_texts.txt', 'dislike') types = ['like', 'dislike'] W_like = [0] * K W_dislike = [0] * K score_like = dict() score_dislike = dict() topic_of_words_like = dict() topic_of_words_dislike = dict() bad_user = False for typ in types: topic_score, word_in_topic, word_score, word_ids, id_words, newK = extract_text_topics.extract_text_topics(typ + '_texts.txt', typ, K, user_id, model) if newK != K: print("Too less words for that user :(") bad_user = True break W = None if typ == 'like': W = W_like else: W = W_dislike topic_scores_summary = 0 for i in range(newK): topic_scores_summary += topic_score[i] for i in range(newK):
if len(likes) < 20: continue g = open('texts.txt', 'w', encoding='utf-8') cnt_existing_posts = 0 for like in likes: Id = like.split(", ") group_id = Id[0] post_id = Id[1] text = read_post(group_id, post_id) if text == "-1": continue cnt_existing_posts += 1 g.write(text) g.close() if cnt_existing_posts < 15: continue if not os.path.isdir('../OK_results/user' + user_id): mkdir('../OK_results/user' + user_id) build_words_graph.build_graph( 'texts.txt', '../OK_results/user' + user_id + '/words_degrees.txt') training.train_small('texts.txt') extract_text_topics.extract_text_topics('texts.txt', K, user_id, model) cnt += 1 print("For %d users interests profile was built", cnt)
likes = likes.split("|") if len(likes) < 20: continue g = open("texts.txt", "w", encoding="utf-8") cnt_existing_posts = 0 for like in likes: Id = like.split(", ") group_id = Id[0] post_id = Id[1] text = read_post(group_id, post_id) if text == "-1": continue cnt_existing_posts += 1 g.write(text) g.close() if cnt_existing_posts < 15: continue if not os.path.isdir("../OK_results/user" + user_id): mkdir("../OK_results/user" + user_id) build_words_graph.build_graph("texts.txt", "../OK_results/user" + user_id + "/words_degrees.txt") training.train_small("texts.txt") extract_text_topics.extract_text_topics("texts.txt", K, user_id, model) cnt += 1 print("For %d users interests profile was built", cnt)
# NOTE(review): whitespace-mangled fragment that overlaps/repeats the tail of
# the like/dislike profile build above. What the code visibly does:
#   1. builds the dislike word graph into
#      ../OK_recommend/user<user_id>/words_degrees_dislike.txt and trains the
#      small 'dislike' model;
#   2. initializes per-polarity accumulators (W_like/W_dislike of length K,
#      score and topic-of-words dicts) and a bad_user flag;
#   3. for each polarity in ['like', 'dislike'], extracts topics; if fewer
#      than K topics were extracted (newK != K) the user is flagged bad and
#      the loop breaks;
#   4. otherwise sums topic_score[0..newK-1] into topic_scores_summary.
# The fragment is TRUNCATED: the final `for i in range(newK):` has no body in
# the visible source, so it is left verbatim rather than reconstructed.
build_words_graph.build_graph( 'dislike_texts.txt', '../OK_recommend/user' + user_id + '/words_degrees_dislike.txt') training.train_small('dislike_texts.txt', 'dislike') types = ['like', 'dislike'] W_like = [0] * K W_dislike = [0] * K score_like = dict() score_dislike = dict() topic_of_words_like = dict() topic_of_words_dislike = dict() bad_user = False for typ in types: topic_score, word_in_topic, word_score, word_ids, id_words, newK = extract_text_topics.extract_text_topics( typ + '_texts.txt', typ, K, user_id, model) if newK != K: print("Too less words for that user :(") bad_user = True break W = None if typ == 'like': W = W_like else: W = W_dislike topic_scores_summary = 0 for i in range(newK): topic_scores_summary += topic_score[i] for i in range(newK):