def export_data(self):
    """Persist network weights, layer sizes, and both index dictionaries as JSON files."""
    # Flatten each layer's per-neuron weight lists into one flat list per layer.
    weights = [[], []]
    for layer_idx in (0, 1):
        for neuron in self.neuron_layers[layer_idx]:
            weights[layer_idx].extend(neuron.weights)

    pathlib.Path('weights.json').write_text(
        json_file.list_to_json(weights, json_file.data_to_json),
        encoding='utf-8')

    pathlib.Path('network_info.json').write_text(
        json_file.list_to_json(
            [self.input_size, self.hidden_size, self.genre_num],
            json_file.data_to_json),
        encoding='utf-8')

    pathlib.Path('wordtoindex.json').write_text(
        json_file.dict_to_json(self.wordtoindex_dic, json_file.data_to_json),
        encoding='utf-8')

    pathlib.Path('genretoindex.json').write_text(
        json_file.dict_to_json(self.genretoindex_dic, json_file.data_to_json),
        encoding='utf-8')
def set_to_vector(trainig_set, word_num):
    """Build word/genre index mappings from the training set and vectorize it.

    Populates the module-level mappings (word_to_index, index_to_word,
    genre_to_index, index_to_genre), saves the two *toindex dictionaries to
    JSON files, and returns the result of adjust_train_logistic(...).
    """
    global index_to_genre, index_to_word, word_to_index, genre_to_index
    genre_to_index = {}
    word_to_index = {}

    # Collect the set of genres appearing anywhere in the training data.
    genre_list = {genre for item in trainig_set for genre in item["genre"]}
    genre_num = len(genre_list)
    print(genre_list)

    # Keep only the (at most) word_num most frequent words.
    num_counts = count_word_num(trainig_set)
    num_counts.sort(key=lambda x: x[1], reverse=True)
    num_counts = num_counts[:word_num]
    print("word_count done")

    # FIX: the original indexed num_counts[i] for i in range(word_num), which
    # raised IndexError when fewer than word_num distinct words existed.
    for i, (word, _count) in enumerate(num_counts):
        word_to_index[word] = i
    # Indices are assigned 0..k-1 in insertion order, so the inverse mapping
    # is just the keys in order (O(n) instead of the original O(n^2) scan).
    index_to_word = list(word_to_index)

    for i, genre in enumerate(genre_list):
        genre_to_index[genre] = i
    index_to_genre = list(genre_to_index)
    print("trainset adjustment finished")

    pathlib.Path('wordtoindex.json').write_text(
        json.dumps(json_file.dict_to_json(word_to_index,
                                          json_file.data_to_json)),
        encoding='utf-8')
    pathlib.Path('genretoindex.json').write_text(
        json.dumps(json_file.dict_to_json(genre_to_index,
                                          json_file.data_to_json)),
        encoding='utf-8')
    print("2index set file saved")

    return adjust_train_logistic(word_num, word_to_index, genre_num,
                                 genre_to_index, trainig_set)
def export_data(self):
    """Write the word->book-index mapping to JSON, skipping an unchanged file."""
    target = pathlib.Path('wordtoindex_ori_title.json')
    payload = json.dumps(
        json_file.dict_to_json(self.wordtobookindex, json_file.data_to_json))
    # Rewrite only when the file is missing or its contents differ.
    if not target.exists() or target.read_text(encoding='utf-8') != payload:
        target.write_text(payload, encoding='utf-8')
def export_data(self):
    """Dump the book list and the date->book mapping to UTF-16 JSON files."""
    targets = (
        ('book_data.json',
         json_file.list_to_json(self.book_list, json_file.data_to_json)),
        ('date_to_book.json',
         json_file.dict_to_json(self.date_to_book, json_file.data_to_json)),
    )
    for filename, payload in targets:
        pathlib.Path(filename).write_text(payload, encoding='utf-16')
def export_data(self):
    """Save word probabilities and the genre table as UTF-16 JSON files."""
    # word_probs yields (word, prob) pairs; serialize each as a 2-element list.
    pairs = [[word, prob] for word, prob in self.word_probs]
    word_path = pathlib.Path('word_prob.json')
    word_path.write_text(
        json_file.list_to_json(pairs, json_file.data_to_json),
        encoding='utf-16')
    genre_path = pathlib.Path('genre_num.json')
    genre_path.write_text(
        json_file.dict_to_json(self.genre_list, json_file.data_to_json),
        encoding='utf-16')
def export_data(self):
    """Persist the book list and date->book mapping, rewriting files only on change."""
    book_p = pathlib.Path('book_data.json')
    book_json = json.dumps(
        json_file.list_to_json(self.book_list, json_file.data_to_json))
    try:
        self._write_if_changed(book_p, book_json)
    except OSError as e:
        # FIX: original used a bare `except:` that swallowed every exception
        # (including KeyboardInterrupt) and printed no detail. Keep the
        # best-effort behavior, but only for I/O errors, and say what failed.
        print(f"exception occured while writing {book_p}: {e}")

    dic_p = pathlib.Path('date_to_book.json')
    dic_json = json.dumps(
        json_file.dict_to_json(self.date_to_book, json_file.data_to_json))
    # NOTE: as in the original, errors writing this second file propagate.
    self._write_if_changed(dic_p, dic_json)

@staticmethod
def _write_if_changed(path, payload):
    """Write payload to path as UTF-16 unless the file already holds it."""
    if path.exists() and path.read_text(encoding='utf-16') == payload:
        return
    path.write_text(payload, encoding='utf-16')
def export_data(self):
    """Write the searcher's word->book-index mapping to a UTF-8 JSON file."""
    out_path = pathlib.Path('wordtoindex_searcher.json')
    serialized = json_file.dict_to_json(self.wordtobookindex,
                                        json_file.data_to_json)
    out_path.write_text(serialized, encoding='utf-8')