def learn_with_segment(self, db, learning_epoch, partial_size):
    """Train the network one data segment at a time.

    Makes ``learning_epoch`` full passes over the data; each pass reads the
    data in segments via the virtual ``get_next_segment`` and runs one
    optimizer step per segment.

    Args:
        db: data source handle (not referenced in this body; presumably
            consumed by the virtual segment methods -- TODO confirm).
        learning_epoch: number of full passes over the data.
        partial_size: segment size (not referenced in this body; presumably
            configured elsewhere -- TODO confirm).
    """
    # Hoisted out of the inner loop: the original re-imported mytool on
    # every segment iteration.
    from lib import mytool

    tf.set_random_seed(777)  # for reproducibility
    self.init_network()  # virtual function
    self.sess = tf.Session()
    # Initialize TensorFlow variables
    self.sess.run(tf.global_variables_initializer())

    print("\nStart learning:")
    for epoch in range(learning_epoch):
        err_4_all_data = 0
        number_of_segment = self.get_number_of_segment()  # virtual function
        # Optimize on the first segment (e.g. 100 samples), then the next
        # 100, and so on.  Once this inner loop completes, the whole data
        # set has been seen once (e.g. 550 segments x 100 = 55,000 samples).
        for i in range(number_of_segment):
            x_data, y_data = self.get_next_segment()  # virtual function
            # This error covers only the current segment; dividing the sum
            # by the segment count below yields the average over all data.
            err_4_partial, _ = self.sess.run(
                [self.cost_function, self.optimizer],
                feed_dict={self.X: x_data, self.Y: y_data})
            err_4_all_data += err_4_partial
            mytool.print_dot()
        avg_err = err_4_all_data / number_of_segment
        # self.costs.append(avg_err)
        self.my_log(epoch, x_data, y_data)  # virtual function
    print("\nDone!\n")
def learn(self, dX, dY, total_loop, check_step):
    """Train on the full ``(dX, dY)`` batch for ``total_loop + 1`` steps.

    The loss of every step is appended to ``self.errors``; a progress dot
    is printed every ``check_step`` steps.

    Args:
        dX: input batch fed to ``self.X``.
        dY: target batch fed to ``self.Y``.
        total_loop: number of optimizer steps minus one (loop is inclusive).
        check_step: interval between progress dots.
    """
    tf.set_random_seed(777)  # reproducibility
    self.init_network()
    self.sess = tf.Session()
    self.sess.run(tf.global_variables_initializer())

    print('\nStart learning:')
    for i in range(total_loop + 1):
        self.sess.run(self.optimizer, feed_dict={self.X: dX, self.Y: dY})
        l = self.sess.run(self.cost_function,
                          feed_dict={self.X: dX, self.Y: dY})
        # NOTE(review): the original additionally ran self.hypothesis here
        # and argmax-decoded every prediction each step, then discarded the
        # result (its only use, a print, was commented out).  That dead
        # work is removed; training behavior is unchanged.
        self.errors.append(l)
        if i % check_step == 0:
            print_dot()
    print('\nDone.')
def learn(self, trainX, trainY, total_loop, check_step):
    """Train the RNN on ``(trainX, trainY)`` for ``total_loop`` steps.

    Aborts the process if the RNN dimensions have not been configured.
    Each step's loss is appended to ``self.errors``; a progress dot is
    printed every ``check_step`` steps.
    """
    # Hoisted out of the loop: the original re-imported mytool on every
    # check_step iteration.
    from lib import mytool

    tf.set_random_seed(777)  # reproducibility
    self.init_network()
    # BUG FIX: the original wrote
    #   self.input_dim == 0 | self.output_dim == 0 | self.seq_length == 0
    # Bitwise '|' binds tighter than '==', so that parsed as the chained
    # comparison input_dim == output_dim == seq_length == 0 and failed to
    # test each parameter against zero.  Logical 'or' is intended.
    if self.input_dim == 0 or self.output_dim == 0 or self.seq_length == 0:
        print('Set RNN parameters!')
        exit()
    self.sess = tf.Session()
    self.sess.run(tf.global_variables_initializer())

    print('\nStart learning:')
    for i in range(total_loop):
        self.sess.run(self.optimizer,
                      feed_dict={self.X: trainX, self.Y: trainY})
        loss = self.sess.run(self.cost_function,
                             feed_dict={self.X: trainX, self.Y: trainY})
        self.errors.append(loss)
        if i % check_step == 0:
            mytool.print_dot()
    print('\nDone!\n')
def learn(self, xdata, ydata, total_loop, check_step):
    """Train the character RNN on one (input, target) sentence pair.

    The sentences are converted to per-character index lists (via
    ``self.cheolsu``) and fed as single-sentence batches.  Every
    ``check_step`` steps the current loss is appended to ``self.costs``
    and a progress dot is printed.
    """
    # Hoisted out of the loop: the original re-imported mytool on every
    # check_step iteration.
    from lib import mytool

    tf.set_random_seed(777)  # reproducibility
    self.init_network()
    self.sess = tf.Session()
    self.sess.run(tf.global_variables_initializer())

    print('\nStart learning:')
    # e.g. [6, 2, 6, 7, 10, 5, 3, 6, 11, 10, 1, 6, 12, 10, 6, 0, 1, 8, 9]
    x_index_list = self.cheolsu.sentence_to_index_list(xdata)
    # e.g. [2, 6, 7, 10, 5, 3, 6, 11, 10, 1, 6, 12, 10, 6, 0, 1, 8, 9, 4]
    y_index_list = self.cheolsu.sentence_to_index_list(ydata)
    for i in range(total_loop):  # e.g. 3000
        l, _ = self.sess.run(
            [self.cost_function, self.optimizer],
            feed_dict={self.X: [x_index_list], self.Y: [y_index_list]})
        if i % check_step == 0:  # e.g. 10
            self.costs.append(l)
            mytool.print_dot()
    print('\nDone!\n')
def learn(self, x_index_list, y_index_list, total_loop, check_step):
    """Train on a single index-encoded sequence pair for ``total_loop + 1``
    steps, logging predictions and errors every ``check_step`` steps.

    Example configuration (from the original notes):
        input_dim = 5, hidden_size = 5, sequence_length = 6, batch_size = 1

    Args:
        x_index_list: input sequence as a flat list of character indices.
        y_index_list: target sequence as a flat list of character indices.
        total_loop: number of optimizer steps minus one (loop is inclusive).
        check_step: interval between logged snapshots.
    """
    tf.set_random_seed(777)  # reproducibility
    self.init_network()
    self.prediction = tf.argmax(self.hypothesis, axis=2)
    self.sess = tf.Session()
    self.sess.run(tf.global_variables_initializer())
    self.create_writer()  # virtual function for tensorboard
    # Report the initial (pre-training) error.
    print('error',
          self.sess.run(self.cost_function,
                        feed_dict={self.X: [x_index_list],
                                   self.Y: [y_index_list]}))

    print('\nStart learning:')
    for i in range(total_loop + 1):
        err, _ = self.sess.run(
            [self.cost_function, self.optimizer],
            feed_dict={self.X: [x_index_list], self.Y: [y_index_list]})
        # BUG FIX: the original fed self.X: x_index_list (no batch
        # dimension) here, unlike every other feed in this method, which
        # wraps the list as a one-sentence batch [x_index_list].
        self.do_summary(feed_dict={self.X: [x_index_list],
                                   self.Y: [y_index_list]
                                   })  # virtual function for tensorboard
        if i % check_step == 0:
            mytool.print_dot()
            result = self.sess.run(self.prediction,
                                   feed_dict={self.X: [x_index_list]})
            msg = "{} 오류: {:.6f}, 예측: {}, 실제: {}".format(
                i, err, result, [y_index_list])
            self.logs.append(msg)
            self.errors.append(err)
    print('\nDone!\n')
def learn(self, total_loop, check_step):
    """Run ``total_loop + 1`` optimizer steps, recording the cost in
    ``self.costs`` every ``check_step`` steps.

    Reuses an existing session when one is present, so repeated calls
    continue training instead of re-initializing the variables.
    """
    # Hoisted out of the loop: the original re-imported mytool on every
    # check_step iteration.
    from lib import mytool

    tf.set_random_seed(777)  # for reproducibility
    # Identity check ('is None') instead of the original '== None' (PEP 8).
    if self.sess is None:
        self.init_network()
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())

    print("\nStart learning:")
    for step in range(total_loop + 1):
        # No feed_dict: this model presumably reads its data from input
        # tensors/queues built in init_network -- TODO confirm.
        cost_val, _ = self.sess.run([self.cost_function, self.optimizer])
        if step % check_step == 0:
            self.costs.append(cost_val)
            mytool.print_dot()
    print("\nDone!\n")
# train my model for epoch in range(training_epochs): avg_cost = 0 total_batch = int(mnist.train.num_examples / batch_size) for i in range(total_batch): batch_xs, batch_ys = mnist.train.next_batch(batch_size) feed_dict = {X: batch_xs, Y: batch_ys, keep_prob: 0.7} s, _ = sess.run([summary, optimizer], feed_dict=feed_dict) writer.add_summary(s, global_step=global_step) global_step += 1 avg_cost += sess.run(cost, feed_dict=feed_dict) / total_batch from lib.mytool import print_dot print_dot() print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost)) print('Learning Finished!') # Test model and check accuracy correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) print( 'Accuracy:', sess.run(accuracy, feed_dict={ X: mnist.test.images, Y: mnist.test.labels, keep_prob: 1
def check_step_processing(self, i, x_data, y_data):
    """Progress hook for a check step: print a dot and record the current
    cost, formatted to 8 decimal places, in ``self.costs``.

    Args:
        i: current step index (accepted for interface compatibility;
           not used in this body).
        x_data: input batch fed to ``self.X``.
        y_data: target batch fed to ``self.Y``.
    """
    mytool.print_dot()
    feed = {self.X: x_data, self.Y: y_data}
    current_cost = self.sess.run(self.cost_function, feed_dict=feed)
    # NOTE(review): the cost is stored as a *string* here, whereas the
    # sibling learn() methods append raw floats -- confirm that consumers
    # of self.costs expect formatted strings.
    self.costs.append("{:.8f}".format(current_cost))