# Assumed module-level imports for this snippet: numpy, tensorflow
# (0.x summary API), and the project-local helper module `sf`.
import numpy as np
import tensorflow as tf


def mainloop(self):
    sess = tf.Session()
    # Register summaries through the project-local helpers.
    sf.add_train_var()
    sf.add_loss()
    sf.add_image("image_to_write")
    sum_writer = tf.train.SummaryWriter(self.train_log_dir, sess.graph)
    saver = tf.train.Saver()
    summ = tf.merge_all_summaries()
    init_op = tf.initialize_all_variables()
    sess.run(init_op)
    if self.restore_model:
        sf.restore_model(sess, saver, self.model_dir)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord, sess=sess)
    for i in xrange(self.max_training_iter):
        train_st_data_v, train_image_data_v = sess.run(
            [self.st_data, self.image_data])
        # Random latent code for the generator.
        ran_code = np.random.uniform(-1, 1, size=(self.bsize, 100))
        feed_data = {
            self.image_data_ph: train_image_data_v,
            self.st_data_ph: train_st_data_v,
            self.ran_code_ph: ran_code
        }
        # Alternate discriminator and generator updates.
        for di in xrange(self.d_iter):
            _, d_loss_v = sess.run([self.d_optim, self.d_loss],
                                   feed_dict=feed_data)
        for gi in xrange(self.g_iter):
            _, g_image_v, g_loss_v, summ_v = sess.run(
                [self.g_optim, self.g_image, self.g_loss, summ],
                feed_dict=feed_data)
        if i % 100 == 0:
            sum_writer.add_summary(summ_v, i)
            print("iter: %d, d_loss: %.3f, g_loss: %.3f" %
                  (i, d_loss_v, g_loss_v))
        if i != 0 and (i % 1000 == 0 or i == self.max_training_iter - 1):
            sf.save_model(sess, saver, self.model_dir, i)
    # Shut down the input-queue threads cleanly.
    coord.request_stop()
    coord.join(threads)
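# `sf.save_model` / `sf.restore_model` are project-local and not shown
# above. A minimal sketch consistent with their call sites; the bodies
# below are assumptions, not the project's actual implementation.
import os
import tensorflow as tf


def save_model(sess, saver, model_dir, step):
    # Write a checkpoint tagged with the current iteration.
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    saver.save(sess, os.path.join(model_dir, "model.ckpt"),
               global_step=step)


def restore_model(sess, saver, model_dir):
    # Load the newest checkpoint in model_dir, if one exists.
    ckpt = tf.train.latest_checkpoint(model_dir)
    if ckpt is not None:
        saver.restore(sess, ckpt)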
# Assumed module-level imports for this snippet: numpy, tensorflow, and
# the project-local helpers `uf` (graph config), `sf` (summaries,
# checkpoints), and `file_io`.
import numpy as np
import tensorflow as tf


def mainloop(self):
    config_proto = uf.define_graph_config(
        self.model_params["gpu_fraction"])
    sess = tf.Session(config=config_proto)
    self.init_var(sess)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord, sess=sess)
    if self.load_train:
        for i in range(self.model_params["max_training_iter"]):
            feed_dict = self.get_feed_dict(sess, True)
            # DANN-style schedule: ramp the gradient-reversal weight
            # from 0 toward 1 as training progresses.
            p = float(i) / self.model_params["max_training_iter"]
            l = 2. / (1. + np.exp(-10. * p)) - 1
            feed_dict[self.data_ph.get_gl()] = \
                l * self.model_params['adapt_scale']
            _, tloss_v, taccuracy_v = sess.run(
                [self.train_op, self.loss, self.accuracy], feed_dict)
            if i % self.model_params["test_per_iter"] == 0:
                feed_dict = self.get_feed_dict(sess, False)
                loss_v, accuracy_v, summ_v = sess.run(
                    [self.loss, self.accuracy, self.summ], feed_dict)
                print_string = ("i: %d, train_loss: %.2f, "
                                "train_accuracy: %.2f, "
                                "test_loss: %.2f, test_accuracy: %.2f" %
                                (i, tloss_v, taccuracy_v,
                                 loss_v, accuracy_v))
                print(print_string)
                file_io.save_string(
                    print_string,
                    self.model_params["train_log_dir"] +
                    self.model_params["string_log_name"])
                self.sum_writer.add_summary(summ_v, i)
                sf.add_value_sum(self.sum_writer, tloss_v, "train_loss", i)
                sf.add_value_sum(self.sum_writer, loss_v, "test_loss", i)
                sf.add_value_sum(self.sum_writer, taccuracy_v,
                                 "train_accu", i)
                sf.add_value_sum(self.sum_writer, accuracy_v,
                                 "test_accu", i)
            if i != 0 and (i % self.model_params["save_per_iter"] == 0 or
                           i == self.model_params["max_training_iter"] - 1):
                sf.save_model(sess, self.saver,
                              self.model_params["model_dir"], i)
    else:
        # Evaluation: sweep the test set once and average batch accuracy.
        file_len = self.target_data_input[0].file_size
        batch_size = self.model_params["batch_size"]
        test_iter = int(file_len / batch_size) + 1
        accuracy_list = list()
        for i in range(test_iter):
            feed_dict = self.get_feed_dict(sess, False)
            loss_v, accuracy_v = sess.run(
                [self.loss, self.accuracy], feed_dict)
            accuracy_list.append(accuracy_v)
            print("accuracy is %.2f" % accuracy_v)
        accuracy = np.mean(np.array(accuracy_list))
        accuracy_string = "final accuracy is: %.3f" % accuracy
        print(accuracy_string)
        with open(self.model_params["result_file_name"], 'w') as f:
            f.write(accuracy_string)
    coord.request_stop()
    coord.join(threads)
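# The ramp above, l = 2 / (1 + exp(-10 * p)) - 1 with p = i / max_iter,
# is the usual DANN gradient-reversal schedule: it is 0 at the first
# step and saturates toward 1, so the domain loss is phased in
# gradually. A standalone sketch (the function name is ours, not the
# project's):
import numpy as np


def grl_weight(step, max_steps, gamma=10.0):
    # Monotonic ramp from 0 (step 0) toward 1 (step == max_steps).
    p = float(step) / max_steps
    return 2.0 / (1.0 + np.exp(-gamma * p)) - 1.0


# grl_weight(0, 1000) == 0.0; grl_weight(500, 1000) ~= 0.987;
# grl_weight(1000, 1000) ~= 0.9999.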
# Assumed module-level imports for this snippet: numpy, cv2, tensorflow,
# and the project-local modules `fcn_model` and `sf`; `gen_data_label`,
# `norm_image`, TRAIN_TXT, TEST_TXT, and FLAGS are defined elsewhere in
# the same file.
import numpy as np
import cv2
import tensorflow as tf


def train():
    train_batch_data, train_batch_label = gen_data_label(TRAIN_TXT, True)
    test_batch_data, test_batch_label = gen_data_label(TEST_TXT, False)
    data_ph = tf.placeholder(tf.float32,
                             shape=(FLAGS.batch_size, FLAGS.feature_row,
                                    FLAGS.feature_col, FLAGS.feature_cha),
                             name='feature')
    label_ph = tf.placeholder(tf.float32,
                              shape=(FLAGS.batch_size, FLAGS.label_row,
                                     FLAGS.label_col, FLAGS.label_cha),
                              name='label')
    global_step = tf.Variable(0, name='global_step', trainable=False)
    keep_prob_ph = tf.placeholder(tf.float32, name='keep_prob')
    train_test_phase_ph = tf.placeholder(tf.bool, name='phase_holder')
    output_shape = [FLAGS.batch_size, FLAGS.label_row,
                    FLAGS.label_col, FLAGS.label_cha]
    infer = fcn_model.inference(data_ph, output_shape, keep_prob_ph,
                                train_test_phase_ph)
    loss = fcn_model.loss(infer, label_ph)
    train_op = fcn_model.train_op(loss, FLAGS.init_learning_rate,
                                  global_step)
    # Note: this builds a second loss op identical to `loss` above.
    test_loss = fcn_model.loss(infer, label_ph)

    sess = tf.Session()
    saver = tf.train.Saver()
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord, sess=sess)
    for i in range(FLAGS.max_training_iter):
        train_batch_data_v, train_batch_label_v = sess.run(
            [train_batch_data, train_batch_label])
        # The original never fed keep_prob_ph / train_test_phase_ph;
        # the values below are assumed defaults.
        _, loss_v, infer_v = sess.run([train_op, loss, infer], {
            data_ph: train_batch_data_v,
            label_ph: train_batch_label_v,
            keep_prob_ph: 0.5,
            train_test_phase_ph: True
        })
        if i % 500 == 0:
            test_batch_data_v, test_batch_label_v = sess.run(
                [test_batch_data, test_batch_label])
            test_loss_v, infer_v = sess.run([test_loss, infer], {
                data_ph: test_batch_data_v,
                label_ph: test_batch_label_v,
                keep_prob_ph: 1.0,
                train_test_phase_ph: False
            })
            # Per-image car counts, taken as the sum of each map.
            num_car_label = np.sum(test_batch_label_v) / FLAGS.batch_size
            num_car_infer = np.sum(infer_v) / FLAGS.batch_size
            print("i: %d train_loss: %.5f, test_loss: %.5f, "
                  "test_num_car: %.2f, infer_num_car: %.2f" %
                  (i, loss_v, test_loss_v, num_car_label, num_car_infer))
        if i % 5000 == 0 or i == FLAGS.max_training_iter - 1:
            sf.save_model(sess, saver, FLAGS.model_dir, i)
            # Save label and inference side by side for inspection.
            label_norm = norm_image(test_batch_label_v[0])
            infer_norm = norm_image(infer_v[0])
            image = np.hstack((label_norm, infer_norm))
            cv2.imwrite(FLAGS.image_dir + "/%08d.jpg" % (i // 100), image)
            cv2.imshow("infer", image)
            cv2.waitKey(1)  # let the window refresh
    coord.request_stop()
    coord.join(threads)
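# `norm_image` is called above but not defined in this snippet. A
# minimal sketch of what the call sites appear to expect (assumption:
# it rescales a float map to an 8-bit image for cv2):
import numpy as np


def norm_image(img):
    # Linearly rescale to [0, 255]; guard against a constant image.
    img = img.astype(np.float32)
    lo, hi = img.min(), img.max()
    if hi == lo:
        return np.zeros(img.shape, dtype=np.uint8)
    return ((img - lo) / (hi - lo) * 255.0).astype(np.uint8)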
# Assumed module-level imports for this snippet: numpy, tensorflow
# (0.x summary API), and the project-local modules `model` and `sf`;
# `gen_data_label`, TRAIN_TXT, TEST_TXT, and FLAGS are defined
# elsewhere in the same file.
import numpy as np
import tensorflow as tf


def train():
    train_batch_data, train_batch_label, train_batch_name = gen_data_label(
        TRAIN_TXT, True)
    test_batch_data, test_batch_label, test_batch_name = gen_data_label(
        TEST_TXT, False)
    data_ph = tf.placeholder(tf.float32,
                             shape=(FLAGS.batch_size, FLAGS.feature_row,
                                    FLAGS.feature_col, FLAGS.feature_cha),
                             name='feature')
    label_ph = tf.placeholder(tf.float32,
                              shape=(FLAGS.batch_size, FLAGS.label_row,
                                     FLAGS.label_col, FLAGS.label_cha),
                              name='label')
    global_step = tf.Variable(0, name='global_step', trainable=False)
    keep_prob_ph = tf.placeholder(tf.float32, name='keep_prob')
    train_test_phase_ph = tf.placeholder(tf.bool, name='phase_holder')
    output_shape = [FLAGS.batch_size, FLAGS.label_row,
                    FLAGS.label_col, FLAGS.label_cha]
    infer, count_diff_infer = model.inference(data_ph, output_shape,
                                              keep_prob_ph,
                                              train_test_phase_ph)
    loss, _ = model.loss(infer, count_diff_infer, label_ph)
    train_op = model.train_op(loss, FLAGS.init_learning_rate, global_step)

    sess = tf.Session()
    sf.add_train_var()
    sf.add_loss()
    train_sum = tf.merge_all_summaries()
    # Separate test-side summaries so train and test curves stay apart.
    test_loss, test_count = model.loss(infer, count_diff_infer, label_ph)
    test_count_sum = tf.scalar_summary("test_count",
                                       tf.reduce_mean(test_count))
    test_sum = tf.scalar_summary("test_loss", test_loss)
    sum_writer = tf.train.SummaryWriter(FLAGS.train_log_dir, sess.graph)
    saver = tf.train.Saver()
    init_op = tf.initialize_all_variables()
    sess.run(init_op)
    if FLAGS.restore_model:
        sf.restore_model(sess, saver, FLAGS.model_dir)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord, sess=sess)
    for i in xrange(FLAGS.max_training_iter):
        train_batch_data_v, train_batch_label_v = sess.run(
            [train_batch_data, train_batch_label])
        # keep_prob_ph was never fed in the original; 0.5 / 1.0 below
        # are assumed defaults.
        _, loss_v, infer_v, train_sum_v = sess.run(
            [train_op, loss, infer, train_sum], {
                data_ph: train_batch_data_v,
                label_ph: train_batch_label_v,
                keep_prob_ph: 0.5,
                train_test_phase_ph: True
            })
        if i % 20 == 0:
            test_batch_data_v, test_batch_label_v, test_batch_name_v = \
                sess.run([test_batch_data, test_batch_label,
                          test_batch_name])
            test_loss_v, test_count_v, infer_v, test_sum_v, test_count_sum_v = \
                sess.run([test_loss, test_count, infer, test_sum,
                          test_count_sum], {
                    data_ph: test_batch_data_v,
                    label_ph: test_batch_label_v,
                    keep_prob_ph: 1.0,
                    train_test_phase_ph: False
                })
            num_car_label = np.sum(test_batch_label_v) / FLAGS.batch_size
            num_car_infer = np.mean(test_count_v)
            # Mean absolute count error per image.
            num_car_diff = np.mean(
                np.abs(np.sum(test_batch_label_v, axis=(1, 2, 3)) -
                       test_count_v))
            print("i: %d train_loss: %.5f, test_loss: %.5f, "
                  "test_num_car: %.2f, infer_num_car: %.2f, "
                  "num_car_diff: %.2f" %
                  (i, loss_v, test_loss_v, num_car_label,
                   num_car_infer, num_car_diff))
            sum_writer.add_summary(train_sum_v, i)
            sum_writer.add_summary(test_sum_v, i)
            sum_writer.add_summary(test_count_sum_v, i)
        if i != 0 and (i % 200 == 0 or i == FLAGS.max_training_iter - 1):
            sf.save_model(sess, saver, FLAGS.model_dir, i)
    coord.request_stop()
    coord.join(threads)
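# The `sf` summary helpers used above (add_train_var, add_loss,
# add_value_sum) are project-local. A minimal sketch against the same
# TF 0.x summary API, consistent with their call sites; the bodies are
# assumptions, not the actual implementation:
import tensorflow as tf


def add_train_var():
    # Histogram summary for every trainable variable.
    for var in tf.trainable_variables():
        tf.histogram_summary(var.op.name, var)


def add_loss():
    # Scalar summary for each tensor registered in the 'losses'
    # collection (assumes the model code populates it).
    for l in tf.get_collection('losses'):
        tf.scalar_summary(l.op.name, l)


def add_value_sum(sum_writer, value, name, step):
    # Log a plain Python float as a scalar summary, no graph op needed.
    summ = tf.Summary(
        value=[tf.Summary.Value(tag=name, simple_value=float(value))])
    sum_writer.add_summary(summ, step)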