def train(self, epoch=100, load=False):
    noise = tf.placeholder(tf.float32, [self.batch_size, self.noise_dim])
    real_image = tf.placeholder(tf.float32, [
        self.batch_size, self.image_size, self.image_size, self.image_depth
    ])
    self.build_model(noise, real_image)

    if self.image_size == 64:
        datasetTrain = Dataset(self.label_dir,
                               self.batch_size,
                               cv2=self.cv2,
                               mode="compress",
                               normalized=self.nor)
    elif self.image_size == 96:
        datasetTrain = Dataset(self.label_dir,
                               self.batch_size,
                               cv2=self.cv2,
                               mode="origin",
                               normalized=self.nor)
    else:
        raise RuntimeError('image_size %i is not valid' % self.image_size)
    # batches have shape (batch_size, image_size, image_size, image_depth)

    with tf.Session() as sess:
        saver = tf.train.Saver(max_to_keep=3)
        if not load:
            sess.run(tf.global_variables_initializer())
            print("New Model: ", self.name)
        else:
            latest_checkpoint = tf.train.latest_checkpoint(self.save_path)
            saver.restore(sess, latest_checkpoint)
            print("Saver Loaded: " + latest_checkpoint)
        PATH = self.save_path + self.name + ".ckpt"
        if self.nor:
            mean, std = datasetTrain.reset_normalized_par()

        for epo in range(epoch):
            if epo % 1 == 0:  # preview every epoch; raise the modulus to preview less often
                batch_z = ut.genNoise(self.batch_size,
                                      mode=self.noise_mode,
                                      ball=self.ball)
                gen_image = sess.run(self.gen_image,
                                     feed_dict={noise: batch_z})
                idx = np.random.randint(
                    0, self.batch_size - self.genImage_num + 1)
                if self.nor:
                    gen_image = std * np.array(
                        gen_image[idx:idx + self.genImage_num]) + mean
                else:
                    gen_image = np.array(
                        gen_image[idx:idx + self.genImage_num]) * 255
                print(gen_image)
                for i in range(self.genImage_num):
                    if self.cv2:
                        cv2.imwrite(
                            "./_" + str(epo) + "_" + str(i) + ".jpg",
                            gen_image[i])
                    else:
                        skimage.io.imsave(
                            "../genImage/DCGAN2_80/" + self.name + "_" +
                            str(epo) + "_" + str(i) + ".jpg", gen_image[i])
                """Disabled alternative preview: tile 25 samples into 5x5 grids.
                all_image = []
                for f in range(10):
                    batch_z = ut.genNoise(self.batch_size, mode='uniform', ball=False)
                    gen_image = sess.run(self.gen_image, feed_dict={noise: batch_z})
                    idx = np.random.randint(0, self.batch_size - 24)
                    gen_image = np.array(gen_image[idx:idx + 25])
                    gen_image = gen_image[:, :, :, ::-1]  # to rgb
                    all_image.append(gen_image)
                for k in range(10):
                    r, c = 5, 5
                    fig, axs = plt.subplots(r, c)
                    cnt = 0
                    for w in range(r):
                        for z in range(c):
                            axs[w, z].imshow(all_image[k][cnt, :, :, :])
                            axs[w, z].axis('off')
                            cnt += 1
                    fig.savefig('./_' + str(k) + ".png")
                    plt.close()
                """

            if epo != 0:
                ckpt_path = saver.save(sess, PATH, global_step=epo)
                print("Model saved: ", ckpt_path)

            datasetTrain.shuffle_perm()
            num_steps = int(datasetTrain.batch_max_size / self.batch_size)
            epo_loss = [0, 0, 0]

            ### Training
            for step in range(num_steps):
                images, _ = datasetTrain.next_batch()
                images = np.array(images).astype(np.float32)
                batch_z = ut.genNoise(self.batch_size,
                                      mode=self.noise_mode,
                                      ball=self.ball)

                total_dl_fake = 0
                total_dl_real = 0
                total_gl = 0
                # Discriminator updates (averaged over d_update_num runs).
                for j in range(self.d_update_num):
                    _, dl_fake, dl_real = sess.run(
                        [self.d_opt, self.d_loss_fake, self.d_loss_real],
                        feed_dict={
                            noise: batch_z,
                            real_image: images
                        })
                    total_dl_fake += dl_fake / self.d_update_num
                    total_dl_real += dl_real / self.d_update_num
                # Generator updates (averaged over g_update_num runs).
                for j in range(self.g_update_num):
                    _, gl = sess.run([self.g_opt, self.g_loss],
                                     feed_dict={
                                         noise: batch_z,
                                         real_image: images
                                     })
                    total_gl += gl / self.g_update_num

                print("steps: %i gloss: %f dloss_fake: %f dloss_real %f" %
                      (step, total_gl, total_dl_fake, total_dl_real))
                for k, loss in enumerate(
                    [total_gl, total_dl_fake, total_dl_real]):
                    epo_loss[k] += loss

            epo_loss = np.array(epo_loss)
            print("\n[FINISHED] Epoch " + str(epo) +
                  ", Training Loss (per epoch): ", epo_loss / num_steps)

        print('\n\nTraining finished!')
        print("Success")
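# --- Sketch: noise sampler assumed by train() ---
# The loop above relies on a helper ut.genNoise(batch_size, mode, ball)
# that returns a (batch_size, noise_dim) float32 array. The utils module
# is not shown here, so the following is only a plausible sketch; NOISE_DIM
# and the exact mode names are assumptions, not the project's actual code.
import numpy as np

NOISE_DIM = 100  # assumed; must match self.noise_dim


def genNoise(batch_size, mode="normal", ball=False):
    """Sample a batch of latent vectors (illustrative sketch)."""
    if mode == "normal":
        z = np.random.normal(0.0, 1.0, (batch_size, NOISE_DIM))
    elif mode == "uniform":
        z = np.random.uniform(-1.0, 1.0, (batch_size, NOISE_DIM))
    else:
        raise ValueError("unknown noise mode: %s" % mode)
    if ball:
        # Guess at the `ball` flag: project vectors onto the unit ball.
        norms = np.linalg.norm(z, axis=1, keepdims=True)
        z = z / np.maximum(norms, 1.0)
    return z.astype(np.float32)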
def train(self, epoch=100, load=False):
    noise = tf.placeholder(
        tf.float32, [self.batch_size, self.noise_dim + self.word_dim])
    real_image = tf.placeholder(tf.float32, [
        self.batch_size, self.image_size, self.image_size, self.image_depth
    ])
    real_tags = tf.placeholder(tf.float32,
                               [self.batch_size, self.word_dim])
    random_tags = tf.placeholder(tf.float32,
                                 [self.batch_size, self.word_dim])
    self.build_model(noise, real_image, real_tags, random_tags)

    if self.image_size == 64:
        datasetTrain = TrainDataset(self.label_csv, self.label_dir,
                                    self.save_dict, self.batch_size,
                                    self.nor)
    elif self.image_size == 96:
        datasetTrain = TrainDataset(self.label_dir,
                                    self.batch_size,
                                    cv2=self.cv2,
                                    mode="origin",
                                    normalized=self.nor)
    else:
        # Previously fell through silently, leaving datasetTrain undefined.
        raise RuntimeError('image_size %i is not valid' % self.image_size)

    with tf.Session() as sess:
        saver = tf.train.Saver(max_to_keep=3)
        if not load:
            sess.run(tf.global_variables_initializer())
            print("New Model: ", self.name)
        else:
            latest_checkpoint = tf.train.latest_checkpoint(self.save_path)
            saver.restore(sess, latest_checkpoint)
            print("Saver Loaded: " + latest_checkpoint)
        PATH = self.save_path + self.name + "_" + ".ckpt"
        if self.nor:
            mean, std = datasetTrain.reset_normalized_par()

        for epo in range(epoch):
            if epo % 1 == 0:  # preview every epoch; raise the modulus to preview less often
                batch_z = ut.genNoise(self.batch_size,
                                      mode=self.noise_mode,
                                      ball=self.ball)
                word_test = ut.random_tags(self.batch_size)
                batch_z = np.concatenate((batch_z, word_test), axis=1)
                gen_image = sess.run(self.gen_image,
                                     feed_dict={noise: batch_z})
                idx = np.random.randint(
                    0, self.batch_size - self.genImage_num + 1)
                if self.nor:
                    gen_image = std * np.array(
                        gen_image[idx:idx + self.genImage_num]) + mean
                else:
                    gen_image = np.array(
                        gen_image[idx:idx + self.genImage_num]) * 255
                print(gen_image)
                for i in range(self.genImage_num):
                    print(word_test[i + idx])
                    if self.cv2:
                        cv2.imwrite(
                            self.image_save_path + self.name + "_" +
                            str(epo) + "_" + str(i) + ".jpg", gen_image[i])
                    else:
                        skimage.io.imsave(
                            "../genImage/DCGAN2_80/" + self.name + "_" +
                            str(epo) + "_" + str(i) + ".jpg", gen_image[i])

            if epo != 0:
                ckpt_path = saver.save(sess, PATH, global_step=epo)
                print("Model saved: ", ckpt_path)

            datasetTrain.shuffle_perm()
            num_steps = int(datasetTrain.batch_max_size / self.batch_size)
            epo_loss = [0, 0, 0, 0]

            ### Training
            for step in range(num_steps):
                images, words, _ = datasetTrain.next_batch()
                images = np.array(images).astype(np.float32)
                batch_z = ut.genNoise(self.batch_size,
                                      mode=self.noise_mode,
                                      ball=self.ball)
                noise_tag = ut.random_tags(self.batch_size)
                batch_z = np.concatenate((batch_z, noise_tag), axis=1)

                total_dl_fake = 0
                total_dl_real = 0
                total_dl_rft = 0
                total_gl = 0
                # Discriminator updates: fake-image, real-image, and
                # real-image/fake-tag (rft) loss terms, averaged over
                # d_update_num runs.
                for j in range(self.d_update_num):
                    _, dl_fake, dl_real, dl_rft = sess.run(
                        [
                            self.d_opt, self.d_loss_fake, self.d_loss_real,
                            self.d_loss_rft
                        ],
                        feed_dict={
                            noise: batch_z,
                            real_image: images,
                            real_tags: words,
                            random_tags: noise_tag
                        })
                    total_dl_fake += dl_fake / self.d_update_num
                    total_dl_real += dl_real / self.d_update_num
                    total_dl_rft += dl_rft / self.d_update_num
                # Generator updates, averaged over g_update_num runs.
                for j in range(self.g_update_num):
                    _, gl = sess.run([self.g_opt, self.g_loss],
                                     feed_dict={
                                         noise: batch_z,
                                         real_image: images
                                     })
                    total_gl += gl / self.g_update_num

                print(
                    "steps: %i gloss: %f dloss_fake: %f dloss_real %f dloss_rft %f"
                    % (step, total_gl, total_dl_fake, total_dl_real,
                       total_dl_rft))
                for k, loss in enumerate([
                        total_gl, total_dl_fake, total_dl_real, total_dl_rft
                ]):
                    epo_loss[k] += loss

            epo_loss = np.array(epo_loss)
            print("\n[FINISHED] Epoch " + str(epo) +
                  ", Training Loss (per epoch): ", epo_loss / num_steps)

        print('\n\nTraining finished!')
        print("Success")
def train(self, epoch=100, load=False):
    noise = tf.placeholder(tf.float32, [self.batch_size, self.noise_dim])
    real_image = tf.placeholder(tf.float32, [
        self.batch_size, self.image_size, self.image_size, self.image_depth
    ])
    self.build_model(noise, real_image)
    datasetTrain = Dataset(self.label_dir, self.batch_size)
    # batches have shape (batch_size, image_size, image_size, image_depth)

    with tf.Session() as sess:
        saver = tf.train.Saver(max_to_keep=3)
        if not load:
            sess.run(tf.global_variables_initializer())
            print("New Model: ", self.name)
        else:
            latest_checkpoint = tf.train.latest_checkpoint(self.save_path)
            saver.restore(sess, latest_checkpoint)
            print("Saver Loaded: " + latest_checkpoint)
        PATH = self.save_path + self.name + ".ckpt"

        for epo in range(epoch):
            if epo % 1 == 0:  # preview every epoch
                batch_z = ut.genNoise(self.batch_size, mode="normal")
                gen_image = sess.run(self.gen_image,
                                     feed_dict={noise: batch_z})
                idx = np.random.randint(0, self.batch_size - 4)
                gen_image = np.array(gen_image[idx:idx + 5])
                for i in range(5):
                    # Remember to switch to
                    # cv2.imwrite("../genImage/DCGAN2_80/" + self.name + "_"
                    #             + str(epo) + "_" + str(i) + ".jpg",
                    #             gen_image[i])
                    # where the images are produced.
                    skimage.io.imsave(
                        "../genImage/DCGAN2_80/" + self.name + "_" +
                        str(epo)  # don't
                        + "_" + str(i) + ".jpg", gen_image[i])

            if epo != 0:
                ckpt_path = saver.save(sess, PATH, global_step=epo)
                print("Model saved: ", ckpt_path)

            datasetTrain.shuffle_perm()
            num_steps = int(datasetTrain.batch_max_size / self.batch_size)
            epo_loss = [0, 0, 0]

            ### Training
            for step in range(num_steps):
                images, _ = datasetTrain.next_batch()
                images = np.array(images).astype(np.float32)
                batch_z = ut.genNoise(self.batch_size, mode="normal")

                total_dl_fake = 0
                total_dl_real = 0
                total_gl = 0
                # Discriminator updates (averaged over d_update_num runs).
                for j in range(self.d_update_num):
                    _, dl_fake, dl_real = sess.run(
                        [self.d_opt, self.d_loss_fake, self.d_loss_real],
                        feed_dict={
                            noise: batch_z,
                            real_image: images
                        })
                    total_dl_fake += dl_fake / self.d_update_num
                    total_dl_real += dl_real / self.d_update_num
                # Generator updates (averaged over g_update_num runs).
                for j in range(self.g_update_num):
                    _, gl = sess.run([self.g_opt, self.g_loss],
                                     feed_dict={
                                         noise: batch_z,
                                         real_image: images
                                     })
                    total_gl += gl / self.g_update_num

                print("steps: %i gloss: %f dloss_fake: %f dloss_real %f" %
                      (step, total_gl, total_dl_fake, total_dl_real))
                for k, loss in enumerate(
                    [total_gl, total_dl_fake, total_dl_real]):
                    epo_loss[k] += loss

            epo_loss = np.array(epo_loss)
            print("\n[FINISHED] Epoch " + str(epo) +
                  ", Training Loss (per epoch): ", epo_loss / num_steps)

        print('\n\nTraining finished!')
        print("Success")
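# --- Sketch: the dataset contract shared by all three train() variants ---
# Every variant drives its dataset through the same small interface:
# shuffle_perm(), batch_max_size, next_batch(), and (for the normalized
# variants) reset_normalized_par(). The real Dataset/TrainDataset classes
# are not shown here; this stand-in only illustrates the assumed contract,
# with image loading and normalization details left out.
import numpy as np


class DatasetSketch:
    """Illustrative stand-in for Dataset/TrainDataset (not the real class)."""

    def __init__(self, images, batch_size):
        self.images = images  # (N, H, W, C) array, assumed preloaded
        self.batch_size = batch_size
        self.batch_max_size = len(images)
        self.perm = np.arange(len(images))
        self.cursor = 0

    def shuffle_perm(self):
        # New random visiting order for the next epoch.
        np.random.shuffle(self.perm)
        self.cursor = 0

    def next_batch(self):
        # Returns (images, labels); labels are unused by these loops.
        idx = self.perm[self.cursor:self.cursor + self.batch_size]
        self.cursor += self.batch_size
        return self.images[idx], None

    def reset_normalized_par(self):
        # Returns the statistics train() uses to de-normalize samples;
        # the normalization itself is assumed to happen at load time.
        return self.images.mean(), self.images.std()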
def test(self, read_csv=True):  # read_csv was previously an undefined global
    test_batch_size = 25
    noise = tf.placeholder(
        tf.float32, [test_batch_size, self.noise_dim + self.word_dim])
    real_image = tf.placeholder(tf.float32, [
        test_batch_size, self.image_size, self.image_size, self.image_depth
    ])
    real_tags = tf.placeholder(tf.float32,
                               [test_batch_size, self.word_dim])
    random_tags = tf.placeholder(tf.float32,
                                 [test_batch_size, self.word_dim])
    self.build_model(noise, real_image, real_tags, random_tags)

    test_label_csv = "./tags.csv"
    datasetTrain = TrainDataset(self.label_csv, self.label_dir,
                                self.save_dict, self.batch_size, self.nor)
    if self.nor:
        mean, std = datasetTrain.reset_normalized_par()

    with tf.Session() as sess:
        saver = tf.train.Saver(max_to_keep=3)
        # load_path was previously undefined; restore the latest
        # checkpoint, matching what train() does.
        load_path = tf.train.latest_checkpoint(self.save_path)
        saver.restore(sess, load_path)
        print("Saver Loaded: " + load_path)

        if read_csv:
            # Condition on tag vectors read from the test csv.
            all_image = []
            total_num = 10
            for f in range(1):
                datasetTest = Testdataset(test_label_csv, self.save_dict,
                                          test_batch_size)
                batch_z = ut.genNoise(test_batch_size,
                                      mode=self.noise_mode,
                                      ball=self.ball)
                word_test = datasetTest.next_batch()
                batch_z = np.concatenate((batch_z, word_test), axis=1)
                gen_image = sess.run(self.gen_image,
                                     feed_dict={noise: batch_z})
                gen_image = np.array(gen_image)
                if self.cv2:
                    cv2.imwrite("./11.jpg", gen_image[0])
                    gen_image = gen_image[:, :, :, ::-1]  # BGR to RGB
                all_image.append(gen_image)
            for k in range(1):
                r, c = 5, 5
                fig, axs = plt.subplots(r, c)
                cnt = 0
                for w in range(r):
                    for z in range(c):
                        axs[w, z].imshow(all_image[k][cnt, :, :, :])
                        axs[w, z].axis('off')
                        cnt += 1
                fig.savefig('./_' + str(k) + ".png")
                plt.close()
        else:
            # Enumerate every hair/eye tag combination: 12 hair colors x
            # 10 eye colors, plus 5 all-zero tags = 125 vectors
            # (5 batches of 25).
            word_test = []
            all_image = []
            for f in range(12):
                for k in range(10):
                    zero = np.zeros(22)
                    zero[f] = 1
                    zero[12 + k] = 1
                    word_test.append(zero)
            for f in range(5):
                word_test.append(np.zeros(22))
            for f in range(5):
                batch_z = ut.genNoise(test_batch_size,
                                      mode=self.noise_mode,
                                      ball=self.ball)
                batch_z = np.concatenate(
                    (batch_z, word_test[25 * f:25 * f + 25]), axis=1)
                gen_image = sess.run(self.gen_image,
                                     feed_dict={noise: batch_z})
                gen_image = np.array(gen_image)
                gen_image = gen_image[:, :, :, ::-1]  # to rgb
                all_image.append(gen_image)
            for k in range(5):
                r, c = 5, 5
                fig, axs = plt.subplots(r, c)
                cnt = 0
                for w in range(r):
                    for z in range(c):
                        axs[w, z].imshow(all_image[k][cnt, :, :, :])
                        axs[w, z].axis('off')
                        cnt += 1
                fig.savefig('./_' + str(k) + ".png")
                plt.close()
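# --- Sketch: factoring out the duplicated 5x5 preview grid ---
# The matplotlib grid code above appears three times (both test() branches
# and the commented-out preview in the first train()). A small helper like
# the hypothetical save_image_grid below could replace all of them; the
# name, rows/cols defaults, and output path are assumptions.
import matplotlib.pyplot as plt


def save_image_grid(images, out_path, rows=5, cols=5):
    """Tile the first rows*cols images into one figure and save it."""
    fig, axs = plt.subplots(rows, cols)
    cnt = 0
    for w in range(rows):
        for z in range(cols):
            axs[w, z].imshow(images[cnt, :, :, :])
            axs[w, z].axis('off')
            cnt += 1
    fig.savefig(out_path)
    plt.close(fig)


# Usage matching the loop in test():
# for k in range(5):
#     save_image_grid(all_image[k], './_' + str(k) + ".png")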