def train(self):
    """Run the style-transfer training loop for up to 40000 global steps.

    Every 100 steps a generated image batch is written to ./gen_img, every
    2000 steps a checkpoint is written to ./model_saved_s, and a final
    checkpoint is saved if the input queue runs dry before step 40000.
    """
    print('start to training')
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    try:
        while not coord.should_stop():
            _, loss, step, cl, sl = self.sess.run(
                [self.opt, self.loss, self.global_step,
                 self.content_loss, self.style_loss])
            if step % 100 == 0:
                gen_img = self.sess.run(self.gen_img)
                if not os.path.exists('gen_img'):
                    os.mkdir('gen_img')
                # // keeps the file index an int on Python 3 as well
                # (plain / would yield names like "1.0.jpg").
                save_img.save_images(gen_img, './gen_img/{0}.jpg'.format(step // 100))
                print('[{}/40000],loss:{}, content:{},style:{}'.format(step, loss, cl, sl))
            if step % 2000 == 0:
                if not os.path.exists('model_saved_s'):
                    os.mkdir('model_saved_s')
                self.save.save(self.sess, './model_saved_s/wave{}.ckpt'.format(step // 2000))
            if step >= 40000:
                break
    except tf.errors.OutOfRangeError:
        # BUG FIX: original passed undefined name `sess` here (NameError
        # when the input queue is exhausted); must be self.sess.
        self.save.save(self.sess, os.path.join(os.getcwd(), 'fast-style-model.ckpt-done'))
    finally:
        coord.request_stop()
        coord.join(threads)
def train(self):
    """Train the semi-supervised GAN on MNIST.

    Per iteration: one discriminator update followed by one generator
    update (both fed the same batch and the same fixed noise vector).
    Losses are plotted every step; sample images are saved every 500
    iterations; the model is checkpointed whenever test accuracy beats
    the best value seen so far (starting threshold 0.80).
    """
    if not os.path.exists('model_saved'):
        os.mkdir('model_saved')
    if not os.path.exists('gen_picture'):
        os.mkdir('gen_picture')
    # One fixed noise batch reused for every generator step and sample image.
    noise = np.random.normal(-1, 1, [self.batch_size, 128])
    temp = 0.80  # best test accuracy so far; only checkpoint when beaten
    print('training')
    for epoch in range(self.EPOCH):
        iters = 50000 // self.batch_size
        flag2 = 1
        for idx in range(iters):
            start_t = time.time()
            # Only the first 4 batches of each epoch are treated as labeled.
            flag = 1 if idx < 4 else 0
            batchx, batchl = mnist.train.next_batch(self.batch_size)
            g_opt = [self.opt_g, self.g_loss]
            d_opt = [self.opt_d, self.d_loss, self.d_l_1, self.d_l_2]
            feed = {
                self.x: batchx,
                self.z: noise,
                self.label: batchl,
                self.flag: flag,
                self.flag2: flag2,
            }
            # Update the discriminator, then the generator.
            _, loss_d, d1, d2 = self.sess.run(d_opt, feed_dict=feed)
            _, loss_g = self.sess.run(g_opt, feed_dict=feed)
            # FIX: original mixed Python-2 print statements with print()
            # calls; this single-argument call produces the same output on
            # both Python 2 and Python 3.
            print('[%3f][epoch:%2d/%2d][iter:%4d/%4d],loss_d:%5f,loss_g:%4f, d1:%4f, d2:%4f flag: %d'
                  % (time.time() - start_t, epoch, self.EPOCH, idx, iters,
                     loss_d, loss_g, d1, d2, flag))
            plot.plot('d_loss', loss_d)
            plot.plot('g_loss', loss_g)
            if ((idx + 1) % 100) == 0:
                # flush plot picture per 100 iters
                plot.flush()
            plot.tick()
            if (idx + 1) % 500 == 0:
                print('images saving............')
                img = self.sess.run(self.G_img, feed_dict=feed)
                # // keeps the sample index an int on Python 3 as well.
                save_img.save_images(img, os.getcwd() + '/gen_picture/' +
                                     'sample{}_{}.jpg'.format(epoch, (idx + 1) // 500))
                print('images save done')
        test_acc = self.test()
        plot.plot('test acc', test_acc)
        plot.flush()
        plot.tick()
        print('test acc:{}'.format(test_acc) + ' temp:%3f' % temp)
        if test_acc > temp:
            print('model saving..............')
            path = os.getcwd() + '/model_saved'
            save_path = os.path.join(path, "model.ckpt")
            self.saver.save(self.sess, save_path=save_path)
            print('model saved...............')
            temp = test_acc
def test(self):
    """Stylize one test image with the trained transform network and save it.

    Loads the image at args.test_data_path, builds the 'transform'
    generator graph over it, restores the transform weights from the
    checkpoint at args.transfer_model, and writes the stylized result
    to args.new_img_name.
    """
    print('test model')
    test_img_path = self.args.test_data_path
    test_img = load_test_img(test_img_path)
    # Materialize the image tensor so gen_net receives a concrete array.
    test_img = self.sess.run(test_img)
    with slim.arg_scope(model.arg_scope()):
        gen_img, _ = model.gen_net(test_img, reuse=False, name='transform')
    # Restore only the transform-network variables from the checkpoint.
    model_path = self.args.transfer_model
    # FIX: renamed from `vars`, which shadowed the builtin of that name.
    restore_vars = slim.get_variables_to_restore(include=['transform'])
    init_fn = slim.assign_from_checkpoint_fn(model_path, restore_vars)
    init_fn(self.sess)
    print('vgg s weights load done')
    gen_img = self.sess.run(gen_img)
    save_img.save_images(gen_img, self.args.new_img_name)