print(psnr_vals_list)

# -------------------------------- Train loop ---------------------------------------------
# NOTE(review): this chunk was collapsed onto a single physical line in the source,
# which left everything after the first '#' as dead comment text. Reconstructed
# into properly formatted statements below; verify against the original file.

config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # dynamic GPU memory growth instead of grabbing all VRAM up front

with tf.Session(config=config) as session:
    # Init variables: either resume from a checkpoint or initialize from scratch.
    if (START_ITER > 0):
        saver.restore(session, restore_path)  # Restore variables from saved session.
        print("Model restored.")
        plotter.restore(START_ITER)  # makes plots start from 0
        session.run(fixed_noise.initializer)
    else:
        session.run(init_op)
        session.run(fixed_noise.initializer)
    # NOTE(review): in the collapsed source it is ambiguous whether this print
    # belongs inside the else-branch or after the if/else — confirm upstream.
    print(fixed_noise.eval())

    overall_start_time = time.time()
    summary_writer = tf.summary.FileWriter(log_dir, graph=session.graph)

    # Network Training
    for iteration in range(START_ITER, ITERS):  # START_ITER: 0 or from last checkpoint
        start_time = time.time()
        # NOTE(review): source chunk is truncated here — the remainder of the
        # training-loop body is not visible in this view; restore it from the
        # original file.
# NOTE(review): this chunk was collapsed onto a single physical line in the source,
# which left everything after the first '#' as dead comment text. Reconstructed
# into properly formatted statements below; verify against the original file.
# The first statement may be the tail of a sampling helper defined above this
# view — confirm its enclosing scope upstream.
imsaver.save_images(samples_255.reshape((BATCH_SIZE, 3, IM_DIM, IM_DIM)), 'samples_{}.png'.format(frame))  ### for MNIST

init_op = tf.global_variables_initializer()  # op to initialize the variables.
saver = tf.train.Saver()  # ops to save and restore all the variables.

# Train loop
with tf.Session() as session:
    # Init variables: either resume from a checkpoint or initialize from scratch.
    if (CONTINUE):
        saver.restore(session, restore_path)  # Restore variables from saved session.
        print("Model restored.")
        plotter.restore(START_ITER)  # does not fully work, but makes plots start from newly started iteration
    else:
        session.run(init_op)

    for iteration in range(START_ITER, ITERS):  # START_ITER: 0 or from last checkpoint
        start_time = time.time()

        # Train generator (skipped on the very first iteration)
        if iteration > 0:
            _ = session.run(gen_train_op)

        # Train discriminator
        for i in range(DISC_ITERS):
            _data, _ = next(gen)  # shape: (batchsize, 6144)
            _real_data = _data[:, 2 * OUTPUT_DIM:]  # current frame for disc
            # NOTE(review): source chunk is truncated mid-statement below — the
            # session.run call's remaining arguments (presumably a feed_dict
            # supplying _real_data) are not visible in this view; restore them
            # from the original file rather than guessing:
            # _disc_cost, _ = session.run([disc_cost, disc_train_op], ...