# Imports sketched from how the code below uses them; the module layout
# (constants as `c`, a utils module holding the batch helpers, and the
# g_model/d_model classes) is an assumption, not taken from this file.
import os
from glob import glob

import tensorflow as tf

import constants as c
from utils import get_train_batch, get_test_batch_re_id
from g_model import GeneratorModel
from d_model import DiscriminatorModel


class AVGRunner:
    def __init__(self, num_steps, model_load_path, num_test_rec):
        """
        Initializes the Adversarial Video Generation Runner.

        @param num_steps: The number of training steps to run.
        @param model_load_path: The path from which to load a previously-saved model.
                                Default = None.
        @param num_test_rec: The number of recursive generations to produce when testing.
                             Recursive generations use previous generations as input to
                             predict further into the future.
        """
        self.global_step = 0
        self.num_steps = num_steps
        self.num_test_rec = num_test_rec

        # let GPU memory allocation grow as needed instead of grabbing it all up front
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)

        self.summary_writer = tf.summary.FileWriter(c.SUMMARY_SAVE_DIR, graph=self.sess.graph)

        if c.ADVERSARIAL:
            print 'Init discriminator...'
            self.d_model = DiscriminatorModel(self.sess,
                                              self.summary_writer,
                                              c.TRAIN_HEIGHT,
                                              c.TRAIN_WIDTH,
                                              c.SCALE_CONV_FMS_D,
                                              c.SCALE_KERNEL_SIZES_D,
                                              c.SCALE_FC_LAYER_SIZES_D)

        print 'Init generator...'
        self.g_model = GeneratorModel(self.sess,
                                      self.summary_writer,
                                      c.TRAIN_HEIGHT,
                                      c.TRAIN_WIDTH,
                                      c.FULL_HEIGHT,
                                      c.FULL_WIDTH,
                                      c.SCALE_FMS_G,
                                      c.SCALE_KERNEL_SIZES_G)

        print 'Init variables...'
        self.saver = tf.train.Saver(keep_checkpoint_every_n_hours=2)
        self.sess.run(tf.global_variables_initializer())

        # if load path specified, load a saved model
        if model_load_path is not None:
            self.saver.restore(self.sess, model_load_path)
            print 'Model restored from ' + model_load_path

    def train(self):
        """
        Runs a training loop on the model networks.
        """
        for i in xrange(self.num_steps):
            if c.ADVERSARIAL:
                # update discriminator
                batch = get_train_batch()
                print 'Training discriminator...'
                self.d_model.train_step(batch, self.g_model)

            # update generator
            batch = get_train_batch()
            print 'Training generator...'
            self.global_step = self.g_model.train_step(
                batch, discriminator=(self.d_model if c.ADVERSARIAL else None))

            # save the models
            if self.global_step % c.MODEL_SAVE_FREQ == 0:
                print '-' * 30
                print 'Saving models...'
                self.saver.save(self.sess,
                                c.MODEL_SAVE_DIR + 'model.ckpt',
                                global_step=self.global_step)
                print 'Saved models!'
                print '-' * 30

            # test generator model
            if self.global_step % c.TEST_FREQ == 0:
                self.test()

    def test(self):
        """
        Runs one test step on the generator network.
        """
        dirs = sorted(glob(os.path.join(c.TEST_DIR, '*')))
        # ceil-divide so a partial final batch is processed without producing an
        # empty trailing batch when len(dirs) is an exact multiple of BATCH_SIZE
        num_batches = (len(dirs) + c.BATCH_SIZE - 1) // c.BATCH_SIZE
        for num in range(num_batches):
            ep_dirs = dirs[num * c.BATCH_SIZE:(num + 1) * c.BATCH_SIZE]
            batch = get_test_batch_re_id(c.BATCH_SIZE, ep_dirs, num_rec_out=self.num_test_rec)
            self.g_model.test_batch(batch,
                                    ep_dirs,
                                    self.global_step,
                                    num,
                                    num_rec_out=self.num_test_rec)
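# A minimal sketch of how the runner above might be driven from the command
# line. The flag names (--steps, --load_path, --recursions, --test_only) are
# illustrative assumptions, not the project's actual CLI; only AVGRunner's
# constructor signature is taken from the code above.
import argparse


def main():
    parser = argparse.ArgumentParser(description='Adversarial Video Generation runner (sketch)')
    parser.add_argument('--steps', type=int, default=1000001,
                        help='number of training steps to run')
    parser.add_argument('--load_path', type=str, default=None,
                        help='checkpoint to restore before training/testing')
    parser.add_argument('--recursions', type=int, default=1,
                        help='number of recursive generations at test time')
    parser.add_argument('--test_only', action='store_true',
                        help='run a single test pass instead of training')
    args = parser.parse_args()

    runner = AVGRunner(args.steps, args.load_path, args.recursions)
    if args.test_only:
        runner.test()
    else:
        runner.train()


if __name__ == '__main__':
    main()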
class AVGRunner:
    def __init__(self, num_steps, model_load_path, num_test_rec):
        """
        Initializes the Adversarial Video Generation Runner.

        @param num_steps: The number of training steps to run.
        @param model_load_path: The path from which to load a previously-saved model.
                                Default = None.
        @param num_test_rec: The number of recursive generations to produce when testing.
                             Recursive generations use previous generations as input to
                             predict further into the future.
        """
        self.global_step = 0
        self.num_steps = num_steps
        self.num_test_rec = num_test_rec

        self.sess = tf.Session()
        self.summary_writer = tf.summary.FileWriter(c.SUMMARY_SAVE_DIR, graph=self.sess.graph)

        if c.ADVERSARIAL:
            print('Init discriminator...')
            self.d_model = DiscriminatorModel(self.sess,
                                              self.summary_writer,
                                              c.TRAIN_HEIGHT,
                                              c.TRAIN_WIDTH,
                                              c.SCALE_CONV_FMS_D,
                                              c.SCALE_KERNEL_SIZES_D,
                                              c.SCALE_FC_LAYER_SIZES_D)

        print('Init generator...')
        self.g_model = GeneratorModel(self.sess,
                                      self.summary_writer,
                                      c.TRAIN_HEIGHT,
                                      c.TRAIN_WIDTH,
                                      c.FULL_HEIGHT,
                                      c.FULL_WIDTH,
                                      c.SCALE_FMS_G,
                                      c.SCALE_KERNEL_SIZES_G)

        print('Init variables...')
        self.saver = tf.train.Saver(keep_checkpoint_every_n_hours=2)
        self.sess.run(tf.global_variables_initializer())

        # if load path specified, load a saved model
        if model_load_path is not None:
            self.saver.restore(self.sess, model_load_path)
            print('Model restored from ' + model_load_path)

    def train(self):
        """
        Runs a training loop on the model networks.
        """
        for i in range(self.num_steps):
            if c.ADVERSARIAL:
                # update discriminator
                batch = get_train_batch()
                print('Training discriminator...')
                self.d_model.train_step(batch, self.g_model)

            # update generator
            batch = get_train_batch()
            print('Training generator...')
            self.global_step = self.g_model.train_step(
                batch, discriminator=(self.d_model if c.ADVERSARIAL else None))

            # save the models
            if self.global_step % c.MODEL_SAVE_FREQ == 0:
                print('-' * 30)
                print('Saving models...')
                self.saver.save(self.sess,
                                c.MODEL_SAVE_DIR + 'model.ckpt',
                                global_step=self.global_step)
                print('Saved models!')
                print('-' * 30)

            # test generator model
            if self.global_step % c.TEST_FREQ == 0:
                self.test()

    def test(self):
        """
        Runs one test step on the generator network.
        """
        batch = get_test_batch(c.BATCH_SIZE, num_rec_out=self.num_test_rec)
        self.g_model.test_batch(batch,
                                self.global_step,
                                num_rec_out=self.num_test_rec)
class AVGRunner:
    def __init__(self, num_steps, model_load_path, num_test_rec):
        """
        Initializes the Adversarial Video Generation Runner.

        @param num_steps: The number of training steps to run.
        @param model_load_path: The path from which to load a previously-saved model.
                                Default = None.
        @param num_test_rec: The number of recursive generations to produce when testing.
                             Recursive generations use previous generations as input to
                             predict further into the future.
        """
        self.global_step = 0
        self.num_steps = num_steps
        self.num_test_rec = num_test_rec

        self.sess = tf.Session()
        self.summary_writer = tf.summary.FileWriter(c.SUMMARY_SAVE_DIR, graph=self.sess.graph)

        if c.ADVERSARIAL:
            print 'Init discriminator...'
            self.d_model = DiscriminatorModel(self.sess,
                                              self.summary_writer,
                                              c.TRAIN_HEIGHT,
                                              c.TRAIN_WIDTH,
                                              c.SCALE_CONV_FMS_D,
                                              c.SCALE_KERNEL_SIZES_D,
                                              c.SCALE_FC_LAYER_SIZES_D)

        print 'Init generator...'
        self.g_model = GeneratorModel(self.sess,
                                      self.summary_writer,
                                      c.TRAIN_HEIGHT,
                                      c.TRAIN_WIDTH,
                                      c.FULL_HEIGHT,
                                      c.FULL_WIDTH,
                                      c.SCALE_FMS_G,
                                      c.SCALE_KERNEL_SIZES_G)

        print 'Init variables...'
        self.saver = tf.train.Saver(keep_checkpoint_every_n_hours=2)
        self.sess.run(tf.global_variables_initializer())

        # if load path specified, load a saved model
        if model_load_path is not None:
            self.saver.restore(self.sess, model_load_path)
            print 'Model restored from ' + model_load_path

    def train(self):
        """
        Runs a training loop on the model networks.
        """
        # Threshold for the windowed loss values returned by g_model.train_step:
        # the generator is retrained on the same batch while the window's mean
        # loss exceeds delta / WINDOW_SIZE.
        delta = 45.6
        adv_windowed_list = []

        for i in xrange(self.num_steps):
            if c.ADVERSARIAL:
                # update discriminator
                batch = get_train_batch()
                print 'Training discriminator...'
                self.d_model.train_step(batch, self.g_model)

            # update generator
            batch = get_train_batch()
            print 'Training generator...'

            # keep at most WINDOW_SIZE recent loss values in the window
            if len(adv_windowed_list) > (c.WINDOW_SIZE - 1):
                adv_windowed_list.pop(0)

            # first step: the window is empty, so run one generator step to seed it
            if not adv_windowed_list:
                self.global_step, adv_windowed_list = self.g_model.train_step(
                    batch, adv_windowed_list,
                    discriminator=(self.d_model if c.ADVERSARIAL else None))

            start = True
            # if the windowed mean is already at or below the threshold, take a single step
            if sum(adv_windowed_list) / len(adv_windowed_list) <= delta / c.WINDOW_SIZE:
                self.global_step, adv_windowed_list = self.g_model.train_step(
                    batch, adv_windowed_list,
                    discriminator=(self.d_model if c.ADVERSARIAL else None))

            # otherwise, keep retraining the generator on the same batch (at most
            # 10 retries) until the windowed mean drops back below the threshold
            counter = 0
            while sum(adv_windowed_list) / len(adv_windowed_list) > delta / c.WINDOW_SIZE:
                print sum(adv_windowed_list) / len(adv_windowed_list), delta / c.WINDOW_SIZE
                if not start:
                    # drop the loss recorded by the previous retry before retrying again
                    adv_windowed_list.pop(len(adv_windowed_list) - 1)
                self.global_step, adv_windowed_list = self.g_model.train_step(
                    batch, adv_windowed_list,
                    discriminator=(self.d_model if c.ADVERSARIAL else None))
                counter += 1
                start = False
                print counter
                if counter >= 10:
                    break
            print 'Out of loop'

            # save the models
            if self.global_step % c.MODEL_SAVE_FREQ == 0:
                print '-' * 30
                print 'Saving models...'
                self.saver.save(self.sess,
                                c.MODEL_SAVE_DIR + 'model.ckpt',
                                global_step=self.global_step)
                print 'Saved models!'
                print '-' * 30

            # test generator model
            if self.global_step % c.TEST_FREQ == 0:
                self.test()

        # save and test one final time after the training loop finishes,
        # regardless of the save/test frequencies
        print '-' * 30
        print 'Saving models...'
        self.saver.save(self.sess,
                        c.MODEL_SAVE_DIR + 'model.ckpt',
                        global_step=self.global_step)
        print 'Saved models!'
        print '-' * 30
        self.test()

    def test(self):
        """
        Runs one test step on the generator network.
        """
        batch = get_test_batch(c.BATCH_SIZE, num_rec_out=self.num_test_rec)
        self.g_model.test_batch(batch,
                                self.global_step,
                                num_rec_out=self.num_test_rec)
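# The retry logic in the variant above can be hard to follow inline. Below is a
# small, self-contained re-statement of the same idea, assuming a hypothetical
# helper run_generator_step() that returns (global_step, updated_window) just
# like g_model.train_step does above; window_size and delta mirror
# c.WINDOW_SIZE and the hard-coded 45.6.
def gated_generator_update(run_generator_step, window, window_size, delta, max_retries=10):
    """Run one generator step, then retry on the same batch while the windowed
    mean loss stays above delta / window_size (at most max_retries times)."""
    # keep the window at most window_size entries long
    if len(window) >= window_size:
        window.pop(0)

    # always take at least one step so the window is never empty
    step, window = run_generator_step(window)

    retries = 0
    while sum(window) / len(window) > delta / window_size and retries < max_retries:
        # drop the loss just recorded and try the same batch again
        window.pop()
        step, window = run_generator_step(window)
        retries += 1
    return step, window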
class AVGRunner:
    def __init__(self, num_steps, model_load_path):
        """
        Initializes the Adversarial Video Generation Runner.

        @param num_steps: The number of training steps to run.
        @param model_load_path: The path from which to load a previously-saved model.
                                Default = None.
        """
        self.global_step = 0
        self.num_steps = num_steps

        self.sess = tf.Session()
        self.summary_writer = tf.summary.FileWriter(c.SUMMARY_SAVE_DIR, graph=self.sess.graph)

        if c.ADVERSARIAL:
            print('Init discriminator...')
            self.d_model = DiscriminatorModel(self.sess,
                                              self.summary_writer,
                                              c.TRAIN_HEIGHT,
                                              c.TRAIN_WIDTH,
                                              c.SCALE_CONV_FMS_D,
                                              c.SCALE_KERNEL_SIZES_D,
                                              c.SCALE_FC_LAYER_SIZES_D)

        print('Init generator...')
        self.g_model = GeneratorModel(self.sess,
                                      self.summary_writer,
                                      c.TRAIN_HEIGHT,
                                      c.TRAIN_WIDTH,
                                      c.FULL_HEIGHT,
                                      c.FULL_WIDTH,
                                      c.SCALE_FMS_G,
                                      c.SCALE_KERNEL_SIZES_G)

        print('Init variables...')
        self.summary_writer.add_graph(self.sess.graph)
        self.saver = tf.train.Saver(keep_checkpoint_every_n_hours=2)
        self.sess.run(tf.global_variables_initializer())

        # if load path specified, load a saved model
        if model_load_path is not None:
            self.saver.restore(self.sess, model_load_path)
            print('Model restored from ' + model_load_path)

    def train(self):
        """
        Runs a training loop on the model networks.
        """
        np.random.shuffle(c.TEST_EXAMPLES)
        np.random.shuffle(c.TRAIN_EXAMPLES)

        examples_count = 0
        num_epoch = 0
        print('EPOCH - ' + str(num_epoch))

        for i in range(self.num_steps):
            if c.ADVERSARIAL:
                # update discriminator
                batch = get_train_batch(examples_count)
                self.d_model.train_step(batch, self.g_model)

            # update generator
            batch = get_train_batch(examples_count)
            examples_count += c.BATCH_SIZE
            self.global_step = self.g_model.train_step(
                batch, discriminator=(self.d_model if c.ADVERSARIAL else None))

            # once every training clip has been seen, reshuffle and run a full
            # test pass ("epoch" boundary)
            if examples_count >= c.NUM_CLIPS:
                np.random.shuffle(c.TRAIN_EXAMPLES)
                examples_count = 0
                self.test(c.TEST_BATCH_SIZE, full=True)
                num_epoch += 1
                print('EPOCH - ' + str(num_epoch))

            # save the models
            if self.global_step % c.MODEL_SAVE_FREQ == 0:
                print('-' * 30)
                print('Saving models...')
                self.saver.save(self.sess,
                                c.MODEL_SAVE_DIR + 'model.ckpt',
                                global_step=self.global_step)
                print('Saved models!')
                print('-' * 30)

    def test(self, bsize=c.BATCH_SIZE, full=False):
        """
        Runs one test step on the generator network.
        """
        batch = np.empty(
            [bsize, c.FULL_HEIGHT, c.FULL_WIDTH, (3 * (c.HIST_LEN + 1))],
            dtype=np.float32)

        if full:
            # cache every test clip in memory (can be very memory hungry)
            if c.TEST_CLIPS_FULL.size == 0:
                c.TEST_CLIPS_FULL = np.empty(
                    [c.NUM_TEST_CLIPS, c.FULL_HEIGHT, c.FULL_WIDTH, (3 * (c.HIST_LEN + 1))],
                    dtype=np.float32)
                for i in range(c.NUM_TEST_CLIPS):
                    path = c.TEST_EXAMPLES[i]
                    clip = np.load(path)['arr_0']
                    c.TEST_CLIPS_FULL[i] = clip
            # take a random contiguous slice of the cached clips as the batch
            offset = np.random.choice(np.arange(c.NUM_TEST_CLIPS - bsize))
            batch = c.TEST_CLIPS_FULL[offset:(offset + bsize), :, :, :]
        else:
            # load a random contiguous run of test clips from disk
            offset = np.random.choice(np.arange(c.NUM_TEST_CLIPS - bsize))
            for i in range(bsize):
                path = c.TEST_EXAMPLES[offset + i]
                clip = np.load(path)['arr_0']
                batch[i] = clip

        self.g_model.test_batch(batch, self.global_step)
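# The test() above reads clips with np.load(path)['arr_0'], i.e. .npz archives
# saved with a single unnamed array. A minimal sketch of producing such a file,
# assuming a clip is a float32 array of shape
# [FULL_HEIGHT, FULL_WIDTH, 3 * (HIST_LEN + 1)] as in the code above; the
# height/width/history values here are made-up placeholders.
import numpy as np

FULL_HEIGHT, FULL_WIDTH, HIST_LEN = 210, 160, 4  # placeholder values

clip = np.zeros([FULL_HEIGHT, FULL_WIDTH, 3 * (HIST_LEN + 1)], dtype=np.float32)

# np.savez stores positional arrays under 'arr_0', 'arr_1', ..., which is why
# the loader above indexes with ['arr_0']
np.savez('clip_000000.npz', clip)

# round-trip check
loaded = np.load('clip_000000.npz')['arr_0']
assert loaded.shape == clip.shape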
class AVGRunner:
    def __init__(self, num_steps, model_load_path, num_test_rec):
        """
        Initializes the Adversarial Video Generation Runner.

        @param num_steps: The number of training steps to run.
        @param model_load_path: The path from which to load a previously-saved model.
                                Default = None.
        @param num_test_rec: The number of recursive generations to produce when testing.
                             Recursive generations use previous generations as input to
                             predict further into the future.
        """
        self.global_step = 0
        self.num_steps = num_steps
        self.num_test_rec = num_test_rec

        self.sess = tf.Session()
        self.summary_writer = tf.summary.FileWriter(c.SUMMARY_SAVE_DIR, graph=self.sess.graph)

        if c.ADVERSARIAL:
            print 'Init discriminator...'
            self.d_model = DiscriminatorModel(self.sess,
                                              self.summary_writer,
                                              c.TRAIN_HEIGHT,
                                              c.TRAIN_WIDTH,
                                              c.SCALE_CONV_FMS_D,
                                              c.SCALE_KERNEL_SIZES_D,
                                              c.SCALE_FC_LAYER_SIZES_D)

        print 'Init generator...'
        self.g_model = GeneratorModel(self.sess,
                                      self.summary_writer,
                                      c.TRAIN_HEIGHT,
                                      c.TRAIN_WIDTH,
                                      c.FULL_HEIGHT,
                                      c.FULL_WIDTH,
                                      c.SCALE_FMS_G,
                                      c.SCALE_KERNEL_SIZES_G)

        print 'Init variables...'
        self.saver = tf.train.Saver(keep_checkpoint_every_n_hours=2)
        self.sess.run(tf.global_variables_initializer())

        # if load path specified, load a saved model
        if model_load_path is not None:
            print model_load_path
            # keep the checkpoint file name for test logging
            self.model_name = model_load_path.split("/")[-1]
            print self.model_name
            self.saver.restore(self.sess, model_load_path)
            print 'Model restored from ' + model_load_path

    def train(self):
        """
        Runs a training loop on the model networks.
        """
        for i in xrange(self.num_steps):
            if c.ADVERSARIAL:
                # update discriminator
                batch, blurs = get_recursive_train_batch()
                if check_nan_inf(batch):
                    sys.exit()
                print 'Training discriminator...'
                self.d_model.train_step(batch, blurs, self.g_model)

            # update generator
            batch, blurs = get_recursive_train_batch()
            if check_nan_inf(batch):
                sys.exit()
            print 'Training generator...'
            self.global_step = self.g_model.train_step(
                batch, blurs,
                discriminator=(self.d_model if c.ADVERSARIAL else None))

            # save the models
            if self.global_step % c.MODEL_SAVE_FREQ == 0:
                print '-' * 30
                print 'Saving models...'
                self.saver.save(self.sess,
                                c.MODEL_SAVE_DIR + 'model.ckpt',
                                global_step=self.global_step)
                print 'Saved models!'
                print '-' * 30

            # test generator model
            if self.global_step % c.TEST_FREQ == 0:
                self.test()

    def test(self):
        """
        Runs one test step on the generator network.
        """
        batch = get_recursive_test_batch(c.BATCH_SIZE, num_rec_out=self.num_test_rec)
        self.g_model.test_batch(batch,
                                self.global_step,
                                num_rec_out=self.num_test_rec)

    def test_original(self):
        """
        Evaluates the generator over the full original test set and logs the
        mean PSNR and sharpness-difference scores for the loaded model.
        """
        test_dir_len = len(os.listdir(c.TEST_ORIGINAL_DIR))
        print test_dir_len
        model_no = int(self.model_name.split("-")[-1])

        psnr_list, sharpdiff_list = [], []
        for i in range(0, test_dir_len, c.BATCH_SIZE):
            if i + c.BATCH_SIZE < test_dir_len:
                print i, i + c.BATCH_SIZE
                batch = get_original_test_batch(c.TEST_ORIGINAL_DIR,
                                                c.BATCH_SIZE,
                                                i,
                                                i + c.BATCH_SIZE)
                psnr, sharpdiff = self.g_model.test_batch(
                    batch,
                    self.global_step,
                    num_rec_out=self.num_test_rec,
                    save_imgs=False,
                    test_only=True,
                    model_no=model_no,
                    start=i)
                print psnr, sharpdiff
                psnr_list.append(psnr)
                sharpdiff_list.append(sharpdiff)

        psnr_mean = np.mean(np.array(psnr_list))
        sharpdiff_mean = np.mean(np.array(sharpdiff_list))
        print psnr_mean, sharpdiff_mean

        # append this model's scores to the results file
        with open("testing.txt", 'a') as f:
            f.write(str(self.model_name) + "\t" +
                    str(psnr_mean) + "\t" +
                    str(sharpdiff_mean) + "\n")