# NOTE(review): whitespace-mangled paste — an entire Python 2 training-script
# fragment (checkpoint restore via saver.restore, data load via
# data_ops.load_zoo, and the start of an epoch loop) collapsed onto ONE
# physical line. As written this line is NOT valid Python: the statements are
# fused together (`ckpt = ... if ckpt and ...:` is a syntax error) and the
# first inline '#' would comment out everything after it anyway. The fragment
# is also truncated mid-loop — the `while` body ends at the comment
# "train the discriminator n times" with the rest of the body out of view.
# Recover the original line breaks from source control before running or
# editing this code; do not attempt to reconstruct the missing loop body here.
ckpt = tf.train.get_checkpoint_state(CHECKPOINT_DIR) if ckpt and ckpt.model_checkpoint_path: print "Restoring previous model..." try: saver.restore(sess, ckpt.model_checkpoint_path) print "Model restored" except: print "Could not restore model" pass ########################################### training portion step = sess.run(global_step) print 'Loading data...' train_paths, train_annots, train_ids, test_paths, test_annots, test_ids = data_ops.load_zoo( DATA_DIR, hot=HOT) print 'Done' train_len = len(train_paths) test_len = len(test_paths) print 'train num:', train_len epoch_num = step / (train_len / BATCH_SIZE) while epoch_num < EPOCHS + 1: epoch_num = step / (train_len / BATCH_SIZE) start = time.time() # train the discriminator n times
# Interpolation setup: restore a trained model, load the zoo dataset, then pick
# two test annotations and two random z vectors to interpolate between.
# (Reconstructed from a paste that had collapsed this whole fragment onto one
# physical line, where everything after the first inline '#' was dead comment
# and only `sess.run(init)` actually executed.)
sess.run(init)

# restore previous model if there is one
ckpt = tf.train.get_checkpoint_state(CHECKPOINT_DIR)
if ckpt and ckpt.model_checkpoint_path:
    print("Restoring previous model...")
    try:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored")
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # not swallowed. Re-raise so a failed restore aborts loudly; the
        # original's `exit()` after `raise` was unreachable and is removed.
        print("Could not restore model")
        raise

print('Loading data...')
train_images, train_annots, train_ids, test_images, test_annots, test_ids = data_ops.load_zoo(
    DATA_DIR, HOT)
test_len = len(test_ids)
print('Done\n')

'''
Approach: Going to pick two z vectors and interpolate between them, then
set the second vector as the 'start' and pick a random second vector for
the end, and repeat. For each I'll use the same random attribute
'''
# Two distinct test annotations chosen at random (replace=False guarantees
# the two indices differ).
idx = np.random.choice(np.arange(test_len), 2, replace=False)
batch_y = test_annots[idx]

# the two z vectors to interpolate between
two_z = np.random.normal(-1.0, 1.0, size=[2, 100]).astype(np.float32)
# NOTE(review): whitespace-mangled paste — a Python 2 generation-script
# fragment (checkpoint restore, data_ops.load_zoo load, and a generation loop
# drawing random z/y batches) collapsed onto ONE physical line. Because it is
# one line, everything after the first inline '#' ("# restore previous model
# if there is one") is a comment, so the only statement that executes here is
# `sess.run(init)`. The fragment is also truncated mid-expression: the final
# `sess.run([gen_images], feed_dict={` never closes its brace in view.
# Recover the original line breaks from source control before running or
# editing this code; do not guess at the missing feed_dict contents here.
sess.run(init) # restore previous model if there is one ckpt = tf.train.get_checkpoint_state(CHECKPOINT_DIR) if ckpt and ckpt.model_checkpoint_path: print "Restoring previous model..." try: saver.restore(sess, ckpt.model_checkpoint_path) print "Model restored" except: print "Could not restore model" raise exit() print 'Loading data...' train_images, train_annots, train_ids, test_images, test_annots, test_ids = data_ops.load_zoo( DATA_DIR, SIZE, crop=CROP) test_len = len(test_annots) num_gen = 0 gen_z = [] # z vectors used to generate images gen_y = [] # y vectors used to generate images print 'generating data...' while num_gen < MAX_GEN: batch_z = np.random.normal(-1.0, 1.0, size=[BATCH_SIZE, 100]).astype(np.float32) idx = np.random.choice(np.arange(test_len), BATCH_SIZE, replace=False) batch_y = test_annots[idx] gen_imgs = sess.run([gen_images], feed_dict={