import time

import numpy as np
import tensorflow as tf
from tensorflow.python import debug as tf_debug


def analyze():
    # Initialize session and trigger debugging mode
    session = tf.Session()
    if args.debug:
        session = tf_debug.LocalCLIDebugWrapperSession(session)

    # Load data and define placeholders
    print('Loading data from: ' + args.data_dir)
    data = np.load(args.data_dir)
    placeholders = define_placeholders(args, data.shape)

    # Create model
    model = define_model(args, data.shape, placeholders)
    model_name = "%s_%s_%s_%s_%s_tol=%s" % (
        args.data_dir[8:-10], args.model_type, str(args.hidden_dim_1),
        str(args.hidden_dim_2), str(args.hidden_dim_3), str(args.tol))
    model_path = "../models/%s.ckpt" % (model_name)
    saver = tf.train.Saver()

    with session as sess:
        saver.restore(sess, model_path)
        start_time = time.ctime(int(time.time()))
        print("Analyzing '%s'... \nStart Time: %s" % (model_name, str(start_time)))

        # Run the analysis (and, for the VAE, scoring) that matches the model type
        if args.model_type == 'VAE':
            analyze_VAE(args, placeholders, data, model, model_name, sess)
            score_VAE(args, placeholders, data, model, model_name, sess)
        elif args.model_type == 'VGAE':
            analyze_VGAE(args, placeholders, data, model, model_name, sess)
def augment():
    # Initialize session and trigger debugging mode
    session = tf.Session()
    if args.debug:
        session = tf_debug.LocalCLIDebugWrapperSession(session)

    # Load data and define placeholders
    print('Loading data from: ' + args.data_dir)
    data = np.load(args.data_dir)
    placeholders = define_placeholders(args, data.shape)

    # Create model
    model = define_model(args, data.shape, placeholders)
    model_name = "%s_%s_%s_%s_%s_tol=%s" % (
        args.data_dir[8:-10], args.model_type, str(args.hidden_dim_1),
        str(args.hidden_dim_2), str(args.hidden_dim_3), str(args.tol))
    model_path = "../models/%s.ckpt" % (model_name)
    saver = tf.train.Saver()

    with session as sess:
        saver.restore(sess, model_path)
        start_time = time.ctime(int(time.time()))
        print("Augmenting '%s'... \nStart Time: %s" % (model_name, str(start_time)))

        # Sample latent vectors from a standard normal and decode them into
        # synthetic examples, one batch at a time
        gen_all = []
        for i in range(args.num_batches):
            randoms = np.random.normal(0.0, 1.0, (args.batch_size, args.hidden_dim_3))
            [gen, gen_preds] = sess.run([model.reconstructions, model.preds],
                                        feed_dict={model.z: randoms})
            gen = np.concatenate((gen, gen_preds), axis=1)
            gen = gen.reshape(args.batch_size, -1)
            gen_all.append(gen)

        # Flatten the generated batches into a single (num_samples, input_dim) array;
        # input_dim and train_data are assumed to be defined at module level
        gen = np.array(gen_all).reshape(args.num_batches, args.batch_size, -1)
        gen = gen.reshape(-1, input_dim)
        if args.predict_type == 'Classifier':
            # Round the predicted label columns to hard 0/1 values
            # (np.round replaces the original tf.round(...).eval() without
            # touching the graph; the result is the same)
            gen[:, 16110:] = np.clip(np.round(gen[:, 16110:]), 0, 1)

        # Append the generated samples to the original training data and save
        augmented_data = np.concatenate((np.transpose(gen), train_data), axis=1)
        print(augmented_data.shape)
        print('Saving augmented dataset at ../data/%s_augmented_train.npy' % (model_name))
        np.save('../data/%s_augmented_train.npy' % (model_name), augmented_data)
def main():
    # Initialize session and trigger debugging mode
    session = tf.Session()
    if args.debug:
        session = tf_debug.LocalCLIDebugWrapperSession(session)

    # Load data and define placeholders
    print('Loading test data from: ' + args.test_data_dir)
    data = np.load(args.test_data_dir)
    placeholders = define_placeholders(args, data.shape)

    # Create model
    model = define_model(args, data.shape, placeholders)
    model_name = "%s_%s_%s_%s_%s" % (
        args.data_dir[8:-10], args.model_type, str(args.hidden_dim_1),
        str(args.hidden_dim_2), str(args.hidden_dim_3))
    model_path = "../models/%s.ckpt" % (model_name)
    saver = tf.train.Saver()

    with session as sess:
        print('Scoring %s' % (model_name))
        saver.restore(sess, model_path)
        start, accuracy, i = 0, 0.0, 0

        # Get the average score on the test set, one batch at a time:
        # classification accuracy for the classifier, mean squared error otherwise
        while start + args.batch_size <= data.shape[1]:
            batch, labels = get_consecutive_batch(start, args.batch_size, data)
            feed_dict = {placeholders['inputs']: batch}
            outs = sess.run([model.preds], feed_dict=feed_dict)
            if args.model_type == 'Classifier':
                # Sigmoid + round in NumPy (same result as the original
                # tf.nn.sigmoid / tf.round, but without adding ops to the
                # graph on every iteration)
                preds = 1.0 / (1.0 + np.exp(-outs[0]))
                correct_pred = np.equal(np.round(preds), labels)
                print(correct_pred)
                accuracy += np.mean(correct_pred.astype(np.float32))
            else:
                # Mean squared reconstruction error on this batch
                accuracy += np.mean(np.square(outs[0] - labels))
            start += args.batch_size
            i += 1

        print("total score:", accuracy / i)
        with open('./scores/%s.txt' % (model_name), 'w') as f:
            f.write("Score on %s: %f" % (args.test_data_dir, accuracy / i))
def main():
    # Initialize session and trigger debugging mode
    session = tf.Session()
    if args.debug:
        session = tf_debug.LocalCLIDebugWrapperSession(session)

    # Load data and define placeholders
    print('Loading data from: ' + args.data_dir)
    data = np.load(args.data_dir)
    placeholders = define_placeholders(args, data.shape)

    # Create model and optimizer
    model = define_model(args, data.shape, placeholders)
    opt = define_optimizer(args, model, data.shape, placeholders)
    model_name = "%s_%s_%s_%s_%s_tol=%s" % (
        args.data_dir[8:-10], args.model_type, str(args.hidden_dim_1),
        str(args.hidden_dim_2), str(args.hidden_dim_3), str(args.tol))
    model_path = "../models/%s.ckpt" % (model_name)
    saver = tf.train.Saver()

    with session as sess:
        sess.run(tf.global_variables_initializer())
        if args.restore:
            print("Restoring model from: ", model_path)
            saver.restore(sess, model_path)
        start_time = time.ctime(int(time.time()))
        print("Starting to train '%s'... \nStart Time: %s" % (model_name, str(start_time)))

        if args.model_type == 'VAE':
            train_VAE(model_path, data, session, saver, placeholders, model, opt, args)
        elif args.model_type == 'VGAE':
            train_VGAE(model_path, data, session, saver, placeholders, model, opt, args)

        print("Training Complete. Model name: %s" % (model_name))
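
# The functions above read their configuration from a module-level `args`
# object. A minimal sketch of how that object could be built with argparse is
# given below. Only the flag names are taken from the code above; the types,
# choices, and help strings are assumptions, not the repository's actual parser.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, help='Path to the .npy training data')
parser.add_argument('--test_data_dir', type=str, help='Path to the .npy test data')
parser.add_argument('--model_type', type=str, choices=['VAE', 'VGAE', 'Classifier'])
parser.add_argument('--predict_type', type=str)
parser.add_argument('--hidden_dim_1', type=int)
parser.add_argument('--hidden_dim_2', type=int)
parser.add_argument('--hidden_dim_3', type=int)
parser.add_argument('--tol', type=float)
parser.add_argument('--batch_size', type=int)
parser.add_argument('--num_batches', type=int)
parser.add_argument('--debug', action='store_true')
parser.add_argument('--restore', action='store_true')
args = parser.parse_args()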