def px(pae_dict):
    """Restore the pretrained P(x) autoencoder and log clustering metrics.

    Args:
        pae_dict: dict of graph tensors ('conv_in'/'conv_z' for the
            convolutional model, 'sda_in'/'sda_hidden' for the stacked AE).

    Relies on module-level globals: tf, cp, utils, pxfinestr, XX_full,
    YY_full, batch_size, SEED.
    """
    ckpt_saver = tf.train.Saver()
    # P(x) Session
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    with tf.Session(config=sess_config) as sess:
        # Restore pretrained model (checkpoint path without the .meta suffix)
        ckpt_saver.restore(sess, pxfinestr.split('.meta')[0])
        # Pick the input placeholder / latent tensor for the model flavour
        if cp.get('Experiment', 'PREFIX') == 'MNIST':
            in_key, z_key = 'conv_in', 'conv_z'
        else:
            in_key, z_key = 'sda_in', 'sda_hidden'
        # Hidden-layer results for pipeline evaluation (batched, out-of-memory safe)
        latent = utils.run_OOM(sess, pae_dict[in_key], XX_full,
                               pae_dict[z_key], batch_size=batch_size)
        # Clustering quality on the full dataset: ACC, NMI, CHS
        utils.log_accuracy(cp, YY_full, latent, 'PX - ACC FULL', SEED)
        utils.log_NMI(cp, YY_full, latent, 'PX - NMI FULL', SEED)
        utils.log_CHS(cp, XX_full, latent, 'PX - CHS FULL', SEED)
        sess.close()
def evitram(evitramd):
    """Restore the conditioned (EviTRAM) model, save its latent space and
    reconstructions, and log clustering metrics on the full dataset.

    Args:
        evitramd: dict of graph tensors for the conditioned model
            ('conv_in'/'conv_z'/'conv_out' for the convolutional flavour,
            'sda_in'/'sda_hidden' for the stacked autoencoder flavour).

    Relies on module-level globals: tf, cp, utils, evitramfinestr, XX_full,
    YY_full, batch_size, SEED.
    """
    saver = tf.train.Saver()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # Restore pretrained model (checkpoint path without the .meta suffix)
        saver.restore(sess, evitramfinestr.split('.meta')[0])
        # BUG FIX: the body previously read the global `evitram_dict` and
        # ignored the `evitramd` parameter it was handed.
        if cp.get('Experiment', 'PREFIX') == 'MNIST':
            # Hidden-layer results for pipeline evaluation
            px_Z_latent = utils.run_OOM(sess, evitramd['conv_in'], XX_full,
                                        evitramd['conv_z'],
                                        batch_size=batch_size)
            # Save latent space
            utils.save_OOM(sess, evitramd['conv_in'], XX_full,
                           evitramd['conv_z'],
                           path='COND_' + cp.get('Experiment', 'PX_Z_FULL'),
                           batch_size=batch_size)
            # Save reconstruction
            utils.save_OOM(sess, evitramd['conv_in'], XX_full,
                           evitramd['conv_out'],
                           path='COND_' + cp.get('Experiment', 'PX_XREC_TRAIN'),
                           batch_size=batch_size)
        else:
            px_Z_latent = utils.run_OOM(sess, evitramd['sda_in'], XX_full,
                                        evitramd['sda_hidden'],
                                        batch_size=batch_size)
            # Save latent space
            utils.save_OOM(sess, evitramd['sda_in'], XX_full,
                           evitramd['sda_hidden'],
                           path='COND_' + cp.get('Experiment', 'PX_Z_FULL'),
                           batch_size=batch_size)
        # Print clustering ACC
        utils.log_accuracy(cp, YY_full, px_Z_latent, 'COND - ACC FULL', SEED)
        # Print clustering NMI
        utils.log_NMI(cp, YY_full, px_Z_latent, 'COND - NMI FULL', SEED)
        # Print clustering CHS score
        utils.log_CHS(cp, XX_full, px_Z_latent, 'COND - CHS FULL', SEED)
        sess.close()
def px(pae_dict):
    """Train the P(x) autoencoder and log clustering metrics on its latent space.

    Args:
        pae_dict: dict of graph tensors ('conv_in'/'conv_z'/'px_mse' for the
            convolutional model, 'sda_in'/'sda_hidden' for the stacked AE).

    Relies on module-level globals: tf, sys, cp, utils, ConvAE, SAE,
    pxfinestr, XX_full, YY_full, SEED, and (non-MNIST only) ae_ids, out_.
    """
    # Initialize model save string (checkpoint path without the .meta suffix)
    pxmodelstr = pxfinestr.split('.meta')[0]
    # Variable initilization and saving
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    if cp.get('Experiment', 'PREFIX') == 'MNIST':
        # Tensorboard (comment / uncomment)
        ######################################################################
        from datetime import datetime
        now = datetime.utcnow().strftime("%m-%d_%H-%M:%S")
        root_logdir = cp.get('Experiment', 'ModelOutputPath')
        # Run-unique log directory: <out>/<PREFIX>_<Enumber>_px<argv2>-<time>/
        logdir = "{}/{}{}-{}/".format(
            root_logdir,
            cp.get('Experiment', 'PREFIX') + '_' +
            cp.get('Experiment', 'Enumber') + '_px',
            sys.argv[2], now)
        tf.summary.scalar(name='xrecon loss', tensor=pae_dict['px_mse'])
        summary = tf.summary.merge_all()
        file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())
        ######################################################################
    # Start Session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # Initialize graph variables
        init.run()
        # NOTE(review): `summary` and `file_writer` are only bound in the
        # MNIST branch above; the MNIST train_dict below depends on that.
        if cp.get('Experiment', 'PREFIX') == 'MNIST':
            train_dict = {
                'cp': cp,
                'sess': sess,
                'data': XX_full,
                'sumr': summary,
                'savestr': pxmodelstr,
                'saver': saver,
                'fw': file_writer
            }
        else:
            train_dict = {
                'cp': cp,
                'sess': sess,
                'data': XX_full,
                'savestr': pxmodelstr,
                'saver': saver,
                'ae_ids': ae_ids,
                'out_': out_
            }
        # Dispatch to the matching trainer for the model flavour
        if cp.get('Experiment', 'PREFIX') == 'MNIST':
            ConvAE.train(train_dict, pae_dict)
        else:
            SAE.train(train_dict, pae_dict)
        # Get batch size for batch output save
        batch_size = cp.getint('Hyperparameters', 'BatchSize')
        if cp.get('Experiment', 'PREFIX') == 'MNIST':
            # Save hidden/output layer results for pipeline training
            px_Z_latent = utils.run_OOM(sess, pae_dict['conv_in'], XX_full,
                                        pae_dict['conv_z'],
                                        batch_size=batch_size)
        else:
            # Save hidden/output layer results for pipeline training
            px_Z_latent = utils.run_OOM(sess, pae_dict['sda_in'], XX_full,
                                        pae_dict['sda_hidden'],
                                        batch_size=batch_size)
        # utils.save_OOM(sess, pae_dict['conv_in'], XX_full,
        #                pae_dict['conv_out'],
        #                path=cp.get('Experiment', 'PX_XREC_TRAIN'),
        #                batch_size=batch_size)
        # Print clustering ACC
        utils.log_accuracy(cp, YY_full, px_Z_latent, 'PX - ACC FULL', SEED)
        # Print clustering NMI
        utils.log_NMI(cp, YY_full, px_Z_latent, 'PX - NMI FULL', SEED)
        sess.close()
def evitram():
    """Train the evidence-conditioned (EviTRAM) model on top of pretrained
    P(x) weights and log clustering metrics on its latent space.

    Evidence sources come from the configs listed in sys.argv[3:] and the
    'evidence*' options of the 'Experiment' section.

    Relies on module-level globals: tf, np, sys, cp, utils, ConvAE, SAE,
    pxfinestr, evitramfinestr, XX_full, YY_full, SEED.
    """
    # Restore pretrained model
    restorestr = pxfinestr.split('.meta')[0]
    # Save model str
    evitramstr = evitramfinestr.split('.meta')[0]
    # Load pretrained evidence representations for all sources
    K = []
    for e in sys.argv[3:]:
        cp2 = utils.load_config(e)
        K.append(cp2.get('Experiment', 'PX_Z_TRAIN'))
    sect = 'Experiment'
    ev_paths = [cp.get(sect, i) for i in cp.options(sect) if 'evidence' in i]
    if cp.get('Experiment', 'PREFIX') == 'MNIST':
        evitram_dict = ConvAE.build_EviTRAM(cp, SEED)
    else:
        # Layerwise autoencoder number
        ae_ids = [str(i) for i in xrange(cp.getint('Experiment', 'AENUM'))]
        evitram_dict = SAE.build_EviTRAM(cp, ae_ids, SEED)
    # Get variables to restore from pretrained model P(x) Encoder
    var_list = tf.trainable_variables()
    for ev_path_id, ev_path in enumerate(ev_paths):
        # Per-evidence head variables are new in the conditioned model and
        # must NOT be restored from the P(x) checkpoint.
        skip_names = ['Pre_Q' + str(ev_path_id) + '/kernel:0',
                      'Pre_Q' + str(ev_path_id) + '/bias:0']
        if cp.get('Experiment', 'PREFIX') != 'MNIST':
            # Stacked-AE flavour adds a compression head per evidence source.
            skip_names += ['Pre_Comp_Q' + str(ev_path_id) + '/kernel:0',
                           'Pre_Comp_Q' + str(ev_path_id) + '/bias:0']
        # BUG FIX: the original called var_list.remove(v) while iterating
        # var_list, which skips the element following each removal;
        # rebuilding the list is equivalent and safe.
        var_list = [v for v in var_list if v.name not in skip_names]
    ##########################################################
    # Tensorboard (comment / uncomment)
    ##########################################################
    from datetime import datetime
    now = datetime.utcnow().strftime("%m-%d_%H-%M:%S")
    root_logdir = cp.get('Experiment', 'ModelOutputPath')
    # Run-unique log directory: <out>/<PREFIX>_<Enumber>_cond<argv2>-<time>/
    logdir = "{}/{}{}-{}/".format(
        root_logdir,
        cp.get('Experiment', 'PREFIX') + '_' +
        cp.get('Experiment', 'Enumber') + '_cond',
        sys.argv[2], now)
    tf.summary.scalar(name='cond loss', tensor=evitram_dict['evitram_loss'])
    tf.summary.scalar(name='recon loss', tensor=evitram_dict['px_mse'])
    summary = tf.summary.merge_all()
    file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())
    ##########################################################
    # Initialize & restore P(x) AE weights
    init = tf.global_variables_initializer()
    saver = tf.train.Saver(var_list)
    saverCOND = tf.train.Saver()
    # Task outcomes (pretrained evidence representations)
    EV = [np.load(i) for i in K]
    # Start Session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # Init values
        init.run()
        # Restore finetuned model (only the shared P(x) encoder variables)
        saver.restore(sess, restorestr)
        train_dict = {
            'cp': cp,
            'sess': sess,
            'data': XX_full,
            'sumr': summary,
            'savestr': evitramstr,
            'saver': saverCOND,
            'fw': file_writer,
            'EV': EV,
            'ev_paths': ev_paths
        }
        if cp.get('Experiment', 'PREFIX') == 'MNIST':
            ConvAE.evitram_train(train_dict, evitram_dict)
        else:
            SAE.evitram_train(train_dict, evitram_dict)
        # Get batch size for batch output save
        batch_size = train_dict['cp'].getint('Hyperparameters', 'BatchSize')
        if cp.get('Experiment', 'PREFIX') == 'MNIST':
            # Save hidden/output layer results for pipeline training
            px_Z_latent = utils.run_OOM(sess, evitram_dict['conv_in'], XX_full,
                                        evitram_dict['conv_z'],
                                        batch_size=batch_size)
        else:
            px_Z_latent = utils.run_OOM(sess, evitram_dict['sda_in'], XX_full,
                                        evitram_dict['sda_hidden'],
                                        batch_size=batch_size)
        # Print clustering ACC
        utils.log_accuracy(cp, YY_full, px_Z_latent, 'COND - ACC FULL', SEED)
        # Print clustering NMI
        utils.log_NMI(cp, YY_full, px_Z_latent, 'COND - NMI FULL', SEED)
        sess.close()
def evitram(evitramd):
    """Restore the trained conditioned (EviTRAM) model and log evaluation
    metrics (clustering for MNIST-like data, anomaly PRF for WEATHER-like).

    Args:
        evitramd: dict of graph tensors for the conditioned model
            ('conv_in'/'conv_z' for the convolutional flavour,
            'sda_in'/'sda_hidden' for the stacked autoencoder flavour).

    Relies on module-level globals: tf, np, cp, utils, evitramfinestr,
    XX_full, XX_test, YY_full, YY_test, batch_size, SEED.
    """
    saver = tf.train.Saver()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # Restore pretrained model (checkpoint path without the .meta suffix)
        saver.restore(sess, evitramfinestr.split('.meta')[0])
        # BUG FIX: the body previously read the global `evitram_dict` and
        # ignored the `evitramd` parameter it was handed.
        if cp.get('Experiment', 'PREFIX') == 'MNIST' or \
                cp.get('Experiment', 'PREFIX') == 'AMNIST':
            # Hidden-layer results for pipeline evaluation
            px_Z_latent = utils.run_OOM(sess, evitramd['conv_in'], XX_full,
                                        evitramd['conv_z'],
                                        batch_size=batch_size)
        elif cp.get('Experiment', 'PREFIX') == 'WEATHER':
            # Separate train / test latent spaces
            px_Z_latent_tr = utils.run_OOM(sess, evitramd['conv_in'], XX_full,
                                           evitramd['conv_z'],
                                           batch_size=batch_size)
            px_Z_latent_te = utils.run_OOM(sess, evitramd['conv_in'], XX_test,
                                           evitramd['conv_z'],
                                           batch_size=batch_size)
        else:
            px_Z_latent_tr = utils.run_OOM(sess, evitramd['sda_in'], XX_full,
                                           evitramd['sda_hidden'],
                                           batch_size=batch_size)
            # BUG FIX: the non-WEATHER logging path below reads
            # `px_Z_latent`, which this branch never defined (NameError).
            px_Z_latent = px_Z_latent_tr
            # A zeros((1, 1)) XX_test is the sentinel for "no test split".
            if not (np.array_equal(XX_test, np.zeros(shape=(1, 1)))):
                px_Z_latent_te = utils.run_OOM(sess, evitramd['sda_in'],
                                               XX_test,
                                               evitramd['sda_hidden'],
                                               batch_size=batch_size)
        if 'WEATHER' in cp.get('Experiment', 'PREFIX'):
            # Print clustering ACC
            utils.log_accuracy_isof(cp, YY_full, px_Z_latent_tr,
                                    'COND - ACC FULL (Train)', SEED)
            if not (np.array_equal(XX_test, np.zeros(shape=(1, 1)))):
                utils.log_accuracy_isof(cp, YY_test, px_Z_latent_te,
                                        'COND - ACC FULL (Test)', SEED)
            # NOTE(review): label says "(Test)" but this call scores the
            # full/train split — confirm the intended label before changing.
            utils.log_anomalyPRF_isof(cp, YY_full, px_Z_latent_tr,
                                      'COND - PRF FULL (Test)', SEED)
            if not (np.array_equal(XX_test, np.zeros(shape=(1, 1)))):
                utils.log_anomalyPRF_isof(cp, YY_test, px_Z_latent_te,
                                          'COND - PRF FULL (Test)', SEED)
        else:
            # NOTE(review): labels below say 'PX' although this evaluates the
            # conditioned model — kept byte-identical for log compatibility.
            # Print clustering ACC
            utils.log_accuracy(cp, YY_full, px_Z_latent,
                               'PX - ACC FULL', SEED)
            # Print clustering NMI
            utils.log_NMI(cp, YY_full, px_Z_latent, 'PX - NMI FULL', SEED)
            # Print clustering CHS score
            utils.log_CHS(cp, XX_full, px_Z_latent, 'PX - CHS FULL', SEED)
        sess.close()
def px(pae_dict):
    """Restore the pretrained P(x) model and persist its latent space and
    reconstructions for downstream pipeline stages.

    Args:
        pae_dict: dict of graph tensors ('conv_in'/'conv_z'/'conv_out' for
            the convolutional model, 'sda_in'/'sda_hidden'/'sda_out' for the
            stacked autoencoder).

    Relies on module-level globals: tf, np, cp, utils, pxfinestr, XX_full,
    XX_test, batch_size.
    """
    ckpt_saver = tf.train.Saver()
    # P(x) Session
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    with tf.Session(config=sess_config) as sess:
        # Restore pretrained model (checkpoint path without the .meta suffix)
        ckpt_saver.restore(sess, pxfinestr.split('.meta')[0])
        if cp.get('Experiment', 'PREFIX') in ('MNIST', 'AMNIST'):
            # Hidden-layer results for pipeline training
            px_Z_latent = utils.run_OOM(sess, pae_dict['conv_in'], XX_full,
                                        pae_dict['conv_z'],
                                        batch_size=batch_size)
            # Persist latent space, then reconstructions
            utils.save_OOM(sess, pae_dict['conv_in'], XX_full,
                           pae_dict['conv_z'],
                           path=cp.get('Experiment', 'PX_Z_FULL'),
                           batch_size=batch_size)
            utils.save_OOM(sess, pae_dict['conv_in'], XX_full,
                           pae_dict['conv_out'],
                           path=cp.get('Experiment', 'PX_XREC_TRAIN'),
                           batch_size=batch_size)
        else:
            utils.save_OOM(sess, pae_dict['sda_in'], XX_full,
                           pae_dict['sda_hidden'],
                           path=cp.get('Experiment', 'PX_Z_FULL'),
                           batch_size=batch_size)
            # A zeros((1, 1)) XX_test is the sentinel for "no test split"
            has_test_split = not np.array_equal(XX_test,
                                                np.zeros(shape=(1, 1)))
            if has_test_split:
                utils.save_OOM(sess, pae_dict['sda_in'], XX_test,
                               pae_dict['sda_hidden'],
                               path=cp.get('Experiment', 'PX_Z_TEST'),
                               batch_size=batch_size)
                utils.save_OOM(sess, pae_dict['sda_in'], XX_test,
                               pae_dict['sda_out'],
                               path=cp.get('Experiment', 'PX_XREC_TEST'),
                               batch_size=batch_size)
            utils.save_OOM(sess, pae_dict['sda_in'], XX_full,
                           pae_dict['sda_out'],
                           path=cp.get('Experiment', 'PX_XREC_TRAIN'),
                           batch_size=batch_size)
        sess.close()
def px(pae_dict):
    """Restore the pretrained P(x) model and log clustering / anomaly metrics.

    Args:
        pae_dict: dict of graph tensors ('conv_in'/'conv_z' for the
            convolutional model, 'sda_in'/'sda_hidden' for the stacked AE).

    Relies on module-level globals: tf, np, cp, utils, pxfinestr, XX_full,
    XX_test, YY_full, YY_test, batch_size, SEED.
    """
    saver = tf.train.Saver()
    # P(x) Session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # Restore pretrained model (checkpoint path without the .meta suffix)
        saver.restore(sess, pxfinestr.split('.meta')[0])
        if cp.get('Experiment', 'PREFIX') == 'MNIST' or \
                cp.get('Experiment', 'PREFIX') == 'AMNIST':
            # Save hidden/output layer results for pipeline training
            px_Z_latent = utils.run_OOM(sess, pae_dict['conv_in'], XX_full,
                                        pae_dict['conv_z'],
                                        batch_size=batch_size)
        else:
            px_Z_latent = utils.run_OOM(sess, pae_dict['sda_in'], XX_full,
                                        pae_dict['sda_hidden'],
                                        batch_size=batch_size)
            # A zeros((1, 1)) XX_test appears to be the sentinel for
            # "no test split" — TODO confirm against the data loader.
            if not (np.array_equal(XX_test, np.zeros(shape=(1, 1)))):
                px_Z_latent_te = utils.run_OOM(sess, pae_dict['sda_in'],
                                               XX_test,
                                               pae_dict['sda_hidden'],
                                               batch_size=batch_size)
        # NOTE(review): `px_Z_latent_te` is only bound on the sda branch
        # above; if PREFIX is MNIST/AMNIST and XX_test is non-sentinel, the
        # test-split logging below would raise NameError — confirm that
        # combination is unreachable.
        if cp.get('Experiment', 'Enumber') == 'windT':
            # Print clustering ACC (AC = agglomerative-clustering variant,
            # presumably — verify against utils)
            utils.log_accuracy_AC(cp, YY_full, px_Z_latent,
                                  'PX - ACC FULL', SEED)
            # utils.log_accuracy(cp, YY_test, px_Z_latent_te,
            #                    'PX - ACC TEST', SEED)
            # Print clustering NMI
            utils.log_NMI_AC(cp, YY_full, px_Z_latent, 'PX - NMI FULL', SEED)
            # utils.log_NMI(cp, YY_test, px_Z_latent_te,
            #               'PX - NMI TEST', SEED)
            # Print clustering CHS score
            utils.log_anomalyPRF_AC(cp, YY_full, px_Z_latent,
                                    'PX - PRF FULL (FULL)', SEED)
            # utils.log_anomalyPRF(cp, YY_test, px_Z_latent_te,
            #                      'PX - PRF FULL (Test)', SEED)
        else:
            # Print clustering ACC
            utils.log_accuracy(cp, YY_full, px_Z_latent,
                               'PX - ACC FULL', SEED)
            if not (np.array_equal(XX_test, np.zeros(shape=(1, 1)))):
                utils.log_accuracy(cp, YY_test, px_Z_latent_te,
                                   'PX - ACC TEST', SEED)
            # Print clustering NMI
            utils.log_NMI(cp, YY_full, px_Z_latent, 'PX - NMI FULL', SEED)
            if not (np.array_equal(XX_test, np.zeros(shape=(1, 1)))):
                utils.log_NMI(cp, YY_test, px_Z_latent_te,
                              'PX - NMI TEST', SEED)
            # Print clustering CHS score
            utils.log_anomalyPRF(cp, YY_full, px_Z_latent,
                                 'PX - PRF FULL (FULL)', SEED)
            if not (np.array_equal(XX_test, np.zeros(shape=(1, 1)))):
                utils.log_anomalyPRF(cp, YY_test,
                                     px_Z_latent_te,
                                     'PX - PRF FULL (Test)', SEED)
        sess.close()