def SAE_evidence(sae_dict, cp2, ae_ids):
    """Train the evidence stacked autoencoder and persist its latent/reconstruction outputs.

    Args:
        sae_dict: dict of SAE graph tensors; keys 'sda_in', 'sda_hidden',
            'sda_out' are used here (input placeholder, hidden layer, output layer).
        cp2: ConfigParser with 'Experiment' and 'Hyperparameters' sections
            (paths, batch size).
        ae_ids: autoencoder identifiers, forwarded untouched to EviAE.train.

    Side effects:
        Trains via EviAE.train and writes the hidden-layer codes to
        PX_Z_TRAIN and the reconstructions to PX_XREC via utils.save_OOM.
    """
    # Init variables and parameters
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

    # Model save string: strip the '.meta' suffix from the checkpoint path.
    # NOTE(review): `saefinestr` is a module-level global — confirm it is set
    # before this function runs.
    saemodelstr = saefinestr.split('.meta')[0]

    # Load evidence data; only the training split is used (no test
    # concatenation, no permutation — earlier variants were removed).
    K = utils.load_evidence(cp2.get('Experiment', 'EVIDENCEDATAPATH'))
    EV = K.train.one

    batch_size = cp2.getint('Hyperparameters', 'BatchSize')

    # Start Session (layerwise training); allow_growth avoids grabbing all GPU memory.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # Initialize graph variables
        init.run()

        ev_dict = {
            'cp2': cp2,
            'sess': sess,
            'saver': saver,
            'EV': EV,
            'savestr': saemodelstr,
            'ae_ids': ae_ids,
            'argv2': sys.argv[2],
        }
        EviAE.train(ev_dict, sae_dict)

        # Save hidden/output layer results for pipeline training
        utils.save_OOM(sess, sae_dict['sda_in'], EV, sae_dict['sda_hidden'],
                       path=cp2.get('Experiment', 'PX_Z_TRAIN'),
                       batch_size=batch_size)
        utils.save_OOM(sess, sae_dict['sda_in'], EV, sae_dict['sda_out'],
                       path=cp2.get('Experiment', 'PX_XREC'),
                       batch_size=batch_size)
        # No explicit sess.close(): the `with` block closes the session.
def evitram(evitramd):
    """Restore the trained EviTRAM model, dump its latent space / reconstructions,
    and log clustering metrics (ACC, NMI, CHS) on the full dataset.

    Args:
        evitramd: dict of EviTRAM graph tensors. For the 'MNIST' experiment the
            keys 'conv_in', 'conv_z', 'conv_out' are used; otherwise
            'sda_in', 'sda_hidden'.

    Side effects:
        Writes 'COND_'-prefixed latent/reconstruction files via utils.save_OOM
        and logs ACC/NMI/CHS via the utils.log_* helpers.

    BUGFIX: the original body ignored the `evitramd` parameter and read a
    module-level `evitram_dict` global instead; the parameter is now used.
    """
    saver = tf.train.Saver()

    # allow_growth: don't pre-allocate the whole GPU.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # Restore pretrained model (checkpoint path minus the '.meta' suffix).
        saver.restore(sess, evitramfinestr.split('.meta')[0])

        if cp.get('Experiment', 'PREFIX') == 'MNIST':
            # Convolutional model: latent space from 'conv_z'.
            px_Z_latent = utils.run_OOM(sess, evitramd['conv_in'], XX_full,
                                        evitramd['conv_z'],
                                        batch_size=batch_size)
            # Save latent space
            utils.save_OOM(sess, evitramd['conv_in'], XX_full,
                           evitramd['conv_z'],
                           path='COND_' + cp.get('Experiment', 'PX_Z_FULL'),
                           batch_size=batch_size)
            # Save reconstruction
            utils.save_OOM(sess, evitramd['conv_in'], XX_full,
                           evitramd['conv_out'],
                           path='COND_' + cp.get('Experiment', 'PX_XREC_TRAIN'),
                           batch_size=batch_size)
        else:
            # Stacked-denoising-autoencoder model: latent space from 'sda_hidden'.
            px_Z_latent = utils.run_OOM(sess, evitramd['sda_in'], XX_full,
                                        evitramd['sda_hidden'],
                                        batch_size=batch_size)
            utils.save_OOM(sess, evitramd['sda_in'], XX_full,
                           evitramd['sda_hidden'],
                           path='COND_' + cp.get('Experiment', 'PX_Z_FULL'),
                           batch_size=batch_size)

        # Clustering quality on the latent codes: accuracy, NMI, CHS.
        utils.log_accuracy(cp, YY_full, px_Z_latent, 'COND - ACC FULL', SEED)
        utils.log_NMI(cp, YY_full, px_Z_latent, 'COND - NMI FULL', SEED)
        utils.log_CHS(cp, XX_full, px_Z_latent, 'COND - CHS FULL', SEED)
        # No explicit sess.close(): the `with` block closes the session.
def px(pae_dict):
    """Restore the pretrained P(x) autoencoder and save its latent space and
    reconstructions for downstream pipeline training.

    Args:
        pae_dict: dict of P(x) graph tensors. For 'MNIST'/'AMNIST' experiments
            the keys 'conv_in', 'conv_z', 'conv_out' are used; otherwise
            'sda_in', 'sda_hidden', 'sda_out'.

    Side effects:
        Writes latent/reconstruction arrays to the paths configured under the
        'Experiment' section via utils.save_OOM.
    """
    saver = tf.train.Saver()

    # P(x) session; allow_growth avoids grabbing all GPU memory up front.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # Restore pretrained model (checkpoint path minus the '.meta' suffix).
        saver.restore(sess, pxfinestr.split('.meta')[0])

        if cp.get('Experiment', 'PREFIX') in ('MNIST', 'AMNIST'):
            # Convolutional model.
            # NOTE(review): px_Z_latent is computed but never used in this
            # function (unlike evitram, which logs metrics on it) — confirm
            # whether metric logging was intended here too.
            px_Z_latent = utils.run_OOM(sess, pae_dict['conv_in'], XX_full,
                                        pae_dict['conv_z'],
                                        batch_size=batch_size)
            # Save latent space
            utils.save_OOM(sess, pae_dict['conv_in'], XX_full,
                           pae_dict['conv_z'],
                           path=cp.get('Experiment', 'PX_Z_FULL'),
                           batch_size=batch_size)
            # Save reconstruction
            utils.save_OOM(sess, pae_dict['conv_in'], XX_full,
                           pae_dict['conv_out'],
                           path=cp.get('Experiment', 'PX_XREC_TRAIN'),
                           batch_size=batch_size)
        else:
            # SDA model: full-set latent space.
            utils.save_OOM(sess, pae_dict['sda_in'], XX_full,
                           pae_dict['sda_hidden'],
                           path=cp.get('Experiment', 'PX_Z_FULL'),
                           batch_size=batch_size)

            # A (1, 1) zero array is the sentinel for "no test split".
            if not np.array_equal(XX_test, np.zeros(shape=(1, 1))):
                utils.save_OOM(sess, pae_dict['sda_in'], XX_test,
                               pae_dict['sda_hidden'],
                               path=cp.get('Experiment', 'PX_Z_TEST'),
                               batch_size=batch_size)
                utils.save_OOM(sess, pae_dict['sda_in'], XX_test,
                               pae_dict['sda_out'],
                               path=cp.get('Experiment', 'PX_XREC_TEST'),
                               batch_size=batch_size)

            utils.save_OOM(sess, pae_dict['sda_in'], XX_full,
                           pae_dict['sda_out'],
                           path=cp.get('Experiment', 'PX_XREC_TRAIN'),
                           batch_size=batch_size)
        # No explicit sess.close(): the `with` block closes the session.