                    default=1000000, help="Number of training iterations")
parser.add_argument('--iter_save', type=int, default=10000,
                    help="Save model every n iterations")
parser.add_argument('--run', type=int, default=0,
                    help="Run ID. In case you want to run replicates")
args = parser.parse_args()
layout = [('model={:s}', 'fsvae'),
          ('run={:04d}', args.run)]
model_name = '_'.join([t.format(v) for (t, v) in layout])
pprint(vars(args))
print('Model name:', model_name)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_loader, labeled_subset, test_set = ut.get_svhn_data(device)
fsvae = FSVAE(name=model_name).to(device)
writer = ut.prepare_writer(model_name, overwrite_existing=True)
train(model=fsvae,
      train_loader=train_loader,
      labeled_subset=labeled_subset,
      device=device,
      y_status='fullsup',
      tqdm=tqdm.tqdm,
      writer=writer,
      iter_max=args.iter_max,
      iter_save=args.iter_save)
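# Side note (not part of the original script): the layout/model_name idiom above
# just zips format templates with values. A self-contained illustration with
# hypothetical values:
layout_example = [('model={:s}', 'fsvae'), ('run={:04d}', 7)]
assert '_'.join(t.format(v) for t, v in layout_example) == 'model=fsvae_run=0007'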
              num_hidden_layers=args.nlayers, pi=args.pi, std1=std1, std2=std2,
              gpu=gpu, BBB=args.BBB, training=args.training, sharpen=args.sharpen,
              dropout=args.dropout, likelihood_cost_form=args.likelihood_cost_form,
              input_feat_dim=args.input_feat_dim, pred_feat_dim=args.pred_feat_dim,
              hidden_feat_dim=args.hidden_feat_dim, n_input_steps=args.n_input_steps,
              n_pred_steps=args.n_pred_steps, constant_var=args.constant_var,
              name=model_name, device=device).to(device)

train(model, training_set, args.batch_size, n_batches,
      kernel=data_ut.sinusoidal_kernel,
      lr=args.lr,
      clip_grad=args.clip_grad,
      iter_max=args.iter_max,
      iter_save=args.iter_save,
      iter_plot=args.iter_plot,
      reinitialize=False)
parser.add_argument('--logdir', type=str, default='log', help="Log directory")
codebase_args.args = args = parser.parse_args()
pprint(vars(args))

from codebase.models.vae import vae
from codebase.train import train
from codebase.utils import get_data

# Make model name
setup = [('model={:s}', 'vae'), ('src={:s}', args.src), ('trg={:s}', args.trg),
         ('design={:s}', args.design), ('gw={:.0e}', args.gw), ('rw={:.0e}', args.rw),
         ('npc={}', args.npc), ('lr={:.0e}', args.lr), ('run={:04d}', args.run)]
model_name = '_'.join([t.format(v) for (t, v) in setup])
print("Model name:", model_name)

M = vae()
M.sess.run(tf.global_variables_initializer())
src = get_data(args.src, npc=args.npc)
trg = get_data(args.trg)
saver = tf.train.Saver()
train(M, src, trg,
      saver=saver,
      has_disc=False,
      add_z=False,
      model_name=model_name)
M.sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()

# When running DIRT-T (dirt > 0), warm-start from the matching dirt=0 (VADA) checkpoint.
if args.dirt > 0:
    run = args.run if args.run < 999 else 0
    setup = [('model={:s}', 'dirtt'), ('src={:s}', args.src), ('trg={:s}', args.trg),
             ('nn={:s}', args.nn), ('trim={:d}', args.trim), ('dw={:.0e}', args.dw),
             ('bw={:.0e}', 0), ('sw={:.0e}', args.sw), ('tw={:.0e}', args.tw),
             ('dirt={:05d}', 0), ('run={:04d}', run)]
    vada_name = '_'.join([t.format(v) for (t, v) in setup])
    path = tf.train.latest_checkpoint(os.path.join('checkpoints', vada_name))
    saver.restore(M.sess, path)
    print("Restored from {}".format(path))

src = get_data(args.src)
trg = get_data(args.trg)
train(M, src, trg, saver=saver, has_disc=(args.dirt == 0), model_name=model_name)
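# Standalone illustration (not part of this script): the save/restore round trip
# that the checkpoint restore above relies on, in plain TF1-style code with a
# hypothetical checkpoint directory.
import os
import tensorflow as tf

demo_dir = os.path.join('checkpoints', 'saver_demo')
os.makedirs(demo_dir, exist_ok=True)
v = tf.Variable(0.0, name='demo_v')
demo_saver = tf.train.Saver(var_list=[v])
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    demo_saver.save(sess, os.path.join(demo_dir, 'model'), global_step=0)
    latest = tf.train.latest_checkpoint(demo_dir)
    demo_saver.restore(sess, latest)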
    ('model={:s}', 'vae'),
    ('z={:02d}', args.z),
    ('run={:04d}', args.run)
]
model_name = '_'.join([t.format(v) for (t, v) in layout])
pprint(vars(args))
print('Model name:', model_name)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_loader, labeled_subset, _ = ut.get_mnist_data(device, use_test_subset=True)
vae = VAE(z_dim=args.z, name=model_name).to(device)

if args.train:
    writer = ut.prepare_writer(model_name, overwrite_existing=True)
    train(model=vae,
          train_loader=train_loader,
          labeled_subset=labeled_subset,
          device=device,
          tqdm=tqdm.tqdm,
          writer=writer,
          iter_max=args.iter_max,
          iter_save=args.iter_save)
    ut.evaluate_lower_bound(vae, labeled_subset, run_iwae=(args.train == 2))
    # Draw and display a batch of 100 samples from the trained model.
    x = vae.sample_x(100).view(100, 1, 28, 28)
    db.printTensor(x)
    ImUtil.showBatch(x, show=True)
    input('Press Enter to exit')
else:
    ut.load_model_by_name(vae, global_step=args.iter_max)
    ut.evaluate_lower_bound(vae, labeled_subset, run_iwae=True)
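# Optional alternative to the db/ImUtil display helpers above (a sketch, assuming
# torchvision is installed): draw 100 samples and tile them into a 10x10 grid on disk.
from torchvision.utils import save_image
grid_x = vae.sample_x(100).view(100, 1, 28, 28)
save_image(grid_x.detach().cpu(), 'vae_samples.png', nrow=10)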
import argparse
from pprint import pprint

from codebase.args import args
from codebase.models.classifier import classifier
from codebase.train import train
from codebase import datasets

import tensorflow as tf

# Settings
parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--lr', type=float, default=1e-3, help="Learning rate")
parser.add_argument('--run', type=int, default=999, help="Run index")
parser.add_argument('--logdir', type=str, default='log', help="Log directory")
args.set_args(parser.parse_args())
pprint(vars(args))

# Make model name
setup = [('model={:s}', 'classifier'), ('src={:s}', 'mnist'),
         ('trg={:s}', 'svhn'), ('run={:04d}', args.run)]
model_name = '_'.join([t.format(v) for (t, v) in setup])
print("Model name:", model_name)

M = classifier()
M.sess.run(tf.global_variables_initializer())
saver = None  # Set to tf.train.Saver() to checkpoint this source-only classifier.
# MNIST is resized/tiled to (32, 32, 3) so source and target share SVHN's input shape.
src = datasets.Mnist(shape=(32, 32, 3))
trg = datasets.Svhn()
train(M, src, trg, saver=saver, model_name=model_name)
          z_prior_v=z_prior_v).to(device)

# train_args selects the pipeline stage:
#   1 -> step 1: train the model
#   2 -> step 2: get the mean and variance
#   3 -> step 3: refine the model
# train_args = 1
train_args = None

if train_args == 1:
    writer = ut.prepare_writer(model_name, overwrite_existing=True)
    train(model=vae,
          train_loader=train_loader,
          # train_loader=data_loader_individual[0],
          labeled_subset=labeled_subset,
          device=device,
          tqdm=tqdm.tqdm,
          writer=writer,
          iter_max=10000,
          iter_save=args.iter_save)
    ut.evaluate_lower_bound(vae, labeled_subset, run_iwae=(args.train == 2))
    train_args = 2
    # train_args = None

mean_set = []
variance_set = []
if train_args == 2:
    ut.load_model_by_name(vae, global_step=20000)
    para_set = [get_mean_variance(vae, data_set_individual[i]) for i in range(10)]
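# Sketch only (hypothetical helper, not this codebase's implementation): one
# plausible reading of get_mean_variance, assuming the VAE exposes an encoder
# returning a (mean, variance) pair per example; the real helper may differ.
def get_mean_variance_sketch(model, data_x):
    with torch.no_grad():
        qm, qv = model.enc.encode(data_x)  # hypothetical encoder API
    return qm.mean(dim=0), qv.mean(dim=0)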
              kl_xy_x_weight=args.kl_xy_xw,
              kl_xy_y_weight=args.kl_xy_yw,
              gen_weight=args.gw,
              class_weight=args.cw,
              name=model_name,
              CNN=CNN).to(device)

Train = True
if Train:
    writer = ut.prepare_writer(model_name, overwrite_existing=True)
    train(model=hkvae,
          train_loader=train_loader,
          labeled_subset=labeled_subset,
          device=device,
          y_status='hk',
          tqdm=tqdm.tqdm,
          writer=writer,
          iter_max=args.iter_max,
          iter_save=args.iter_save,
          rec_step=args.rec_step,
          CNN=CNN)
else:
    ut.load_model_by_name(hkvae, args.iter_max)

# pprint(vars(args))
# print('Model name:', model_name)
# print(hkvae.CNN)
# xl, yl = test_set
# yl = torch.tensor(np.eye(10)[yl]).float().to(device)
# test_set = (xl, yl)
# ut.evaluate_lower_bound_HK(hkvae, test_set)
if args.run >= 999 or not os.path.exists(restoration_path):
    run = args.run % 3
    template = ('model=dann_embed_src={:s}_trg={:s}_design={:s}_dw={:.0e}_sbw={:.0e}'
                '_cw={:.0e}_tbw={:.0e}_dirt=00000_init=0_pivot=90000_up=0_uval=0e+00'
                '_dn=0_dcval=0e+00_dwdn=0_phase=0_run={:04d}_person={:s}')
    restoration_name = template.format(args.src, args.trg, args.design, args.dw,
                                       args.sbw, args.cw, args.tbw, run, args.person)
    restoration_path = os.path.join('checkpoints', restoration_name)

assert os.path.exists(restoration_path), \
    "File does not exist: {}".format(restoration_name)
path = tf.train.latest_checkpoint(restoration_path)
saver.restore(M.sess, path)
print("Restored from {}".format(path))

src = get_data(args.src, person=args.person)
trg = get_data(args.trg)
Y = src.train.labels.shape[-1]
# Target label prior: empirical target class frequencies if requested, else uniform.
y_prior = trg.train.labels.mean(axis=0) if args.y_emp else [1. / Y] * Y
print("y_prior is", y_prior)
train(M, src, trg,
      saver=saver,
      has_disc=True,
      add_z=True,
      model_name=model_name,
      y_prior=y_prior)
# Smoke test for a single forward/loss pass (kept for reference):
# inputs = torch.Tensor(np.random.rand(n_input_steps, batch_size, input_feat_dim))
# hidden = model.init_hidden(batch_size)
# targets = torch.Tensor(np.random.rand(n_pred_steps, batch_size, pred_feat_dim))
# outputs, hidden = model.forward(inputs, hidden, targets)
# assert outputs.shape == torch.Size([n_pred_steps, batch_size, 2 * pred_feat_dim])
# print(outputs.shape)
# loss = model.get_loss(outputs, targets)
# print(loss)

dummy_training_set = data_ut.dummy_data_creator(
    batch_size=batch_size,
    n_batches=n_batches,
    input_feat_dim=input_feat_dim,
    n_input_steps=n_input_steps,
    n_pred_steps=n_pred_steps,
    kernel=data_ut.sinusoidal_kernel,
    device=device)

train(model, dummy_training_set, batch_size, n_batches, device,
      kernel=data_ut.sinusoidal_kernel,
      lr=1e-3,
      clip_grad=5,
      iter_max=iter_max,
      iter_save=np.inf,
      iter_plot=np.inf,
      reinitialize=False)
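# Illustrative stand-in for data_ut.sinusoidal_kernel (hypothetical: the real
# helper's signature and parameters may differ). It maps a vector of time steps
# to a sinusoid, which is the kind of callable the dummy data creator consumes.
import numpy as np

def sinusoidal_kernel_sketch(t, amplitude=1.0, period=2.0 * np.pi):
    return amplitude * np.sin(2.0 * np.pi * np.asarray(t) / period)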
              var_pen=args.var_pen).to(device)

if args.mode == 'train':
    writer = ut.prepare_writer(model_name, overwrite_existing=True)
    train_loader = ut.get_load_data(device, split='train', batch_size=args.batch,
                                    in_memory=True, log_normal=True,
                                    shift_scale=shift_scale)
    train(model=model,
          train_loader=train_loader,
          device=device,
          tqdm=tqdm.tqdm,
          writer=writer,
          lr=args.lr,
          lr_gamma=args.lr_gamma,
          lr_milestones=lr_milestones,
          iw=args.iw,
          iter_max=args.iter_max,
          iter_save=args.iter_save)
    # Validate with the importance-weighted bound when more than one sample is used.
    model.set_to_eval()
    val_set = ut.get_load_data(device, split='val', in_memory=True,
                               log_normal=True, shift_scale=shift_scale)
    ut.evaluate_lower_bound(model, val_set, run_iwae=(args.iw > 1))
else:
    ut.load_model_by_name(model, global_step=args.iter_max)
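# For reference, a standard way an lr_gamma / lr_milestones pair like the one above
# is consumed (a sketch of typical PyTorch scheduler usage, not necessarily this
# codebase's train() internals):
import torch
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer, milestones=lr_milestones, gamma=args.lr_gamma)
# Each call to scheduler.step() advances the schedule; the learning rate is
# multiplied by lr_gamma whenever a milestone is reached.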