def get_opt():
    """Build the hyper-parameter namespace for this training run.

    Returns:
        argparse.Namespace with model, training, meta-learning, and sampling
        settings. The *_DISTRY / *_SAMPLER entries are zero-/one-argument
        lambdas that draw fresh samples each call.
    """
    opt = Namespace()
    # Model
    opt.CAPACITY = 32
    opt.NUM_COMPONENTS = 10
    opt.GMM_NUM_COMPONENTS = 10
    # Training
    opt.LR = 0.001
    opt.NUM_ITER = 3000
    opt.CUDA = True
    opt.REC_FREQ = 10
    # Meta
    opt.ALPHA_LR = 0.1
    opt.ALPHA_NUM_ITER = 10
    opt.FINETUNE_LR = 0.001
    opt.FINETUNE_NUM_ITER = 10
    # NOTE: these lambdas close over `opt`, so opt.NUM_SAMPLES (assigned
    # below) is resolved lazily at call time — the ordering is safe.
    opt.PARAM_DISTRY = lambda mean: normal(mean, 2, opt.NUM_SAMPLES)
    opt.PARAM_SAMPLER = lambda: np.random.uniform(-4, 4)
    # Sampling
    opt.NUM_SAMPLES = 1000
    opt.TRAIN_DISTRY = lambda: normal(0, 2, opt.NUM_SAMPLES)
    opt.TRANS_DISTRY = lambda: normal(random.randint(-4, 4), 2, opt.NUM_SAMPLES)
    return opt
print(f"Encoded (U, V) = {_X_de.item()}, {_Y_de.item()}")

# Experiment configuration, grouped by purpose. The distribution lambdas
# close over `opt`, so NUM_SAMPLES is looked up lazily at call time.
opt = Namespace(
    # Model
    CAPACITY=32,
    NUM_COMPONENTS=10,
    GMM_NUM_COMPONENTS=10,
    # Training
    LR=0.01,
    NUM_ITER=20,
    NUM_META_ITER=1000,
    ENCODER_LR=0.01,
    ALPHA_LR=0.001,
    CUDA=True,
    REC_FREQ=10,
    ALPHA_INIT=0.,
    USE_BASELINE=True,
    # Fine tuning
    FINETUNE_NUM_ITER=5,
    FINETUNE_LR=0.001,
    EM_ITERS=500,
    # Sampling
    NUM_SAMPLES=1000,
)
opt.TRAIN_DISTRY = lambda: normal(0, 2, opt.NUM_SAMPLES)
opt.TRANS_DISTRY = lambda: normal(np.random.uniform(-4, 4), 2, opt.NUM_SAMPLES)
def normal(mean, std, N):
    """Draw N Gaussian samples with the given mean and std.

    Returns:
        torch.Tensor of shape (N, 1) — a column of samples from
        Normal(mean, std).
    """
    return torch.normal(torch.ones(N).mul_(mean),
                        torch.ones(N).mul_(std)).view(-1, 1)


# Hyper-parameters for this run.
opt = Namespace()
# Model
opt.CAPACITY = 32
opt.NUM_COMPONENTS = 10
opt.GMM_NUM_COMPONENTS = 10
# Training
opt.LR = 0.001
opt.NUM_ITER = 3000
opt.CUDA = False
opt.REC_FREQ = 10
# Meta
opt.ALPHA_LR = 0.1
opt.ALPHA_NUM_ITER = 50
opt.FINETUNE_LR = 0.001
opt.FINETUNE_NUM_ITER = 10
# NOTE: lambda closes over `opt`; opt.NUM_TRANS_SAMPLES (assigned below)
# is resolved lazily at call time, so the ordering is safe.
opt.PARAM_DISTRY = lambda mean: normal(mean, 2, opt.NUM_TRANS_SAMPLES)
opt.PARAM_SAMPLER = lambda: np.random.uniform(-4, 4)
# Sampling
opt.NUM_SAMPLES = 1000
opt.NUM_TRANS_SAMPLES = 1000
opt.TRAIN_DISTRY = lambda: normal(0, 2, opt.NUM_SAMPLES)
opt.TRAIN_EVAL_DISTRY = lambda: normal(0, 2, 10000)

# Accumulators filled by the experiment loop elsewhere in the file.
alpha_list = []
beta_list = []
gamma_list = []