Example no. 1
0
def get_opt():
    """Build and return the hyperparameter namespace for meta-training.

    Returns:
        argparse.Namespace carrying model, training, meta-learning and
        sampling settings. The ``*_DISTRY`` attributes are zero-/one-arg
        callables that draw a fresh sample batch on every call; they read
        ``opt.NUM_SAMPLES`` lazily, so it is assigned before they are
        defined to make that dependency explicit.
    """
    opt = Namespace()

    # Model
    opt.CAPACITY = 32
    opt.NUM_COMPONENTS = 10
    opt.GMM_NUM_COMPONENTS = 10

    # Training
    opt.LR = 0.001
    opt.NUM_ITER = 3000
    opt.CUDA = True
    opt.REC_FREQ = 10

    # Meta
    opt.ALPHA_LR = 0.1
    opt.ALPHA_NUM_ITER = 10
    opt.FINETUNE_LR = 0.001
    opt.FINETUNE_NUM_ITER = 10

    # Sampling — NUM_SAMPLES is set first because the samplers below
    # close over it (they resolve opt.NUM_SAMPLES at call time).
    opt.NUM_SAMPLES = 1000
    # Batch around a caller-supplied mean (std 2).
    opt.PARAM_DISTRY = lambda mean: normal(mean, 2, opt.NUM_SAMPLES)
    # Single scalar parameter drawn uniformly from [-4, 4).
    opt.PARAM_SAMPLER = lambda: np.random.uniform(-4, 4)
    # Training batch: N(0, 2).
    opt.TRAIN_DISTRY = lambda: normal(0, 2, opt.NUM_SAMPLES)
    # Transfer batch: N(m, 2) with a fresh integer mean in [-4, 4].
    opt.TRANS_DISTRY = lambda: normal(
        random.randint(-4, 4), 2, opt.NUM_SAMPLES)
    return opt
Example no. 2
0
    # Round-trip sanity check: push the fixed pair (1., 0.) through the
    # decoder and back through the encoder, printing the value at each stage.
    # NOTE(review): `opt`, `encoder`, `decoder` and `torch` come from the
    # enclosing scope — the function header is not visible in this chunk.
    with torch.no_grad():  # inference only; no gradients needed
        _X = torch.tensor([[1.]])
        _Y = torch.tensor([[0.]])
        if opt.CUDA:
            # Move the probes to wherever the encoder's parameters live
            # (presumably a GPU device when CUDA is enabled — verify).
            _X, _Y = _X.to(encoder.theta.device), _Y.to(encoder.theta.device)
        _X_d, _Y_d = decoder(_X, _Y)
        _X_de, _Y_de = encoder(_X_d, _Y_d)
    # .item() assumes each tensor holds a single scalar (shape (1, 1) here).
    print(f"Initial (A, B) = {_X.item()}, {_Y.item()}")
    print(f"Decoded (X, Y) = {_X_d.item()}, {_Y_d.item()}")
    print(f"Encoded (U, V) = {_X_de.item()}, {_Y_de.item()}")


# Shared experiment configuration: identical attributes and values,
# written as a single mapping copied onto the namespace's __dict__.
opt = Namespace()

vars(opt).update(
    # Model
    CAPACITY=32,
    NUM_COMPONENTS=10,
    GMM_NUM_COMPONENTS=10,
    # Training
    LR=0.01,
    NUM_ITER=20,
    NUM_META_ITER=1000,
    ENCODER_LR=0.01,
    ALPHA_LR=0.001,
    CUDA=True,
    REC_FREQ=10,
    ALPHA_INIT=0.,
    USE_BASELINE=True,
)

# Fine tuning