Example No. 1
def control(input_msg):
    tagger = Komoran()

    dataset = Dataset('nsmc/ratings.txt',
                      tagger,
                      max_length=MAX_LENGTH,
                      batch_size=BATCH_SIZE)

    Z_DIM = 40
    H_DIM = 300
    C_DIM = 2

    model = RNN_VAE(dataset.num_words,
                    H_DIM,
                    Z_DIM,
                    C_DIM,
                    freeze_embeddings=False,
                    gpu=USE_CUDA,
                    gpu_id=GPU_ID)

    test_data = torch.LongTensor(
        dataset.sentence2idxs(tagger.morphs(input_msg))).unsqueeze(1)

    model.load_state_dict(torch.load('models/vae_epoch_300_400.bin'))
    results = model.controlSentence(test_data, t=0.5)

    return (dataset.idxs2sentence(results[0], no_pad=True),
            dataset.idxs2sentence(results[1], no_pad=True))
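A hedged usage sketch: controlSentence presumably decodes the encoded input under each of the C_DIM = 2 condition codes, so the call returns two rewrites of the same sentence (the variable names below are illustrative, not from the excerpt).

sentence_c0, sentence_c1 = control(input_msg)  # input_msg is a raw Korean sentence from the nsmc domain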
Example No. 2
z_dim = h_dim
c_dim = 2
kl_weight_max = 0.4

# Specific hyperparams
beta = 0.1
lambda_c = 0.1
lambda_z = 0.1
lambda_u = 0.1
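# assumption: beta and the lambda_* coefficients weight the individual terms (KL, attribute code c, latent code z) of the controlled-generation training objective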

dataset = SST_Dataset(mbsize=mbsize)

model = RNN_VAE(dataset.n_vocab,
                h_dim,
                z_dim,
                c_dim,
                p_word_dropout=0.3,
                pretrained_embeddings=dataset.get_vocab_vectors(),
                freeze_embeddings=True,
                gpu=args.gpu)

# Load pretrained base VAE with c ~ p(c)
model.load_state_dict(torch.load('models/vae.bin'))


def kl_weight(it):
    """
    Credit to: https://github.com/kefirski/pytorch_RVAE/
    0 -> 1
    """
    return (math.tanh((it - 3500) / 1000) + 1) / 2
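As a quick sanity check of the schedule (this reuses the import math the function itself relies on):

for it in (0, 3500, 7000):
    print(it, round(kl_weight(it), 3))  # -> 0 0.001, 3500 0.5, 7000 0.999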
Example No. 3
mb_size = 32
z_dim = 20
h_dim = 64
lr = 1e-3
lr_decay_every = 1000000
n_iter = 20000
log_interval = 1000
z_dim = h_dim  # overrides the z_dim = 20 set above
c_dim = 7

dataset = SST_Dataset()

model = RNN_VAE(dataset.n_vocab,
                h_dim,
                z_dim,
                c_dim,
                p_word_dropout=0.3,
                pretrained_embeddings=dataset.get_vocab_vectors(),
                freeze_embeddings=False,
                gpu=args.gpu)


def main():
    # Annealing for KL term
    kld_start_inc = 3000
    kld_weight = 0.01
    kld_max = 0.15
    kld_inc = (kld_max - kld_weight) / (n_iter - kld_start_inc)
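    # i.e. (0.15 - 0.01) / (20000 - 3000) ≈ 8.2e-6 added per iteration once annealing starts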

    trainer = optim.Adam(model.vae_params, lr=lr)

    for it in range(n_iter):
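        # (the excerpt ends here; a sketch of a typical iteration with hypothetical
        #  names -- inputs, recon_loss, kl_loss are not taken from this excerpt)
        # recon_loss, kl_loss = model.forward(inputs)
        # loss = recon_loss + kld_weight * kl_loss
        # if it > kld_start_inc and kld_weight < kld_max:
        #     kld_weight += kld_inc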
Example No. 4
mb_size = 50
z_dim = 20
h_dim = args.dimension
lr = 1e-3
lr_decay_every = 1000000
n_iter = 10000
log_interval = 1000
z_dim = h_dim
c_dim = args.num_classes

torch.manual_seed(int(time.time()))

model = RNN_VAE(
    dataset.n_vocab, h_dim, z_dim, c_dim, p_word_dropout=0.3,
    pretrained_embeddings=dataset.get_vocab_vectors(),
    cnn_filters=args.filters, cnn_units=args.units,
    freeze_embeddings=args.freeze_emb, gpu=args.gpu
)

model_path = 'models/{}.bin'.format(args.model + utils.getModelName(args))

if args.gpu:
    model.load_state_dict(torch.load(model_path))
else:
    # map_location remaps tensors that were saved on GPU back to CPU storage
    model.load_state_dict(torch.load(model_path,
                                     map_location=lambda storage, loc: storage))

# Samples latent and conditional codes randomly from prior
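# (the excerpt stops here; the calls this comment refers to are shown in Example No. 6 below)
# z = model.sample_z_prior(1)
# c = model.sample_c_prior(1)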
Example No. 5
TEXT.build_vocab(train, max_size=10000, vectors="fasttext.en.300d")
#TEXT.build_vocab(train, max_size=10000)
LABEL.build_vocab(train)

# Binarize the 1-5 rating labels: 1-3 -> class 0, 4-5 -> class 1
LABEL.vocab.stoi['1'] = 0
LABEL.vocab.stoi['2'] = 0
LABEL.vocab.stoi['3'] = 0
LABEL.vocab.stoi['4'] = 1
LABEL.vocab.stoi['5'] = 1

model = RNN_VAE(
    len(TEXT.vocab), h_dim, z_dim, c_dim, p_word_dropout=0.3,
    pretrained_embeddings=TEXT.vocab.vectors, freeze_embeddings=False,
    gpu=args.gpu
)

if args.gpu:
    model.load_state_dict(torch.load('models/{}.bin'.format(args.model)))
else:
    # map_location remaps tensors that were saved on GPU back to CPU storage
    model.load_state_dict(torch.load('models/{}.bin'.format(args.model),
                                     map_location=lambda storage, loc: storage))

for i in range(3):
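    # (the loop body is cut off in this excerpt; a sketch, reusing the prior sampling
    #  from Example No. 6 -- sample_sentence is an assumed decode method, not shown here)
    # z = model.sample_z_prior(1)
    # c = model.sample_c_prior(1)
    # idxs = model.sample_sentence(z, c)
    # print(' '.join(TEXT.vocab.itos[i] for i in idxs))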
Example No. 6
h_dim = 64
lr = 1e-3
lr_decay_every = 1000000
n_iter = 20000
log_interval = 1000
z_dim = h_dim
c_dim = 1

dataset = SST_Dataset()

torch.manual_seed(int(time.time()))

model = RNN_VAE(dataset.n_vocab,
                h_dim,
                z_dim,
                c_dim,
                p_word_dropout=0.3,
                pretrained_embeddings=dataset.get_vocab_vectors(),
                freeze_embeddings=True,
                gpu=args.gpu)

if args.gpu:
    model.load_state_dict(torch.load('{}'.format(args.path)))
else:
    model.load_state_dict(
        torch.load('{}'.format(args.path),
                   map_location=lambda storage, loc: storage))

# Samples latent and conditional codes randomly from prior
z = model.sample_z_prior(1)
c = model.sample_c_prior(1)
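# z is presumably a (1, z_dim) draw from the Gaussian prior and c a (1, c_dim) draw from p(c)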
Example No. 7
# (the excerpt begins mid-statement: the tail of a Dataset(...) call like the one in Example No. 1)
                  batch_size=BATCH_SIZE)

# # 3. Train Model

# ## 3.1 Define model
#
# The model is a Variational Recurrent Autoencoder that uses a GRU.

Z_DIM = 40
H_DIM = 300
C_DIM = 2

model = RNN_VAE(dataset.num_words,
                H_DIM,
                Z_DIM,
                C_DIM,
                freeze_embeddings=False,
                gpu=USE_CUDA,
                gpu_id=GPU_ID)


def save_base_vae():
    if not os.path.exists('models/'):
        os.makedirs('models/')

    torch.save(model.state_dict(), 'models/vae_epoch_300_complete.bin')


def save_base_vae_iter(iter):
    if not os.path.exists('models/'):
        os.makedirs('models/')
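    # (the function body is cut off; by analogy with save_base_vae above and the
    #  checkpoint name loaded in Example No. 1, it presumably ends with something like)
    # torch.save(model.state_dict(), 'models/vae_epoch_300_{}.bin'.format(iter))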

Example No. 8
log_interval = 100
z_dim = h_dim
c_dim = args.num_classes
kl_weight_max = 0.4

# Specific hyperparams
beta = 0.1
lambda_c = 0.1
lambda_z = 0.1
lambda_u = 0.1

model = RNN_VAE(dataset.n_vocab,
                h_dim,
                z_dim,
                c_dim,
                p_word_dropout=0.3,
                pretrained_embeddings=dataset.get_vocab_vectors(),
                cnn_filters=args.filters,
                cnn_units=args.units,
                freeze_embeddings=args.freeze_emb,
                gpu=args.gpu)

# Load pretrained base VAE with c ~ p(c)
model.load_state_dict(
    torch.load('models/vae' + utils.getModelName(args) + '.bin'))


def kl_weight(it):
    """
    Credit to: https://github.com/kefirski/pytorch_RVAE/
    0 -> 1
    """
    return (math.tanh((it - 3500) / 1000) + 1) / 2