# Ejemplo n.º 1 (Example 1 — scraped-snippet separator; "0" was a site artifact)
def control(input_msg):
    """Run the controlled-generation VAE on a single input sentence.

    Tokenizes *input_msg* with Komoran, encodes it against the NSMC
    vocabulary, and returns the two controlled variants produced by
    ``controlSentence`` decoded back to text with padding stripped.
    """
    morph_tagger = Komoran()

    corpus = Dataset('nsmc/ratings.txt',
                     morph_tagger,
                     max_length=MAX_LENGTH,
                     batch_size=BATCH_SIZE)

    # Latent / hidden / condition-code sizes must match the checkpoint below.
    latent_dim, hidden_dim, code_dim = 40, 300, 2

    vae = RNN_VAE(corpus.num_words,
                  hidden_dim,
                  latent_dim,
                  code_dim,
                  freeze_embeddings=False,
                  gpu=USE_CUDA,
                  gpu_id=GPU_ID)
    vae.load_state_dict(torch.load('models/vae_epoch_300_400.bin'))

    # unsqueeze(1) -> shape (seq_len, 1): a single-sentence batch.
    token_ids = corpus.sentence2idxs(morph_tagger.morphs(input_msg))
    batch = torch.LongTensor(token_ids).unsqueeze(1)

    outputs = vae.controlSentence(batch, t=0.5)
    return tuple(corpus.idxs2sentence(outputs[i], no_pad=True) for i in (0, 1))
# Loss weights for the latent-code (z) and unstructured-code (u) terms.
lambda_z = 0.1
lambda_u = 0.1

# mbsize, h_dim, z_dim, c_dim and args are defined earlier in the script.
dataset = SST_Dataset(mbsize=mbsize)

model = RNN_VAE(dataset.n_vocab,
                h_dim,
                z_dim,
                c_dim,
                p_word_dropout=0.3,
                pretrained_embeddings=dataset.get_vocab_vectors(),
                freeze_embeddings=True,
                gpu=args.gpu)

# Load pretrained base VAE with c ~ p(c)
model.load_state_dict(torch.load('models/vae.bin'))


def kl_weight(it):
    """Sigmoid-shaped KL annealing schedule mapping iteration count 0 -> 1.

    Credit to: https://github.com/kefirski/pytorch_RVAE/
    """
    # Centered at iteration 3500, with a 1000-iteration scale.
    x = (it - 3500) / 1000
    return (math.tanh(x) + 1) / 2


def temp(it):
    """
    Softmax temperature annealing
    1 -> 0
    """
# Ejemplo n.º 3 (Example 3 — scraped-snippet separator; "0" was a site artifact)
# Logging interval (iterations); h_dim, dataset, args and utils come from
# earlier in the script (outside this chunk).
log_interval = 1000
z_dim = h_dim
c_dim = args.num_classes

# Time-based seed: runs are deliberately not reproducible.
torch.manual_seed(int(time.time()))

model = RNN_VAE(
    dataset.n_vocab, h_dim, z_dim, c_dim, p_word_dropout=0.3,
    pretrained_embeddings=dataset.get_vocab_vectors(),
    cnn_filters=args.filters, cnn_units=args.units,
    freeze_embeddings=args.freeze_emb, gpu=args.gpu
)

# Checkpoint filename is the model name plus a hyper-parameter suffix.
if args.gpu:
    model.load_state_dict(torch.load('models/{}.bin'
                                     .format(args.model
                                             + utils.getModelName(args))))
else:
    # map_location remaps CUDA tensors onto the CPU on GPU-less machines.
    model.load_state_dict(torch.load('models/{}.bin'
                                     .format(args.model
                                             + utils.getModelName(args)),
                                     map_location=lambda storage, loc: storage)
                          )

# Samples latent and conditional codes randomly from prior
z = model.sample_z_prior(1)
c = model.sample_c_prior(1)

# Generate positive sample given z
c[0, 0], c[0, 1] = 1, 0
# Ejemplo n.º 4 (Example 4 — scraped-snippet separator; "0" was a site artifact)
# Collapse star ratings: map both '4' and '5' to label index 1.
LABEL.vocab.stoi['4']=1
LABEL.vocab.stoi['5']=1




model = RNN_VAE(
    len(TEXT.vocab), h_dim, z_dim, c_dim, p_word_dropout=0.3,
    pretrained_embeddings=TEXT.vocab.vectors, freeze_embeddings=False,
    gpu=args.gpu
)



if args.gpu:
    model.load_state_dict(torch.load('models/{}.bin'.format(args.model)))
else:
    # map_location lets a CPU-only machine read a GPU-trained checkpoint.
    model.load_state_dict(torch.load('models/{}.bin'.format(args.model), map_location=lambda storage, loc: storage))




for i in range(3):
    print("---------------example-------------   ", i)
    # Samples latent and conditional codes randomly from prior
    z = model.sample_z_prior(1)
    c = model.sample_c_prior(1)

    # Generate negative sample given z
    # NOTE(review): elsewhere in this file the same one-hot code (1, 0) is
    # labelled "positive" -- confirm which index encodes which sentiment.
    c[0, 0], c[0, 1] = 1, 0
# Ejemplo n.º 5 (Example 5 — scraped-snippet separator; "0" was a site artifact)
# h_dim / z_dim / c_dim and args are defined earlier in the script.
dataset = SST_Dataset()

# Time-based seed: runs are deliberately not reproducible.
torch.manual_seed(int(time.time()))

model = RNN_VAE(dataset.n_vocab,
                h_dim,
                z_dim,
                c_dim,
                p_word_dropout=0.3,
                pretrained_embeddings=dataset.get_vocab_vectors(),
                freeze_embeddings=True,
                gpu=args.gpu)

if args.gpu:
    model.load_state_dict(torch.load('{}'.format(args.path)))
else:
    # map_location remaps CUDA tensors onto the CPU on GPU-less machines.
    model.load_state_dict(
        torch.load('{}'.format(args.path),
                   map_location=lambda storage, loc: storage))

# Samples latent and conditional codes randomly from prior
z = model.sample_z_prior(1)
c = model.sample_c_prior(1)

# Generate mean sample given z
c[0, 0] = 0

# Pick the most probable condition code, then decode one sentence at a low
# softmax temperature (near-greedy sampling).
_, c_idx = torch.max(c, dim=1)
sample_idxs = model.sample_sentence(z, c, temp=0.1)
# Ejemplo n.º 6 (Example 6 — scraped-snippet separator; "0" was a site artifact)
#
# Prints the controlled-sentence results for 100 examples from the test set.
#
# Then reads sentences from standard input and prints their controlled results.

# H_DIM / Z_DIM / C_DIM, USE_CUDA, GPU_ID and dataset come from earlier in
# the script (outside this chunk).
model = RNN_VAE(dataset.num_words,
                H_DIM,
                Z_DIM,
                C_DIM,
                freeze_embeddings=False,
                gpu=USE_CUDA,
                gpu_id=GPU_ID)

test_set = dataset.getTestData(100)

model.load_state_dict(torch.load('models/vae_epoch_300_400.bin'))
for test in test_set:
    # unsqueeze(1) turns the index vector into a single-sentence batch.
    results = model.controlSentence(test[0].unsqueeze(1), t=0.5)

    print('Original : ', dataset.idxs2sentence(test[0], no_pad=True))
    print('Positive : ', dataset.idxs2sentence(results[0], no_pad=True))
    print('Negative : ', dataset.idxs2sentence(results[1], no_pad=True))
    print()

# Interactive mode: tokenize stdin lines until an empty input is entered.
tagger = Komoran()

while True:
    sentence = tagger.morphs(input())

    if len(sentence) == 0:
        break
# Loss weights for the latent-code (z) and unstructured-code (u) terms.
lambda_z = 0.1
lambda_u = 0.1

# dataset, h_dim / z_dim / c_dim, args and utils come from earlier in the
# script (outside this chunk).
model = RNN_VAE(dataset.n_vocab,
                h_dim,
                z_dim,
                c_dim,
                p_word_dropout=0.3,
                pretrained_embeddings=dataset.get_vocab_vectors(),
                cnn_filters=args.filters,
                cnn_units=args.units,
                freeze_embeddings=args.freeze_emb,
                gpu=args.gpu)

# Load pretrained base VAE with c ~ p(c)
model.load_state_dict(
    torch.load('models/vae' + utils.getModelName(args) + '.bin'))

def kl_weight(it):
    """Anneal the KL-term weight from 0 toward 1 as iteration *it* grows.

    Credit to: https://github.com/kefirski/pytorch_RVAE/
    """
    return 0.5 * (math.tanh((it - 3500) / 1000) + 1)


def temp(it):
    """
    Softmax temperature annealing
    1 -> 0
    """