Example #1
import argparse

import numpy as np
import torch as t

# BatchLoader, Parameters and RVAE are this project's own classes; the import
# paths below are assumptions and may need adjusting to the repo layout
from utils.batch_loader import BatchLoader
from utils.parameters import Parameters
from model.rvae import RVAE

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Sampler')
    parser.add_argument('--use-cuda',
                        type=bool,
                        default=True,
                        metavar='CUDA',
                        help='use cuda (default: True)')
    parser.add_argument('--num-sample',
                        type=int,
                        default=100,
                        metavar='NS',
                        help='num samplings (default: 100)')

    args = parser.parse_args()

    batch_loader = BatchLoader('')
    parameters = Parameters(batch_loader.max_word_len,
                            batch_loader.max_seq_len,
                            batch_loader.words_vocab_size,
                            batch_loader.chars_vocab_size)

    # build the model and restore the pretrained weights
    rvae = RVAE(parameters)
    rvae.load_state_dict(t.load('trained_RVAE_code'))
    if args.use_cuda:
        rvae = rvae.cuda()

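    # sample args.num_sample sentences from the prior: draw z ~ N(0, I),
    # then decode (sequence length capped at 50) and write each result to file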
    with open("code_sampling_100.txt", 'w') as cs:
        for iteration in range(args.num_sample):
            seed = np.random.normal(size=[1, parameters.latent_variable_size])
            result = rvae.sample(batch_loader, 50, seed, args.use_cuda)
            # print(result)
            # print()
            cs.write(result + '\n')
Example #2
import torch as t
from torch.optim import Adam

# Preprocess, Batch, Parameter, RVAE and the pretrained `embedding_model`
# are project-specific and assumed to be imported/defined earlier

# load training sentences, one per line
with open('train.txt', 'r') as f:
    data = f.readlines()

preprocess = Preprocess(embedding_model)
input_data = preprocess.to_sequence(data)  # turn raw text into model input sequences
# embedding = preprocess.embedding()
# np.save('embedding', embedding)

batch_loader = Batch(input_data, 0.7)  # 0.7 is presumably the train/test split ratio

params = Parameter(word_embed_size=300,
                   encode_rnn_size=600,
                   latent_variable_size=1400,
                   decode_rnn_size=600,
                   vocab_size=preprocess.vocab_size,
                   embedding_path='embedding.npy')
model = RVAE(params)
use_cuda = t.cuda.is_available()
if use_cuda:  # only move the model to GPU when CUDA is actually available
    model = model.cuda()
optimizer = Adam(model.learnable_parameters(), 1e-3)
train_step = model.trainer(optimizer)

ce_list = []    # reconstruction (cross-entropy) loss per step
kld_list = []   # KL divergence per step
coef_list = []  # KL weight (annealing coefficient) per step
test_batch = batch_loader.test_next_batch(1)

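# training loop: train_next_batch(1) streams batches of size 1; the commented
# block below can be re-enabled to periodically decode test samples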
for i, batch in enumerate(batch_loader.train_next_batch(1)):
    # if i % 20 == 0:
    #     sample = next(test_batch)
    #     sentence = model.sample(10, sample, use_cuda)
    #     sentence = [preprocess.index_to_word(i) for i in sentence]
    #     print(' '.join(sentence))