Example no. 1
def epoch(self, epoch_index):
    # Update the exponential moving average (EMA) copy of the generator weights.
    for p, avg_p in zip(self.trainer.generator.parameters(),
                        self.my_g_clone):
        avg_p.mul_(self.old_weight).add_((1.0 - self.old_weight) * p.data)
    if epoch_index % self.output_snapshot_ticks == 0:
        z = next(self.sample_fn(self.samples_count))
        gen_input = cudize(z)
        # Temporarily load the averaged weights into the generator.
        original_param = self.flatten_params(self.trainer.generator)
        self.load_params(self.my_g_clone, self.trainer.generator)
        dest = os.path.join(
            self.checkpoints_dir,
            SaverPlugin.last_pattern.format(
                'smooth_generator',
                '{:06}'.format(self.trainer.cur_nimg // 1000)))
        torch.save(
            {
                'cur_nimg': self.trainer.cur_nimg,
                'model': self.trainer.generator.state_dict()
            }, dest)
        out = generate_samples(self.trainer.generator, gen_input)
        # Restore the original (non-averaged) weights.
        self.load_params(original_param, self.trainer.generator)
        frequency = self.max_freq * out.shape[2] / self.seq_len
        images = self.get_images(frequency, epoch_index, out)
        for i, image in enumerate(images):
            imwrite(
                os.path.join(self.checkpoints_dir,
                             '{}_{}.png'.format(epoch_index, i)), image)
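The plugin's flatten_params and load_params helpers are not part of this snippet. A minimal sketch of what they could look like, assuming they simply copy the parameter tensors out of and back into the generator (the real methods may differ):

import copy

def flatten_params(model):
    # Deep-copy the current parameter tensors so they can be restored later.
    return [copy.deepcopy(p.data) for p in model.parameters()]

def load_params(flattened, model):
    # Copy the stored tensors back into the model, in the same order.
    for p, stored in zip(model.parameters(), flattened):
        p.data.copy_(stored)

With helpers like these, the snapshot and the generated samples come from the EMA weights while training continues from the original ones.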
Example no. 2
def main():
    model = nn.Sequential(nn.Linear(D_in, H), nn.ReLU(), nn.Linear(H, D_out),
                          nn.Softmax(dim=1))
    loss_fn = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=learning_rate,
                                 weight_decay=0.01)
    lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, decay)

    transform = AdditiveGaussianNoise(std=0.2, shape=(D_in, ), seed=0)
    train_data = PolynomialDataset(points=D_in, transform=transform)
    train_loader = DataLoader(
        train_data,
        batch_size=3,
        shuffle=True,
        # num_workers=2,
    )

    # test_data = PolynomialDataset(points=D_in, repeat=85, transform=transform)
    samples = generate_samples(seed=1234)
    test_data = list(islice(samples, 256))
    test_inputs = torch.stack([x for x, l in test_data])
    test_labels = torch.tensor([l for x, l in test_data])

    validation_loader = train_loader

    train_step_ = partial(train_step, model, loss_fn, optimizer, lr_scheduler,
                          train_loader)

    fmt_str = "{:5d} {:.1f} {:7.3f} {:7.3f} {:.1f}"
    col_str = "{:>5} {:>4} {:>7} {:>7} {:>4}"
    print(col_str.format("epoch", "acc", "loss", "valloss", "lr"))

    for epoch in range(1000):
        loss = train_step_()

        if epoch % 100 != 0:
            continue

        test_accuracy = top1_accuracy(model, test_inputs, test_labels)
        validation_loss = validation_step(model, loss_fn, validation_loader)

        print("{:5d} {:.1f} {:7.3f} {:7.3f} {:.1f}".format(
            epoch,
            100 * test_accuracy,
            loss,
            validation_loss,
            math.log(lr_scheduler.get_last_lr()[0]),
        ))
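train_step and validation_step are defined elsewhere; below is a minimal sketch consistent with how they are called above (the exact behaviour, e.g. stepping the scheduler once per epoch, is an assumption):

import torch

def train_step(model, loss_fn, optimizer, lr_scheduler, loader):
    # One pass over the training data; returns the mean batch loss.
    model.train()
    total, batches = 0.0, 0
    for inputs, labels in loader:
        optimizer.zero_grad()
        loss = loss_fn(model(inputs), labels)
        loss.backward()
        optimizer.step()
        total += loss.item()
        batches += 1
    lr_scheduler.step()
    return total / max(batches, 1)

def validation_step(model, loss_fn, loader):
    # Mean loss over the loader, without tracking gradients.
    total, batches = 0.0, 0
    with torch.no_grad():
        for inputs, labels in loader:
            total += loss_fn(model(inputs), labels).item()
            batches += 1
    return total / max(batches, 1)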
Example no. 3
def main(length=40, num_epochs=20):
    '''
    Build and train an LSTM network to solve the XOR problem.
    '''
    X_train, y_train, X_test, y_test = generate_samples(length=length)
    model = build_model()
    history = model.fit(X_train,
                        y_train,
                        epochs=num_epochs,
                        batch_size=32,
                        validation_split=0.10,
                        shuffle=False)

    # Evaluate model on test set
    preds = model.predict(X_test)
    preds = np.round(preds[:, 0]).astype('float32')
    acc = (np.sum(preds == y_test) / len(y_test)) * 100
    print('Accuracy: {:.2f}%'.format(acc))

    # Plotting loss and accuracy
    model_plot(history)
    return
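build_model() is not shown here. A plausible Keras sketch, assuming a single LSTM over a (length, 1) bit sequence with a sigmoid output for the binary XOR/parity target:

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense

def build_model(length=40):
    model = Sequential([
        LSTM(32, input_shape=(length, 1)),   # one bit per timestep
        Dense(1, activation='sigmoid'),      # parity of the sequence so far
    ])
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model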
Example no. 4
        classifier = Classifier()
        sys.exit("Need to train a classifier!")
        # TODO: train classifier

    # directories for generated samples
    dir_results = model_prefix + 'results'
    dir_samples = model_prefix + 'samples'
    create_directories(dir_results, dir_samples)

    ########## TEST MODE ##########
    if args.epochs is None:
        # load generator
        G = torch.load(model_prefix + 'generator.pt').to(device)
        print('Generator loaded!')
        # generate samples
        generate_samples(G, dir_samples, args.batchsize, num_samples=4096)
        sampleloader = get_sample_loader(dir_samples, args.batchsize,
                                         image_size)
        print('Samples generated!')
        # compute fid score with test set
        fid_score = get_fid_score(classifier, sampleloader, testloader)
        sys.exit("FID score from test set: " + str(fid_score))

    ########## TRAIN MODE ##########
    try:
        if not args.create:
            G = torch.load(model_prefix + 'generator.pt').to(device)
            D = torch.load(model_prefix + 'discriminator.pt').to(device)
            Model = torch.load(model_prefix + 'model.pt').to(device)
            print('Model loaded!')
        else:
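create_directories (used above for the results and samples folders) is not included in the excerpt; a one-line sketch that fits the call site:

import os

def create_directories(*dirs):
    # Create each directory if it does not already exist.
    for d in dirs:
        os.makedirs(d, exist_ok=True)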
Example no. 5
    model = G(3, 3, 8)
    device = 0
##################################################################
    # define model's specific weights
    cpm2d = './weights/cpm/stb_hpm2d.pth'
    cpm3d = './weights/cpm/stb_hpm3d.pth'
    weights = "./weights/pix2pix/stb_net_G.pth"
    # init parameters
    evaluate = Evaluator(model, weights, cpm2d, cpm3d, device)
    opt = edict()
    opt.dataroot = "./datasets/stb-dataset/test"
    opt.isTrain = False
    dataset = STBdataset(opt)
    # generate samples
    output_path = os.path.join(TEMP_PATH, "STB")
    generate_samples(model, evaluate, dataset, output_path)
    # run evaluation
    samples_dir = [os.path.join(output_path, i) for i in os.listdir(output_path)]
    p = Pool()
    r = list(tqdm(p.imap(run_js, samples_dir),
                  total=len(samples_dir)))
    p.close()
    p.join()

    #os.system(f"node test.js {output_path}")
    run_evaluate(output_path)
##################################################################
    # opt.dataroot = "./datasets/rhd-dataset/test"
    # cpm2d = "./weights/cpm/rhd_hpm2d.pth"
    # weights = "./weights/pix2pix/rhd_net_G.pth"
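run_js is not defined in this excerpt. Judging from the commented-out os.system call above, a plausible definition is simply:

import os

def run_js(sample_dir):
    # Run the node.js evaluation script on one sample directory
    # (mirrors the commented-out `node test.js` call above).
    return os.system("node test.js {}".format(sample_dir))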
Example no. 6
import numpy as np

import utils
from revers import Rjmcmc
from blrMoveFactory import BlrMoveFactory
from stationaryDistributionFactory import StationaryDistributionFactory
from mcmcFactory import McmcFactory
from plotter import Plotter

np.random.seed(0)

line_points_x = [0, 5, 10, 15]
line_points_y = [5, 0, 0, 5]

var = 3
n = 100

xs, ys = utils.generate_samples(line_points_x, line_points_y, n, var)

stats = StationaryDistributionFactory(xs, ys)
moves = BlrMoveFactory()
mcmcs = McmcFactory(stats)

first_sample = [1, 0, 5, 5, 0, 14, 5]

if not stats.get_stationary(1).pdf(first_sample) > 0:
    raise Exception("First sample has zero probability")

rjmcmc = Rjmcmc(moves, mcmcs, stats)

samples = rjmcmc.sample(20000, (1, first_sample))

plotter = Plotter(xs, ys, line_points_x, line_points_y, samples)
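utils.generate_samples is not shown. A plausible sketch, assuming it draws n x-values over the breakpoint range and adds zero-mean Gaussian noise (variance var) to the piecewise-linear curve through (line_points_x, line_points_y):

import numpy as np

def generate_samples(line_points_x, line_points_y, n, var):
    # Sample x uniformly over the breakpoints and perturb the interpolated y.
    xs = np.random.uniform(min(line_points_x), max(line_points_x), size=n)
    ys = np.interp(xs, line_points_x, line_points_y)
    ys += np.random.normal(0.0, np.sqrt(var), size=n)
    return xs, ys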
Example no. 7
def epoch(self, epoch_index):
    gen_input = Variable(self.sample_fn(self.samples_count)).cuda()
    out = generate_samples(self.trainer.G, gen_input)
    for proc in self.output_postprocessors:
        proc(out, self.trainer.cur_nimg // 1000)
Example no. 8
import time

from utils import generate_samples, get_numbers_list
from bubble_sort import bubble_sort
from merge_sort import merge_sort
from radix_sort import radix_sort, radix_group_numbers
from selection_sort import selection_sort

# first we need to generate a huge amount of data

generate_samples(100000)

lst = get_numbers_list('data.txt')

start_time = time.time()
radix_group_numbers(lst)
end_time = time.time()

print 'radix sort took', end_time - start_time, 'seconds'


start_time = time.time()
merge_sort(lst)
end_time = time.time()

print 'merge sort took', end_time - start_time, 'seconds'


start_time = time.time()
bubble_sort(lst)
end_time = time.time()

print 'bubble sort took', end_time - start_time, 'seconds'
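generate_samples and get_numbers_list come from the local utils module and are not shown. A minimal sketch, under the assumption that they write and read one random integer per line of data.txt:

import random

def generate_samples(count, path='data.txt'):
    # Write `count` random integers, one per line.
    with open(path, 'w') as f:
        for _ in range(count):
            f.write('%d\n' % random.randint(0, 10 ** 6))

def get_numbers_list(path):
    # Read the integers back into a list.
    with open(path) as f:
        return [int(line) for line in f]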
Example no. 9
## Cat
image1_ = (1 - io.imread("data/shapes/cat_1.jpg")[:, :, 0] / 255)
image1 = resize(image1_, (200, 200), mode="reflect",
                anti_aliasing=False).astype("bool") * 1
## Rabbit
image2_ = (1 -
           io.imread("data/shapes/rabbit.png")[:, :, 1] / 255).astype("bool")
image2 = resize(image2_, (image1.shape[0], image1.shape[1]),
                mode="reflect",
                anti_aliasing=False).astype("bool") * 2

n_x, n_y = image1.shape  # placement size; not defined in the original snippet, inferred from the resize above
image = np.zeros((600, 600))
image[30:30 + n_x, 10:10 + n_y] = image1
image[320:320 + n_x, 350:350 + n_y] = image2[:, ::-1]

data = generate_samples(image, n_y, n_samples=60000, random_state=43)
X = data[0]
Y = data[1]

### Fit GMMs
from sklearn.model_selection import train_test_split

X_train, X_test = train_test_split(X, random_state=54)
Y_train, Y_test = train_test_split(Y, random_state=54)

n_components = 20

gmm_x = GaussianMixture(n_components=n_components, random_state=3).fit(X_train)
gmm_y = GaussianMixture(n_components=n_components,
                        random_state=34).fit(Y_train)
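Not part of the original snippet: once fitted, each mixture can generate new points with GaussianMixture.sample, which returns the samples together with their component labels, e.g.:

X_new, comp_x = gmm_x.sample(1000)
Y_new, comp_y = gmm_y.sample(1000)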
Example no. 10
from itertools import islice

import torch
from torch import nn

from utils import generate_samples

torch.manual_seed(0)

D_in = 10
H = 100  # TODO question: why do H=3 and H=4 produce bad results?
D_out = 3
decay = 0.999
learning_rate = 1e-1

loss_fn = nn.CrossEntropyLoss()
model = nn.Sequential(nn.Linear(D_in, H), nn.ReLU(), nn.Linear(H, D_out),
                      nn.Softmax(dim=1))
# model.apply(lambda m: hasattr(m, "weight") and nn.init.normal_(m.weight))

test_data = list(islice(generate_samples(seed=1234), 256))
test_inputs = torch.stack([x for x, l in test_data])
test_labels = torch.tensor([l for x, l in test_data])


def top1_accuracy() -> float:
    with torch.no_grad():
        preds = model(test_inputs)
    pred_labels = preds.detach().argmax(dim=-1)
    return (pred_labels == test_labels).sum().item() / len(test_labels)


optimizer = torch.optim.Adam(model.parameters(),
                             lr=learning_rate,
                             weight_decay=0.01)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, decay)
Example no. 11
def main(unused_argv):
    config_train = training_config()
    config_gen = generator_config()
    config_dis = discriminator_config()

    np.random.seed(config_train.seed)

    assert config_train.start_token == 0
    gen_data_loader = Gen_Data_loader(config_gen.gen_batch_size)
    likelihood_data_loader = Gen_Data_loader(config_gen.gen_batch_size)
    dis_data_loader = Dis_dataloader(config_dis.dis_batch_size)

    generator = Generator(config=config_gen)
    generator.build()

    rollout_gen = rollout(config=config_gen)

    target_params = pickle.load(open('save/target_params.pkl', 'rb'),
                                encoding='iso-8859-1')
    target_lstm = TARGET_LSTM(config=config_gen,
                              params=target_params)  # The oracle model

    discriminator = Discriminator(config=config_dis)
    discriminator.build_discriminator()

    pretrained_optimizer = tf.train.AdamOptimizer(
        config_train.gen_learning_rate)
    # `var_pretrained` is missing from this excerpt; the definition below is taken
    # from the matching code in Example no. 14 (the generator's 'teller' variables).
    var_pretrained = [
        v for v in tf.trainable_variables() if 'teller' in v.name
    ]
    gradients, variables = zip(*pretrained_optimizer.compute_gradients(
        generator.pretrained_loss, var_list=var_pretrained))
    gradients, _ = tf.clip_by_global_norm(gradients, config_train.grad_clip)
    gen_pre_update = pretrained_optimizer.apply_gradients(
        zip(gradients, variables))

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    generate_samples(sess, target_lstm, config_train.batch_size,
                     config_train.generated_num, config_train.positive_file)
    gen_data_loader.create_batches(config_train.positive_file)

    log = open('save/experiment-log.txt', 'w')
    print('Start pre-training generator....')

    log.write('pre-training...\n')

    for epoch in range(config_train.pretrained_epoch_num):
        gen_data_loader.reset_pointer()

        for it in range(gen_data_loader.num_batch):
            batch = gen_data_loader.next_batch()
            _, g_loss = sess.run(
                [gen_pre_update, generator.pretrained_loss],
                feed_dict={
                    generator.input_seqs_pre: batch,
                    generator.input_seqs_mask: np.ones_like(batch)
                })

        if epoch % config_train.test_per_epoch == 0:
            generate_samples(sess, generator, config_train.batch_size,
                             config_train.generated_num,
                             config_train.eval_file)
            likelihood_data_loader.create_batches(config_train.eval_file)
            test_loss = target_loss(sess, target_lstm, likelihood_data_loader)
            print('pre-train ', epoch, ' test_loss ', test_loss)
            buffer = 'epoch:\t' + str(epoch) + '\tnll:\t' + str(
                test_loss) + '\n'
            log.write(buffer)

    print('Start pre-training discriminator...')
    for t in range(config_train.dis_update_time_pre):
        print("Times: " + str(t))
        generate_samples(sess, generator, config_train.batch_size,
                         config_train.generated_num,
                         config_train.negative_file)
        dis_data_loader.load_train_data(config_train.positive_file,
                                        config_train.negative_file)
        for _ in range(config_train.dis_update_time_pre):
            dis_data_loader.reset_pointer()
            for it in range(dis_data_loader.num_batch):
                x_batch, y_batch = dis_data_loader.next_batch()
                feed_dict = {
                    discriminator.input_x:
                    x_batch,
                    discriminator.input_y:
                    y_batch,
                    discriminator.dropout_keep_prob:
                    config_dis.dis_dropout_keep_prob  # dropout
                }
                _ = sess.run(discriminator.train_op, feed_dict)

    train_adv_opt = tf.train.AdamOptimizer(config_train.gen_learning_rate)
    gradients, variables = zip(*train_adv_opt.compute_gradients(
        generator.gen_loss_adv, var_list=var_pretrained))
    gradients, _ = tf.clip_by_global_norm(gradients, config_train.grad_clip)
    train_adv_update = train_adv_opt.apply_gradients(zip(gradients, variables))

    # Initialize global variables of optimizer for adversarial training
    uninitialized_var = [
        e for e in tf.global_variables() if e not in tf.trainable_variables()
    ]
    init_vars_uninit_op = tf.variables_initializer(uninitialized_var)
    sess.run(init_vars_uninit_op)

    for total_batch in range(config_train.total_batch):
        for iter_gen in range(config_train.gen_update_time):
            samples = sess.run(generator.sample_word_list_reshpae)
            feed = {'pred_seq_rollout:0': samples}
            reward_rollout = []
            for iter_roll in range(config_train.rollout_num):
                rollout_list = sess.run(rollout_gen.sample_rollout_step,
                                        feed_dict=feed)
                rollout_list_stack = np.vstack(rollout_list)
                reward_rollout_seq = sess.run(
                    discriminator.ypred_for_auc,
                    feed_dict={
                        discriminator.input_x: rollout_list_stack,
                        discriminator.dropout_keep_prob: 1.0
                    })
                reward_last_tok = sess.run(discriminator.ypred_for_auc,
                                           feed_dict={
                                               discriminator.input_x: samples,
                                               discriminator.dropout_keep_prob:
                                               1.0
                                           })
                reward_allseq = np.concatenate(
                    (reward_rollout_seq, reward_last_tok), axis=0)[:, 1]
                reward_tmp = []
                for r in range(config_gen.gen_batch_size):
                    reward_tmp.append(reward_allseq[range(
                        r,
                        config_gen.gen_batch_size * config_gen.sequence_length,
                        config_gen.gen_batch_size)])

                reward_rollout.append(np.array(reward_tmp))
                rewards = np.sum(reward_rollout,
                                 axis=0) / config_train.rollout_num
                _, gen_loss = sess.run(
                    [train_adv_update, generator.gen_loss_adv],
                    feed_dict={
                        generator.input_seqs_adv: samples,
                        generator.rewards: rewards
                    })

        if total_batch % config_train.test_per_epoch == 0 or total_batch == config_train.total_batch - 1:
            generate_samples(sess, generator, config_train.batch_size,
                             config_train.generated_num,
                             config_train.eval_file)
            likelihood_data_loader.create_batches(config_train.eval_file)
            test_loss = target_loss(sess, target_lstm, likelihood_data_loader)
            buffer = 'epoch:\t' + str(total_batch) + '\tnll:\t' + str(
                test_loss) + '\n'
            print('total_batch: ', total_batch, 'test_loss: ', test_loss)
            log.write(buffer)

        for _ in range(config_train.dis_update_time_adv):
            generate_samples(sess, generator, config_train.batch_size,
                             config_train.generated_num,
                             config_train.negative_file)
            dis_data_loader.load_train_data(config_train.positive_file,
                                            config_train.negative_file)

            for _ in range(config_train.dis_update_time_adv):
                dis_data_loader.reset_pointer()
                for it in range(dis_data_loader.num_batch):
                    x_batch, y_batch = dis_data_loader.next_batch()
                    feed = {
                        discriminator.input_x:
                        x_batch,
                        discriminator.input_y:
                        y_batch,
                        discriminator.dropout_keep_prob:
                        config_dis.dis_dropout_keep_prob
                    }
                    _ = sess.run(discriminator.train_op, feed)

    log.close()
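generate_samples is imported from the repo's utils and not shown above. In most SeqGAN-style codebases it looks roughly like the sketch below; the generate(sess) method and the plain-text output format are assumptions about this particular repo:

def generate_samples(sess, trainable_model, batch_size, generated_num, output_file):
    # Sample generated_num sequences in batches and write them as space-separated token ids.
    generated_samples = []
    for _ in range(int(generated_num / batch_size)):
        generated_samples.extend(trainable_model.generate(sess))
    with open(output_file, 'w') as fout:
        for sample in generated_samples:
            fout.write(' '.join(str(token) for token in sample) + '\n')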
Example no. 12
def main():
    random.seed(SEED)
    np.random.seed(SEED)
    assert START_TOKEN == 0
    vocab_w2idx, vocab_idx2w, len_vocab_w2idx = utils.load_vocab(vacab_file)

    gen_data_loader = Gen_Data_loader(BATCH_SIZE, seq_len=SEQ_LENGTH)
    dis_data_loader = Dis_dataloader(BATCH_SIZE, seq_len=SEQ_LENGTH)

    generator = Generator(vocab_size, BATCH_SIZE, EMB_DIM, HIDDEN_DIM, SEQ_LENGTH, START_TOKEN)

    discriminator = Discriminator(sequence_length=SEQ_LENGTH, num_classes=2, vocab_size=vocab_size, embedding_size=dis_embedding_dim,
                                filter_sizes=dis_filter_sizes, num_filters=dis_num_filters, l2_reg_lambda=dis_l2_reg_lambda)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())

    gen_data_loader.create_batches(positive_file)
    print("gen_data_loader num_batches: ", gen_data_loader.num_batch)


    #  pre-train generator
    print 'Start pre-training...'
    for epoch in xrange(PRE_EPOCH_NUM):
        loss = pre_train_epoch(sess, generator, gen_data_loader)
        if epoch % 1 == 0:
            utils.test_demo(sess, generator, 8, 8, eval_file, vocab_idx2w, epoch)
            print 'pre-train epoch ', epoch, 'train_loss ', loss

    print 'Start pre-training discriminator...'
    # Train the discriminator for 3 epochs on the generated data and repeat this 10 times
    for _ in range(10):
        utils.generate_samples(sess, generator,  64, generated_num, negative_file)
        dis_data_loader.load_train_data(positive_file, negative_file)
        for _ in range(3):
            dis_data_loader.reset_pointer()
            for it in xrange(dis_data_loader.num_batch):
                x_batch, y_batch = dis_data_loader.next_batch()
                feed = {
                    discriminator.input_x: x_batch,
                    discriminator.input_y: y_batch,
                    discriminator.dropout_keep_prob: dis_dropout_keep_prob
                }
                _ = sess.run(discriminator.train_op, feed)

    rollout = ROLLOUT(generator, 0.8)

    print '#########################################################################'
    print 'Start Adversarial Training...'
    for total_batch in range(TOTAL_BATCH):
        # Train the generator for one step
        for it in range(1):
            samples = generator.generate(sess)
            rewards = rollout.get_reward(sess, samples, 16, discriminator)
            feed = {generator.x: samples, generator.rewards: rewards}
            _ = sess.run(generator.g_updates, feed_dict=feed)

        # Test
        if total_batch % 5 == 0 or total_batch == TOTAL_BATCH - 1:
            print 'adversarial-train epoch ', total_batch
            utils.test_demo(sess, generator, 8, 8, eval_file, vocab_idx2w, total_batch)
        # Update roll-out parameters
        rollout.update_params()

        # Train the discriminator
        for _ in range(5):
            dis_data_loader.load_train_data(positive_file, negative_file)

            for _ in range(3):
                dis_data_loader.reset_pointer()
                for it in xrange(dis_data_loader.num_batch):
                    x_batch, y_batch = dis_data_loader.next_batch()
                    feed = {
                        discriminator.input_x: x_batch,
                        discriminator.input_y: y_batch,
                        discriminator.dropout_keep_prob: dis_dropout_keep_prob
                    }
                    _ = sess.run(discriminator.train_op, feed)
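pre_train_epoch is not shown in this excerpt. A sketch of the usual SeqGAN implementation; that the generator exposes a pretrain_step(sess, batch) call is an assumption here:

import numpy as np

def pre_train_epoch(sess, trainable_model, data_loader):
    # One epoch of MLE pre-training; returns the mean supervised loss.
    supervised_g_losses = []
    data_loader.reset_pointer()
    for it in range(data_loader.num_batch):
        batch = data_loader.next_batch()
        _, g_loss = trainable_model.pretrain_step(sess, batch)
        supervised_g_losses.append(g_loss)
    return np.mean(supervised_g_losses)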
Example no. 13
def main(unused_argv):
    gen_data_loader = dataloader.Gen_data_loader(64)
    dis_data_loader = dataloader.Dis_data_loader(64)
    likelihood_data_loader = dataloader.Gen_data_loader(64)

    gen_model = generator.Generator()
    gen_model.build()
    dis_model = discriminator.Discriminator()
    dis_model.build()
    rollout_model = rollout.rollout()

    target_params = pickle.load(open("save/target_params.pkl", "rb"),
                                encoding="iso-8859-1")
    target_lstm_model = target_lstm.TARGET_LSTM(params=target_params)

    # Build optimizer op for pretraining
    pretrained_optimizer = tf.train.AdamOptimizer(1e-2)
    var_pretrained = [
        v for v in tf.trainable_variables() if "teller" in v.name
    ]
    gradients1, variables1 = zip(*pretrained_optimizer.compute_gradients(
        gen_model.pretrained_loss, var_list=var_pretrained))
    gradients1, _ = tf.clip_by_global_norm(gradients1, 5.0)
    gen_pre_update = pretrained_optimizer.apply_gradients(
        zip(gradients1, variables1))

    # Build optimizer op for adversarial training
    advtrained_optimizer = tf.train.AdamOptimizer(1e-2)
    gradients2, variables2 = zip(*advtrained_optimizer.compute_gradients(
        gen_model.adversarial_loss, var_list=var_pretrained))
    gradients2, _ = tf.clip_by_global_norm(gradients2, 5.0)
    gen_adv_update = advtrained_optimizer.apply_gradients(
        zip(gradients2, variables2))

    # `config` is not defined in this snippet; a standard GPU session config is assumed.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())

    log = open("save/log.txt", "w", encoding="utf-8")

    print("Start pre-training generator.....")
    utils.generate_samples(sess, target_lstm_model, 64, 1000,
                           "save/real_data.txt")
    gen_data_loader.create_batches("save/real_data.txt")
    log.write("Start pre-training generator.....\n")
    for epoch in range(20):
        gen_data_loader.reset_pointer()
        for it in range(gen_data_loader.num_batch):
            batch = gen_data_loader.next_batch()
            _, g_loss = sess.run(
                [gen_pre_update, gen_model.pretrained_loss],
                feed_dict={
                    gen_model.input_seqs_pre: batch,
                    gen_model.input_seqs_mask: np.ones_like(batch)
                })

        if epoch % 5 == 0:
            utils.generate_samples(sess, gen_model, 64, 1000,
                                   "save/eval_data.txt")
            likelihood_data_loader.create_batches("save/eval_data.txt")
            test_loss = utils.target_loss(sess, target_lstm_model,
                                          likelihood_data_loader)
            print("epoch:", epoch, " test_loss:", test_loss)
            buffer = 'epoch:\t' + str(epoch) + '\tnll:\t' + str(
                test_loss) + '\n'
            log.write(buffer)

    print("Start pre-training discriminator.....")
    for t in range(2):
        print("Times: " + str(t))
        utils.generate_samples(sess, gen_model, 64, 1000,
                               "save/generate_sample.txt")
        dis_data_loader.create_batches("save/real_data.txt",
                                       "save/generate_sample.txt")
        for epoch in range(20):
            dis_data_loader.reset_pointer()
            for it in range(dis_data_loader.num_batch):
                x_batch, y_batch = dis_data_loader.next_batch()
                sess.run(dis_model.train_op,
                         feed_dict={
                             dis_model.input_data: x_batch,
                             dis_model.input_label: y_batch,
                             dis_model.keep_prob: 0.5
                         })

    print("Start adversarial training.....")
    for total_batch in range(20):
        for gen_step in range(1):
            samples = sess.run(gen_model.sample_word_list_reshape)
            reward_rollout = []
            for rollout_num in range(16):
                rollout_list = sess.run(
                    rollout_model.sample_rollout_step,
                    feed_dict={rollout_model.pre_seq: samples})
                rollout_list_stack = np.vstack(rollout_list)
                reward_rollout_seq = sess.run(dis_model.ypred_for_auc,
                                              feed_dict={
                                                  dis_model.input_data:
                                                  rollout_list_stack,
                                                  dis_model.keep_prob: 1.0
                                              })
                reward_all_seq = reward_rollout_seq[:, 1]
                reward_tmp = []
                for r in range(64):
                    reward_tmp.append(reward_all_seq[range(r, 64 * 20, 64)])
                reward_rollout.append(np.array(reward_tmp))

            rewards = np.mean(reward_rollout, axis=0)
            _, gen_loss = sess.run(
                [gen_adv_update, gen_model.adversarial_loss],
                feed_dict={
                    gen_model.input_seqs_adv: samples,
                    gen_model.rewards: rewards
                })

        if total_batch % 5 == 0 or total_batch == 19:  # 19 is the last batch of range(20)
            utils.generate_samples(sess, gen_model, 64, 1000,
                                   "save/eval_data.txt")
            likelihood_data_loader.create_batches("save/eval_data.txt")
            test_loss = utils.target_loss(sess, target_lstm_model,
                                          likelihood_data_loader)
            print('total_batch:', total_batch, ' test_loss:', test_loss)
            buffer = 'total_batch:\t' + str(total_batch) + '\tnll:\t' + str(
                test_loss) + '\n'
            log.write(buffer)

        for dis_step in range(5):
            utils.generate_samples(sess, gen_model, 64, 1000,
                                   "save/generate_sample.txt")
            dis_data_loader.create_batches("save/real_data.txt",
                                           "save/generate_sample.txt")
            for epoch in range(3):
                dis_data_loader.reset_pointer()
                for it in range(dis_data_loader.num_batch):
                    x_batch, y_batch = dis_data_loader.next_batch()
                    sess.run(dis_model.train_op,
                             feed_dict={
                                 dis_model.input_data: x_batch,
                                 dis_model.input_label: y_batch,
                                 dis_model.keep_prob: 0.5
                             })

    log.close()
    sess.close()
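utils.target_loss is not shown. A sketch of the common SeqGAN helper that scores the generated data under the oracle; the attribute names (pretrain_loss, x) are assumptions for this repo:

import numpy as np

def target_loss(sess, target_lstm, data_loader):
    # Oracle negative log-likelihood of the generated sequences.
    nll = []
    data_loader.reset_pointer()
    for it in range(data_loader.num_batch):
        batch = data_loader.next_batch()
        g_loss = sess.run(target_lstm.pretrain_loss,
                          feed_dict={target_lstm.x: batch})
        nll.append(g_loss)
    return np.mean(nll)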
Example no. 14
def main(unused_argv):
    config_train = training_config()
    config_gen = generator_config()
    config_dis = discriminator_config()

    np.random.seed(config_train.seed)

    assert config_train.start_token == 0
    gen_data_loader = Gen_Data_loader(config_gen.gen_batch_size)
    likelihood_data_loader = Gen_Data_loader(config_gen.gen_batch_size)
    dis_data_loader = Dis_dataloader(config_dis.dis_batch_size)

    generator = Generator(config=config_gen)
    generator.build()

    rollout_gen = rollout(config=config_gen)

    #Build target LSTM
    target_params = pickle.load(open('save/target_params.pkl','rb'),encoding='iso-8859-1')
    target_lstm = TARGET_LSTM(config=config_gen, params=target_params) # The oracle model


    # Build discriminator
    discriminator = Discriminator(config=config_dis)
    discriminator.build_discriminator()


    # Build optimizer op for pretraining
    pretrained_optimizer = tf.train.AdamOptimizer(config_train.gen_learning_rate)
    var_pretrained = [v for v in tf.trainable_variables() if 'teller' in v.name]
    gradients, variables = zip(
        *pretrained_optimizer.compute_gradients(generator.pretrained_loss, var_list=var_pretrained))
    gradients, _ = tf.clip_by_global_norm(gradients, config_train.grad_clip)
    gen_pre_update = pretrained_optimizer.apply_gradients(zip(gradients, variables))

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    generate_samples(sess,target_lstm,config_train.batch_size,config_train.generated_num,config_train.positive_file)
    gen_data_loader.create_batches(config_train.positive_file)

    log = open('save/experiment-log.txt','w')
    print('Start pre-training generator....')

    log.write('pre-training...\n')

    for epoch in range(config_train.pretrained_epoch_num):
        gen_data_loader.reset_pointer()
        for it in range(gen_data_loader.num_batch):
            batch = gen_data_loader.next_batch()
            _,g_loss = sess.run([gen_pre_update,generator.pretrained_loss],feed_dict={generator.input_seqs_pre:batch,
                                                                                      generator.input_seqs_mask:np.ones_like(batch)})

        if epoch % config_train.test_per_epoch == 0:
            # Evaluate: generate a batch of sequences with the generator,
            generate_samples(sess,generator,config_train.batch_size,config_train.generated_num,config_train.eval_file)
            # create a data loader over these sequences,
            likelihood_data_loader.create_batches(config_train.eval_file)
            # compute the cross-entropy loss (NLL) under the oracle,
            test_loss = target_loss(sess,target_lstm,likelihood_data_loader)
            # and print / log the result.
            print('pre-train ',epoch, ' test_loss ',test_loss)
            buffer = 'epoch:\t' + str(epoch) + '\tnll:\t' + str(test_loss) + '\n'
            log.write(buffer)


    print('Start pre-training discriminator...')
    for t in range(config_train.dis_update_time_pre):
        print("Times: " + str(t))
        generate_samples(sess,generator,config_train.batch_size,config_train.generated_num,config_train.negative_file)
        dis_data_loader.load_train_data(config_train.positive_file,config_train.negative_file)
        for _ in range(config_train.dis_update_time_pre):
            dis_data_loader.reset_pointer()
            for it in range(dis_data_loader.num_batch):
                x_batch,y_batch = dis_data_loader.next_batch()
                feed_dict = {
                    discriminator.input_x : x_batch,
                    discriminator.input_y : y_batch,
                    discriminator.dropout_keep_prob : config_dis.dis_dropout_keep_prob
                }
                _ = sess.run(discriminator.train_op,feed_dict)



    # Build optimizer op for adversarial training
    train_adv_opt = tf.train.AdamOptimizer(config_train.gen_learning_rate)
    gradients, variables = zip(*train_adv_opt.compute_gradients(generator.gen_loss_adv, var_list=var_pretrained))
    gradients, _ = tf.clip_by_global_norm(gradients, config_train.grad_clip)
    train_adv_update = train_adv_opt.apply_gradients(zip(gradients, variables))

    # Initialize global variables of optimizer for adversarial training
    uninitialized_var = [e for e in tf.global_variables() if e not in tf.trainable_variables()]
    init_vars_uninit_op = tf.variables_initializer(uninitialized_var)
    sess.run(init_vars_uninit_op)

    # Start adversarial training
    for total_batch in range(config_train.total_batch):
        for iter_gen in range(config_train.gen_update_time):
            samples = sess.run(generator.sample_word_list_reshpae)

            feed = {'pred_seq_rollout:0':samples}
            reward_rollout = []
            for iter_roll in range(config_train.rollout_num):
                rollout_list = sess.run(rollout_gen.sample_rollout_step,feed_dict=feed)
                # np.vstack stacks the rollout arrays vertically (row-wise).
                rollout_list_stack = np.vstack(rollout_list)
                reward_rollout_seq = sess.run(discriminator.ypred_for_auc,feed_dict={
                    discriminator.input_x:rollout_list_stack,discriminator.dropout_keep_prob:1.0
                })
                reward_last_tok = sess.run(discriminator.ypred_for_auc,feed_dict={
                    discriminator.input_x:samples,discriminator.dropout_keep_prob:1.0
                })
                reward_allseq = np.concatenate((reward_rollout_seq,reward_last_tok),axis=0)[:,1]
                reward_tmp = []
                for r in range(config_gen.gen_batch_size):
                    reward_tmp.append(reward_allseq[range(r,config_gen.gen_batch_size * config_gen.sequence_length,config_gen.gen_batch_size)])

                reward_rollout.append(np.array(reward_tmp))
                rewards = np.sum(reward_rollout,axis = 0) / config_train.rollout_num
                _,gen_loss = sess.run([train_adv_update,generator.gen_loss_adv],feed_dict={generator.input_seqs_adv:samples,
                                                                                           generator.rewards:rewards})


        if total_batch % config_train.test_per_epoch == 0 or total_batch == config_train.total_batch - 1:
            generate_samples(sess, generator, config_train.batch_size, config_train.generated_num, config_train.eval_file)
            likelihood_data_loader.create_batches(config_train.eval_file)
            test_loss = target_loss(sess, target_lstm, likelihood_data_loader)
            buffer = 'epoch:\t' + str(total_batch) + '\tnll:\t' + str(test_loss) + '\n'
            print ('total_batch: ', total_batch, 'test_loss: ', test_loss)
            log.write(buffer)


        for _ in range(config_train.dis_update_time_adv):
            generate_samples(sess,generator,config_train.batch_size,config_train.generated_num,config_train.negative_file)
            dis_data_loader.load_train_data(config_train.positive_file,config_train.negative_file)

            for _ in range(config_train.dis_update_time_adv):
                dis_data_loader.reset_pointer()
                for it in range(dis_data_loader.num_batch):
                    x_batch,y_batch = dis_data_loader.next_batch()
                    feed = {
                        discriminator.input_x:x_batch,
                        discriminator.input_y:y_batch,
                        discriminator.dropout_keep_prob:config_dis.dis_dropout_keep_prob
                    }
                    _ = sess.run(discriminator.train_op,feed)

    log.close()
Example no. 15
def epoch(self, epoch_index):
    gen_input = Variable(self.sample_fn(self.samples_count)).cuda()
    out = generate_samples(self.trainer.G, gen_input)
    for proc in self.output_postprocessors:
        proc(out, self.trainer.cur_nimg // 1000)
Example no. 16
def train(model):
    # Checks what kind of model it is training
    if model.model_type == "infoGAN":
        is_infogan = True
    else:
        is_infogan = False

    # Makes sure we have a dir to save the model and training info
    if not os.path.exists(model.save_dir):
        os.makedirs(model.save_dir)

    # Creates artificial labels that tell the loss whether D's prediction should be 1 (real) or 0 (fake)
    if model.gpu_mode:
        y_real_ = Variable(torch.ones(model.batch_size,
                                      1).cuda(model.gpu_id))  # all ones
        y_fake_ = Variable(
            torch.zeros(model.batch_size, 1).cuda(model.gpu_id))  # all zeros
    else:
        y_real_ = Variable(torch.ones(model.batch_size, 1))
        y_fake_ = Variable(torch.zeros(model.batch_size, 1))

    model.D.train()  # sets discriminator in train mode

    # TRAINING LOOP
    start_time = time.time()
    print('[*] TRAINING STARTS')
    for epoch in range(model.epoch):
        model.G.train()  # sets generator in train mode
        epoch_start_time = time.time()

        # For each minibatch returned by the data_loader
        for step, (x_, _) in enumerate(model.data_loader):
            if step == model.data_loader.dataset.__len__() // model.batch_size:
                break

            # Creates a minibatch of latent vectors
            z_ = torch.rand((model.batch_size, model.z_dim))

            # Creates a minibatch of discrete and continuous codes
            c_disc_ = torch.from_numpy(
                np.random.multinomial(
                    1,
                    model.c_disc_dim * [float(1.0 / model.c_disc_dim)],
                    size=[model.batch_size])).type(torch.FloatTensor)
            for i in range(model.n_disc_code - 1):
                c_disc_ = torch.cat([
                    c_disc_,
                    torch.from_numpy(
                        np.random.multinomial(
                            1,
                            model.c_disc_dim * [float(1.0 / model.c_disc_dim)],
                            size=[model.batch_size])).type(torch.FloatTensor)
                ],
                                    dim=1)
            c_cont_ = torch.from_numpy(
                np.random.uniform(-1,
                                  1,
                                  size=(model.batch_size,
                                        model.c_cont_dim))).type(
                                            torch.FloatTensor)

            # Convert to Variables (sends to GPU if needed)
            if model.gpu_mode:
                x_ = Variable(x_.cuda(model.gpu_id))
                z_ = Variable(z_.cuda(model.gpu_id))
                c_disc_ = Variable(c_disc_.cuda(model.gpu_id))
                c_cont_ = Variable(c_cont_.cuda(model.gpu_id))
            else:
                x_ = Variable(x_)
                z_ = Variable(z_)
                c_disc_ = Variable(c_disc_)
                c_cont_ = Variable(c_cont_)

            # update D network
            model.D_optimizer.zero_grad()

            D_real, _, _ = model.D(x_, model.dataset)
            D_real_loss = model.BCE_loss(D_real, y_real_)

            G_ = model.G(z_, c_cont_, c_disc_, model.dataset)
            D_fake, _, _ = model.D(G_, model.dataset)
            D_fake_loss = model.BCE_loss(D_fake, y_fake_)

            D_loss = D_real_loss + D_fake_loss
            model.train_history['D_loss'].append(D_loss.data[0])

            D_loss.backward(retain_graph=is_infogan)
            model.D_optimizer.step()

            # update G network
            model.G_optimizer.zero_grad()

            G_ = model.G(z_, c_cont_, c_disc_, model.dataset)
            D_fake, D_cont, D_disc = model.D(G_, model.dataset)

            G_loss = model.BCE_loss(D_fake, y_real_)
            model.train_history['G_loss'].append(G_loss.data[0])

            G_loss.backward(retain_graph=is_infogan)
            model.G_optimizer.step()

            # information loss
            if is_infogan:
                disc_loss = 0
                for i, ce_loss in enumerate(model.CE_losses):
                    i0 = i * model.c_disc_dim
                    i1 = (i + 1) * model.c_disc_dim
                    disc_loss += ce_loss(D_disc[:, i0:i1],
                                         torch.max(c_disc_[:, i0:i1], 1)[1])
                cont_loss = model.MSE_loss(D_cont, c_cont_)
                info_loss = disc_loss + cont_loss
                model.train_history['info_loss'].append(info_loss.data[0])

                info_loss.backward()
                model.info_optimizer.step()

            # Prints training info every 100 steps
            if ((step + 1) % 100) == 0:
                if is_infogan:
                    print(
                        "Epoch: [{:2d}] [{:4d}/{:4d}] D_loss: {:.8f}, G_loss: {:.8f}, info_loss: {:.8f}"
                        .format((epoch + 1), (step + 1),
                                model.data_loader.dataset.__len__() //
                                model.batch_size, D_loss.data[0],
                                G_loss.data[0], info_loss.data[0]))
                else:
                    print(
                        "Epoch: [{:2d}] [{:4d}/{:4d}] D_loss: {:.8f}, G_loss: {:.8f}"
                        .format((epoch + 1), (step + 1),
                                model.data_loader.dataset.__len__() //
                                model.batch_size, D_loss.data[0],
                                G_loss.data[0]))

        model.train_history['per_epoch_time'].append(time.time() -
                                                     epoch_start_time)

        # Saves samples
        utils.generate_samples(
            model, os.path.join(model.save_dir, "epoch{}.png".format(epoch)))

    model.train_history['total_time'].append(time.time() - start_time)
    print("Avg one epoch time: %.2f, total %d epochs time: %.2f" %
          (np.mean(model.train_history['per_epoch_time']), model.epoch,
           model.train_history['total_time'][0]))
    print("[*] TRAINING FINISHED")

    # Saves the model
    model.save()

    # Saves the plot of losses for G and D
    utils.save_loss_plot(model.train_history,
                         filename=os.path.join(model.save_dir, "curves.png"),
                         infogan=is_infogan)
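utils.generate_samples(model, path) is not included in the excerpt. A plausible sketch that samples a fixed batch of latent vectors (plus infoGAN codes) and writes an image grid to path; it is written against a current PyTorch/torchvision API, while the snippet above targets an older release (Variable, .data[0]), so treat it as an illustration only:

import torch
from torchvision.utils import save_image

def generate_samples(model, path, n=64):
    # Forward a fixed batch through G and save an 8x8 image grid.
    model.G.eval()
    with torch.no_grad():
        z = torch.rand(n, model.z_dim)
        c_cont = torch.zeros(n, model.c_cont_dim)
        # Fixed (all-zero) discrete codes just for visualisation; one-hot samples would also work.
        c_disc = torch.zeros(n, model.n_disc_code * model.c_disc_dim)
        if model.gpu_mode:
            z, c_cont, c_disc = [t.cuda(model.gpu_id) for t in (z, c_cont, c_disc)]
        samples = model.G(z, c_cont, c_disc, model.dataset)
    save_image(samples.cpu(), path, nrow=8, normalize=True)
    model.G.train()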