Example #1
def mnist():
	mnist = input_data.read_data_sets("mnist/")
	real_images = mnist.train.images.reshape(-1, 28, 28, 1)
	blurred_images = np.array([blur(image, 2) for image in real_images])
	gan = DCGAN(batch_size=100, Lambda=1e1, contextual='L1')
	gan.train(real_images, blurred_images, report_iter=100, visualize_iter=100)
	sample_idx = np.random.randint(len(blurred_images), size=10)
	generated_images = gan.reconstruct_image(blurred_images[sample_idx])
	save_grayscale(real_images[sample_idx], blurred_images[sample_idx], generated_images, 'result/mnist')
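Note: `blur`, `save_grayscale` and the `DCGAN` class come from the surrounding project and are not part of this excerpt. As a rough, minimal sketch of what a `blur(image, sigma)` helper could look like (assuming SciPy is available; the project's actual helper may differ):

from scipy.ndimage import gaussian_filter

def blur(image, sigma):
    # Hypothetical stand-in: Gaussian-blur an H x W x C image; the channel axis is left untouched.
    return gaussian_filter(image, sigma=(sigma, sigma, 0))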
Example #2
def testing():
    sess, summary_writer = setup_tensorflow()

    # Prepare directories
    Tfilenames = prepare_dirs(delete_train_dir=False)

    features, labels = image_processing.get_inputs(sess, Tfilenames)

    # Create and initialize model
    [gene_minput, gene_moutput,
     gene_output, gene_var_list,
     disc_real_output, disc_fake_output, disc_var_list] = \
        DCGAN.create_model(sess, features, labels)

    # Restore variables from checkpoint_dir
    saver = tf.train.Saver()
    filename = 'checkpoint_new.txt'
    filename = os.path.join('checkpoint', filename)
    saver.restore(sess, filename)

    print("Restore model")

    # region prediction test
    predict_restore = gene_moutput

    # Prepare directories
    #filenames = prepare_dirs(delete_train_dir=False)
    test_filenames = tf.gfile.ListDirectory('test_img')
    #assert len(test_filenames) == batch_size, "Number of test images should be exactly the same as the batch size!"
    test_filenames = sorted(test_filenames)
    random.shuffle(test_filenames)
    test_filenames = [os.path.join('test_img', f) for f in test_filenames]
    # Put your own images here, but keep the count at 16 (the batch size), e.g.:
    #test_filenames = ['1.jpg', '2.jpg', '3.jpg', '4.jpg', '5.jpg',
    #                  '6.jpg', '7.jpg', '8.jpg', '9.jpg', '10.jpg',
    #                  '11.jpg', '12.jpg', '13.jpg', '14.jpg', '15.jpg', '16.jpg']

    test_features, test_labels = image_processing.get_inputs(
        sess, test_filenames)
    test_img4_input, test_img4_original = sess.run(
        [test_features, test_labels])

    feed_dict = {gene_minput: test_img4_input}
    # test_img5 = test_img4_input.eval(session=sess)
    # feed_dict={gene_minput:test_img5}
    prob = sess.run(predict_restore, feed_dict)
    # endregion prediction test

    td = TrainData(locals())
    # with max_samples=10 the test outputs 10 result images
    train.summarize_progress(td,
                             test_img4_input,
                             test_img4_original,
                             prob,
                             3,
                             'out',
                             max_samples=10,
                             test=True)

    print("Finish testing")
Example #3
def _train(training_images, epochs, batch_size, output_dir, keep_checkpoints):
    # Extract image size from image loading routine
    data = DCGAN.ISIC_data(training_images, randomize=False, seed=511)
    image_size = data.size

    # Seed tensorflow
    tf.set_random_seed(11223344)

    generator = DCGAN.G_conv(image_size=image_size)
    discriminator = DCGAN.D_conv(image_size=image_size)

    # run
    dcgan = DCGAN.DCGAN()
    dcgan.create(generator, discriminator, data, learning_rate=0.1e-4)
    dcgan.train(output_dir,
                training_epochs=epochs,
                checkpoints=epochs / 10,
                batch_size=batch_size,
                keep_checkpoints=keep_checkpoints)
Example #4
def training():
    # Prepare directories

    all_filenames = prepare_dirs(delete_train_dir=True)

    # Setup global tensorflow state
    sess, summary_writer = setup_tensorflow()

    # Separate training and test sets
    train_filenames = all_filenames[:-16]
    test_filenames = all_filenames[-16:]

    # Setup async input queues
    train_features, train_labels = image_processing.get_inputs(
        sess, train_filenames)
    test_features, test_labels = image_processing.get_inputs(
        sess, test_filenames)

    # Add some noise during training (think denoising autoencoders)
    noise_level = .03
    noisy_train_features = train_features + \
                           tf.random_normal(train_features.get_shape(), stddev=noise_level)

    # Create and initialize model
    [gene_minput, gene_moutput,
     gene_output, gene_var_list,
     disc_real_output, disc_fake_output, disc_var_list] = \
        DCGAN.create_model(sess, noisy_train_features, train_labels)

    gene_loss = DCGAN.generator_loss(disc_fake_output, gene_output,
                                     train_features)
    disc_real_loss, disc_fake_loss = \
        DCGAN.discriminator_loss(disc_real_output, disc_fake_output)
    disc_loss = tf.add(disc_real_loss, disc_fake_loss, name='disc_loss')

    (global_step, learning_rate, gene_minimize, disc_minimize) = \
        DCGAN.optimizers(gene_loss, gene_var_list,
                         disc_loss, disc_var_list)

    # Train model
    train_data = TrainData(locals())
    train.train_model(train_data)
Example #5
def get_config():
    return TrainConfig(
        model=Model(),
        dataflow=DCGAN.get_data(G.data),
        callbacks=[
            ModelSaver(),
            StatMonitorParamSetter('learning_rate', 'measure',
                                   lambda x: x * 0.5, 0, 10)
        ],
        steps_per_epoch=500,
        max_epoch=400,
    )
Example #6
def _generate(model, nimages, output_dir, overview):
    nimages = int(nimages)
    # Seed tensorflow
    tf.set_random_seed(11223344)

    dcgan = DCGAN.DCGAN()
    logging.info('Loading model from %s ...', model)
    dcgan.load(model)

    logging.info('Generating %d images writing them to %s ...', nimages,
                 output_dir)
    os.makedirs(output_dir, exist_ok=True)
    dcgan.generate_images(nimages, output_dir, overview_figure=overview)
Example #7
def main(_):
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        dcgan = DCGAN(sess)
        dcgan.build_model()
        dcgan.train()
Example #8
File: WGAN.py Project: qiuchenqqb/tp091
        vars = tf.trainable_variables()
        ops = []
        for v in vars:
            n = v.op.name
            if not n.startswith('discrim/'):
                continue
            logger.info("Clip {}".format(n))
            ops.append(tf.assign(v, tf.clip_by_value(v, -0.01, 0.01)))
        self._op = tf.group(*ops, name='clip')

    def _trigger_step(self):
        self._op.run()


if __name__ == '__main__':
    args = DCGAN.get_args(default_batch=64)

    M = Model(shape=args.final_size, batch=args.batch, z_dim=args.z_dim)
    if args.sample:
        DCGAN.sample(M, args.load)
    else:
        logger.auto_set_dir()

        # The original code uses a different schedule, but this seems to work well.
        # Train 1 D after 2 G
        SeparateGANTrainer(
            input=QueueInput(DCGAN.get_data()),
            model=M, d_period=3).train_with_defaults(
            callbacks=[ModelSaver(), ClipCallback()],
            steps_per_epoch=500,
            max_epoch=200,
Example #9
def CelebA():
	real_images = read_CelebA(sample_size=1000)

	blurred_images = np.array([blur(image, 4) for image in real_images])
	blur_gan = DCGAN(image_height=218, image_width=178, image_color=3, batch_size=10, g_kernel_size=8, d_kernel_size=8, flatten_dim=14 * 12 * 32, hidden_dim=256, Lambda=1e1, contextual='L1')
	blur_gan.train(real_images, blurred_images, report_iter=100, visualize_iter=100)

	masked_images = np.array([mask(image) for image in real_images])
	mask_gan = DCGAN(image_height=218, image_width=178, image_color=3, batch_size=10, flatten_dim=14 * 12 * 32, Lambda=1e2, contextual='L1')
	mask_gan.train(real_images, masked_images, iteration=10000, report_iter=1000, visualize_iter=10000)
	sample_idx = np.random.randint(len(masked_images), size=10)
	generated_images = mask_gan.reconstruct_image(masked_images[sample_idx])
	save_images(real_images[sample_idx], masked_images[sample_idx].astype(np.uint8), generated_images.astype(np.uint8), 'result/CelebA', 'masked')

	down_sampled_images = np.array([down_sample(image, 2) for image in real_images])
	down_sample_gan = DCGAN(image_height=218, image_width=178, image_color=3, batch_size=10, stddev=1e-2, g_kernel_size=8, flatten_dim=14 * 12 * 32, hidden_dim=256, Lambda=1e-1, contextual='L1')
	down_sample_gan.train(real_images, down_sampled_images, alpha=1e-2, report_iter=100, visualize_iter=1000)
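As in Example #1, `read_CelebA`, `mask`, `down_sample` and `save_images` are project helpers that are not shown in this excerpt. A minimal sketch of what the two corruption helpers might do (hypothetical names and defaults; the real corruptions may be configured differently):

import numpy as np

def mask(image, size=64):
    # Hypothetical: zero out a centred size x size block, i.e. the region the GAN must inpaint.
    out = image.copy()
    h, w = out.shape[:2]
    top, left = (h - size) // 2, (w - size) // 2
    out[top:top + size, left:left + size] = 0
    return out

def down_sample(image, factor):
    # Hypothetical: keep every `factor`-th pixel, then repeat pixels so the output keeps the input shape.
    small = image[::factor, ::factor]
    return np.repeat(np.repeat(small, factor, axis=0), factor, axis=1)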
Example #10
    #     gradient_penalty = tf.reduce_mean(tf.square(gradients - 1), name='gradient_penalty')
    #     add_moving_summary(self.d_loss, self.g_loss, gradient_penalty, gradients_rms)

    #     self.d_loss = tf.add(self.d_loss, 10 * gradient_penalty)

    #     self.collect_variables()

    def _get_optimizer(self):
        lr = symbolic_functions.get_scalar_var('learning_rate',
                                               1e-4,
                                               summary=True)
        opt = tf.train.AdamOptimizer(lr, beta1=0.5, beta2=0.9)
        return opt


if __name__ == '__main__':
    assert get_tf_version_tuple() >= (1, 4)
    args = DCGAN.get_args(default_batch=32, default_z_dim=512)
    M = Model(shape=args.final_size, batch=args.batch, z_dim=args.z_dim)
    if args.sample:
        DCGAN.sample(M, args.load)
    else:
        logger.auto_set_dir()
        SeparateGANTrainer(QueueInput(DCGAN.get_data()), M,
                           g_period=5).train_with_defaults(
                               callbacks=[ModelSaver()],
                               steps_per_epoch=300,
                               max_epoch=200,
                               session_init=SmartInit(args.load))

Example #11
        gradients = tf.sqrt(tf.reduce_sum(tf.square(gradients), [1, 2, 3]))
        gradients_rms = symbolic_functions.rms(gradients, 'gradient_rms')
        gradient_penalty = tf.reduce_mean(tf.square(gradients - 1),
                                          name='gradient_penalty')
        add_moving_summary(self.d_loss, self.g_loss, gradient_penalty,
                           gradients_rms)

        self.d_loss = tf.add(self.d_loss, 10 * gradient_penalty)

        self.collect_variables()

    def optimizer(self):
        opt = tf.train.AdamOptimizer(1e-4, beta1=0.5, beta2=0.9)
        return opt


if __name__ == '__main__':
    assert get_tf_version_number() >= 1.4
    args = DCGAN.get_args(default_batch=64, default_z_dim=128)
    M = Model(shape=args.final_size, batch=args.batch, z_dim=args.z_dim)
    if args.sample:
        DCGAN.sample(M, args.load)
    else:
        logger.auto_set_dir()
        SeparateGANTrainer(QueueInput(
            DCGAN.get_data()), M, g_period=6).train_with_defaults(
                callbacks=[ModelSaver()],
                steps_per_epoch=300,
                max_epoch=200,
                session_init=SaverRestore(args.load) if args.load else None)
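Examples #11, #12, #19, #26 and #30 all add the same WGAN-GP penalty to the discriminator loss: `gradients` holds the per-sample L2 norm of the critic's gradient (taken at points interpolated between real and generated images, visible in Example #30), and the term added to `d_loss` is

    10 * mean((gradients - 1)^2)

i.e. `gradient_penalty` weighted by 10, which pushes the critic towards being 1-Lipschitz; `gradients_rms` is only logged for monitoring.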
Example #12
        gradients = tf.sqrt(tf.reduce_sum(tf.square(gradients), [1, 2, 3]))
        gradients_rms = symbolic_functions.rms(gradients, 'gradient_rms')
        gradient_penalty = tf.reduce_mean(tf.square(gradients - 1), name='gradient_penalty')
        add_moving_summary(self.d_loss, self.g_loss, gradient_penalty, gradients_rms)

        self.d_loss = tf.add(self.d_loss, 10 * gradient_penalty)

        self.collect_variables()

    def optimizer(self):
        opt = tf.train.AdamOptimizer(1e-4, beta1=0.5, beta2=0.9)
        return opt


if __name__ == '__main__':
    assert get_tf_version_tuple() >= (1, 4)
    args = DCGAN.get_args(default_batch=64, default_z_dim=128)
    M = Model(shape=args.final_size, batch=args.batch, z_dim=args.z_dim)
    if args.sample:
        DCGAN.sample(M, args.load)
    else:
        logger.auto_set_dir()
        SeparateGANTrainer(
            QueueInput(DCGAN.get_data()),
            M, g_period=6).train_with_defaults(
            callbacks=[ModelSaver()],
            steps_per_epoch=300,
            max_epoch=200,
            session_init=SaverRestore(args.load) if args.load else None
        )
Example #13
import display

if __name__ == '__main__':

    # Training device
    ctx = mx.gpu()

    # Training hyperparameters
    batch_size = 128
    learning_rate = 0.0002

    # Load the dataset
    data = dataOp.load_data_from_mnist(batch_size)

    # Build the networks
    generator = net.g_net(ctx)
    discriminator = net.d_net(ctx)

    # Set up the trainers
    trainer_g = gluon.Trainer(generator.collect_params(), 'Adam',
                              {'learning_rate': learning_rate})
    trainer_d = gluon.Trainer(discriminator.collect_params(), 'Adam',
                              {'learning_rate': learning_rate})

    print('Begin to train')
    for epoch in range(151):
        loss_Dis = 0.
        loss_Gen = 0.

        for real, _ in data:
Example #14
        add_moving_summary(L_pos, L_neg, eq, measure, self.d_loss)
        tf.summary.scalar('kt', kt)

        self.collect_variables()

    def optimizer(self):
        lr = tf.get_variable('learning_rate',
                             initializer=1e-4,
                             trainable=False)
        opt = tf.train.AdamOptimizer(lr, beta1=0.5, beta2=0.9)
        return opt


if __name__ == '__main__':
    args = DCGAN.get_args(default_batch=32, default_z_dim=64)
    if args.sample:
        DCGAN.sample(Model(), args.load, 'gen/conv4.3/output')
    else:
        logger.auto_set_dir()

        input = QueueInput(DCGAN.get_data())
        model = Model()
        nr_tower = max(get_nr_gpu(), 1)
        if nr_tower == 1:
            trainer = GANTrainer(input, model)
        else:
            trainer = MultiGPUGANTrainer(nr_tower, input, model)

        trainer.train_with_defaults(
            callbacks=[
Example #15
        '--log_dir',
        help='directory to save checkout point',
        type=str,
        default=
        '/media/kaicao/Data/checkpoint/FingerprintSynthesis/tensorpack/Improved-WGAN_AutoEncoder/'
    )
    args = parser.parse_args()
    opt.use_argument(args)
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    return args


if __name__ == '__main__':
    args = get_args()
    if args.sample:
        DCGAN.sample2(Model(), args.load, args.sample_dir, num=10000000)
    else:
        assert args.data
        #logger.auto_set_dir()
        logger.set_logger_dir(args.log_dir)
        #a = SaverRestore(args.load)
        config = TrainConfig(
            model=Model(),
            dataflow=DCGAN.get_data(args.data),
            callbacks=[ModelSaver()],
            steps_per_epoch=1000,
            max_epoch=500,
            session_init=SaverRestore(args.load) if args.load else None)
        SeparateGANTrainer(config, g_period=6).train()
Example #16
from mxnet import autograd
from mxnet import nd  # nd is used in transform() below
import matplotlib.pyplot as plt
import time
import sys
import os
import mxnet as mx
import numpy as np
# sys.path.append('..')  # add the parent directory if needed
sys.path.append('./dependencies')
import utils
ctx = utils.try_gpu()

# Load the trained generator parameters
import DCGAN as dcgan
filename1 = './params/dcgan.netG.save'
netG = dcgan.Generator()
netG.load_params(filename1, ctx=ctx)

# Seed from the system time; otherwise every run would produce the same results
from mxnet import random
seed = int(time.time() * 100)
random.seed(seed)


# Image Preprocessing
def transform(data):
    data = mx.image.imresize(data, 64, 64)  # state size: (64, 64, 3)
    data = nd.transpose(data, (2, 0, 1))
    data = data.astype(np.float32) / 127.5 - 1  # normalize to [-1, 1]
    if data.shape[0] == 1:
        data = nd.tile(data, (3, 1, 1))  # probable completion (the excerpt is truncated here): replicate the grey channel to 3 channels
Example #17
        vars = tf.trainable_variables()
        ops = []
        for v in vars:
            n = v.op.name
            if not n.startswith('discrim/'):
                continue
            logger.info("Clip {}".format(n))
            ops.append(tf.assign(v, tf.clip_by_value(v, -0.01, 0.01)))
        self._op = tf.group(*ops, name='clip')

    def _trigger_step(self):
        self._op.run()


if __name__ == '__main__':
    args = DCGAN.get_args()

    if args.sample:
        DCGAN.sample(Model(), args.load)
    else:
        assert args.data
        logger.auto_set_dir()
        config = TrainConfig(
            model=Model(),
            dataflow=DCGAN.get_data(args.data),
            callbacks=[ModelSaver(), ClipCallback()],
            steps_per_epoch=500,
            max_epoch=200,
            session_init=SaverRestore(args.load) if args.load else None)
        # The original code uses a different schedule, but this seems to work well.
        # Train 1 D after 2 G
Example #18
import torch
import DCGAN
import numpy as np
import matplotlib.pyplot as plt

NUM_IMG = 9
Z_DIMENSION = 110
N_IDEAS = 100

D = DCGAN.Discriminator()
G = DCGAN.Generator(Z_DIMENSION, 3136)
D.load_state_dict(torch.load(r'./DC-GAN-Networks/discriminator_cpu_.pkl'))
G.load_state_dict(torch.load(r'./DC-GAN-Networks/generator_cpu_.pkl'))

lis = []
for i in range(10):
    z = torch.randn((NUM_IMG, N_IDEAS))
    x = np.zeros((NUM_IMG, Z_DIMENSION - N_IDEAS))
    x[:, i] = 1
    z = np.concatenate((z.numpy(), x), 1)
    z = torch.from_numpy(z).float()
    fake_img = G(z)
    lis.append(fake_img.detach().numpy())
    output = D(fake_img)
    DCGAN.show(fake_img)
    plt.savefig('./GAN_IMAGE/Test %d.png' % i, bbox_inches='tight')

DCGAN.show_all(lis)
plt.savefig('./GAN_IMAGE/Test_All.png', bbox_inches='tight')
plt.show()
Example #19
        gradients = tf.sqrt(tf.reduce_sum(tf.square(gradients), [1, 2, 3]))
        gradients_rms = tf.sqrt(tf.reduce_mean(tf.square(gradients)), name='gradient_rms')
        gradient_penalty = tf.reduce_mean(tf.square(gradients - 1), name='gradient_penalty')
        add_moving_summary(self.d_loss, self.g_loss, gradient_penalty, gradients_rms)

        self.d_loss = tf.add(self.d_loss, 10 * gradient_penalty)

        self.collect_variables()

    def optimizer(self):
        opt = tf.train.AdamOptimizer(1e-4, beta1=0.5, beta2=0.9)
        return opt


if __name__ == '__main__':
    assert get_tf_version_tuple() >= (1, 4)
    args = DCGAN.get_args(default_batch=64, default_z_dim=128)
    M = Model(shape=args.final_size, batch=args.batch, z_dim=args.z_dim)
    if args.sample:
        DCGAN.sample(M, args.load)
    else:
        logger.auto_set_dir()
        SeparateGANTrainer(
            QueueInput(DCGAN.get_data()),
            M, g_period=5).train_with_defaults(
            callbacks=[ModelSaver()],
            steps_per_epoch=300,
            max_epoch=200,
            session_init=SmartInit(args.load)
        )
Example #20
        add_moving_summary(L_pos, L_neg, eq, measure, self.d_loss)
        tf.summary.scalar('kt-summary', kt)

        self.collect_variables()

    def _get_optimizer(self):
        lr = symbolic_functions.get_scalar_var('learning_rate',
                                               1e-4,
                                               summary=True)
        opt = tf.train.AdamOptimizer(lr, beta1=0.5, beta2=0.9)
        return opt


if __name__ == '__main__':
    args = DCGAN.get_args()
    if args.sample:
        DCGAN.sample(Model(), args.load, 'gen/conv4.3/output')
    else:
        assert args.data
        logger.auto_set_dir()

        config = TrainConfig(
            model=Model(),
            dataflow=DCGAN.get_data(args.data),
            callbacks=[
                ModelSaver(),
                StatMonitorParamSetter('learning_rate', 'measure',
                                       lambda x: x * 0.5, 0, 10)
            ],
            steps_per_epoch=500,
Example #21
                self.d_loss = tf.subtract(L_pos, kt * L_neg, name='loss_D')
                self.g_loss = L_neg

        add_moving_summary(L_pos, L_neg, eq, measure, self.d_loss)
        tf.summary.scalar('kt', kt)

        self.collect_variables()

    def _get_optimizer(self):
        lr = tf.get_variable('learning_rate', initializer=1e-4, trainable=False)
        opt = tf.train.AdamOptimizer(lr, beta1=0.5, beta2=0.9)
        return opt


if __name__ == '__main__':
    args = DCGAN.get_args(default_batch=32, default_z_dim=64)
    if args.sample:
        DCGAN.sample(Model(), args.load, 'gen/conv4.3/output')
    else:
        logger.auto_set_dir()

        input = QueueInput(DCGAN.get_data())
        model = Model()
        nr_tower = max(get_nr_gpu(), 1)
        if nr_tower == 1:
            trainer = GANTrainer(input, model)
        else:
            trainer = MultiGPUGANTrainer(nr_tower, input, model)

        trainer.train_with_defaults(
            callbacks=[
Example #22
                                               summary=True)
        opt = tf.train.AdamOptimizer(lr, beta1=0.5, beta2=0.9)
        return opt


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
    parser.add_argument('--load',
                        help='load model',
                        default='model/I-WGAN_CAE/model-620000.index')
    parser.add_argument('--sample_dir',
                        help='directory for generated examples',
                        type=str,
                        default='/output/')
    parser.add_argument('--num_images',
                        help='number of fingerprint images ',
                        type=int,
                        default=250)
    global args
    args = parser.parse_args()
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    return args


if __name__ == '__main__':
    args = get_args()
    M = Model(shape=256, batch=32, z_dim=512)
    DCGAN.sample2(M, args.load, args.sample_dir, num=args.num_images)
Example #23
        vars = tf.trainable_variables()
        ops = []
        for v in vars:
            n = v.op.name
            if not n.startswith('discrim/'):
                continue
            logger.info("Clip {}".format(n))
            ops.append(tf.assign(v, tf.clip_by_value(v, -0.01, 0.01)))
        self._op = tf.group(*ops, name='clip')

    def _trigger_step(self):
        self._op.run()


if __name__ == '__main__':
    args = DCGAN.get_args(default_batch=64)

    M = Model(shape=args.final_size, batch=args.batch, z_dim=args.z_dim)
    if args.sample:
        DCGAN.sample(M, args.load)
    else:
        logger.auto_set_dir()

        # The original code uses a different schedule, but this seems to work well.
        # Train 1 D after 2 G
        SeparateGANTrainer(input=QueueInput(DCGAN.get_data()),
                           model=M,
                           d_period=3).train_with_defaults(
                               callbacks=[ModelSaver(),
                                          ClipCallback()],
                               steps_per_epoch=500,
Example #24
        vars = tf.trainable_variables()
        ops = []
        for v in vars:
            n = v.op.name
            if not n.startswith('discrim/'):
                continue
            logger.info("Clip {}".format(n))
            ops.append(tf.assign(v, tf.clip_by_value(v, -0.01, 0.01)))
        self._op = tf.group(*ops, name='clip')

    def _trigger_step(self):
        self._op.run()


if __name__ == '__main__':
    args = DCGAN.get_args()

    if args.sample:
        DCGAN.sample(Model(), args.load)
    else:
        assert args.data
        logger.auto_set_dir()

        # The original code uses a different schedule, but this seems to work well.
        # Train 1 D after 2 G
        SeparateGANTrainer(
            input=QueueInput(DCGAN.get_data(args.data)),
            model=Model(),
            d_period=3).train_with_defaults(
            callbacks=[ModelSaver(), ClipCallback()],
            steps_per_epoch=500,
Example #25
def get_config():
    return TrainConfig(
        model=Model(),
        dataflow=DCGAN.get_data(G.data),
        callbacks=[
            ModelSaver(),
            StatMonitorParamSetter('learning_rate', 'measure',
                                   lambda x: x * 0.5, 0, 10)
        ],
        steps_per_epoch=500,
        max_epoch=400,
    )


if __name__ == '__main__':
    args = DCGAN.get_args()
    if args.sample:
        DCGAN.sample(args.load, 'gen/conv4.3/output')
    else:
        assert args.data
        logger.auto_set_dir()
        config = get_config()
        if args.load:
            config.session_init = SaverRestore(args.load)
        nr_gpu = get_nr_gpu()
        config.nr_tower = max(nr_gpu, 1)
        if config.nr_tower == 1:
            GANTrainer(config).train()
        else:
            MultiGPUGANTrainer(config).train()
Example #26
        gradients = tf.sqrt(tf.reduce_sum(tf.square(gradients), [1, 2, 3]))
        gradients_rms = symbolic_functions.rms(gradients, 'gradient_rms')
        gradient_penalty = tf.reduce_mean(tf.square(gradients - 1),
                                          name='gradient_penalty')
        add_moving_summary(self.d_loss, self.g_loss, gradient_penalty,
                           gradients_rms)

        self.d_loss = tf.add(self.d_loss, 10 * gradient_penalty)

        self.collect_variables()

    def _get_optimizer(self):
        opt = tf.train.AdamOptimizer(1e-4, beta1=0.5, beta2=0.9)
        return opt


if __name__ == '__main__':
    args = DCGAN.get_args()
    if args.sample:
        DCGAN.sample(Model(), args.load)
    else:
        assert args.data
        logger.auto_set_dir()
        SeparateGANTrainer(
            QueueInput(DCGAN.get_data(args.data)), Model(),
            g_period=6).train_with_defaults(
                callbacks=[ModelSaver()],
                steps_per_epoch=300,
                max_epoch=200,
                session_init=SaverRestore(args.load) if args.load else None)
Example #27

DCGAN.Model = Model


def get_config():
    return TrainConfig(
        model=Model(),
        dataflow=DCGAN.get_data(G.data),
        callbacks=[
            ModelSaver(),
            StatMonitorParamSetter('learning_rate', 'measure',
                                   lambda x: x * 0.5, 0, 10)
        ],
        steps_per_epoch=500,
        max_epoch=400,
    )


if __name__ == '__main__':
    args = DCGAN.get_args()
    if args.sample:
        DCGAN.sample(args.load)
    else:
        assert args.data
        logger.auto_set_dir()
        config = get_config()
        if args.load:
            config.session_init = SaverRestore(args.load)
        GANTrainer(config).train()
Example #28
import torch
import torch.nn as nn
import DCGAN
import numpy as np
from torch.autograd import Variable

# Hyper Parameters
EPOCH = 100  # number of training epochs
Z_DIMENSION = 110  # size of the generator input; the last ten entries are the one-hot label
G_EPOCH = 1  # number of discriminator epochs
NUM_IMG = 100  # image batch size
LR = 0.0003  # learning rate
OPTIMIZER = torch.optim.Adam  # optimizer
CRITERION = nn.BCELoss()  # loss function
NUM_OF_WORKERS = 10  # number of data-loading workers
N_IDEAS = 100  # size of the random noise; Z_DIMENSION exceeds it by the number of tags

D = DCGAN.Discriminator()
G = DCGAN.Generator(Z_DIMENSION, 1 * 56 * 56)  #
Training_Set, Testing_Set, Training_Loader, Testing_Loader = DCGAN.load_image(
    NUM_IMG, NUM_OF_WORKERS)
D = D.cuda()
G = G.cuda()
d_optimizer = OPTIMIZER(D.parameters(), lr=LR)
g_optimizer = OPTIMIZER(G.parameters(), lr=LR)

if __name__ == '__main__':
    for count, i in enumerate(range(EPOCH)):
        for (img, label) in Training_Loader:
            labels_one_hot = np.zeros((NUM_IMG, 10))
            labels_one_hot[np.arange(NUM_IMG), label.numpy()] = 1
            img = Variable(img).cuda()
            real_label = Variable(
Example #29
        add_moving_summary(L_pos, L_neg, eq, measure, self.d_loss)
        tf.summary.scalar('kt', kt)

        self.collect_variables()

    def _get_optimizer(self):
        lr = tf.get_variable('learning_rate',
                             initializer=1e-4,
                             trainable=False)
        opt = tf.train.AdamOptimizer(lr, beta1=0.5, beta2=0.9)
        return opt


if __name__ == '__main__':
    args = DCGAN.get_args()
    if args.sample:
        DCGAN.sample(Model(), args.load, 'gen/conv4.3/output')
    else:
        assert args.data
        logger.auto_set_dir()

        input = QueueInput(DCGAN.get_data(args.data))
        model = Model()
        nr_tower = max(get_nr_gpu(), 1)
        if nr_tower == 1:
            trainer = GANTrainer(input, model)
        else:
            trainer = MultiGPUGANTrainer(nr_tower, input, model)

        trainer.train_with_defaults(
Example #30
        gradients = tf.gradients(vec_interp, [interp])[0]
        gradients = tf.sqrt(tf.reduce_sum(tf.square(gradients), [1, 2, 3]))
        gradients_rms = symbolic_functions.rms(gradients, 'gradient_rms')
        gradient_penalty = tf.reduce_mean(tf.square(gradients - 1), name='gradient_penalty')
        add_moving_summary(self.d_loss, self.g_loss, gradient_penalty, gradients_rms)

        self.d_loss = tf.add(self.d_loss, 10 * gradient_penalty)

        self.collect_variables()

    def _get_optimizer(self):
        lr = symbolic_functions.get_scalar_var('learning_rate', 1e-4, summary=True)
        opt = tf.train.AdamOptimizer(lr, beta1=0.5, beta2=0.9)
        return opt


DCGAN.Model = Model


if __name__ == '__main__':
    args = DCGAN.get_args()
    if args.sample:
        DCGAN.sample(args.load)
    else:
        assert args.data
        logger.auto_set_dir()
        config = DCGAN.get_config()
        if args.load:
            config.session_init = SaverRestore(args.load)
        SeparateGANTrainer(config, g_period=6).train()
Example #31
from mxnet import nd
from mxnet import random
from matplotlib import pyplot as plt
import numpy as np

# Seed from the system time; otherwise every run would produce the same results
import time
seed = int(time.time() * 100)
random.seed(seed)

import sys
sys.path.append('./dependencies')
import utils
ctx = utils.try_gpu()

import DCGAN as dcgan  # needed for dcgan.Generator() below; missing from the original excerpt
filename = './params/dcgan.netG.save'
netG = dcgan.Generator()
netG.collect_params()
netG.load_params(filename, ctx=ctx)

z = nd.random_normal(0, 1, shape=(4, 100, 1, 1), ctx=ctx)
#print(z)
output = netG(z)

for i in range(4):
    plt.subplot(1, 4, i + 1)
    plt.imshow(((output[i].asnumpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(
        np.uint8))
    plt.axis('off')
plt.show()
Example #32
    n_chan = 1
elif dset == 'CIFAR10':
    transformations = [transforms.ToTensor()]
    transformations.append(
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
    dataset = dset.CIFAR10(root=root,
                           download=True,
                           transform=transforms.Compose(transformations))
    n_chan = 3

# DCGAN object initialization
n_z = 128
ngpu = 1
loss = 'BCE'

Gen_model = dc.DCGAN(arch=dset, n_z=n_z, ngpu=ngpu, loss=loss)

# DCGAN training scheme
batch_size = 100
n_iters = int(5e04)
opt_dets = {
    'gen': {
        'name': 'adam',
        'learn_rate': 1e-04,
        'betas': (0.5, 0.99)
    },
    'dis': {
        'name': 'adam',
        'learn_rate': 1e-04,
        'momentum': (0.5, 0.99)
    }
Example #33
flags = tf.app.flags
flags.DEFINE_bool("is_training", False, "training flag")
FLAGS = flags.FLAGS


def check_dir():
    if not os.path.exists('./sample'):
        os.mkdir('./sample')
    if not os.path.exists('./checkpoint'):
        os.mkdir('./checkpoint')
    if not os.path.exists('./logs'):
        os.mkdir('./logs')


if __name__ == '__main__':
    check_dir()
    with tf.Session() as sess:
        GAN = DCGAN(input_height=64,
                    input_width=64,
                    input_channels=3,
                    output_height=64,
                    output_width=64,
                    gf_dim=64,
                    input_fname_pattern='*.jpg',
                    is_grayscale=False,
                    sess=sess)
        GAN.build_model()
        if FLAGS.is_training:
            GAN.train()
Example #34
        vars = tf.trainable_variables()
        ops = []
        for v in vars:
            n = v.op.name
            if not n.startswith('discrim/'):
                continue
            logger.info("Clip {}".format(n))
            ops.append(tf.assign(v, tf.clip_by_value(v, -0.01, 0.01)))
        self._op = tf.group(*ops, name='clip')

    def _trigger_step(self):
        self._op.run()


if __name__ == '__main__':
    args = DCGAN.get_args(default_batch=64)

    M = Model(shape=args.final_size, batch=args.batch, z_dim=args.z_dim)
    if args.sample:
        DCGAN.sample(M, args.load)
    else:
        logger.auto_set_dir()

        # The original code uses a different schedule, but this seems to work well.
        # Train 1 D after 2 G
        SeparateGANTrainer(
            input=QueueInput(DCGAN.get_data()),
            model=M, d_period=3).train_with_defaults(
            callbacks=[ModelSaver(), ClipCallback()],
            steps_per_epoch=500,
            max_epoch=200,
Example #35
                self.d_loss = tf.subtract(L_pos, kt * L_neg, name='loss_D')
                self.g_loss = L_neg

        add_moving_summary(L_pos, L_neg, eq, measure, self.d_loss)
        tf.summary.scalar('kt', kt)

        self.collect_variables()

    def _get_optimizer(self):
        lr = tf.get_variable('learning_rate', initializer=1e-4, trainable=False)
        opt = tf.train.AdamOptimizer(lr, beta1=0.5, beta2=0.9)
        return opt


if __name__ == '__main__':
    args = DCGAN.get_args()
    if args.sample:
        DCGAN.sample(Model(), args.load, 'gen/conv4.3/output')
    else:
        assert args.data
        logger.auto_set_dir()

        input = QueueInput(DCGAN.get_data(args.data))
        model = Model()
        nr_tower = max(get_nr_gpu(), 1)
        if nr_tower == 1:
            trainer = GANTrainer(input, model)
        else:
            trainer = MultiGPUGANTrainer(nr_tower, input, model)

        trainer.train_with_defaults(