Exemplo n.º 1
0
def train(latent_dim, height, width, channels):
    """Train a DCGAN on MNIST and checkpoint weights after every epoch.

    Args:
        latent_dim: dimensionality of the generator's latent input.
        height, width, channels: target shape of each training image.
    """
    (X_train, _), (_, _) = tf.keras.datasets.mnist.load_data()
    X_train = X_train.reshape((X_train.shape[0],) + (height, width, channels)).astype('float32')
    X_train = normalize(X_train)
    epochs = 20
    batch_size = 128
    iterations = X_train.shape[0] // batch_size
    dcgan = DCGAN(latent_dim, height, width, channels)

    for epoch in range(epochs):
        for iteration in range(iterations):
            real_images = X_train[iteration * batch_size:(iteration + 1) * batch_size]
            d_loss, g_loss = dcgan.train(real_images, batch_size)
            if (iteration + 1) % 10 == 0:
                print('{} / {}'.format(iteration + 1, iterations))
                print('discriminator loss: {}'.format(d_loss))
                print('generator loss: {}'.format(g_loss))
                print()
                # FIX: use '\n' (was '\r') so each loss pair lands on its own
                # line when loss.txt is read back.
                with open('loss.txt', 'a') as f:
                    f.write(str(d_loss) + ',' + str(g_loss) + '\n')
        dcgan.save_weights('gan' + '_epoch' + str(epoch + 1) + '.h5')
        # FIX: report the same 1-based epoch number used in the saved filename
        # (previously printed the 0-based index).
        print('epoch' + str(epoch + 1) + ' end')
        print()
Exemplo n.º 2
0
def main(_):
    """Build a DCGAN, restore checkpoints when present, then train or sample.

    Raises:
        IOError: if checkpoints exist but cannot be read.
    """
    pp.pprint(FLAGS.__flags)

    # training/inference
    with tf.Session() as sess:
        dcgan = DCGAN(sess, FLAGS)

        # ensure checkpoint and per-model log directories exist
        if not os.path.exists(FLAGS.checkpoint_dir):
            os.makedirs(FLAGS.checkpoint_dir)
        log_dir = os.path.join(FLAGS.log_dir, dcgan.get_model_dir())
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)

        # load checkpoint if found
        if dcgan.checkpoint_exists():
            print("Loading checkpoints...")
            if dcgan.load():
                # FIX: Python-2 print statements converted to print() calls so
                # the snippet is valid on Python 3 as well.
                print("success!")
            else:
                raise IOError("Could not read checkpoints from {0}!".format(
                    FLAGS.checkpoint_dir))
        else:
            print("No checkpoints found. Training from scratch.")
            dcgan.load()

        # train DCGAN
        if FLAGS.train:
            train(dcgan)
        else:
            dcgan.load()
Exemplo n.º 3
0
Arquivo: main.py Projeto: SmartAI/GAN
def main(_):
    """Build a DCGAN, restore checkpoints, then train and generate samples.

    Raises:
        IOError: if checkpoints exist but cannot be read, or if sampling is
            requested without any checkpoint and training disabled.
    """
    pp.pprint(FLAGS.__flags)
    with tf.Session() as sess:
        dcgan = DCGAN(sess, FLAGS)

        # ensure checkpoint/sample/log directories exist
        if not os.path.exists(FLAGS.checkpoint_dir):
            os.makedirs(FLAGS.checkpoint_dir)
        if not os.path.exists(
                os.path.join(FLAGS.sample_dir, dcgan.get_model_dir())):
            os.makedirs(os.path.join(FLAGS.sample_dir, dcgan.get_model_dir()))
        if not os.path.exists(
                os.path.join(FLAGS.log_dir, dcgan.get_model_dir())):
            os.makedirs(os.path.join(FLAGS.log_dir, dcgan.get_model_dir()))

        # FIX: Python-2 print statements converted to print() calls so the
        # snippet is valid on Python 3 as well.
        if dcgan.checkpoint_exists():
            print("Loading checkpoints")
            if dcgan.load():
                print("Success")
            else:
                raise IOError("Could not read checkpoints from {}".format(
                    FLAGS.checkpoint_dir))
        else:
            if not FLAGS.train:
                raise IOError("No checkpoints found")
            print("No checkpoints found. Training from scratch")
            dcgan.load()

        if FLAGS.train:
            train(dcgan)

        print("Generating samples...")
        inference.sample_images(dcgan)
        inference.visualize_z(dcgan)
Exemplo n.º 4
0
    def compile(self):
        """Build the GAN, its optimizers, and fixed latent vectors for sampling."""
        # Fresh GAN instance for this run.
        self.gan = DCGAN(self.gan_type, self.latent_dim, self.batch_size,
                         self.use_cuda)

        # One factory per supported optimizer; applied to both G and D.
        if self.optim == 'adam':
            make_optimizer = lambda params: optim.Adam(params,
                                                       lr=self.learning_rate,
                                                       betas=self.momentum)
        elif self.optim == 'rmsprop':
            make_optimizer = lambda params: optim.RMSprop(params,
                                                          lr=self.learning_rate)
        else:
            raise NotImplementedError
        self.G_optimizer = make_optimizer(self.gan.G.parameters())
        self.D_optimizer = make_optimizer(self.gan.D.parameters())

        # Move the model to the GPU when requested and available.
        if torch.cuda.is_available() and self.use_cuda:
            self.gan = self.gan.cuda()

        # Fixed latent variables reused for inference while training.
        self.latent_vars = [self.gan.create_latent_var(1) for _ in range(100)]
Exemplo n.º 5
0
def main(**kwargs):
    """Load images per the keyword options and train a DCGAN on them."""
    from types import SimpleNamespace
    cfg = SimpleNamespace(**kwargs)

    loader = ImageLoader(cfg.folder)
    data = loader.setup(datalen=cfg.datalen)
    gan = DCGAN(loader.shape_x, loader.shape_y, loader.channels, data)
    gan.train(epochs=cfg.epochs, batch_size=cfg.batch_size, save_interval=50)
Exemplo n.º 6
0
def predict(latent_dim, height, width, channels):
    """Load trained DCGAN weights and write 100 generated images to ./generated."""
    noise = np.random.normal(size=(100, latent_dim))
    dcgan = DCGAN(latent_dim, height, width, channels)
    dcgan.load_weights('gan_epoch20.h5')
    for index, sample in enumerate(dcgan.predict(noise)):
        img = image.array_to_img(denormalize(sample), scale=False)
        img.save(os.path.join('generated', str(index) + '.png'))
Exemplo n.º 7
0
def train():
    """Build the DCGAN and train it on the image dataset.

    (Comments translated from Japanese.) Writes sample images every 100 steps
    and saves generator/discriminator checkpoints when training finishes.
    """
    # Load images from the dataset
    imgs = load_images()
    with tf.Session() as sess:
        # (3) Build the DCGAN network
        batch_size = 64
        dcgan = DCGAN(
            generator_layers=[1024, 512, 256, 128],
            discriminator_layer=[64, 128, 256, 512],
            batch_size=batch_size,
            image_inputs=tf.placeholder(tf.float32,
                                        [batch_size, SIZE, SIZE, 3]),
        )
        sess.run(tf.global_variables_initializer())

        # (4) Prepare savers for writing the model files
        g_saver = tf.train.Saver(dcgan.generator.variables)
        d_saver = tf.train.Saver(dcgan.discriminator.variables)

        maxstep = 10000
        N = len(imgs)

        # (5) Prepare sample image output (8x8 grid from uniform latent noise)
        sample_z = tf.random_uniform([dcgan.batch_size, dcgan.z_dim],
                                     minval=-1.0,
                                     maxval=1.0)
        images = dcgan.sample_images(8, 8, inputs=sample_z)

        os.makedirs('../data/generated_images/', exist_ok=True)

        # (6) Training loop: one random minibatch per step
        for step in range(maxstep):
            permutation = np.random.permutation(N)
            imgs_batch = imgs[permutation[0:batch_size]]
            g_loss, d_loss = dcgan.fit_step(sess=sess, image_inputs=imgs_batch)

            # Every 100 steps, dump a sample image and print the losses.
            if step % 100 == 0:
                filename = os.path.join('../data/', "generated_images",
                                        '%05d.jpg' % step)
                with open(filename, 'wb') as f:
                    f.write(sess.run(images))
                print("Generator loss: {} , Discriminator loss: {}".format(
                    g_loss, d_loss))

        # (7) Save the trained models to disk
        os.makedirs('../data/models/', exist_ok=True)
        g_saver.save(sess=sess, save_path="../data/models/g_saver.ckpg")
        d_saver.save(sess=sess, save_path="../data/models/d_saver.ckpg")
Exemplo n.º 8
0
def main():
    """Define training flags and run DCGAN training in a GPU-friendly session."""
    flags = tf.app.flags
    flags.DEFINE_integer("epoch", 25, "Epoch to train [25]")
    flags.DEFINE_float("learning_rate", 0.0002,
                       "Learning rate of for adam [0.0002]")
    flags.DEFINE_float("beta1", 0.5, "Momentum term of adam [0.5]")
    FLAGS = flags.FLAGS

    # Grab GPU memory on demand instead of reserving it all up front.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        DCGAN(sess).train(FLAGS)
Exemplo n.º 9
0
def init():
    """Generate from a saved model when one exists, otherwise train and save."""
    dcgan = DCGAN()
    if not path.exists("%s/generator.h5" % modelSaveLocation):
        # No saved generator: train from scratch, then persist the weights.
        data = loadData()
        print("Model is not trained. Loaded %d images for training" %
              len(data))
        dcgan.train(data=data, epochs=2000, batch_size=32, save_interval=50)
        print("Model is trained. Saving to %s" % modelSaveLocation)
        dcgan.saveWeights(modelSaveLocation)
    else:
        # Saved weights present: load them and emit a sample image.
        print("Model is trained. Loading from %s" % modelSaveLocation)
        dcgan.loadWeights(modelSaveLocation)
        dcgan.generate("./images/generated/example.png")
Exemplo n.º 10
0
def main():
    """Train a DCGAN, checkpointing G/D and writing sample images each epoch."""
    dcgan = DCGAN(s_size=s_size, batch_size=batch_size)
    train_im, total_imgs = load_image()
    total_batch = int(total_imgs / batch_size)
    losses = dcgan.loss(train_im)
    train_op = dcgan.train(losses, learning_rate=learning_rate)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.33)
    config = tf.ConfigProto(gpu_options=gpu_options,
                            device_count={"CPU": 8},
                            inter_op_parallelism_threads=1,
                            intra_op_parallelism_threads=1)

    # FIX: `config` was built but never passed to the session, so the GPU
    # memory fraction and thread limits were silently ignored.
    with tf.Session(config=config) as sess:

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        init = tf.global_variables_initializer()
        sess.run(init)
        g_saver = tf.train.Saver(dcgan.g.variables)
        d_saver = tf.train.Saver(dcgan.d.variables)

        # Resume from the latest checkpoints when a save directory exists.
        if os.path.isdir(save_dir):
            g_saver.restore(sess,
                            tf.train.latest_checkpoint(save_dir + '/g_model'))
            d_saver.restore(sess,
                            tf.train.latest_checkpoint(save_dir + '/d_model'))
        else:
            os.mkdir(save_dir)

        # Fixed latent sample reused every epoch so progress is comparable.
        sample_z = np.float32(
            np.random.uniform(-1, 1, [dcgan.batch_size, dcgan.z_dim]))
        images = dcgan.sample_images(5, 5, inputs=sample_z)

        print("Start training")
        try:
            for step in range(1, epochs + 1):
                start_time = time.time()
                for _ in range(total_batch):
                    _, g_loss, d_loss = sess.run(
                        [train_op, losses[dcgan.g], losses[dcgan.d]])
                print("epochs {} loss = G: {:.8f}, D: {:.8f} run time:{:.4f} sec"
                      .format(step, g_loss, d_loss, time.time() - start_time))
                g_saver.save(sess, save_dir + '/g_model/g.ckpt', global_step=step)
                d_saver.save(sess, save_dir + '/d_model/d.ckpt', global_step=step)

                with open('./test/%05d.jpg' % step, 'wb') as f:
                    f.write(sess.run(images))
        finally:
            # FIX: always stop queue-runner threads, even when training raises,
            # so the process does not hang on exit.
            coord.request_stop()
            coord.join(threads)
Exemplo n.º 11
0
def geneator_handler():
    """Flask endpoint: optionally accept a serialized z-vector upload, run the
    DCGAN generator, and return the generated PNG."""
    print('start request!')
    zvector = None
    batchSize = 1
    # Upload a serialized Zvector
    if request.method == 'POST':
        print('POST!')
        # Validate the uploaded file carrying the serialized z-vector.
        if 'file' not in request.files:
            return BadRequest("File not present in request")
        upload = request.files['file']
        if upload.filename == '':
            return BadRequest("File name is not present in request")
        if not allowed_file(upload.filename):
            return BadRequest("Invalid file type")
        saved_name = secure_filename(upload.filename)
        upload.save(saved_name)
        # The z-vector's first dimension is the number of samples to generate.
        zvector = torch.load(saved_name)
        batchSize = zvector.size()[0]

    checkpoint = request.form.get("ckp") or "netG_epoch_99.pth"
    netG_path = os.path.join(MODEL_PATH, checkpoint)
    # Build the generator on GPU when CUDA is available, otherwise on CPU.
    if torch.cuda.is_available():
        generator = DCGAN(netG=netG_path, zvector=zvector, batchSize=batchSize, ngpu=1, cuda=True, outf="./")
    else:
        generator = DCGAN(netG=netG_path, zvector=zvector, batchSize=batchSize, ngpu=0, outf="./")
    generator.build_model()
    generator.generate()
    return send_file(OUTPUT_PATH, mimetype='image/png')
Exemplo n.º 12
0
    def __init__(self):
        """Load MNIST and build the discriminator/adversarial/generator models."""
        # MNIST image geometry.
        self.img_rows = 28
        self.img_cols = 28
        # NOTE(review): set to 8 here, yet the reshape below uses 1 channel —
        # confirm which value the DCGAN helper actually consumes.
        self.channel = 8

        # MNIST training images reshaped to (N, 28, 28, 1) float32.
        self.x_train = input_data.read_data_sets("mnist",
                                                 one_hot=True).train.images
        self.x_train = self.x_train.reshape(-1, self.img_rows, self.img_cols,
                                            1).astype(np.float32)

        # Build the three models from the shared DCGAN factory.
        self.DCGAN = DCGAN()
        self.discriminator = self.DCGAN.discriminator_model()
        self.adversarial = self.DCGAN.adversarial_model()
        self.generator = self.DCGAN.generator()
Exemplo n.º 13
0
def main(argv=None):
    """Restore a trained DCGAN and dump montages of intermediate activations as PNGs.

    NOTE: uses the legacy pre-1.0 TensorFlow argument order for tf.split and
    tf.concat (axis first).
    """
    dcgan = DCGAN(
        batch_size=96, f_size=6, z_dim=40,
        gdepth1=512, gdepth2=256, gdepth3=128,  gdepth4=64,
        ddepth1=54,  ddepth2=90,  ddepth3=150, ddepth4=250)
    # Wire generator into discriminator so both graphs (scopes 'g'/'d') exist.
    dcgan.d(dcgan.g(dcgan.z))
    g_saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='g'))
    d_saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='d'))


    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())

        # Restore generator/discriminator weights when checkpoint files exist.
        g_checkpoint_path = os.path.join(os.path.dirname(__file__), '..', FLAGS.train_dir, 'g.ckpt')
        d_checkpoint_path = os.path.join(os.path.dirname(__file__), '..', FLAGS.train_dir, 'd.ckpt')
        if os.path.exists(g_checkpoint_path):
            g_saver.restore(sess, g_checkpoint_path)
        if os.path.exists(d_checkpoint_path):
            d_saver.restore(sess, d_checkpoint_path)

        ops = []
        # Each target names a tensor to visualize plus its montage grid shape.
        targets = [
            {'name': 'g/conv1/Relu:0',    'row': 8, 'col': 32},
            {'name': 'g/conv2/Relu:0',    'row': 8, 'col': 16},
            {'name': 'g/conv3/Relu:0',    'row': 8, 'col': 8 },
            {'name': 'Tanh:0',            'row': 1, 'col': 3 },
            {'name': 'd/conv0/Maximum:0', 'row': 6, 'col': 9 },
            {'name': 'd/conv1/Maximum:0', 'row': 6, 'col': 15},
            {'name': 'd/conv2/Maximum:0', 'row': 6, 'col': 25},
        ]
        for target in targets:
            t = sess.graph.get_tensor_by_name(target['name'])
            # Split the batch into per-example tensors; keep the first 3 examples.
            batch_outputs = tf.split(0, dcgan.batch_size, t)
            for i in range(3):
                # One tensor per feature map (split along the channel axis).
                maps = tf.split(3, t.get_shape()[3], batch_outputs[i])
                rows = []
                cols = target['col']
                # Tile the maps into a row-major grid of row x col images.
                for row in range(target['row']):
                    rows.append(tf.concat(2, maps[cols * row: cols * row + cols]))
                montaged = tf.concat(1, rows)
                out = tf.image.convert_image_dtype(tf.squeeze(montaged, [0]), tf.uint8, saturate=True)
                ops.append(tf.image.encode_png(out, name=t.op.name.replace('/', '-') + '-%02d' % i))

        # Run all PNG-encode ops at once and write each result to images_dir.
        results = sess.run(ops)
        for i in range(len(ops)):
            filename = ops[i].op.name + '.png'
            print('write %s' % filename)
            with open(os.path.join(os.path.dirname(__file__), '..', FLAGS.images_dir, filename), 'wb') as f:
                f.write(results[i])
Exemplo n.º 14
0
def main(argv=None):
    """Restore a trained DCGAN and dump montages of intermediate activations as PNGs.

    NOTE: uses the legacy pre-1.0 TensorFlow argument order for tf.split and
    tf.concat (axis first).
    """
    dcgan = DCGAN(
        batch_size=96, f_size=6, z_dim=40,
        gdepth1=512, gdepth2=256, gdepth3=128,  gdepth4=64,
        ddepth1=54,  ddepth2=90,  ddepth3=150, ddepth4=250)
    # Wire generator into discriminator so both graphs (scopes 'g'/'d') exist.
    dcgan.d(dcgan.g(dcgan.z))
    g_saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='g'))
    d_saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='d'))


    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())

        # Restore generator/discriminator weights when checkpoint files exist.
        g_checkpoint_path = os.path.join(os.path.dirname(__file__), '..', FLAGS.train_dir, 'g.ckpt')
        d_checkpoint_path = os.path.join(os.path.dirname(__file__), '..', FLAGS.train_dir, 'd.ckpt')
        if os.path.exists(g_checkpoint_path):
            g_saver.restore(sess, g_checkpoint_path)
        if os.path.exists(d_checkpoint_path):
            d_saver.restore(sess, d_checkpoint_path)

        ops = []
        # Each target names a tensor to visualize plus its montage grid shape.
        targets = [
            {'name': 'g/conv1/Relu:0',    'row': 8, 'col': 32},
            {'name': 'g/conv2/Relu:0',    'row': 8, 'col': 16},
            {'name': 'g/conv3/Relu:0',    'row': 8, 'col': 8 },
            {'name': 'Tanh:0',            'row': 1, 'col': 3 },
            {'name': 'd/conv0/Maximum:0', 'row': 6, 'col': 9 },
            {'name': 'd/conv1/Maximum:0', 'row': 6, 'col': 15},
            {'name': 'd/conv2/Maximum:0', 'row': 6, 'col': 25},
        ]
        for target in targets:
            t = sess.graph.get_tensor_by_name(target['name'])
            # Split the batch into per-example tensors; keep the first 3 examples.
            batch_outputs = tf.split(0, dcgan.batch_size, t)
            for i in range(3):
                # One tensor per feature map (split along the channel axis).
                maps = tf.split(3, t.get_shape()[3], batch_outputs[i])
                rows = []
                cols = target['col']
                # Tile the maps into a row-major grid of row x col images.
                for row in range(target['row']):
                    rows.append(tf.concat(2, maps[cols * row: cols * row + cols]))
                montaged = tf.concat(1, rows)
                out = tf.image.convert_image_dtype(tf.squeeze(montaged, [0]), tf.uint8, saturate=True)
                ops.append(tf.image.encode_png(out, name=t.op.name.replace('/', '-') + '-%02d' % i))

        # Run all PNG-encode ops at once and write each result to images_dir.
        results = sess.run(ops)
        for i in range(len(ops)):
            filename = ops[i].op.name + '.png'
            print('write %s' % filename)
            with open(os.path.join(os.path.dirname(__file__), '..', FLAGS.images_dir, filename), 'wb') as f:
                f.write(results[i])
Exemplo n.º 15
0
def main(N_EPOCH=10, BATCH_SIZE=32, G_SIZE=100):
    """Train a DCGAN on MNIST digit-8 images and plot samples from the final epochs.

    Args:
        N_EPOCH: number of training epochs.
        BATCH_SIZE: minibatch size.
        G_SIZE: dimensionality of the generator's noise input.
    """
    (X_train, y_train), (_, _) = tf.contrib.keras.datasets.mnist.load_data()
    # Keep only images labeled 8, scaled via the project's helpers.
    X = select(scaled(X_train), y_train, 8)

    gan = DCGAN(G_SIZE, (28, 28), 1, shape_trace=[(7, 7, 128), (14, 14, 64)])
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    plt.figure()
    for i, epoch in enumerate(range(N_EPOCH)):
        X = shuffle(X)
        for step, images in enumerate(gen_batch(X, BATCH_SIZE)):
            noise = np.random.randn(len(images), G_SIZE)

            # One discriminator update, then two generator updates per step.
            sess.run(gan.D_train, {gan.G_in: noise, gan.X_in: images, gan.train_flag: True})
            for _ in range(2):
                sess.run(gan.G_train, {gan.G_in: noise, gan.train_flag: True})

            G_loss, D_loss, D_prob, G_prob, mse = sess.run([gan.G_loss, gan.D_loss, gan.X_prob, gan.G_prob, gan.mse],
                                                           {gan.G_in: noise, gan.X_in: images, gan.train_flag: False})
            print("Epoch %d/%d | Step %d/%d" % (epoch+1, N_EPOCH, step, len(X)//BATCH_SIZE))
            print("G loss: %.4f | D loss: %.4f | D prob: %.4f | G prob: %.4f | mse: %.4f" %
                 (G_loss, D_loss, D_prob.mean(), G_prob.mean(), mse))

        # In the last four epochs, draw one generated image into a 2x2 grid
        # (reuses the last minibatch's noise from the loop above).
        if i in range(N_EPOCH-4, N_EPOCH):
            img = sess.run(gan.G_out, {gan.G_in: noise, gan.train_flag: False})[0]
            plt.subplot(2, 2, i+1-(N_EPOCH-4))
            plt.imshow(np.squeeze(img))
    plt.tight_layout()
    plt.show()
Exemplo n.º 16
0
def dcgan_image_transition(z, checkpoint_file, model_params):
    """Restore a DCGAN from `checkpoint_file` and run its sampler on latent codes `z`."""
    net = DCGAN(**model_params)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, checkpoint_file)
        return sess.run(net.sampler, feed_dict={net.z: z})
Exemplo n.º 17
0
    def __init__(self, flags):
        """Build the session, dataset, and DCGAN model for this solver.

        Args:
            flags: parsed configuration flags; must provide `dataset`.
        """
        # Session that grows GPU memory on demand instead of reserving it all.
        run_config = tf.ConfigProto()
        run_config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=run_config)

        self.flags = flags
        self.dataset = Dataset(self.sess, flags, self.flags.dataset)
        self.model = DCGAN(self.sess, self.flags, self.dataset.image_size)

        self._make_folders()
        # Training-iteration counter, advanced by the training loop.
        self.iter_time = 0

        self.saver = tf.train.Saver()
        self.sess.run(tf.global_variables_initializer())

        # Log all trainable variables for inspection.
        tf_utils.show_all_variables()
Exemplo n.º 18
0
def load_model(model_params, contF=True):
    """Build a DCGAN and, when `contF` is set, warm-start it from a pickled model.

    Environment variables LTYPE and LOAD_PATH select the loss type and the
    pickle to continue from.
    """
    from dcgan import DCGAN
    import os

    model = DCGAN(model_params, ltype=os.environ['LTYPE'])
    if contF:
        # Continue from the previously saved model's parameter values.
        from utils import unpickle
        previous = unpickle(os.environ['LOAD_PATH'])
        gen_values = [p.get_value() for p in previous.gen_network.params]
        dis_values = [p.get_value() for p in previous.dis_network.params]
        model.load(dis_values, gen_values, verbose=False)
    return model
Exemplo n.º 19
0
def main():
    """Train a DCGAN on the MNIST handwritten-digit dataset."""
    print("[INFO] Loading MNIST handwritten digits...\n")
    (X, _), (_, _) = mnist.load_data()
    # Add the trailing channel axis expected by the model: (N, 28, 28, 1).
    images = X.reshape(X.shape[0], 28, 28, 1)

    gan_model = DCGAN(data=images,
                      learning_rate=2e-04,
                      batch_size=128,
                      latent_dim=100)
    gan_model.train(epochs=100,
                    batches_per_epoch=300,
                    checkpoint_frequency=1,
                    save_path='.././save_data/')
Exemplo n.º 20
0
    def __init__(self, j=10, batch_size=128, target="HH", data=None):
        """Set up spectrogram dimensions and the DCGAN models, then load data.

        Args:
            j: number of spectrogram rows (time slices) per sample.
            batch_size: training minibatch size.
            target: target label/channel for training (default "HH").
            data: optional pre-loaded dataset; `load_data` handles the rest.
        """
        self.training_generator = None
        self.test_generator = None
        self.batch_size = batch_size
        self.target = target
        self.data = data

        # Spectrogram geometry: 1024 columns by j rows, single channel.
        self.spec_cols = 1024
        self.spec_length = j

        # Build the three models from the shared DCGAN factory.
        self.DCGAN = DCGAN(img_rows=self.spec_length,
                           img_cols=self.spec_cols,
                           channel=1)
        self.discriminator = self.DCGAN.discriminator_model()
        self.adversarial = self.DCGAN.adversarial_model()
        self.generator = self.DCGAN.generator()

        # Populated by load_data().
        self.data_size = 0
        self.load_data()
Exemplo n.º 21
0
def main(_):
    """Train a DCGAN on an HDF5 dataset, auditing losses and saving samples/checkpoints."""
    # The checkpoint saver writes checkpoint files (usable to restore model
    # state later) and audits training progress to a log file.
    checkpoint_saver = CheckpointSaver(FLAGS.data_dir)
    checkpoint_saver.save_experiment_config(FLAGS.__dict__['__flags'])

    # load training data
    data_set, data_set_shape = hdf5_dataset.read_data_set(
        FLAGS.dataset,
        image_size=FLAGS.image_size,
        shape=(FLAGS.image_size, FLAGS.image_size, FLAGS.channels),
        binarized=FLAGS.binarized,
        validation=0)
    train_data = data_set.train

    # create a data visualizer and record a sample of the training data
    visualizer = ImageVisualizer(checkpoint_saver.get_experiment_dir(),
                                 image_size=FLAGS.image_size)
    visualizer.training_data_sample(train_data)

    # create the actual DCGAN model
    dcgan_model = DCGAN(FLAGS.image_size,
                        FLAGS.channels,
                        z_size=FLAGS.z_size,
                        learning_rate=FLAGS.learning_rate)

    print("start", type(dcgan_model).__name__, "model training")
    with tf.Session() as sess:

        sess.run(tf.global_variables_initializer())
        dcgan_model.initialize_summaries(sess,
                                         checkpoint_saver.get_experiment_dir())

        for epoch in range(FLAGS.max_epoch):

            # NOTE(review): d_loss/g_loss (and `images`) are read after this
            # loop, so an epoch yielding zero batches would raise
            # NameError/UnboundLocalError — assumes next_batch always yields
            # at least one batch. Confirm against the dataset implementation.
            for images in train_data.next_batch(FLAGS.batch_size):
                d_loss, g_loss = dcgan_model.update_params(sess, images)

            # log the last batch's losses for this epoch
            msg = "epoch: %3d" % epoch + " Discriminator loss %.4f" % d_loss + " Generator loss  %.4f" % g_loss
            checkpoint_saver.audit_loss(msg)

            dcgan_model.update_summaries(sess, images, epoch)

            # periodically write generated samples and checkpoints
            if epoch % FLAGS.generation_step == 0:
                visualizer.save_generated_samples(
                    dcgan_model.generate_samples(sess, num_samples=200), epoch)

            if epoch % FLAGS.checkpoint_step == 0:
                checkpoint_saver.save_checkpoint(dcgan_model.saver, sess,
                                                 epoch)
Exemplo n.º 22
0
def main(_):
    """Build a DCGAN, restore checkpoints, then train and/or generate samples.

    Raises:
        IOError: if checkpoints exist but cannot be read, or if sampling is
            requested with no checkpoint and training disabled.
    """
    pp.pprint(FLAGS.__flags)

    # training/inference
    with tf.Session() as sess:
        dcgan = DCGAN(sess, FLAGS)

        # ensure checkpoint/log/sample directories exist
        if not os.path.exists(FLAGS.checkpoint_dir):
            os.makedirs(FLAGS.checkpoint_dir)
        if not os.path.exists(
                os.path.join(FLAGS.log_dir, dcgan.get_model_dir())):
            os.makedirs(os.path.join(FLAGS.log_dir, dcgan.get_model_dir()))
        if not os.path.exists(
                os.path.join(FLAGS.sample_dir, dcgan.get_model_dir())):
            os.makedirs(os.path.join(FLAGS.sample_dir, dcgan.get_model_dir()))

        # FIX: Python-2 print statements converted to print() calls so the
        # snippet is valid on Python 3 as well.
        # load checkpoint if found
        if dcgan.checkpoint_exists():
            print("Loading checkpoints...")
            if dcgan.load():
                print("success!")
            else:
                raise IOError("Could not read checkpoints from {0}!".format(
                    FLAGS.checkpoint_dir))
        else:
            # Without a checkpoint we can only proceed when training was requested.
            if not FLAGS.train:
                raise IOError("No checkpoints found but need for sampling!")
            print("No checkpoints found. Training from scratch.")
            dcgan.load()

        # train DCGAN
        if FLAGS.train:
            train(dcgan)

        # inference/visualization
        print("Generating samples...")
        inference.sample_images(dcgan)
        print("Generating visualizations of z...")
        inference.visualize_z(dcgan)
Exemplo n.º 23
0
Arquivo: main.py Projeto: cmcuza/DCGAN
def main(_):
    """Parse flags, build the DCGAN, and run training or test/predict mode."""
    printer = pprint.PrettyPrinter()
    printer.pprint(flags.FLAGS.__flags)

    # Square images by default: a missing width falls back to the height.
    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    for directory in (FLAGS.checkpoint_dir, FLAGS.sample_dir):
        if not os.path.exists(directory):
            os.makedirs(directory)

    with tf.compat.v1.Session() as sess:
        dcgan = DCGAN(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.sample_num,
            dataset_name=FLAGS.dataset,
            input_fname_pattern=FLAGS.input_fname_pattern,
            crop=FLAGS.crop,
            checkpoint_dir=FLAGS.checkpoint_dir,
            data_dir=FLAGS.data_dir)

        # Report trainable-variable statistics.
        slim.model_analyzer.analyze_vars(tf.trainable_variables(),
                                         print_info=True)

        if FLAGS.train:
            dcgan.train(FLAGS)
            return

        # Test/predict modes require an existing trained model.
        if not dcgan.load(FLAGS.checkpoint_dir)[0]:
            raise Exception("[!] Train a model first, then run test mode")
        if FLAGS.predict:
            dcgan.predict(FLAGS.predict_dataset)
        else:
            dcgan.test()
Exemplo n.º 24
0
    def __init__(self, sess, flags):
        """Set up the inpainting model around a pre-built DCGAN.

        Args:
            sess: TensorFlow session shared with the DCGAN.
            flags: configuration; `img_size` gives the square image side.
        """
        self.sess = sess
        self.flags = flags
        # RGB image shape used by both this model and the wrapped DCGAN.
        self.image_size = (flags.img_size, flags.img_size, 3)

        # Optimization state; populated later (see _build_net / the solver).
        self.z_vectors, self.learning_rate, self.velocity = None, None, None
        self.masks, self.wmasks = None, None

        self.dcgan = DCGAN(sess, Flags(flags), self.image_size)
        self._build_net()
        self._tensorboard()

        print('Initialized Model Inpaint SUCCESS!')
Exemplo n.º 25
0
def run_test():
    """Train the DCGAN on every sample, or only those positive for `label`."""
    model_object = DCGAN(dataset, label, data_object.input_size, class_name, generator_arch, discriminator_arch, encoder_arch, learning_rate, batch_size)

    if label is None:
        model_object.train(x, epochs)
        return
    # Restrict training to rows whose one-hot label column is set.
    positive_rows = list(np.where(y[:, label] == 1)[0])
    model_object.train(x[positive_rows], epochs)
Exemplo n.º 26
0
def test(filepath):
    """Restore the trained DCGAN and write a grid of generated images to `filepath`.

    (Comments translated from Japanese.)
    """
    with tf.Session() as sess:
        # (1) Restore the model
        batch_size = 64
        dcgan = DCGAN(
            generator_layers=[1024, 512, 256, 128],
            discriminator_layer=[64, 128, 256, 512],
            batch_size=batch_size,
            image_inputs=tf.placeholder(tf.float32, [batch_size, SIZE, SIZE, 3]),
        )
        sess.run(tf.global_variables_initializer())
        g_saver = tf.train.Saver(dcgan.generator.variables)
        d_saver = tf.train.Saver(dcgan.discriminator.variables)
        g_saver.restore(sess=sess, save_path="../data/models/g_saver.ckpg")
        d_saver.restore(sess=sess, save_path="../data/models/d_saver.ckpg")

        # (2) Generate an 8x8 image grid from uniform random latent vectors
        sample_z = tf.random_uniform([dcgan.batch_size, dcgan.z_dim], minval=-1.0, maxval=1.0)
        images = dcgan.sample_images(8, 8, inputs=sample_z)
        with open(filepath, 'wb') as f:
            f.write(sess.run(images))
Exemplo n.º 27
0
def create_model(model=config.model,category=config.category):
    """Instantiate the GAN/VAE named by `model`; unknown names fall back to VAE_2."""
    dcgan_names = ('DCGAN_1', 'DCGAN_1X', 'DCGAN_1XX', 'DCGAN_2', 'DCGAN_3')
    vae_names = ('VAE_1', 'VAE_3', 'VAE_4')

    if model in dcgan_names:
        return DCGAN(name=model,category=category)
    if model in vae_names:
        return VAE(name=model,category=category)
    # VAE_2 is matched case-insensitively, mirroring the original selection.
    if model.upper() == 'VAE_2':
        return VAE(name='VAE_2',category=category)

    # Unknown name: fall back to VAE_2 and warn.
    my_model = VAE(name='VAE_2',category=category)
    print('The selected model {} is not in the list [DCGAN_1, DCGAN_1X, DCGAN_1XX, DCGAN_2, DCGAN_3, VAE_1, VAE_2, VAE_3, VAE_4]'.format(
        model))
    return my_model
Exemplo n.º 28
0
def main():
    """Train a DCGAN on MNIST with command-line-configurable hyperparameters."""
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    # (long flag, short flag, type, default) — one row per numeric/str option.
    option_rows = [
        ('--batch_size', '-bs', int, 64),
        ('--nb_epoch', '-e', int, 100),
        ('--save_steps', '-ss', int, 10),
        ('--lr_d', None, float, 2e-4),
        ('--lr_g', None, float, 2e-4),
        ('--ngf', '-ngf', int, 64),
        ('--ndf', '-ndf', int, 64),
        ('--latent_dim', '-ld', int, 128),
        ('--logdir', None, str, 'logs'),
    ]
    for long_flag, short_flag, opt_type, default in option_rows:
        if short_flag is None:
            parser.add_argument(long_flag, type=opt_type, default=default)
        else:
            parser.add_argument(long_flag, short_flag,
                                type=opt_type, default=default)
    parser.add_argument('--no-cuda',
                        dest='use_cuda',
                        action='store_false', default=True,
                        help='disables CUDA training')
    args = parser.parse_args()

    device = torch.device("cuda" if args.use_cuda else "cpu")
    loader_kwargs = {'num_workers': 1, 'pin_memory': True} if args.use_cuda else {}
    # Resize MNIST to 32x32 and map pixel values into [-1, 1].
    mnist_transform = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Lambda(lambda x: (x - 0.5) * 2),
    ])
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('./data', train=True, download=True,
                       transform=mnist_transform),
        batch_size=args.batch_size,
        shuffle=True,
        **loader_kwargs)

    generator = Generator(args.latent_dim, args.ngf)
    discriminator = Discriminator(args.ndf)
    gan = DCGAN(generator, discriminator, device)
    gan.init_params()

    if args.use_cuda:
        generator.cuda()
        discriminator.cuda()

    gan.fit(train_loader,
            nb_epoch=args.nb_epoch,
            lr_d=args.lr_d,
            lr_g=args.lr_g,
            save_steps=args.save_steps,
            logdir=args.logdir)
def main(argv=None):
    """Create log/model directories, build the DCGAN graph, and train it.

    Configuration comes entirely from the module-level ``FLAGS`` object;
    ``argv`` is accepted only to satisfy the TF app-runner convention.
    """
    log_dir, model_dir = generate_log_model_dirs(FLAGS.root_logdir,
                                                 FLAGS.root_model_dir)
    # Make sure both output locations exist before the session starts.
    for directory in (log_dir, model_dir):
        create_path(directory)

    tf.reset_default_graph()
    with tf.Session() as session:
        model = DCGAN(session, log_dir, model_dir)
        model.build_graph(FLAGS)
        model.train(FLAGS)
Exemplo n.º 30
0
def gan_repository(sess, flags, dataset):
    """Factory: construct the GAN variant named by ``flags.gan_model``.

    Args:
        sess: TensorFlow session passed to every model constructor.
        flags: parsed configuration object; ``flags.gan_model`` selects
            the variant.
        dataset: dataset wrapper. All models receive ``dataset.image_size``
            except WGAN (which takes the wrapper itself); the cycle/MRI
            variants additionally receive the result of calling ``dataset()``.

    Returns:
        The newly constructed GAN model instance.

    Raises:
        NotImplementedError: if ``flags.gan_model`` names no known variant.
    """
    # Registry of (banner text, zero-arg constructor). The lambdas keep
    # construction lazy, so dataset() -- potentially expensive/stateful --
    # is only invoked for the variant actually requested.
    registry = {
        'vanilla_gan': ('Vanilla GAN', lambda: GAN(sess, flags, dataset.image_size)),
        'dcgan': ('DCGAN', lambda: DCGAN(sess, flags, dataset.image_size)),
        'pix2pix': ('pix2pix', lambda: Pix2Pix(sess, flags, dataset.image_size)),
        'pix2pix-patch': ('pix2pix-patch', lambda: Pix2PixPatch(sess, flags, dataset.image_size)),
        'wgan': ('WGAN', lambda: WGAN(sess, flags, dataset)),
        'cyclegan': ('cyclegan', lambda: CycleGAN(sess, flags, dataset.image_size, dataset())),
        'mrigan': ('mrigan', lambda: MRIGAN(sess, flags, dataset.image_size, dataset())),
        'mrigan02': ('mrigan02', lambda: MRIGAN02(sess, flags, dataset.image_size, dataset())),
        'mrigan03': ('mrigan03', lambda: MRIGAN03(sess, flags, dataset.image_size, dataset())),
        'mrigan01_lsgan': ('mrigan01_lsgan', lambda: MRIGAN01_LSGAN(sess, flags, dataset.image_size, dataset())),
        'mrigan02_lsgan': ('mrigan02_lsgan', lambda: MRIGAN02_LSGAN(sess, flags, dataset.image_size, dataset())),
        'mrigan03_lsgan': ('mrigan03_lsgan', lambda: MRIGAN03_LSGAN(sess, flags, dataset.image_size, dataset())),
        'mrigan_01': ('mrigan_01', lambda: MRIGAN_01(sess, flags, dataset.image_size, dataset())),
        'mrigan_02': ('mrigan_02', lambda: MRIGAN_02(sess, flags, dataset.image_size, dataset())),
    }
    try:
        banner, build = registry[flags.gan_model]
    except KeyError:
        # Same exception type as before, now with a diagnostic message.
        raise NotImplementedError(
            'Unknown gan_model: {!r}'.format(flags.gan_model))
    print('Initializing {}...'.format(banner))
    return build()
def run_gan_main(input_f,
                 name='normal',
                 generated_num=10000,
                 output_dir='log',
                 epochs=10,
                 show_flg=True,
                 gan_type='naive_gan',
                 time_str='',
                 **kwargs):
    """Train a GAN on traffic data from ``input_f`` and synthesize samples.

    Pipeline: load dataset -> build model (DCGAN or NaiveGAN) -> train ->
    dump and reload the trained model -> generate ``generated_num`` samples
    and save them to CSV under ``output_dir``.

    Returns:
        A 3-tuple of (generated-samples CSV path, the model's loss-log file,
        the model's decision-log file).
    """
    # step 1: load the training set (no transform, no normalization)
    train = TrafficDataset(input_f, transform=None, normalization_flg=False)
    print('\'%s\' train size : (%d, %d) used for training \'%s\' naive_gan.' %
          (name, len(train.X), len(train.X[0]), name))

    # step 2.1: both model classes share the same constructor signature,
    # so only the class itself depends on gan_type (anything other than
    # 'dcgan' falls back to NaiveGAN).
    model_cls = DCGAN if gan_type == 'dcgan' else NaiveGAN
    model = model_cls(num_epochs=epochs,
                      num_features=len(train.X[0]),
                      batch_size=64,
                      show_flg=show_flg,
                      output_dir=output_dir,
                      GAN_name=name,
                      time_str=time_str)

    # step 2.2: train the model
    print('\nTraining begins ...')
    model.train(train)
    print('Train finished.')

    # step 3: round-trip the trained model through disk (dump, then reload)
    model_path = os.path.join(output_dir, 'gan_%s_model.p' % name)
    model_file = dump_model(model, model_path)
    model = load_model(model_file)

    # step 4: generate additional data and persist it as CSV
    print('generated_num is', generated_num)
    gen_data = model.generate_data(generated_num)
    out_csv = os.path.join(
        output_dir, 'gan_%s_model' % name +
        '_generated_%s_samples.csv' % str(generated_num))
    output_f = save_data(np.asarray(gen_data).tolist(), output_f=out_csv)

    return output_f, model.gan_loss_file, model.gan_decision_file
Exemplo n.º 32
0
def main(argv=None):
    """Build a DCGAN and either train it or write a grid of generated images.

    Uses legacy TF1 APIs (queue runners, ``initialize_all_variables``).
    ``FLAGS.is_train`` selects between the training loop and one-shot
    sampling; checkpoints and images go under ``FLAGS.train_dir`` /
    ``FLAGS.images_dir``.
    """
    dcgan = DCGAN(
        batch_size=128, f_size=6, z_dim=20,
        gdepth1=216, gdepth2=144, gdepth3=96,  gdepth4=64,
        ddepth1=64,  ddepth2=96,  ddepth3=144, ddepth4=216)
    input_images, num_samples = inputs(dcgan.batch_size, dcgan.f_size)
    train_op = dcgan.build(input_images, feature_matching=True)

    # Separate savers so generator and discriminator can be restored
    # independently (inference below needs only the generator).
    g_saver = tf.train.Saver(dcgan.g.variables)
    d_saver = tf.train.Saver(dcgan.d.variables)
    g_checkpoint_path = os.path.join(FLAGS.train_dir, 'g.ckpt')
    d_checkpoint_path = os.path.join(FLAGS.train_dir, 'd.ckpt')
    with tf.Session() as sess:
        # restore or initialize generator
        # NOTE(review): os.path.exists on a checkpoint *prefix* only matches
        # old single-file checkpoints; V2 checkpoints ('g.ckpt.index' etc.)
        # and the step-suffixed files saved below ('g.ckpt-<step>') will make
        # this test False -- confirm the intended checkpoint format.
        sess.run(tf.initialize_all_variables())
        if os.path.exists(g_checkpoint_path):
            print('restore variables:')
            for v in dcgan.g.variables:
                print('  ' + v.name)
            g_saver.restore(sess, g_checkpoint_path)

        if FLAGS.is_train:
            # restore or initialize discriminator
            if os.path.exists(d_checkpoint_path):
                print('restore variables:')
                for v in dcgan.d.variables:
                    print('  ' + v.name)
                d_saver.restore(sess, d_checkpoint_path)

            # setup for monitoring: a fixed latent batch so the periodic
            # sample images are comparable across steps
            sample_z = sess.run(tf.random_uniform([dcgan.batch_size, dcgan.z_dim], minval=-1.0, maxval=1.0))
            images = dcgan.sample_images(4, 4, inputs=sample_z)

            # start training
            tf.train.start_queue_runners(sess=sess)
            # shuffle inputs
            # Drains roughly one epoch of batches up front, presumably to
            # warm/shuffle the input queues before the measured loop -- the
            # fetched tensors are discarded.
            for _ in range(num_samples // dcgan.batch_size + 1):
                sess.run(input_images)
                print('.', end='', flush=True)
            print()
            for step in range(FLAGS.max_steps):
                start_time = time.time()
                # One combined step updates both networks and reports losses.
                _, g_loss, d_loss = sess.run([train_op, dcgan.losses['g'], dcgan.losses['d']])
                duration = time.time() - start_time
                format_str = '%s: step %d, loss = (G: %.8f, D: %.8f) (%.3f sec/batch)'
                print(format_str % (datetime.now(), step, g_loss, d_loss, duration))

                # save generated images
                if step % 100 == 0:
                    filename = os.path.join(FLAGS.images_dir, '%04d.jpg' % step)
                    with open(filename, 'wb') as f:
                        # sample_images is assumed to yield encoded JPEG bytes
                        f.write(sess.run(images))
                # save variables
                if step % 100 == 0:
                    g_saver.save(sess, g_checkpoint_path, global_step=step)
                    d_saver.save(sess, d_checkpoint_path, global_step=step)
        else:
            # Inference-only path: emit a single 8x8 grid of samples.
            generated = sess.run(dcgan.sample_images(8, 8))
            filename = os.path.join(FLAGS.images_dir, 'out.jpg')
            with open(filename, 'wb') as f:
                print('write to %s' % filename)
                f.write(generated)