Example #1
def train_cifar10():
    print('*** DCGAN trained with cifar10 ***')
    data = dataloader.cifar10.load('cifar10')['img']
    model = dcgan.DCGAN(data)
    try:
        model.train(steps=3000)
    except Exception as e:
        print(e)
    finally:
        model.save_log('train_log.pickle')
        model.G.save('generator.h5')
Example #2
def train_dogs():
    print('*** DCGAN trained with dogs ***')
    data = dataloader.dogs.load('dogs')
    model = dcgan.DCGAN(data)
    try:
        model.train(steps=10000, save_interval=500)
    except Exception as e:
        print(e)
    finally:
        model.save_log('train_log.pickle')
        model.G.save('generator.h5')
Example #3
def main(lr, batch_size, num_epochs, save_path):

    x, _, _, _ = utils.import_mnist()

    # use the function parameters instead of hard-coded values
    gan_model = dcgan.DCGAN(learning_rate=lr,
                            batch_size=batch_size,
                            num_epochs=num_epochs,
                            save_path=save_path)

    gan_model.pretrain(x)
    gan_model.train(x, num_iter=100)
    print("Training Done!")
Example #4
def main(_):
    sess = tf.Session()
    dcgan_obj = dcgan.DCGAN(sess, FLAGS.checkpoint_dir, FLAGS.gen_sample_dim,
                            FLAGS.input_size, FLAGS.layer_ratio,
                            FLAGS.disc_base_dim, FLAGS.gen_base_dim,
                            FLAGS.learning_rate)
    dcgan_obj.build_network()
    dcgan_obj.init_model()
    dp = data_provider.DataProvider(FLAGS.data_dir, FLAGS.input_size)
    dp.load()
    for img in dp.iter():
        prob = dcgan_obj.predict([img])
        print(prob)
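The FLAGS object used above is TensorFlow 1.x's tf.app.flags. A sketch of how these flags might be declared follows; the flag names are taken from the example, but the defaults and help strings are assumptions.

import tensorflow as tf

# Hypothetical flag definitions for the example above; defaults are assumed.
tf.app.flags.DEFINE_string('checkpoint_dir', './checkpoints', 'Where to save and restore the model.')
tf.app.flags.DEFINE_string('data_dir', './data', 'Directory containing the input images.')
tf.app.flags.DEFINE_integer('input_size', 64, 'Height/width of the input images.')
tf.app.flags.DEFINE_integer('gen_sample_dim', 100, 'Dimension of the generator noise vector.')
tf.app.flags.DEFINE_integer('layer_ratio', 2, 'Ratio between consecutive layer widths.')
tf.app.flags.DEFINE_integer('disc_base_dim', 64, 'Base channel count of the discriminator.')
tf.app.flags.DEFINE_integer('gen_base_dim', 64, 'Base channel count of the generator.')
tf.app.flags.DEFINE_float('learning_rate', 2e-4, 'Optimizer learning rate.')
FLAGS = tf.app.flags.FLAGS

if __name__ == '__main__':
    tf.app.run()  # parses flags and calls main(_)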
Example #5
def create_model(model_name, latent_size, color=False, binary=False):
    if model_name == 'ae':
        ae = auto_encoder.AE((28, 28, 3 if color else 1),
                             latent_size,
                             variational=False,
                             binary=binary)
        return ae, ae.encoder, ae.decoder
    elif model_name == 'vae':
        vae = auto_encoder.AE((28, 28, 3 if color else 1),
                              latent_size,
                              variational=True)
        return vae, vae.encoder, vae.decoder
    elif model_name == 'dcgan':
        gan = dcgan.DCGAN(color)
        return gan, gan.discriminator, gan.generator
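A possible call site for create_model; the model name and latent size below are illustrative values, not taken from the original code.

# Hypothetical usage: build a variational auto-encoder for grayscale 28x28 inputs.
model, encoder, decoder = create_model('vae', latent_size=32, color=False)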
Example #6
def main(_):
    sess = tf.Session()
    dcgan_obj = dcgan.DCGAN(sess, FLAGS.checkpoint_dir, FLAGS.gen_sample_dim,
                            FLAGS.input_size, FLAGS.layer_ratio,
                            FLAGS.disc_base_dim, FLAGS.gen_base_dim,
                            FLAGS.learning_rate)
    dcgan_obj.build_network()
    dcgan_obj.init_model()
    dp = data_provider.DataProvider(FLAGS.data_dir, FLAGS.input_size)
    dp.load()
    try:
        for batch in dp.batchs(10000, FLAGS.batch_size):
            dcgan_obj.train(batch, FLAGS.sample_size)
    except Exception as e:
        # report training errors instead of silently swallowing them, then still save
        print(e)
    dcgan_obj.save_model()
Example #7
import bigan
import dcgan
import wgan
import gan

EPO = 20000
BAT_SIZE = 64
INTERVAL = 19999



if __name__ == '__main__':

    dcgan = dcgan.DCGAN()
    #dcgan.train(epochs=EPO, batch_size=BAT_SIZE, save_interval=INTERVAL)
    del dcgan

    bigan = bigan.BIGAN()
    #bigan.train(epochs=EPO, batch_size=BAT_SIZE, sample_interval=INTERVAL)
    del bigan

    wgan = wgan.WGAN()
    #wgan.train(epochs=EPO, batch_size=BAT_SIZE, sample_interval=INTERVAL)
    del wgan

    gan = gan.GAN()
    del gan
Example #8
    k_tmp_vocab = cPickle.load(open(os.path.join(args.prepro_dir, "k_tmp_vocab_ids.dat"), 'rb'))
    vocab_processor = Vocab_Operator.restore(args.vocab)        

else:
    img_feat, tags_idx, a_tags_idx, vocab_processor, k_tmp_vocab = data_utils.load_train_data(args.train_dir,
    args.tag_path, args.prepro_dir, args.vocab)        


img_feat = np.array(img_feat, dtype='float32')/127.5 - 1.
test_tags_idx = data_utils.load_test(args.test_path, vocab_processor, k_tmp_vocab)

print("Image feature shape: {}".format(img_feat.shape))
print("Tags index shape: {}".format(tags_idx.shape))
print("Attribute Tags index shape: {}".format(a_tags_idx.shape))
print("Test Tags index shape: {}".format(test_tags_idx.shape))

data = Data(img_feat, tags_idx, a_tags_idx, test_tags_idx, args.z_dim, vocab_processor)



dcgan = dcgan.DCGAN(model_options, training_options, data, args.mode, args.resume, args.model_dir)

input_tensors, variables, loss, outputs, checks = dcgan.build_model()

if args.mode == 0: 
    dcgan.train(input_tensors, variables, loss, outputs, checks)
else:
    dcgan.test()
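The fragment above assumes an args namespace (and model_options/training_options) built elsewhere in the script. A rough argparse sketch of the options it references is given below; only the option names come from the fragment, while types and defaults are guesses.

import argparse

# Hypothetical argument parser for the snippet above; defaults are placeholders.
parser = argparse.ArgumentParser()
parser.add_argument('--train_dir', type=str, default='./train')
parser.add_argument('--tag_path', type=str, default='./tags.csv')
parser.add_argument('--test_path', type=str, default='./test_text.txt')
parser.add_argument('--prepro_dir', type=str, default='./prepro')
parser.add_argument('--vocab', type=str, default='./vocab')
parser.add_argument('--model_dir', type=str, default='./models')
parser.add_argument('--z_dim', type=int, default=100)
parser.add_argument('--mode', type=int, default=0, help='0: train, otherwise test')
parser.add_argument('--resume', action='store_true')
args = parser.parse_args()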
Example #9
def main():
    start_time = time.time()  # clocking start

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as s:
        # DCGAN model
        model = dcgan.DCGAN(s, batch_size=paras['batch_size'])

        # initialize variables first so a subsequent restore is not overwritten
        tf.global_variables_initializer().run()

        # load model & graph & weights
        ckpt = tf.train.get_checkpoint_state('./model/')
        if ckpt and ckpt.model_checkpoint_path:
            # restore from checkpoint
            model.saver.restore(s, ckpt.model_checkpoint_path)

            global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            print("[+] global step : %s" % global_step, " successfully loaded")
        else:
            global_step = 0
            print('[-] No checkpoint file found')

        # training, test data set
        dataset = DataSet(input_height=32,
                          input_width=32,
                          input_channel=3, dataset_name='cifar-100')  # Dataset(dirs['cifar-100'])
        dataset_iter = DataIterator(dataset.train_images, dataset.train_labels, paras['batch_size'])

        sample_images = dataset.valid_images[:model.sample_num].astype(np.float32) / 255.0
        sample_z = np.random.uniform(-1., 1., size=(model.sample_num, model.z_dim))  # noise

        # export real image
        valid_image_height = model.sample_size
        valid_image_width = model.sample_size
        sample_dir = dirs['sample_output'] + 'valid.png'

        # Generated image save
        iu.save_images(sample_images, size=[valid_image_height, valid_image_width], image_path=sample_dir)

        d_overpowered = False  # G loss > D loss * 2

        step = int(global_step)
        cont = int(step / 750)
        for epoch in range(cont, cont + paras['epoch']):
            for batch_images, _ in dataset_iter.iterate():
                batch_images = batch_images.astype(np.float32) / 255.0
                batch_z = np.random.uniform(-1.0, 1.0, [paras['batch_size'], model.z_dim]).astype(np.float32)  # noise

                # update D network
                if not d_overpowered:
                    s.run(model.d_op, feed_dict={model.x: batch_images, model.z: batch_z})

                # update G network
                s.run(model.g_op, feed_dict={model.z: batch_z})

                if step % paras['logging_interval'] == 0:
                    batch_images = dataset.valid_images[:paras['batch_size']].astype(np.float32) / 255.0
                    batch_z = np.random.uniform(-1.0, 1.0, [paras['batch_size'], model.z_dim]).astype(np.float32)

                    d_loss, g_loss, summary = s.run([
                        model.d_loss,
                        model.g_loss,
                        model.merged
                    ], feed_dict={
                        model.x: batch_images,
                        model.z: batch_z
                    })

                    # print loss
                    print("[+] Epoch %03d Step %05d => " % (epoch, step),
                          "D loss : {:.8f}".format(d_loss), " G loss : {:.8f}".format(g_loss))

                    # update overpowered
                    d_overpowered = d_loss < g_loss / 2

                    # generate sample images from G using the fixed sample noise
                    samples = s.run(model.G, feed_dict={
                        model.x: sample_images,
                        model.z: sample_z
                    })

                    # summary saver
                    model.writer.add_summary(summary, step)

                    # export image generated by model G
                    sample_image_height = model.sample_size
                    sample_image_width = model.sample_size
                    sample_dir = dirs['sample_output'] + 'train_{0}_{1}.png'.format(epoch, step)

                    # Generated image save
                    iu.save_images(samples, size=[sample_image_height, sample_image_width], image_path=sample_dir)

                    # model save
                    model.saver.save(s, dirs['model'], global_step=step)

                step += 1

        end_time = time.time() - start_time

        # elapsed time
        print("[+] Elapsed time {:.8f}s".format(end_time))

        # close tf.Session
        s.close()
Example #10
def run(args):
    """ load mnist data """
    #X_train, X_test, X_test_original, Y_test = load.load_mnist_data()
    """ load image data """
    X_train, test_img = load.load_image_data(args.datapath, args.testpath,
                                             args.imgsize, args.mode)
    """ load csv data """
    #X_train, Y_test, X_test_original, Y_test = load.load_csv_data(args.datapath, args.imgsize)
    """ init DCGAN """
    print("initialize DCGAN ")
    DCGAN = dcgan.DCGAN(args)
    """ train DCGAN(generator & discriminator) """
    if args.mode == 'train':
        print('============ train on DCGAN ============')
        DCGAN.train(X_train)

    print("trained")
    """ test generator """
    gen_img = DCGAN.generate(25)
    img = DCGAN.plot_generate_images(gen_img)
    img = (img * 127.5) + 127.5
    img = img.astype(np.uint8)
    img = cv2.resize(img, None, fx=4, fy=4, interpolation=cv2.INTER_NEAREST)
    """ openCV view """
    #cv2.namedWindow('generated', 0)
    #cv2.resizeWindow('generated', 256, 256)
    #cv2.imshow('generated', img)
    #cv2.imwrite('generator.png', img)
    #cv2.waitKey()
    """ plt view """
    plt.figure(num=0, figsize=(4, 4))
    plt.title('trained generator')
    plt.imshow(img, cmap=plt.cm.gray)
    plt.show()
    """ other class anomaly detection """
    # compute anomaly score - sample from test set
    #test_img = X_test_original[Y_test==1][30]
    # compute anomaly score - sample from strange image
    #test_img = X_test_original[Y_test==0][30]

    # compute anomaly score - sample from strange image
    #img_idx = args.img_idx
    #label_idx = args.label_idx
    #test_img = X_test_original[Y_test==label_idx][img_idx]
    #test_img = np.random.uniform(-1, 1, (args.imgsize, args.imgsize, args.channels))

    start = cv2.getTickCount()
    score, query, pred, diff = anomaly_detection(test_img, args)
    time = (cv2.getTickCount() - start) / cv2.getTickFrequency() * 1000
    #print ('%d label, %d : done ' %(label_idx, img_idx), '%.2f' %score, '%.2fms'%time)
    """ matplot view """
    plt.figure(1, figsize=(3, 3))
    plt.title('query image')
    plt.imshow(query.reshape(args.imgsize, args.imgsize), cmap=plt.cm.gray)
    plt.savefig('query_image.png')

    print('anomaly score :', score)
    plt.figure(2, figsize=(3, 3))
    plt.title('generated similar image')
    plt.imshow(pred.reshape(args.imgsize, args.imgsize), cmap=plt.cm.gray)
    plt.savefig('generated_similar.png')

    plt.figure(3, figsize=(3, 3))
    plt.title('anomaly detection')
    plt.imshow(cv2.cvtColor(diff, cv2.COLOR_BGR2RGB))
    plt.savefig('diff.png')
    plt.show()
Example #11

# Train Module
# Module to train a DCGAN model on image data.

import dcgan
import utility

input_arguments = utility.parse_input_arguments(module="train")
images = utility.load_images(input_arguments.image_path)
images = utility.preprocess(images)
utility.shuffle(images)

dcgan = dcgan.DCGAN(images.shape[1], images.shape[2], images.shape[3])
dcgan.summary()

dcgan.train(images,
            epochs=input_arguments.epochs,
            batch_size=input_arguments.batch_size,
            saving_frequency=input_arguments.saving_frequency,
            output_path=input_arguments.output_path)
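utility.parse_input_arguments is not shown in this example; a plausible argparse-based sketch, with assumed defaults, could look like this.

import argparse

def parse_input_arguments(module="train"):
    # Hypothetical stand-in for utility.parse_input_arguments; defaults are assumed.
    parser = argparse.ArgumentParser(description="Train a DCGAN model on image data.")
    parser.add_argument("--image_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, default="./output")
    parser.add_argument("--epochs", type=int, default=100)
    parser.add_argument("--batch_size", type=int, default=64)
    parser.add_argument("--saving_frequency", type=int, default=10)
    return parser.parse_args()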