Example 1
def main(args):
  mnist = utils.read_data_sets(args.train_dir)
  config_proto = utils.get_config_proto()

  with tf.device('/gpu:0'):
    if not os.path.exists("../saves"):
      os.mkdir("../saves")
    sess = tf.Session(config=config_proto)
    model = gan.GAN(args, sess)
    total_batch = mnist.train.num_examples // args.batch_size

    for epoch in range(1, args.nb_epochs + 1):
      for i in range(1, total_batch + 1):
        global_step = sess.run(model.global_step)
        x_batch, _ = mnist.train.next_batch(args.batch_size)
        noise = np.random.normal(size=[args.batch_size, args.noise_dim])

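        # one discriminator update followed by one generator update per minibatch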
        D_loss = model.d_batch_fit(x_batch, noise)
        G_loss = model.g_batch_fit(noise)

        if i % args.log_period == 0:
          print "Epoch: ", '%02d' % epoch, "Batch: ", '%04d' % i, "D_loss: ", '%9.9f' % D_loss, "G_loss: ", '%9.9f' % G_loss

      if epoch % 50 == 0:
        print "- " * 50

      if epoch % args.save_period == 0:
        if not os.path.exists("../saves/imgs"):
          os.mkdir("../saves/imgs")
        z = np.random.normal(size=[100, args.noise_dim])
        gen_images = np.reshape(model.generate(z), (100, 28, 28, 1))
        utils.save_images(gen_images, [10, 10], os.path.join(args.save_dir, "imgs/sample%s.jpg" % epoch))
Example 2
def test(path, mode, **kwargs):
    if mode == "gan":
        samplefiles = utils.parse_file([kwargs["samples"]], ext="h5")
        trainfiles = utils.parse_file(path, ext="pth")
        agent = gan.GAN(
            lr=PARAMS["GAN"]["lr"],
            x_size=PARAMS["GAN"]["x_size"],
            u_size=PARAMS["GAN"]["u_size"],
            z_size=PARAMS["GAN"]["z_size"],
        )

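        # evaluate each trained checkpoint against the same sample set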
        for trainfile in trainfiles:
            agent.load(trainfile)
            agent.eval()

            logger = logging.Logger(path="data/tmp.h5", max_len=500)

            dataloader = gan.get_dataloader(samplefiles, shuffle=False)
            for i, (state, action) in enumerate(tqdm.tqdm(dataloader)):
                fake_action = agent.get_action(state)
                state, action = map(torch.squeeze, (state, action))
                fake_action = fake_action.ravel()
                logger.record(state=state,
                              action=action,
                              fake_action=fake_action)

            logger.close()

            print(f"Test data is saved in {logger.path}")
Example 3
    def make_gan_model_separating_disc_gene(self):
        # make model
        self.gan = gan.GAN(latent_dim=self.LATENT_DIM,
                           data_dim=self.real_datas.shape[1])
        #self.gan.make_model(gene_hidden_neurons=[32, 16, 16], disc_hidden_neurons=[32, 16, 16])
        self.gan.make_model(gene_hidden_neurons=[32, 16, 16],
                            disc_hidden_neurons=[248, 124, 16])

        # train disc model
        for iep in range(self.TRAIN_EPOCH):
            self.gan.train_step_only_disc_with_random_noise(self.real_datas,
                                                            batch_size=32,
                                                            now_epoch=iep)

        # train gene model
        fig = plt.figure()
        ims = []
        ims.append(
            [self.__plot_gene_data(self.gan, data_num=3000, show=False)])
        # training epoch loop
        for iep in range(self.TRAIN_EPOCH):
            self.gan.train_step_only_gene(self.real_datas,
                                          batch_size=32,
                                          now_epoch=iep)

            # images for animation
            ims.append(
                [self.__plot_gene_data(self.gan, data_num=3000, show=False)])

        # graph of real and generated data
        ani = animation.ArtistAnimation(fig, ims, interval=100)
        ani.save('generated_point.gif', writer='pillow')
        plt.show()

        return
Example 4
def train_gans(lst_saved_models,
               dataloaders,
               trial,
               alpha,
               num_gans=1,
               num_epochs=2000,
               printProgress=True,
               updateEvery=50):
    """
    lst_saved_models: List of Tuples(ID, num_epoch)
    where ID is {trial}.{numGAN} and num_epoch is the epoch of the model that you want to restore

    NAMING CONVENTION: {trial}.{number that GAN is supposed to work on}
    """
    gans = []
    for i in range(num_gans):
        name = '{}.{}'.format(trial, i)
        gans.append(
            gan.GAN(trial,
                    name,
                    discriminator_steps=1,
                    generator_steps=2,
                    disc_input_dim=IMG_SIZE,
                    gen_input_dim=1,
                    lr_disc=.00075,
                    lr_gen=.00015,
                    label_smooth=True,
                    alpha=alpha))

    for i in range(num_gans):
        epoch = 0
        if lst_saved_models[i] is not None:
            print("--------Loading GAN", i,
                  " from a previously saved model--------")
            (ID, epoch) = lst_saved_models[i]
            utils.load_model(gans[i], trial, ID, epoch)
            assert (gans[i] is not None), "Model didn't exist!"

        if epoch < num_epochs - 1:
            print("--------Training GAN", i, "--------")
            gans[i].train(dataloaders[i],
                          num_epochs,
                          start_epoch=epoch,
                          printProgress=printProgress,
                          updateEvery=updateEvery)
        else:
            print("GAN", i, " was already fully trained to ", epoch,
                  " epochs.")

    return gans
Example 5
    def __init__(self, context: PyTorchTrialContext) -> None:
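        # per-rank data directory so distributed workers do not clobber each other's data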
        data_dir = f'/tmp/data-rank{context.distributed.get_rank()}'
        self.dm = gan.MNISTDataModule(context.get_data_config()['url'], data_dir,
                                      batch_size=context.get_per_slot_batch_size())
        channels, width, height = self.dm.size()
        lm = gan.GAN(channels, width, height,
                     batch_size=context.get_per_slot_batch_size(),
                     lr=context.get_hparam('lr'),
                     b1=context.get_hparam('b1'),
                     b2=context.get_hparam('b2'),
        )

        super().__init__(context, lightning_module=lm)
        self.dm.prepare_data()
Example 6
    def train_ae(self, input_data, test_data, sc_train_data, sc_test_data):
        i_shape = input_data.shape[1]
        s_time = time.time()

        ## Train Autoencoder
        #ae = autoencoder.SCAutoEncoder(input_dim=i_shape).train_model(input_data, test_data, sc_train_data, sc_test_data)

        ## Train GAN
        gen_net = gan.GAN(input_dim=i_shape).trainGAN(input_data, test_data,
                                                      sc_train_data,
                                                      sc_test_data)

        e_time = time.time()
        print("Training and prediction finished in {} seconds".format(
            int(e_time - s_time)))
Example 7
def main():
    gans = [gan.GAN(None, None, alpha=4) for _ in range(10)]
    trial = 5
    for i in range(len(gans)):
        ID = '{}.{}'.format(trial, i)
        utils.load_model(gans[i], trial, ID, 2000)
    filename = "./classifier_results/trial{}/accuracies".format(trial)
    utils.make_folder(filename)
    fin = open(filename, "a")

    for num_real in real_data_to_test:
        for num_synth in synth_data_to_test:
            if num_real == 0 and num_synth == 0:
                continue
            train_and_test(gans, trial, num_real, num_synth, fin)

    fin.close()
Example 8
def test(obj, **kwargs):
    agent = gan.GAN(lr=1e-3, x_size=1, u_size=1, z_size=30)

    for weightfile in tqdm.tqdm(sorted(kwargs["weightfiles"])):
        tqdm.tqdm.write(f"Using {weightfile} ...")

        agent.load(weightfile)
        agent.eval()

        gandir = os.path.dirname(weightfile)
        testpath = os.path.join(
            obj.test_dir,
            os.path.splitext(os.path.relpath(weightfile, obj.gan_dir))[0] +
            ".h5")
        samplefiles = torch.load(os.path.join(gandir, "sample_path.h5"))

        if isinstance(samplefiles, str):
            samplefiles = [samplefiles]

        data_all = [logging.load(name) for name in samplefiles]
        real_x, real_u, mask = [
            np.vstack([data[k] for data in data_all])
            for k in ("state", "action", "mask")
        ]
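        # synthesize fake states with the "even" data generator, then query the agent for fake actions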
        data_gen = DataGen(mode="even")
        fake_x = np.zeros_like(real_x)
        fake_u = np.zeros_like(real_u)
        for i in range(fake_x.size):
            fake_x[i] = data_gen()[0]
        fake_u = agent.get_action(fake_x)

        logging.save(
            testpath,
            dict(real_x=real_x,
                 real_u=real_u,
                 mask=mask,
                 fake_x=fake_x,
                 fake_u=fake_u))
        tqdm.tqdm.write(f"Test data is saved in {testpath}.")

        _plot(obj, testpath)
        plt.show()
Example 9
    def __init__(self,
                 modelfilename,
                 conf,
                 model_name='dcgan',
                 gen_input='z:0',
                 gen_output='gen_/tanh:0',
                 gen_loss='reduced_mean:0',
                 z_dim=100,
                 batch_size=64):

        self.conf = conf
        self.args = train.configure()

        self.batch_size = batch_size
        self.z_dim = z_dim

        # self.gi = self.graph.get_tensor_by_name(model_name+'/'+gen_input)
        # self.go = self.graph.get_tensor_by_name(model_name+'/'+gen_output)
        # self.gl = self.graph.get_tensor_by_name(model_name+'/'+gen_loss)

        self.gcgan = gan.GAN(self.args)

        self.gi = self.gcgan.z
        self.go = self.gcgan.gen_output
        self.gl = self.gcgan.G_loss

        self.image_shape = self.go.shape[1:].as_list()

        self.lamb = self.conf.lambda_p

        self.sess = tf.Session(graph=self.graph)

        self.z = np.random.randn(self.batch_size, self.z_dim)
Example 10
def main(argv):

    genre = None
    if len(argv) == 1:
        genre = argv[0]

    groove_df = pd.read_csv(DATA_PATH)

    #Remove rows that are only drum fills
    groove_df = groove_df[groove_df.beat_type != 'fill']
    short = groove_df[groove_df.duration <= 30]

    #groove_df = groove_df[groove_df.audio_filename.isna() == False]

    single_styles = groove_df[groove_df['duration'] <= 20]

    styles = groove_df['style'].value_counts()

    #Add the multi-style tracks to their first substyle.
    logging.info('Parsing multi-styled genres and re-distributing')
    for s in styles.index:
        if '/' in s:
            style_a, style_b = s.split('/')
            if style_a in styles.index:
                split_style = groove_df.query('style=="' + s + '"')
                #test_df = test_df.replace({'style':{s:style_a}})
                groove_df = groove_df.replace({'style': {s: style_a}})
            if style_b in styles.index:
                split_style = groove_df.query('style=="' + s + '"')
                #test_df = test_df.replace({'style':{s:style_b}})
                groove_df = groove_df.replace({'style': {s: style_b}})

    #Find number of notes per style
    styles = groove_df['style'].value_counts()
    style_durations = pd.DataFrame(columns=['style', 'max', 'min', 'sum'])
    for s in styles.index:
        style_df = groove_df.query('style=="' + s + '"')
        style_durations = style_durations.append(
            {
                'style': s,
                'max': style_df['duration'].max(),
                'min': style_df['duration'].min(),
                'sum': int(style_df['duration'].sum())
            },
            ignore_index=True)

    #Identify styles that do not have a significant total duration
    styles_removed = style_durations[style_durations['sum'] < 100]

    #Generates MIDI list for each genre
    genre_midi_list = {}
    for s in styles.index:
        style_df = groove_df.query('style=="' + s + '"')
        genre_midi_list[s] = style_df.midi_filename.tolist()

    #Determine if the model trains on all styles or one specified
    if genre is None:
        logging.info('Saving full note list')
        r_list = groove_df.midi_filename.tolist()
    #Check that specified genre is valid
    elif genre not in styles.index:
        logging.error('Genre is not valid')
        return 1
    else:
        logging.info('Saving note list for genre: %s', genre)
        r_list = genre_midi_list[genre]

    #Generate GAN model and begin training
    logging.info('Begin training GAN model')
    gan = g.GAN(rows=100)
    gan.train(genre_dataset=r_list,
              genre=genre,
              epochs=1000,
              batch_size=32,
              sample_interval=1)
Example 11
def train_gan(input_genre):

    #Set genre to None if we are using all genres for training.
    if input_genre == 'any':
        genre = None
    else:
        genre = input_genre

    groove_df = pd.read_csv(DATA_PATH)

    #Remove rows that are only drum fills
    groove_df = groove_df[groove_df.beat_type != 'fill']
    short = groove_df[groove_df.duration <= 30]

    single_styles = groove_df[groove_df['duration'] <= 20]

    styles = groove_df['style'].value_counts()

    #Add the multi-style tracks to their first substyle.
    for s in styles.index:
        if '/' in s:
            style_a, style_b = s.split('/')
            if style_a in styles.index:
                split_style = groove_df.query('style=="' + s + '"')
                groove_df = groove_df.replace({'style': {s: style_a}})
            if style_b in styles.index:
                split_style = groove_df.query('style=="' + s + '"')
                groove_df = groove_df.replace({'style': {s: style_b}})

    #Find number of notes per style
    styles = groove_df['style'].value_counts()
    style_durations = pd.DataFrame(columns=['style', 'max', 'min', 'sum'])
    for s in styles.index:
        style_df = groove_df.query('style=="' + s + '"')
        style_durations = style_durations.append(
            {
                'style': s,
                'max': style_df['duration'].max(),
                'min': style_df['duration'].min(),
                'sum': int(style_df['duration'].sum())
            },
            ignore_index=True)

    #Identify styles that do not have a significant total duration
    styles_removed = style_durations[style_durations['sum'] < 100]

    #Generates MIDI list for each genre
    genre_midi_list = {}
    for s in styles.index:
        style_df = groove_df.query('style=="' + s + '"')
        genre_midi_list[s] = style_df.midi_filename.tolist()

    #Determine if the model trains on all styles or one specified
    if genre is None:
        logging.info('Saving full note list')
        r_list = groove_df.midi_filename.tolist()
    else:
        logging.info('Saving note list for genre: %s', genre)
        r_list = genre_midi_list[genre]

    #Generate GAN model and begin training
    logging.info('Begin training GAN model')
    gan = g.GAN(rows=100)
    gan.train(genre_dataset=r_list,
              genre=genre,
              epochs=1000,
              batch_size=32,
              sample_interval=1)
Example 12
def train(dataset_path,
          batch_size,
          model_root,
          d_learning_rate,
          g_learning_rate,
          iteration,
          image_size,
          dim,
          istrain=True):
    print("start training..")

    #read data
    data_cele = readfile.CelebA(dataset_path)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    session = tf.Session(config=config)  #create a session

    #create a GAN object
    gan_model = gan.GAN(image_size, batch_size, dim, istrain)

    #build a graph and get the loss
    z, d, D_loss, G_loss = gan_model.build_model()

    #optimizer
    theta_D = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='dis_')
    d_optimizer = tf.train.AdamOptimizer(
        learning_rate=d_learning_rate).minimize(D_loss, var_list=theta_D)

    theta_G = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='gen_')
    g_optimizer = tf.train.AdamOptimizer(
        learning_rate=g_learning_rate).minimize(G_loss, var_list=theta_G)

    #initialize
    saver = tf.train.Saver()
    session.run(tf.global_variables_initializer())

    # summary
    summary_op = tf.summary.merge_all()
    writer = tf.summary.FileWriter(os.path.join(model_root, "logs/"),
                                   session.graph)

    for i in range(iteration):

        _, D_loss_curr, D_summary = session.run(
            [d_optimizer, D_loss, summary_op],
            feed_dict={
                z: sample_z(batch_size, dim),
                d: data_cele.train_next_batch(batch_size, image_size)
            })
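        # run the generator update twice for every discriminator update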
        _, G_loss_curr = session.run(
            [g_optimizer, G_loss],
            feed_dict={
                z: sample_z(batch_size, dim),
                d: data_cele.train_next_batch(batch_size, image_size)
            })
        _, G_loss_curr = session.run(
            [g_optimizer, G_loss],
            feed_dict={
                z: sample_z(batch_size, dim),
                d: data_cele.train_next_batch(batch_size, image_size)
            })
        print("iter: ", i, " finished, D loss: ", D_loss_curr, ", G loss: ",
              G_loss_curr)
        writer.add_summary(D_summary, i)
        if i % 100 == 0:
            save_tf_model(session, model_root, saver, i, pb=True)

    print("Training finished!")
Example 13
import tensorflow as tf
import numpy as np
import os
import gan
from ImageOperation.images2one import *
from scipy import misc

save_path = 'output/checkpoint.ckpt'
batch_size = 128
hidden_size = 128
learning_rate = 1e-2
outter_epoch = 10000
inner_epoch = 10

# model
model = gan.GAN(hidden_size, batch_size, learning_rate)

# generate images
saver = tf.train.Saver()
saver.restore(model.sess, save_path)
sampled_images = model.sess.run(
    model.sampled_tensor,
    feed_dict={model.input_tensor: np.zeros([batch_size, 28 * 28])})
sampled_images = sampled_images.reshape(-1, 28, 28)

# save the image
img = images2one(sampled_images[:100])

if not os.path.exists('images'):
    os.makedirs('images')
misc.imsave('images/generate.png', img)
Example 14
def train(obj, samples, **kwargs):
    for sample in samples:
        basedir = os.path.relpath(sample, obj.sample_dir)

        if os.path.isdir(sample):
            samplefiles = sorted(glob.glob(os.path.join(sample, "*.h5")))
        elif os.path.isfile(sample):
            samplefiles = sample
            basedir = os.path.splitext(basedir)[0]
        else:
            raise ValueError("unknown sample type.")

        gandir = os.path.join(obj.gan_dir, basedir)

        if kwargs["continue"] is None and os.path.exists(gandir):
            shutil.rmtree(gandir)
        os.makedirs(gandir, exist_ok=True)

        torch.save(
            samplefiles,
            os.path.join(gandir, "sample_path.h5"),
        )

        print(f"Train GAN for sample ({sample}) ...")

        save_interval = int(kwargs["save_interval"])

        agent = gan.GAN(
            lr=kwargs["lr"],
            x_size=1,
            u_size=1,
            z_size=kwargs["z_size"],
            use_cuda=kwargs["use_cuda"],
        )

        prog = functools.partial(
            _gan_prog,
            agent=agent,
            files=samplefiles,
            batch_size=kwargs["batch_size"],
        )

        histpath = os.path.join(gandir, "train_history.h5")

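        # resume from a saved checkpoint when kwargs["continue"] is set; otherwise start fresh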
        if kwargs["continue"] is not None:
            epoch_start = agent.load(kwargs["continue"])
            logger = logging.Logger(path=histpath,
                                    max_len=kwargs["save_interval"],
                                    mode="r+")
        else:
            epoch_start = 0
            logger = logging.Logger(path=histpath,
                                    max_len=kwargs["save_interval"])

        t0 = time.time()
        for epoch in tqdm.trange(epoch_start,
                                 epoch_start + 1 + kwargs["max_epoch"]):
            loss_d, loss_g = prog(epoch)

            logger.record(epoch=epoch, loss_d=loss_d, loss_g=loss_g)

            if (epoch % save_interval == 0
                    or epoch == epoch_start + kwargs["max_epoch"]):
                savepath = os.path.join(gandir, f"trained_{epoch:05d}.pth")
                agent.save(epoch, savepath)
                tqdm.tqdm.write(f"Weights are saved in {savepath}.")

        logger.close()

        print(f"Elapsed time: {time.time() - t0:5.2f} sec")
Example 15
# basedir = 'SNGAN_nsnfixed'
# basedir = 'WGAN_MNIST'
basedir = 'CIFAR'

for i in checkpoints:  #range(6):
    netG.load_state_dict(
        torch.load('{}/gen_{}.pth'.format(basedir, i),
                   map_location=lambda storage, loc: storage))
    netG.eval()

    opt = gan.Options()
    opt.cuda = False
    opt.nz = (128, 1, 1)
    opt.batch_size = 64

    gan1 = gan.GAN(netG, None, None, None, opt)

    iterator_fake = gan1.fake_data_generator(opt.batch_size, opt.nz, None)

    a = next(iterator_fake).data.cpu()

    b = torch.from_numpy(np.zeros((opt.batch_size, 3, 48, 80)) - 1)
    # b.zero_()
    # b[:,:2,:,:] = a
    b = a
    b = b / 2.0 + 0.5
    save_image(b, '{}/sample_fake{}.jpeg'.format(basedir, i))
    print(b.min(), b.max())

# opt = gan.Options()
Example 16
    # fake = next(data_iter)
    # print(fake.min(), fake.max())

    # fake = fake.view(-1, 3, 32, 32)

    fake_01 = (fake.cpu() + 1.0) * 0.5
    # print(fake_01.min(), fake_01.max())

    save_image(fake_01, opt.path + 'tmp/' + '{:0>5}.png'.format(i_iter), nrow=41)
    gan.netG.train()


def callback(gan, i_iter):

    if i_iter % 50 == 0:
        save_samples(gan, i_iter)

    if i_iter % 50 == 0:
        log.save()


gan1 = gan.GAN(netG=netG, netD=netD, optimizerD=optimizerD, optimizerG=optimizerG, opt=opt)

gan1.train(data_iter, opt, logger=log, callback=callback)

torch.save(netG.state_dict(), opt.path + 'gen.pth')
torch.save(netD.state_dict(), opt.path + 'disc.pth')

log.close()

Example 17
#!/usr/bin/env python3.6

import sys

import matplotlib as mpl
mpl.use('Agg')

import gan
import keras
from keras_contrib.layers import InstanceNormalization
import time
import tensorflow as tf

g = gan.GAN(gray=False, size=128, dis_depth=64, gen_depth=64)

g.generator(silent=True)
g.discriminator(silent=True)

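# optional argv[2] bitmask: bit 0 loads a saved generator, bit 1 a saved discriminator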
if len(sys.argv) > 1:
    c = 3 if len(sys.argv) <= 2 else int(sys.argv[2])
    if c & 1:
        g.G = keras.models.load_model(f"../model_{sys.argv[1]}_G.h5")
        g.G.summary()
        print("GENERATOR LOADED")
        time.sleep(1)
    else:
        g.generator(silent=True)

    if c & 2:
        g.D = keras.models.load_model(f"../model_{sys.argv[1]}_D.h5")
        g.D.summary()
Example 18
def main():
    start_time = time.time()  # clocking start

    # mnist data loading
    mnist = DataSet(dataset_name='mnist')
    mnist = mnist.mnist

    # GPU configure
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as s:
        # GAN Model
        model = gan.GAN(s)

        # initializing
        s.run(tf.global_variables_initializer())

        sample_x, _ = mnist.train.next_batch(model.sample_num)
        sample_z = np.random.uniform(
            -1., 1., [model.sample_num, model.z_dim]).astype(np.float32)

        # original sample image
        #  original_image_height = model.sample_size
        #  original_image_width = model.sample_size
        #  original_dir = dirs['sample_output'] + 'original.png'

        # original image save
        #  original_x = sample_x.reshape([-1, model.input_height, model.input_width, model.channel])
        #  iu.save_images(original_x, size=[original_image_height, original_image_width],
        #                 image_path=original_dir)

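        # skip discriminator updates while it overpowers the generator (flag set below)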
        d_overpowered = False
        for step in range(paras['global_step']):
            batch_x, _ = mnist.train.next_batch(model.batch_size)
            batch_x = batch_x.reshape(-1, model.n_input)

            # generate z
            batch_z = np.random.uniform(-1.,
                                        1.,
                                        size=[model.batch_size, model.z_dim
                                              ]).astype(np.float32)  # 64 x 128

            # update D network
            if not d_overpowered:
                _, d_loss = s.run([model.d_op, model.d_loss],
                                  feed_dict={
                                      model.x: batch_x,
                                      model.z: batch_z
                                  })

            # update G network
            _, g_loss = s.run([model.g_op, model.g_loss],
                              feed_dict={
                                  model.x: batch_x,
                                  model.z: batch_z
                              })

            if step % paras['logging_interval'] == 0:
                batch_x, _ = mnist.test.next_batch(model.batch_size)
                batch_z = np.random.uniform(
                    -1., 1.,
                    [model.batch_size, model.z_dim]).astype(np.float32)

                d_loss, g_loss, summary = s.run(
                    [model.d_loss, model.g_loss, model.merged],
                    feed_dict={
                        model.x: batch_x,
                        model.z: batch_z
                    })

                # print loss
                print("[+] Step %08d => " % (step),
                      "D loss : {:.8f}".format(d_loss),
                      " G loss : {:.8f}".format(g_loss))

                # update overpowered
                d_overpowered = d_loss < g_loss / 2

                # training G model with sample image and noise
                samples = s.run(model.G,
                                feed_dict={
                                    model.x: sample_x,
                                    model.z: sample_z
                                })

                # summary saver
                model.writer.add_summary(summary, step)

                # export image generated by model G
                sample_image_height = model.sample_size
                sample_image_width = model.sample_size
                sample_dir = dirs['sample_output'] + 'train_{:08d}.png'.format(
                    step)

                # Generated image save
                iu.save_images(samples,
                               size=[sample_image_height, sample_image_width],
                               image_path=sample_dir)

                # model save
                model.saver.save(s, dirs['model'], global_step=step)

    end_time = time.time() - start_time

    # elapsed time
    print("[+] Elapsed time {:.8f}s".format(end_time))

Example 19
def train(sample, mode, **kwargs):
    samplefiles = utils.parse_file(sample, ext="h5")

    if mode == "gan" or mode == "all":
        torch.manual_seed(0)
        np.random.seed(0)

        gandir = kwargs["gan_dir"]
        histpath = os.path.join(gandir, "train-history.h5")

        print("Train GAN ...")

        agent = gan.GAN(
            lr=kwargs["gan_lr"],
            x_size=PARAMS["GAN"]["x_size"],
            u_size=PARAMS["GAN"]["u_size"],
            z_size=PARAMS["GAN"]["z_size"],
            use_cuda=kwargs["use_cuda"],
        )

        if kwargs["continue"] is not None:
            epoch_start = agent.load(kwargs["continue"])
            logger = logging.Logger(path=histpath,
                                    max_len=kwargs["save_interval"],
                                    mode="r+")
        else:
            epoch_start = 0
            logger = logging.Logger(path=histpath,
                                    max_len=kwargs["save_interval"])

        t0 = time.time()
        for epoch in tqdm.trange(epoch_start,
                                 epoch_start + 1 + kwargs["max_epoch"]):
            dataloader = gan.get_dataloader(samplefiles,
                                            shuffle=True,
                                            batch_size=kwargs["batch_size"])

            loss_d = loss_g = 0
            for i, data in enumerate(tqdm.tqdm(dataloader)):
                agent.set_input(data)
                agent.train()
                loss_d += agent.loss_d.mean().detach().cpu().numpy()
                loss_g += agent.loss_g.mean().detach().cpu().numpy()

            logger.record(epoch=epoch, loss_d=loss_d, loss_g=loss_g)

            if (epoch % kwargs["save_interval"] == 0
                    or epoch == epoch_start + 1 + kwargs["max_epoch"]):
                savepath = os.path.join(gandir, f"trained-{epoch:05d}.pth")
                agent.save(epoch, savepath)
                tqdm.tqdm.write(f"Weights are saved in {savepath}.")

        print(f"Elapsed time: {time.time() - t0:5.2f} sec")

    if mode == "copdac" or mode == "all":
        np.random.seed(1)

        env = envs.BaseEnv(initial_perturb=[0, 0, 0, 0.2])

        copdacdir = kwargs["copdac_dir"]

        agentname = "COPDAC"
        Agent = getattr(agents, agentname)
        agent = Agent(
            env,
            lrw=PARAMS["COPDAC"]["lrw"],
            lrv=PARAMS["COPDAC"]["lrv"],
            lrtheta=PARAMS["COPDAC"]["lrtheta"],
            w_init=PARAMS["COPDAC"]["w_init"],
            v_init=PARAMS["COPDAC"]["v_init"],
            theta_init=PARAMS["COPDAC"]["theta_init"],
            maxlen=PARAMS["COPDAC"]["maxlen"],
            batch_size=PARAMS["COPDAC"]["batch_size"],
        )

        expname = "-".join([type(n).__name__ for n in (env, agent)])
        if kwargs["with_gan"]:
            expname += "-gan"
            agent.set_gan(kwargs["with_gan"], PARAMS["COPDAC"]["lrg"])

        if kwargs["with_reg"]:
            expname += "-reg"
            agent.set_reg(PARAMS["COPDAC"]["lrc"])

        histpath = os.path.join(copdacdir, expname + ".h5")
        if kwargs["continue"] is not None:
            epoch_start, i = agent.load(kwargs["continue"])
            logger = logging.Logger(path=histpath, max_len=100, mode="r+")
        else:
            epoch_start, i = 0, 0
            logger = logging.Logger(path=histpath, max_len=100)

        print(f"Training {agentname}...")

        epoch_end = epoch_start + kwargs["max_epoch"]
        for epoch in tqdm.trange(epoch_start, epoch_end):
            dataloader = gan.get_dataloader(samplefiles,
                                            keys=("state", "action", "reward",
                                                  "next_state"),
                                            shuffle=True,
                                            batch_size=64)

            for data in tqdm.tqdm(dataloader, desc=f"Epoch {epoch}"):
                agent.set_input(data)
                agent.train()

                if i % kwargs["save_interval"] == 0 or i == len(dataloader):
                    logger.record(epoch=epoch,
                                  i=i,
                                  w=agent.w,
                                  v=agent.v,
                                  theta=agent.theta,
                                  loss=agent.get_losses())

                i += 1

        logger.close()
Example 20
netG.load_state_dict(torch.load('multiGAN_CIFAR/gen_100000.pth'))


def save_samples(gan, i_iter):
    gan.netG.eval()

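    # cache one fixed noise batch on the function object so every call renders the same latents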
    if 'noise' not in save_samples.__dict__:
        save_samples.noise = Variable(gan.gen_latent_noise(64, opt.nz))

    if not os.path.exists(opt.path + 'tmp/'):
        os.makedirs(opt.path + 'tmp/')

    fake = gan.gen_fake_data(64, opt.nz, noise=save_samples.noise)
    # fake = next(data_iter)
    # print(fake.min(), fake.max())

    fake = fake.view(-1, 3, 32, 32)

    fake_01 = (fake.data.cpu() + 1.0) * 0.5
    # print(fake_01.min(), fake_01.max())

    save_image(fake_01,
               opt.path + 'tmp/' + '{:0>5}.jpeg'.format(i_iter),
               nrow=10)
    gan.netG.train()


gan1 = gan.GAN(netG=netG, netD=None, optimizerD=None, optimizerG=None, opt=opt)

save_samples(gan1, 'final')
Example 21
cf_latent_dim = 100
cf_num_models_load = 12000
cf_batch_size = 256
cf_gen_lr = 0.0001
cf_dis_lr = 0.0004

#%%
lg = logger.logger() #root_dir = '/home/starstorms/Insight/shape/runs/0131-2024')
lg.writeConfig(locals(), [gan])

#%%
# vox_in_dir = '/home/starstorms/Insight/ShapeNet/all/'
train_dataset = ut.loadData(cf_vox_size, cf_num_models_load, lg.vox_in_dir, cf_cat_prefixes, cf_batch_size)

#%% Make gen model and show initial
model = gan.GAN(cf_latent_dim, cf_vox_size, cf_gen_lr, cf_dis_lr)
model.printMSums()
model.printIO()
lg.setupCP(encoder=model.dis_model, generator=model.gen_model, gen_opt=model.gen_opt, dis_opt=model.dis_opt)
model.setLR(cf_gen_lr, cf_dis_lr)

#%%
def train(dataset, epochs):
  print('\n\nStarting training...\n\n')
  gen_loss_metric = tf.keras.metrics.Mean()
  dis_loss_metric = tf.keras.metrics.Mean()
  
  for epoch in range(1, epochs):
    start = time.time()

    for vox_batch, labels in dataset:
Example 22
def c2st(netG, netG_path, netD_0, gan_type, opt, real_dataset, selected=None, logger=None):

    netG.load_state_dict(torch.load(netG_path))
    netG.eval()

    gan1 = gan.GAN(netG, None, None, None, opt)
    opt = copy(opt)
    opt.conditional = False

    data = real_dataset

    if selected is not None:
        iterator_fake = gan1.fake_data_generator(opt.batch_size, opt.nz, None, selected=selected, drop_labels=True)
    else:
        iterator_fake = gan1.fake_data_generator(opt.batch_size, opt.nz, None)

    random_state = [23, 42, 180, 34, 194, 3424, 234, 23423, 221, 236]

    roc_list = []
    loss_list = []

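    # classifier two-sample test: train a fresh copy of the discriminator per attempt, then score real vs. fake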
    for attempt in range(N_ATTEMPTS):

        train_indices, test_indices = train_test_split(range(len(data)), test_size=0.1, random_state=random_state[attempt])

        netD = deepcopy(netD_0)
        # netD = mnistnet.Discriminator()
        netD.train()
        optimizerD = torch.optim.Adam(netD.parameters(), lr=2e-4, betas=(.5, .999))
        gan_t = gan_type(None, netD, optimizerD, None, opt)


        # for _ in range(N_EPOCHS):
        #     iterator_real = varIter(DataLoader(data, sampler=SubsetRandomSampler(train_indices), batch_size=opt.batch_size), opt)
        #     for i_iter in tqdm(range(int(len(train_indices) / opt.batch_size))):
        #         gan_t.train_D_one_step(iterator_real, iterator_fake)

        iterator_real = datasets.MyDataLoader().return_iterator(
            DataLoader(data, sampler=SubsetRandomSampler(train_indices),
                batch_size=opt.batch_size), is_cuda=opt.cuda,
            conditional=opt.conditional, n_classes=opt.n_classes)

        for i_iter in tqdm(range(N_ITER)):
            loss, _, _ = gan_t.train_D_one_step(iterator_real, iterator_fake)
            if logger is not None:
                logger.add('disc_loss{}'.format(attempt), loss, i_iter)


        gan_t.save(attempt)

        # iterator_real = varIter(DataLoader(data, sampler=SubsetRandomSampler(test_indices), batch_size=opt.batch_size), opt)

        iterator_real = datasets.MyDataLoader().return_iterator(
            DataLoader(data, sampler=SubsetRandomSampler(test_indices),
                batch_size=opt.batch_size), is_cuda=opt.cuda,
            conditional=opt.conditional, n_classes=opt.n_classes)

        err = 0

        loss = []
        y_true = []
        y_score = []

        for i in range(int(len(test_indices) / opt.batch_size)):
            batch_real = next(iterator_real)
            batch_fake = next(iterator_fake)

            if gan_type == wgan.WGANGP:
                y_true = y_true + [0] * batch_real.size()[0]
                y_true = y_true + [1] * batch_real.size()[0]
            else:
                y_true = y_true + [1] * batch_real.size()[0]
                y_true = y_true + [0] * batch_real.size()[0]


            y_score = y_score + list(gan_t.netD(batch_real).cpu().data.numpy())
            y_score = y_score + list(gan_t.netD(batch_fake).cpu().data.numpy())

            loss.append(float(gan_t.compute_disc_score(batch_real, batch_fake).data.cpu().numpy()))

        loss = np.mean(loss)
        roc = roc_auc_score(y_true, y_score)

        loss_list.append(loss)
        roc_list.append(roc)

    return loss_list, roc_list

# opt = gan.Options()
# opt.cuda = True
# opt.nz = (100,1,1)
# opt.batch_size = 50

# print(c2st(Generator(), 'wgan_test/gen_1000.pth', Discriminator(), wgan.WGANGP, opt))
Example 23
if opt.beta2 is None:
    opt.beta2 = gan_choice({ 'gan': 0.999, 'wgan': 0.9, 'wgan-gp': 0.9}, 'beta2')

if optimizer_name == 'adam':
    optimizerD = optim.Adam(netD.parameters(), lr=opt.lrD, betas=(opt.beta1, opt.beta2))
    #,weight_decay=opt.weight_decay)
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lrG, betas=(opt.beta1, opt.beta2))
    #,weight_decay=opt.weight_decay)
elif optimizer_name == 'rmsprop':
    optimizerD = optim.RMSprop(netD.parameters(), lr=opt.lrD)
    optimizerG = optim.RMSprop(netG.parameters(), lr=opt.lrG)
else:
    raise RuntimeError("Do not recognize optimizer %s" % opt.optimizer)

# create the GAN class
gan_model = gan_choice({'gan': gan.GAN(netG, netD, optimizerD, optimizerG, opt),
                        'wgan': wgan.WGAN(netG, netD, optimizerD, optimizerG, opt),
                        'wgan-gp': wgan.WGANGP(netG, netD, optimizerD, optimizerG, opt)},
                       'GAN_algorithm')

# the main operation
if execution_mode == 'train':
    # train the model
    print('Training begins...')
    gan_model.train(dataloader_train, opt)

elif execution_mode == 'reconstruction':
    reconstruction.run_experiment(gan_model.netG, dataloader_test, opt.dataroot, opt, optimize_red_first=False)
else:
    raise RuntimeError("Unknown mode: {0}".format(opt.mode))
Example 24
File: main.py Project: nicy1/hmm
from sklearn.metrics import classification_report

util = utils.Utils()

# Read and normalize the dataset according to the model
reader = readfile.read('telepathology1.csv')
trainX, trainY, testX, testY = reader.get_data()

# Define GAN parameters
epochs = len(trainX)
latent_dim = 1
feature_num = 1
batch_size = 1

# Build GAN
gan = gan.GAN(latent_dim, feature_num)
gan.load_data(trainX, trainY)
# Train GAN
start = time.time()
generator = gan.train(epochs, batch_size)
stop = time.time()
print("Training time: %.2f s" % (stop - start))

# Test the GAN + HMM model
y_pred = []
for i in range(len(testX)):
    temp_X = np.array(testX[i]).reshape(batch_size, latent_dim, feature_num)
    predictions = generator.predict(temp_X)
    y_pred.append(predictions[0][0])
Example 25
import bigan
import dcgan
import wgan
import gan

EPO = 20000
BAT_SIZE = 64
INTERVAL = 19999


if __name__ == '__main__':

    dcgan = dcgan.DCGAN()
    #dcgan.train(epochs=EPO, batch_size=BAT_SIZE, save_interval=INTERVAL)
    del dcgan

    bigan = bigan.BIGAN()
    #bigan.train(epochs=EPO, batch_size=BAT_SIZE, sample_interval=INTERVAL)
    del bigan

    wgan = wgan.WGAN()
    #wgan.train(epochs=EPO, batch_size=BAT_SIZE, sample_interval=INTERVAL)
    del wgan

    gan = gan.GAN()
    del gan
Example 26
    def set_gan(self, path, lrg):
        self.gan = gan.GAN(x_size=4, u_size=4, z_size=100)
        self.gan.load(path)
        self.gan.eval()
        self.lrg = lrg
        self.is_gan = True
Example 27
if optimizer_name == 'adam':
    optimizerD = optim.Adam(netD.parameters(),
                            lr=opt.lrD,
                            betas=(opt.beta1, opt.beta2))
    optimizerG = optim.Adam(netG.parameters(),
                            lr=opt.lrG,
                            betas=(opt.beta1, opt.beta2))
elif optimizer_name == 'rmsprop':
    optimizerD = optim.RMSprop(netD.parameters(), lr=opt.lrD)
    optimizerG = optim.RMSprop(netG.parameters(), lr=opt.lrG)
else:
    raise RuntimeError("Do not recognize optimizer %s" % opt.optimizer)

# create the GAN class
gan_model = gan_choice(
    {
        'gan': gan.GAN(netG, netD, optimizerD, optimizerG, opt),
        'wgan': wgan.WGAN(netG, netD, optimizerD, optimizerG, opt),
        'wgan-gp': wgan.WGANGP(netG, netD, optimizerD, optimizerG, opt)
    }, 'GAN_algorithm')

# the main operation
if execution_mode == 'train':
    # train the model
    gan_model.train(dataloader_train, opt)
elif execution_mode == 'eval-gen-vs-real':
    # save samples
    fixed_noise = gan_model.generate_fixed_noise(opt)
    gan_model.save_samples(None, fixed_noise, None, opt)
    # train an independent discriminator
    gan_model.train_disc_fake_vs_data(dataloader_train, opt)
    # compute the two-sample score