Пример #1
0
def generate(args):
    """Generate a batch of fake signals with a trained DCGAN generator,
    score them with the critic, then plot and export them as CSV.

    Args:
        args: configuration object/dict accepted by DCGAN and DataLoader.
    """
    # NOTE(review): the original body referenced an undefined global
    # `config` while the `args` parameter went unused — treat the argument
    # as the configuration (confirm no module-level `config` was intended).
    config = args

    # Create a new DCGAN object
    dcgan = DCGAN(config)

    # Load existing model from saved_models folder (you can pass different
    # indexes to see the effect on the generated signal)
    dcgan.load()  # loads the last trained generator
    # dcgan.load(500)
    # dcgan.load(1000)
    # dcgan.load(2000)
    # dcgan.load(3000)

    # Create a DataLoader utility object
    data_loader = DataLoader(config)

    # Select a random batch of real signals (used to size the latent noise)
    signals = data_loader.get_training_batch()

    # Generate latent noise for the generator
    noise = dcgan.generate_noise(signals)

    # Generate prediction
    gen_signal = dcgan.generator.predict(noise)

    # Score the fakes with the critic (kept for interactive inspection;
    # the result is not used below)
    validated = dcgan.critic.predict(gen_signal)

    # Plot and save prediction: flatten (batch, len, 1) -> (batch, len)
    plot_prediction(gen_signal)
    gen_signal = np.reshape(gen_signal,
                            (gen_signal.shape[0], gen_signal.shape[1]))
    np.savetxt('./output/generated_signal.csv', gen_signal, delimiter=",")
Пример #2
0
def run_anime():
    """Train a 3-channel DCGAN on the anime dataset, caching the dataset.

    The dataset is pickled to FNAME on the first run and loaded from the
    cache on later runs.
    """
    FNAME = "animeds.pyobj"
    if os.path.isfile(FNAME):
        # BUG FIX: pickle streams are binary — text mode ('r'/'w') raises
        # under Python 3 and corrupts data under Python 2 on Windows.
        with open(FNAME, 'rb') as f:
            anime_dataset = pickle.load(f)
    else:
        anime_dataset = get_dataset()
        with open(FNAME, 'wb') as f:
            pickle.dump(anime_dataset, f)

    model = DCGAN(100, (64, 64), [1024, 512, 256, 128], [64, 128, 256, 512],
                  colorspace_dim=3)
    model.train(anime_dataset, img_size=96, num_steps=30000, d_steps=1)
Пример #3
0
 def init_model(self):
     # Build the DCGAN from the image size in the training config
     # (self.t_c) plus the remaining keyword args from the model
     # config dict (self.m_c).
     self.model = DCGAN(img_size=self.t_c.img_size, **self.m_c)
Пример #4
0
from train.train_dcgan import train
from models.dcgan import DCGAN

if __name__ == "__main__":
    # Hyperparameters for the DCGAN run.
    real_size = (64,64,3)  # shape of the real images fed to the network
    z_size = 100           # length of the generator's latent vector
    learning_rate = 0.0004
    batch_size = 128
    epochs = 10
    alpha = 0.2            # passed to DCGAN; presumably LeakyReLU slope — confirm
    beta1 = 0.5            # passed to DCGAN; presumably Adam beta1 — confirm
    dim = 64               # target side length used by path_to_tensor

    # Load meme image paths and convert each to a dim x dim tensor.
    # NOTE(review): hard-coded absolute Windows path — adjust per machine.
    memes = get_memes_data('C:\\Users\\Albert\\Documents\\GitHub\\DeepMeme\\data\\me_irl')
    reshaped_memes = np.array([path_to_tensor(img_path, dim) for img_path in memes])

    # Timestamped TensorBoard log directory.
    now = datetime.utcnow().strftime('%Y%m%d%H%M%S')
    root_logdir = "tf_logs"
    logdir = "{}/run-{}/".format(root_logdir, now)

    # Build the network first so its loss tensors exist before the
    # summary ops and the FileWriter snapshot the default graph.
    net = DCGAN(real_size, z_size, learning_rate, alpha, beta1)
    generator_summary = tf.summary.scalar('g_loss', net.g_loss)
    discriminator_summary = tf.summary.scalar('d_loss', net.d_loss)
    file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())
    print(tf.get_default_graph())

    reshaped_memes = reshaped_memes[:100] # start with 100 images only

    # 80/20 train/test split, then run training.
    trainset, testset = train_test_split(reshaped_memes, test_size=0.2)
    dataset = Dataset(trainset, testset)
    losses, samples = train(net, dataset, epochs, batch_size, figsize=(10,5))
Пример #5
0
def main(args=None):
    """Parse CLI args, build the requested GAN variant, optionally load
    pretrained weights, train on MNIST, and visualize results.

    Args:
        args: list of CLI argument strings; defaults to sys.argv[1:].

    Raises:
        ValueError: if args.type is not a supported GAN variant.
        Exception: if neither --model nor --train was supplied.
    """
    # Parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # Check if a GPU Id was set
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # Load appropriate model:
    if args.type == 'DCGAN':  # Deep Convolutional GAN
        model = DCGAN(args)
    elif args.type == 'WGAN':  # Wasserstein GAN
        model = WGAN(args)
    elif args.type == 'CGAN':  # Conditional GAN
        model = CGAN(args)
    elif args.type == 'InfoGAN':  # InfoGAN
        model = InfoGAN(args)
    else:
        # BUG FIX: an unrecognized type previously left `model` unbound and
        # crashed later with a confusing NameError.
        raise ValueError('Unknown model type: {}'.format(args.type))

    # Load pre-trained weights
    if args.model:
        model.load_weights(args.model)
    elif not args.train:
        raise Exception('Please specify path to pretrained model')

    # Load MNIST Data, pre-train D for a couple of iterations and train model
    if args.train:
        X_train, y_train, _, _, N = import_mnist()
        model.pre_train(X_train, y_train)
        model.train(X_train,
                    bs=args.batch_size,
                    nb_epoch=args.nb_epochs,
                    nb_iter=X_train.shape[0] // args.batch_size,
                    y_train=y_train,
                    save_path=args.save_path)

    # (Optional) Visualize results
    if args.visualize:
        model.visualize()
Пример #6
0
def main(model_name):
    """Train the requested GAN variant ("gan" or "dcgan") on MNIST.

    Checkpoints generator/discriminator weights every epoch and pickles
    the cumulative per-epoch losses plus generated samples under
    .ckpts/<model_name>/.

    Args:
        model_name: "gan" or "dcgan"; anything else prints an error and
            returns without training.
    """
    # exist_ok avoids the racy isdir()-then-mkdir() pattern of the original.
    os.makedirs(".ckpts", exist_ok=True)

    if model_name not in ["gan", "dcgan"]:
        print("The model name is wrong!")
        return

    ckpt_path = ".ckpts/%s/" % model_name
    os.makedirs(ckpt_path, exist_ok=True)

    with open("config.json") as f:
        config = json.load(f)[model_name]

    loader = MNISTLoader()

    if model_name == "gan":
        model = GAN(loader.feature_depth, config["latent_depth"])
    else:  # "dcgan" — model_name was validated above
        model = DCGAN(loader.feature_shape, config["latent_depth"])

    # One epoch sweeps the combined train+test pool once, in random batches.
    steps_per_epoch = (loader.num_train_sets +
                       loader.num_test_sets) // config["batch_size"]

    # Train on the union of train and test features, normalized.
    features = np.vstack([loader.train_features, loader.test_features])
    features = feature_normalize(features)

    generator_losses_epoch = []
    discriminator_losses_epoch = []
    generated_images = []
    for i in range(1, config["num_epochs"] + 1):
        generator_loss_epoch = []
        discriminator_loss_epoch = []
        for _ in range(steps_per_epoch):
            # Sample a batch of real images without replacement.
            sampled_indices = np.random.choice(
                loader.num_train_sets + loader.num_test_sets,
                config["batch_size"],
                replace=False)
            real_samples = features[sampled_indices]

            generator_loss, discriminator_loss = model.train_one_step(
                real_samples)

            generator_loss_epoch.append(generator_loss)
            discriminator_loss_epoch.append(discriminator_loss)

        # Mean losses over the epoch.
        generator_loss_epoch = np.mean(generator_loss_epoch)
        discriminator_loss_epoch = np.mean(discriminator_loss_epoch)
        print(
            "Epoch: %i,  Generator Loss: %f,  Discriminator Loss: %f" % \
                (i, generator_loss_epoch, discriminator_loss_epoch)
        )
        generator_losses_epoch.append(generator_loss_epoch)
        discriminator_losses_epoch.append(discriminator_loss_epoch)

        # Checkpoint weights every epoch.
        torch.save(model.generator.state_dict(),
                   ckpt_path + "generator_%i.ckpt" % i)
        torch.save(model.discriminator.state_dict(),
                   ckpt_path + "discriminator_%i.ckpt" % i)

        # Generate a batch of samples for visual progress tracking.
        faked_samples = feature_denormalize(
            model.generate(config["batch_size"]))
        generated_images.append(faked_samples.detach().numpy())

        # Re-dump cumulative results each epoch so progress survives a crash.
        with open(ckpt_path + "results.pkl", "wb") as f:
            pickle.dump((generator_losses_epoch, discriminator_losses_epoch,
                         generated_images), f)
                      bias=False), nn.BatchNorm2d(nf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(nf * 4,
                      nf * 8,
                      kernel_size=4,
                      stride=2,
                      padding=1,
                      bias=False), nn.BatchNorm2d(nf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(nf * 8,
                      1,
                      kernel_size=2,
                      stride=2,
                      padding=0,
                      bias=False), nn.Sigmoid())

    def forward(self, x):
        # Run the conv stack, then flatten to one score per batch element.
        scores = self.main(x)
        return scores.view(-1, 1).squeeze(1)


# Hyperparameters plus freshly built networks for the DCGAN wrapper.
params = dict(
    device='cuda',
    size_z=100,
    lr_g=0.0002,  # generator learning rate
    lr_d=0.0002,  # discriminator learning rate
    g=Generator(),
    d=Discriminator(),
)

model = DCGAN(**params)
                      stride=2,
                      padding=1,
                      bias=False), nn.BatchNorm2d(nf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(nf * 8,
                      1,
                      kernel_size=2,
                      stride=2,
                      padding=0,
                      bias=False), nn.Sigmoid())

    def forward(self, x):
        # Collapse the conv-stack output into a 1-D batch of scores.
        out = self.main(x)
        out = out.view(-1, 1)
        return out.squeeze(1)


# Build the DCGAN with fresh generator/discriminator networks, then attach
# step-decay LR schedulers (gamma=0.5 every 25 steps) to both optimizers.
params = dict(
    device='cuda',
    size_z=100,
    lr_g=0.0002,
    lr_d=0.0002,
    g=Generator(),
    d=Discriminator(),
)

model = DCGAN(**params)
for _opt_attr, _sched_attr in (('optim_g', 'scheduler_g'),
                               ('optim_d', 'scheduler_d')):
    setattr(model, _sched_attr,
            torch.optim.lr_scheduler.StepLR(getattr(model, _opt_attr),
                                            step_size=25,
                                            gamma=0.5))
Пример #9
0
def main(args=None):
    """Plot the generator's loss landscape around its trained weights.

    For each selected generator layer, two random directions A and B are
    sampled and rescaled filter-wise to match the layer's weight norms;
    the generator loss is then evaluated on an (n_x, n_y) grid of
    perturbed weights W + x*A + y*B, saved to disk, and contour-plotted.

    Args:
        args: list of CLI argument strings; defaults to sys.argv[1:].

    Raises:
        Exception: if no pretrained model path was supplied.
    """
    # Parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # Check if a GPU Id was set
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # Load appropriate model:
    if args.type == 'DCGAN':  # Deep Convolutional GAN
        model = DCGAN(args)
    elif (args.type == 'WGAN'):  # Wasserstein GAN
        model = WGAN(args)
    elif (args.type == 'CGAN'):  # Conditional GAN
        model = CGAN(args)
    elif (args.type == 'InfoGAN'):  # InfoGAN
        model = InfoGAN(args)

    # Load pre-trained weights
    if args.model:
        model.load_weights(args.model)
    elif not args.train:
        raise Exception('Please specify path to pretrained model')

    # Generator layers whose weights get perturbed.
    layers = [0, 4, 7, 9, 11]
    old_params = []
    A = []
    B = []

    n_x = 50         # grid resolution along x
    n_y = 50         # grid resolution along y
    n_samples = 500  # samples per loss evaluation

    for l in layers:
        W, b = model.G.layers[l].get_weights()
        old_params.append((W, b))

        # Random direction A, normalized per last-axis column and rescaled
        # to match the corresponding norms of W.
        A_W = np.random.randn(*W.shape)
        A_W /= np.linalg.norm(A_W.reshape(-1, A_W.shape[-1]), axis=0)
        A_W *= np.linalg.norm(W.reshape(-1, W.shape[-1]), axis=0)
        A.append(A_W)

        # Random direction B, rescaled the same way.
        B_W = np.random.randn(*W.shape)
        B_W /= np.linalg.norm(B_W.reshape(-1, B_W.shape[-1]), axis=0)
        B_W *= np.linalg.norm(W.reshape(-1, W.shape[-1]), axis=0)
        B.append(B_W)

    xs = np.linspace(-3, 3, n_x)
    ys = np.linspace(-3, 3, n_y)
    loss = np.zeros((n_x, n_y))

    # BUG FIX: the outer loop originally iterated `ys`, ignoring `xs`
    # entirely; it only "worked" because n_x == n_y. The first grid axis
    # must follow xs.
    for i, x in enumerate(xs):
        for j, y in enumerate(ys):
            for A_W, B_W, (W, b), l in zip(A, B, old_params, layers):
                model.G.layers[l].set_weights((W + x * A_W + y * B_W, b))
            print((i, j))
            loss[i, j] = model.eval_gen_loss(n_samples)

    print('Done!')

    np.save(
        '{}_loss_n_samples_{}_xlarge_epoch_100'.format(args.type, n_samples),
        loss)
    xx, yy = np.meshgrid(xs, ys)
    plt.contour(xx, yy, loss)
    plt.savefig(
        'figures/{}_landscape_n_samples_{}_xlarge_epoch_100.png'.format(
            args.type, n_samples))
Пример #10
0
def train(config):
    """Adversarially train the DCGAN, logging signal-quality metrics.

    Each epoch: sample a real batch, generate fakes, compute FFT/DTW/
    cross-correlation metrics on the fake the critic scores highest,
    train the critic on real+fake batches, then train the generator via
    the combined model. Samples, loss plots, and model weights are saved
    every config["sample_interval"] epochs (subject to the config flags)
    and once more after the final epoch.

    Args:
        config: dict with at least "batch_size", "epochs",
            "sample_interval", "save_sample", "plot_losses",
            "save_models", plus whatever DCGAN/DataLoader expect.
    """

    # Create a new DCGAN object
    dcgan = DCGAN(config, training=True)

    # Create a DataLoader utility object
    data_loader = DataLoader(config)

    # Adversarial ground truths: 1 = real, 0 = fake
    valid = np.ones((config["batch_size"], 1))
    fake = np.zeros((config["batch_size"], 1))

    # Per-epoch metric history, consumed by plot_losses().
    metrics = []

    for epoch in range(config["epochs"]):

        # Select a random batch of signals
        signals = data_loader.get_training_batch()

        # Generate latent noise for generator
        noise = dcgan.generate_noise(signals)

        # Generate a batch of new fake signals and evaluate them against the discriminator
        gen_signal = dcgan.generator.predict(noise)
        validated = dcgan.critic.predict(gen_signal)

        #Sample real and fake signals

        # ---------------------
        #  Calculate metrics
        # ---------------------

        # Calculate metrics on best fake data (highest critic score)
        metrics_index = np.argmax(validated)

        #Calculate metrics on first fake data
        #metrics_index = 0

        generated = gen_signal[metrics_index].flatten()
        reference = signals[metrics_index].flatten()
        fft_metric, fft_ref, fft_gen = loss_fft(reference, generated)
        dtw_metric = dtw_distance(reference, generated)
        cc_metric = cross_correlation(reference, generated)

        # ---------------------
        #  Train Discriminator
        # ---------------------
        d_loss_real = dcgan.critic.model.train_on_batch(
            signals, valid)  #train on real data
        d_loss_fake = dcgan.critic.model.train_on_batch(
            gen_signal, fake)  #train on fake data
        d_loss = 0.5 * np.add(d_loss_fake, d_loss_real)  #mean loss

        # ---------------------
        #  Train Generator
        # ---------------------

        g_loss = dcgan.combined.train_on_batch(noise,
                                               valid)  #train combined model

        # Plot the progress
        print(
            "%d [D loss: %f, acc: %f] [G loss: %f] [FFT Metric: %f] [DTW Metric: %f] [CC Metric: %f]"
            % (epoch, d_loss[0], d_loss[1], g_loss, fft_metric, dtw_metric,
               cc_metric[0]))
        metrics.append([[d_loss[0]], [g_loss], [fft_metric], [dtw_metric],
                        [cc_metric[0]]])

        # If at save interval => save generated image samples
        if epoch % config["sample_interval"] == 0:
            if config["save_sample"]:
                dcgan.save_sample(epoch, signals)

            if config["plot_losses"]:
                plot_losses(metrics, epoch)

            if config["save_models"]:
                dcgan.save_critic(epoch)
                dcgan.save_generator(epoch)

    # Final save after the last epoch (uses the last loop values of
    # `epoch` and `signals`).
    dcgan.save_sample(epoch, signals)
    dcgan.save_critic()
    dcgan.save_generator()
    plot_losses(metrics, epoch)
Пример #11
0
def run_mnist():
    """Train a single-channel DCGAN on the MNIST training split."""
    dataset = input_data.read_data_sets("MNIST_data", one_hot=True)
    gen_filters = [1024, 512, 256, 128]
    disc_filters = [64, 128, 256, 512]
    model = DCGAN(32, (64, 64), gen_filters, disc_filters, colorspace_dim=1)
    model.train(dataset.train, img_size=28, num_steps=15000, d_steps=1)
Пример #12
0
def main():
    """Train a DCGAN on the tensorflow_datasets dataset named in conf.

    Checkpoints the generator/discriminator each epoch and pickles the
    cumulative per-epoch losses together with generated sample batches.
    """
    model_spec_name = "%s-model-spec.json" % conf.MODEL_NAME
    model_rslt_name = "%s-results.pickle" % conf.MODEL_NAME

    model_save_path = os.path.join(conf.MODEL_SAVE_DIR, conf.MODEL_NAME)
    if not os.path.exists(model_save_path):
        os.makedirs(model_save_path)

    model_ckpt_path = os.path.join(model_save_path, "model-ckpt")
    model_spec_path = os.path.join(model_save_path, model_spec_name)
    model_rslt_path = os.path.join(model_save_path, model_rslt_name)

    hyparams = conf.HYPARAMS[conf.DATASET]

    latent_depth = conf.LATENT_DEPTH

    batch_size = conf.BATCH_SIZE
    num_epochs = conf.NUM_EPOCHS

    loader, info = tfds.load(conf.DATASET, in_memory=True, with_info=True)
    # Infinite shuffled stream of training batches.
    train_loader = loader["train"].repeat().shuffle(1024).batch(batch_size)

    num_sets = info.splits["train"].num_examples

    feature_shape = info.features["image"].shape
    feature_depth = np.prod(feature_shape)

    model = DCGAN(project_shape=hyparams["project_shape"],
                  gen_filters_list=hyparams["gen_filters_list"],
                  gen_strides_list=hyparams["gen_strides_list"],
                  disc_filters_list=hyparams["disc_filters_list"],
                  disc_strides_list=hyparams["disc_strides_list"])

    generator_opt = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
    discriminator_opt = tf.keras.optimizers.Adam(learning_rate=0.0002,
                                                 beta_1=0.5)

    @tf.function
    def train_step(x, z):
        """One adversarial update; returns (generator_loss, discriminator_loss)."""
        # Record only the forward/loss computation on the tapes.
        with tf.GradientTape() as generator_tape, tf.GradientTape(
        ) as discriminator_tape:
            generator_loss = model.generator_loss(z)
            discriminator_loss = model.discriminator_loss(x, z)

        # BUG FIX: gradients were previously computed and applied *inside*
        # the tape context, which records the backward pass onto the tapes
        # and wastes memory/compute; the TF autodiff guide recommends
        # computing gradients after exiting the `with` block.
        grads_generator_loss = generator_tape.gradient(
            target=generator_loss,
            sources=model.generator.trainable_variables)
        grads_discriminator_loss = discriminator_tape.gradient(
            target=discriminator_loss,
            sources=model.discriminator.trainable_variables)

        discriminator_opt.apply_gradients(
            zip(grads_discriminator_loss,
                model.discriminator.trainable_variables))
        generator_opt.apply_gradients(
            zip(grads_generator_loss, model.generator.trainable_variables))

        return generator_loss, discriminator_loss

    ckpt = tf.train.Checkpoint(generator=model.generator,
                               discriminator=model.discriminator)

    steps_per_epoch = num_sets // batch_size
    train_steps = steps_per_epoch * num_epochs

    generator_losses = []
    discriminator_losses = []
    generator_losses_epoch = []
    discriminator_losses_epoch = []
    x_fakes = []
    for i in range(1, train_steps + 1):
        epoch = i // steps_per_epoch

        print("Epoch: %i ====> %i / %i" %
              (epoch + 1, i % steps_per_epoch, steps_per_epoch),
              end="\r")

        # Pull one batch from the infinite training stream.
        for x in train_loader.take(1):
            x_i = feature_normalize(x["image"])
            z_i = np.random.normal(size=[batch_size, latent_depth]).astype(
                np.float32)

            generator_loss_i, discriminator_loss_i = train_step(x_i, z_i)

            generator_losses.append(generator_loss_i)
            discriminator_losses.append(discriminator_loss_i)

        # End of an epoch: sample, log mean losses, checkpoint, persist.
        if i % steps_per_epoch == 0:
            x_fake = model.generator(z_i, training=False)
            x_fake = feature_denormalize(x_fake)

            generator_loss_epoch = np.mean(generator_losses[-steps_per_epoch:])
            discriminator_loss_epoch = np.mean(
                discriminator_losses[-steps_per_epoch:])

            print("Epoch: %i,  Generator Loss: %f,  Discriminator Loss: %f" % \
                (epoch, generator_loss_epoch, discriminator_loss_epoch)
            )

            generator_losses_epoch.append(generator_loss_epoch)
            discriminator_losses_epoch.append(discriminator_loss_epoch)

            x_fakes.append(x_fake)

            ckpt.save(file_prefix=model_ckpt_path)

            # Re-dump cumulative results each epoch so progress survives
            # a crash.
            with open(model_rslt_path, "wb") as f:
                pickle.dump((generator_losses_epoch,
                             discriminator_losses_epoch, x_fakes), f)