Example #1
0
def initialize(mean, std, lr):
    """Build a 128-unit generator/discriminator pair plus their Adam
    optimizers, moving both networks to the GPU when CUDA is available.

    Returns:
        (generator, discriminator, generator_optimizer, discriminator_optimizer)
    """
    use_cuda = torch.cuda.is_available()

    # Instantiate the two networks and place them on the GPU if present.
    gen = gan.generator(128)
    disc = gan.discriminator(128)
    if use_cuda:
        gen = gen.cuda()
        disc = disc.cuda()

    # Custom weight initialisation with the caller-supplied distribution.
    gen.weight_init(mean=mean, std=std)
    disc.weight_init(mean=mean, std=std)

    # One optimizer per network; betas kept at (0.5, 0.999).
    gen_opt = optim.Adam(gen.parameters(), lr=lr, betas=(.5, .999))
    disc_opt = optim.Adam(disc.parameters(), lr=lr, betas=(.5, .999))

    return gen, disc, gen_opt, disc_opt
Example #2
0
def build_and_train():
    """Load MNIST, build the LSGAN discriminator, generator and adversarial
    models, then run the ``gan.train`` routine.
    """
    # MNIST images only; the labels are unused here.
    (x_train, _), (x_test, _) = mnist.load_data()
    image_size = x_train.shape[1]
    # Reshape to (N, H, W, 1) and scale pixel values to [0, 1].
    x_train = np.reshape(x_train, [-1, image_size, image_size, 1])
    x_train = x_train.astype('float32') / 255
    input_shape = [image_size, image_size, 1]
    latent_size = 100
    batch_size = 64
    train_steps = 40000
    lr = 2e-04
    decay = 6e-08
    optimizer = RMSprop(lr=lr, decay=decay)
    # LSGAN: discriminator has a linear output (activation=None) and is
    # trained with MSE rather than binary cross-entropy.
    inputs = Input(shape=input_shape)
    discriminator = gan.discriminator(inputs, image_size, activation=None)
    discriminator.compile(loss='mse',
                          optimizer=optimizer,
                          metrics=['accuracy'])
    discriminator.summary()

    # Generator maps a latent vector of size latent_size to an image.
    inputs = Input(shape=(latent_size, ))
    generator = gan.generator(inputs, image_size)
    generator.summary()

    # Freeze discriminator weights while training the stacked
    # generator+discriminator model (must be set before Model is built).
    discriminator.trainable = False
    adversarial = Model(inputs, discriminator(generator(inputs)))
    adversarial.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])
    adversarial.summary()
    models = (generator, discriminator, adversarial)
    model_name = 'lsgan_mnist'
    params = (batch_size, latent_size, train_steps, model_name)
    gan.train(models, x_train, params)
Example #3
0
def generator_fn(noise, reuse):
    """Build fake images with the generator selected by ``args.model``."""
    if args.model == 'dcgan':
        return gan.generator(noise, reuse)
    if args.model == 'vanilla':
        return gan.generator_vanilla(noise, reuse)
    # Unrecognised model name: warn and drop into the debugger, as the
    # original did (falling through here leaves `images` unbound).
    print('check your gan model')
    pdb.set_trace()
    return images
Example #4
0
def build_generator(latent_codes, image_size, feature_dim=256):
    """Build the two-stage generator pair.

    Args:
        latent_codes: tuple of (labels, z0, z1, feature1) input tensors.
        image_size: target image side length for the stage-0 generator.
        feature_dim: size of the fake feature vector produced by gen1.

    Returns:
        (gen0, gen1) — image generator and feature generator models.
    """
    labels, z0, z1, feature1 = latent_codes

    # Stage-1 generator: (labels, z1) -> fake feature vector.
    gen1_inputs = [labels, z1]
    hidden = concatenate(gen1_inputs, axis=1)
    for _ in range(2):
        hidden = Dense(512, activation='relu')(hidden)
        hidden = BatchNormalization()(hidden)
    fake_feature = Dense(feature_dim, activation='relu')(hidden)
    gen1 = Model(gen1_inputs, fake_feature)

    # Stage-0 generator: feature1 (conditioned on code z0) -> image.
    gen0 = gan.generator(feature1, image_size, codes=z0)
    return gen0, gen1
Example #5
0
def build_and_train_models():
    """Load MNIST, build the ACGAN discriminator, generator and adversarial
    models, then call the ACGAN ``train`` routine.
    """
    # MNIST images and labels; the test split is unused.
    (x_train, y_train), (_, _) = mnist.load_data()

    image_size = x_train.shape[1]
    # Reshape to (N, H, W, 1) and scale pixel values to [0, 1].
    x_train = np.reshape(x_train, [-1, image_size, image_size, 1])
    x_train = x_train.astype('float32') / 255

    # One-hot encode the class labels for the auxiliary classifier.
    num_labels = len(np.unique(y_train))
    y_train = to_categorical(y_train)

    model_name = "acgan_mnist"
    latent_size = 100
    batch_size = 64
    train_steps = 40000
    lr = 2e-4
    decay = 6e-8
    input_shape = (image_size, image_size, 1)
    label_shape = (num_labels, )

    # Discriminator with two heads: real/fake probability and class label.
    inputs = Input(shape=input_shape, name='discriminator_input')
    discriminator = gan.discriminator(inputs, num_labels=num_labels)
    optimizer = RMSprop(lr=lr, decay=decay)
    # One loss per head, in output order.
    loss = ['binary_crossentropy', 'categorical_crossentropy']
    discriminator.compile(loss=loss,
                          optimizer=optimizer,
                          metrics=['accuracy'])
    discriminator.summary()

    # Generator conditioned on both the latent vector and the class label.
    input_shape = (latent_size, )
    inputs = Input(shape=input_shape, name='z_input')
    labels = Input(shape=label_shape, name='labels')
    generator = gan.generator(inputs, image_size, labels=labels)
    generator.summary()

    # Adversarial model trains the generator at half the discriminator's
    # learning rate, with the discriminator weights frozen.
    optimizer = RMSprop(lr=lr*0.5, decay=decay*0.5)
    discriminator.trainable = False
    adversarial = Model([inputs, labels],
                        discriminator(generator([inputs, labels])),
                        name=model_name)
    
    adversarial.compile(loss=loss,
                        optimizer=optimizer,
                        metrics=['accuracy'])
    adversarial.summary()

    models = (generator, discriminator, adversarial)
    data = (x_train, y_train)
    params = (batch_size, latent_size, train_steps, num_labels, model_name)
    train(models, data, params)
Example #6
0
def build_and_train_models():
    """Load MNIST, build the WGAN critic (discriminator), generator and
    adversarial models, then call the WGAN ``train`` routine.
    """
    # MNIST images only; labels are unused.
    (x_train, _), (_, _) = mnist.load_data()

    image_size = x_train.shape[1]
    # Reshape to (N, H, W, 1) and scale pixel values to [0, 1].
    x_train = np.reshape(x_train, [-1, image_size, image_size, 1])
    x_train = x_train.astype('float32') / 255

    model_name = "wgan_mnist"
    latent_size = 100
    # WGAN-specific hyperparameters: critic updates per generator update
    # and the weight-clipping bound.
    n_critic = 5
    clip_value = 0.01
    batch_size = 64
    lr = 5e-5
    train_steps = 20000
    input_shape = (image_size, image_size, 1)

    # Critic: linear output trained with the Wasserstein loss.
    inputs = Input(shape=input_shape, name='discriminator_input')
    discriminator = gan.discriminator(inputs, activation='linear')
    optimizer = RMSprop(lr=lr)
    discriminator.compile(loss=wasserstein_loss,
                          optimizer=optimizer,
                          metrics=['accuracy'])
    discriminator.summary()

    input_shape = (latent_size, )
    inputs = Input(shape=input_shape, name='z_input')
    generator = gan.generator(inputs, image_size)
    generator.summary()

    # Freeze the critic while training the generator through the stacked
    # model (must be set before Model is built).
    discriminator.trainable = False
    adversarial = Model(inputs,
                        discriminator(generator(inputs)),
                        name=model_name)
    adversarial.compile(loss=wasserstein_loss,
                        optimizer=optimizer,
                        metrics=['accuracy'])
    adversarial.summary()

    models = (generator, discriminator, adversarial)
    params = (batch_size,
              latent_size,
              n_critic,
              clip_value,
              train_steps,
              model_name)
    train(models, x_train, params)
Example #7
0
def build_and_train(latent_size=100):
    """Load MNIST, build the InfoGAN discriminator, generator and
    adversarial models, then call the ``train`` routine.

    Args:
        latent_size: dimension of the z noise vector fed to the generator.
    """
    batch_size = 64
    model_name = 'infogan_mnist'
    train_steps = 40000
    lr = 2e-4
    decay = 6e-8
    # MNIST images and labels; the test split is unused.
    (x_train, y_train), (_, _) = mnist.load_data()
    image_size = x_train.shape[1]
    # Reshape to (N, H, W, 1) and scale pixel values to [0, 1].
    x_train = np.reshape(x_train, [-1, image_size, image_size, 1])
    x_train = x_train.astype('float32') / 255
    input_shape = [image_size, image_size, 1]
    num_labels = len(np.unique(y_train))
    y_train = to_categorical(y_train)
    # Each of the two continuous latent codes is a single scalar.
    codes_shape = (1,)

    # Discriminator with four heads: real/fake probability, class label,
    # and the two latent codes (trained with the mutual-information loss).
    inputs = Input(shape=input_shape)
    discriminator = gan.discriminator(
        inputs, image_size, num_labels=num_labels, num_codes=2)
    loss = ['binary_crossentropy', 'categorical_crossentropy', mi_loss, mi_loss]
    optimizer = RMSprop(lr=lr, decay=decay)
    # The MI losses are down-weighted to 0.5 each.
    loss_weights = [1.0, 1.0, 0.5, 0.5]
    discriminator.compile(loss=loss, loss_weights=loss_weights,
                          optimizer=optimizer, metrics=['accuracy'])
    discriminator.summary()

    # Generator conditioned on the latent vector, the class label, and the
    # two continuous codes.
    inputs = Input(shape=(latent_size,))
    labels = Input(shape=(num_labels,))
    codes1 = Input(shape=codes_shape)
    codes2 = Input(shape=codes_shape)
    generator = gan.generator(
        inputs, image_size, labels=labels, codes=[codes1, codes2])
    generator.summary()

    # Adversarial model trains the generator at half the discriminator's
    # learning rate, with the discriminator weights frozen.
    discriminator.trainable = False
    optimizer = RMSprop(lr=lr*0.5, decay=decay*0.5)
    inputs = [inputs, labels, codes1, codes2]
    adversarial = Model(inputs, discriminator(generator(inputs)))
    adversarial.compile(loss=loss, loss_weights=loss_weights,
                        optimizer=optimizer, metrics=['accuracy'])
    adversarial.summary()

    models = (generator, discriminator, adversarial)
    data = (x_train, y_train)
    params = (batch_size, latent_size, train_steps, num_labels, model_name)
    # Fix: the arguments were previously passed as (models, params, data),
    # which is inconsistent with every other train(models, data, params)
    # call site in this file.
    train(models, data, params)
Example #8
0
def build_and_train_models():
    """Load MNIST, build the LSGAN discriminator, generator and adversarial
    models, then call the ``gan.train`` routine.
    """
    # MNIST images only; labels are unused.
    (x_train, _), (_, _) = mnist.load_data()

    image_size = x_train.shape[1]
    # Reshape to (N, H, W, 1) and scale pixel values to [0, 1].
    x_train = np.reshape(x_train, [-1, image_size, image_size, 1])
    x_train = x_train.astype('float32') / 255

    model_name = "lsgan_mnist"
    latent_size = 100
    input_shape = (image_size, image_size, 1)
    batch_size = 64
    lr = 2e-4
    decay = 6e-8
    train_steps = 20000

    # LSGAN: discriminator has a linear output (activation=None) and is
    # trained with MSE rather than binary cross-entropy.
    inputs = Input(shape=input_shape, name='discriminator_input')
    discriminator = gan.discriminator(inputs, activation=None)
    optimizer = RMSprop(lr=lr, decay=decay)
    discriminator.compile(loss='mse',
                          optimizer=optimizer,
                          metrics=['accuracy'])
    discriminator.summary()

    input_shape = (latent_size, )
    inputs = Input(shape=input_shape, name='z_input')
    generator = gan.generator(inputs, image_size)
    generator.summary()

    # Adversarial model trains the generator at half the discriminator's
    # learning rate, with the discriminator weights frozen.
    optimizer = RMSprop(lr=lr*0.5, decay=decay*0.5)
    discriminator.trainable = False
    adversarial = Model(inputs,
                        discriminator(generator(inputs)),
                        name=model_name)
    adversarial.compile(loss='mse',
                        optimizer=optimizer,
                        metrics=['accuracy'])
    adversarial.summary()

    models = (generator, discriminator, adversarial)
    params = (batch_size, latent_size, train_steps, model_name)
    gan.train(models, x_train, params)
Example #9
0
def train_generator(batch_size, generator, discriminator, loss_fun,
                    optimizer=None):
    '''
        Train the generator on one batch using the discriminator.
        Generate a batch of fake pictures, classify them with
        the discriminator and calculate loss based on how sure the
        discriminator was that the fake data was real.

        Args:
            batch_size: number of fake samples to generate.
            generator: module with an ``input_size`` attribute; maps noise
                of shape (batch_size, input_size) to fake samples.
            discriminator: module scoring samples as real (1) / fake (0).
            loss_fun: criterion comparing predictions to targets (e.g. BCE).
            optimizer: optional optimizer over ``generator.parameters()``.
                If None, a fresh Adam(lr=0.001) is created per call — note
                this discards optimizer state (Adam moments) between
                batches, so callers looping over many batches should pass
                a persistent optimizer.

        Returns:
            The scalar generator loss for this batch as a Python float.
    '''
    # We want the discriminator to think the fake data is real.
    target_fake_labels = torch.ones(batch_size, 1)
    if optimizer is None:
        # Backward-compatible default; see the docstring caveat above.
        optimizer = torch.optim.Adam(generator.parameters(), lr=0.001)
    # Generator's noise input.
    random_seed = torch.randn(batch_size, generator.input_size)
    optimizer.zero_grad()
    # Generate a batch of fake data and see what the discriminator thinks.
    fake_data = generator(random_seed)
    predictions = discriminator(fake_data)
    # Loss is low when the discriminator called the fakes "real".
    generator_loss = loss_fun(predictions, target_fake_labels)
    generator_loss.backward()
    optimizer.step()
    return generator_loss.item()
Example #10
0
def load_model(model_type, dict_file):
    """Load a checkpoint from ``dict_file`` and return the matching model.

    Checkpoint keys saved from a DataParallel wrapper ('module.' prefix)
    are stripped, and 'fc*' keys are remapped under 'decoder.'.

    Args:
        model_type: one of 'g' (generator), 'd' (capsule Net),
            'b_g' (base generator), 'b_d' (base discriminator).
        dict_file: path to the saved state dict.

    Returns:
        The instantiated model with weights loaded (mapped to CPU storage).

    Raises:
        ValueError: if ``model_type`` is not one of the supported values.
    """
    # Map all storages to CPU regardless of where the checkpoint was saved.
    state_dict = torch.load(dict_file,
                            map_location=lambda storage, loc: storage)

    # Normalise checkpoint keys before loading.
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k
        if name.startswith('module.'):
            name = name[len('module.'):]  # remove DataParallel prefix
        if name.startswith('fc'):
            name = 'decoder.' + name
        new_state_dict[name] = v

    if model_type == 'g':
        model = generator(128)
    elif model_type == 'd':
        model = Net(num_conv_in_channel=args.num_conv_in_channel,
                    num_conv_out_channel=args.num_conv_out_channel,
                    num_primary_unit=args.num_primary_unit,
                    primary_unit_size=args.primary_unit_size,
                    num_classes=args.num_classes,
                    output_unit_size=args.output_unit_size,
                    num_routing=args.num_routing,
                    use_reconstruction_loss=args.use_reconstruction_loss,
                    regularization_scale=args.regularization_scale,
                    input_width=args.input_width,
                    input_height=args.input_height,
                    cuda_enabled=args.cuda)
    elif model_type == 'b_g':
        model = base_generator(128)
    elif model_type == 'b_d':
        model = base_discriminator(128)
    else:
        # Previously fell through with model=None and crashed later with an
        # opaque AttributeError; fail fast with a clear message instead.
        raise ValueError('unknown model_type: %r' % (model_type,))

    model.load_state_dict(new_state_dict)
    return model
Example #11
0
def build_and_train_models():
    """Load the AAGM image dataset from pickle files, build the ACGAN
    discriminator, generator, and adversarial models, and call the
    ACGAN train routine.
    """
    # Dataset / preprocessing parameters. Only 'dir', 'Max_A_Size',
    # 'Max_B_Size' and 'mutual_info' are read below; the rest describe the
    # preprocessing pipeline that produced the pickles.
    param = {
        "Max_A_Size": 10,
        "Max_B_Size": 10,
        "Dynamic_Size": False,
        'Metod': 'tSNE',
        "ValidRatio": 0.1,
        "seed": 180,
        "dir": "dataset/AAGM/",
        "Mode": "CNN2",  # Mode : CNN_Nature, CNN2
        "LoadFromJson": False,
        "mutual_info": True,  # Mean or MI
        "hyper_opt_evals": 20,
        "epoch": 150,
        "No_0_MI": False,  # True -> Removing 0 MI Features
        "autoencoder": False,
        "cut": None
    }

    images = {}
    # The feature-selection method is encoded in the training file's name.
    if param['mutual_info']:
        method = 'MI'
    else:
        method = 'Mean'
    train_path = (param["dir"] + 'train_' + str(param['Max_A_Size']) + 'x' +
                  str(param['Max_B_Size']) + '_' + method + '.pickle')
    # Use context managers so the files are closed even if pickle.load
    # raises (the original open/close pairs leaked on error).
    with open(train_path, 'rb') as f_myfile:
        images["Xtrain"] = pickle.load(f_myfile)

    with open(param["dir"] + 'YTrain.pickle', 'rb') as f_myfile:
        images["Classification"] = pickle.load(f_myfile)
    (x_train, y_train) = np.asarray(images["Xtrain"]), np.asarray(
        images["Classification"])
    print(type(x_train))

    # reshape data for CNN as (size, size, 1) and normalize to [0, 1]
    image_size = x_train.shape[1]

    x_train = np.reshape(x_train, [-1, image_size, image_size, 1])
    x_train = x_train.astype('float32') / 255

    # one-hot encode the train labels
    num_labels = len(np.unique(y_train))
    y_train = to_categorical(y_train)

    model_name = "acgan_aagm"
    # network parameters
    latent_size = 100
    batch_size = 64
    train_steps = 40000
    lr = 2e-4
    decay = 6e-8
    input_shape = (image_size, image_size, 1)
    label_shape = (num_labels, )

    # build discriminator Model
    inputs = Input(shape=input_shape, name='discriminator_input')
    # call discriminator builder
    # with 2 outputs, pred source and labels
    discriminator = gan.discriminator(inputs, num_labels=num_labels)
    # [1] uses Adam, but discriminator
    # easily converges with RMSprop
    optimizer = RMSprop(lr=lr, decay=decay)
    # 2 loss functions: 1) probability image is real
    # 2) class label of the image
    loss = ['binary_crossentropy', 'categorical_crossentropy']
    discriminator.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
    discriminator.summary()

    # build generator model
    input_shape = (latent_size, )
    inputs = Input(shape=input_shape, name='z_input')
    labels = Input(shape=label_shape, name='labels')
    # call generator builder with input labels
    generator = gan.generator(inputs, image_size, labels=labels)
    generator.summary()

    # build adversarial model = generator + discriminator,
    # trained at half the discriminator learning rate
    optimizer = RMSprop(lr=lr * 0.5, decay=decay * 0.5)
    # freeze the weights of discriminator
    # during adversarial training
    discriminator.trainable = False
    adversarial = Model([inputs, labels],
                        discriminator(generator([inputs, labels])),
                        name=model_name)
    # same 2 loss functions: 1) probability image is real
    # 2) class label of the image
    adversarial.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])

    # train discriminator and adversarial networks
    models = (generator, discriminator, adversarial)
    data = (x_train, y_train)
    params = (batch_size, latent_size,
              train_steps, num_labels, model_name)

    # Diagnostics before the (long) training run starts.
    print(num_labels)
    print(x_train.shape)
    print(image_size)
    train(models, data, params)
Example #12
0
        if len(training_dataloader) == 0:
            print('Where\'s your data you dweeb')
            exit()
        dis_acc_av = float(dis_acc) / len(training_dataloader)
        gen_loss_av = float(gen_loss) / len(training_dataloader)
        print('Average discriminator accuracy {} and generator loss {}'.format(
            dis_acc_av, gen_loss_av))
    return training_dataset


def test_generator(gen, real_data_shape):
    """Sample a 4x4 grid of images from the trained generator and show it."""
    rows, cols = 4, 4
    figure = plt.figure(figsize=(8, 8))
    for cell in range(1, rows * cols + 1):
        # Draw one image from the generator.
        noise = torch.randn(1, gen.input_size)
        picture = gen(noise)
        # Reshape the flat output back to (height, width) for display.
        picture = picture.view(real_data_shape[1], real_data_shape[2])
        figure.add_subplot(rows, cols, cell)
        plt.imshow(picture.detach().numpy())
    plt.show()


# Build the GAN pair, train it, then visualise samples from the
# trained generator.
gen = generator()
dis = discriminator()
real_image_dataset = train_gan(gen, dis)
# generate example images
test_generator(gen, real_image_dataset.real_data_shape)
Example #13
0
def generator_fn(noise, reuse):
  """Return generator images for the given noise batch."""
  return gan.generator(noise, reuse)
Example #14
0
                              batch_size=2,
                              shuffle=True,
                              pin_memory=True)
# Validation split loader: one image per batch, unshuffled.
valloader = data.DataLoader(VOCDataSet(
    "./",
    split='val',
    img_transform=val_transform,
    label_transform=target_transform,
    image_label_transform=img_label_transform),
                            batch_size=1,
                            shuffle=False,
                            pin_memory=True)

# Learning-rate schedule over the full training run.
schedule = Scheduler(lr=1e-4, total_epoches=4000)
# Multi-GPU discriminator and generator.
D = torch.nn.DataParallel(discriminator(n_filters=32)).cuda()
G = torch.nn.DataParallel(generator(n_filters=32)).cuda()
# Weight of the adversarial term relative to the segmentation loss.
gan_loss_percent = 0.03

# Constant scalars (+1, -1, -gan_loss_percent) — presumably used as
# gradient arguments to backward() in the WGAN style; confirm against
# the training loop below.
one = torch.FloatTensor([1])
mone = one * -1
moneg = one * -1 * gan_loss_percent

one = one.cuda()
mone = mone.cuda()
moneg = moneg.cuda()

loss_func = BCE_Loss()
optimizer_D = Adam(D.parameters(), lr=1e-4, betas=(0.5, 0.9), eps=10e-8)
optimizer_G = Adam(G.parameters(), lr=1e-4, betas=(0.5, 0.9), eps=10e-8)

for epoch in range(schedule.get_total_epoches()):