Example no. 1
0
def create_model(args):
    """Build the GAN model pair from parsed CLI arguments.

    Returns a ``(generator, discriminator)`` tuple: the generator is
    conditioned on ``args.local_condition_dim`` and draws noise of size
    ``args.z_dim``; the multi-window discriminator shares the same
    local-condition dimensionality.
    """
    return (
        Generator(args.local_condition_dim, args.z_dim),
        Multiple_Random_Window_Discriminators(args.local_condition_dim),
    )
Example no. 2
0
def get_models(config):
    """Construct a discriminator/generator pair on the configured device.

    Both networks are weight-initialised via ``initialise_weights``.
    Returns ``(generator, discriminator)``.
    """
    device = config['device']

    # Construction order (disc first) and init order (gen first) are kept
    # exactly as before so any seeded RNG draws stay reproducible.
    disc = Discriminator(
        config['channels'],
        config['load_size'],
        config['num_classes'],
        64,
    ).to(device)
    gen = Generator(
        config['z_dim'],
        config['channels'],
        config['load_size'],
        config['num_classes'],
        config['embed_size'],
    ).to(device)

    initialise_weights(gen)
    initialise_weights(disc)
    return gen, disc
Example no. 3
0
    def __init__(self, config: GANConfig, num_workers: int, improved=False):
        """Set up the GAN module.

        Args:
            config: bundled generator/discriminator configurations.
            num_workers: dataloader worker count, stored for later use.
            improved: when True, use the spectral-norm (SN_*) variants of
                the generator and discriminator.
        """
        super().__init__()
        self.save_hyperparameters()
        self.config = config
        self.num_workers = num_workers
        self.improved = improved

        # Pick the network classes once, then build both from their dataclass
        # configs expanded to keyword arguments.
        if self.improved:
            gen_cls, disc_cls = SN_Generator, SN_Discriminator
        else:
            gen_cls, disc_cls = Generator, Discriminator
        self.generator = gen_cls(**asdict(self.config.generator_config))
        self.discriminator = disc_cls(**asdict(self.config.discriminator_config))
import torch
from models.modules import Generator
import os
import torchvision.transforms as transforms
from PIL import Image

config = {
    'channels': 3,
    'device': 'cuda' if torch.cuda.is_available() else 'cpu',
    'load_state': 'latest',
    'checkpoint_path': 'checkpoints',
    'test_image': 'datasets/testA/0.jpg'
}

G = Generator(config['channels']).to(config['device'])
# map_location: without it, a checkpoint saved on a GPU machine fails to
# load on a CPU-only host.
G.load_state_dict(
    torch.load(
        os.path.join(config['checkpoint_path'],
                     f'G_{config["load_state"]}.pth'),
        map_location=config['device']))
# Inference mode: disables dropout and uses running batch-norm statistics.
G.eval()

to_tensor = transforms.Compose([transforms.ToTensor()])

# Force 3 channels to match 'channels': 3 above — a grayscale or palette
# image would otherwise produce a tensor with the wrong channel count.
im = Image.open(config['test_image']).convert('RGB')
im_tensor = to_tensor(im)
im_tensor = im_tensor.unsqueeze(0)  # add batch dimension: (1, C, H, W)
Example no. 5
0
    'img_dim': 28,
    'device': 'cuda' if torch.cuda.is_available() else 'cpu'
}

print("CUDA =", torch.cuda.is_available())

# ToTensor scales pixels to [0, 1]; Normalize then maps them to [-1, 1].
pipeline = [
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize((0.5, ), (0.5, )),
]
transforms = torchvision.transforms.Compose(pipeline)

train_data = get_dataset(config, transforms=transforms)

gen = Generator(
    config['z_dim'],
    config['channels'],
    img_dim=config['img_dim'],
    embed_size=config['embed_size'],
    num_classes=config['num_classes'],
).to(config['device'])
disc = Discriminator(
    config['channels'],
    num_classes=config['num_classes'],
    img_size=config['img_dim'],
).to(config['device'])

loss_function = torch.nn.BCELoss()

# Both optimisers share the same learning rate and beta schedule.
optimiser_G = torch.optim.Adam(
    gen.parameters(), lr=config['lr'], betas=(0.0, 0.9))
optimiser_D = torch.optim.Adam(
    disc.parameters(), lr=config['lr'], betas=(0.0, 0.9))
Example no. 6
0
    def _create_model(self):
        """Build the TF1 training graph for the conditional generator.

        Reads hyper-parameters from ``self._args`` and the input pipeline
        from ``self._batch``; stores EMA-smoothed scalar losses in
        ``self._losses``, the generator output in ``self._outputs`` and the
        combined train op in ``self._train``.
        """

        args = self._args
        batch = self._batch
        eps = 1e-12  # NOTE(review): unused in this method — confirm before removing

        # paths: input file names; x: conditioning input; y: ground-truth target.
        paths, x, y = batch['paths'], batch['inputs'], batch['targets']
        """
            ======================================
            Todo:
                Add your model structure code here
            ======================================
        """

        with tf.variable_scope("generator"):
            # Generator produces the same number of channels as the target y.
            out_channels = int(y.get_shape()[-1])
            G = Generator(out_channels, args.ngf)
            outputs = G(x)
        """
            ======================================
            Todo:
                Add your losses here
            ======================================
        """

        with tf.variable_scope('losses'):
            l1_loss = L1(y, outputs)
            l2_loss = L2(y, outputs)

            # NOTE(review): the sigmoid tensors below are computed but never
            # used (only the commented-out experiment referenced them).
            y_sigmoid = tf.nn.sigmoid(y)
            outputs_sigmoid = tf.nn.sigmoid(outputs)

            # Rescale from an assumed [-1, 1] output range to [0, 1] so the
            # binary cross-entropy inputs are valid probabilities —
            # TODO confirm the generator's output activation is tanh.
            y_norm = (y + 1) / 2
            outputs_norm = (outputs + 1) / 2

            cross_entropy = bin_cross_entropy(y_norm, outputs_norm)

            #cross_entropy = tf.reduce_min(outputs_sigmoid)

        # Weighted total: L1 dominates (x100), L2 unweighted, cross-entropy x20.
        #loss = l1_loss * 100 + l2_loss + cross_entropy * 200
        loss = l1_loss * 100 + l2_loss + cross_entropy * 20
        """
            ======================================
            Todo:
                Add your optimizer & trainer here
            ======================================
        """

        with tf.variable_scope('trainer'):
            trainer = tf.train.AdamOptimizer(args.lr,
                                             args.beta1).minimize(loss)
        """
           ===================================
             save to self._ member variables
           ===================================
        """
        # Exponential moving average smooths the reported loss curves.
        ema = tf.train.ExponentialMovingAverage(decay=0.99)  # MovingAverage
        update_losses = ema.apply([l1_loss, l2_loss, cross_entropy])

        global_step = tf.train.get_or_create_global_step()
        incr_global_step = tf.assign(global_step, global_step + 1)

        # Expose the smoothed (EMA) values, not the raw per-step losses.
        self._losses['L1_loss'] = ema.average(l1_loss)
        self._losses['L2_loss'] = ema.average(l2_loss)
        self._losses['cross_entropy'] = ema.average(cross_entropy)
        # self._losses['cross_entropy'] = cross_entropy

        self._outputs['outputs'] = outputs
        # One run of self._train performs: EMA update, global-step increment,
        # and one optimiser step.
        self._train = tf.group(update_losses, incr_global_step, trainer)
Example no. 7
0
def create_model(args):
    """Build and return the generator only (no discriminator)."""
    return Generator(args.local_condition_dim, args.z_dim)