Example #1
    def __init__(self, configs):
        self.configs = configs
        self.generator_a2b = Generator(
            input_size=self.configs['input_size'],
            n_res_blocks=self.configs['residual_blocks'])
        self.generator_b2a = Generator(
            input_size=self.configs['input_size'],
            n_res_blocks=self.configs['residual_blocks'])
Example #2
    def _make_generator(self) -> K.Model:
        """
        :return: a generator model built for this instance
        """
        return Generator.build_model(out_size=self._size,
                                     out_channels=self._channels,
                                     z_dim=self._z_dim)
Example #3
    def _make_generator_get_output_shapes(out_size: int,
                                          out_channels: int) -> list:
        """
        Makes a generator and returns a list of its layers' output shapes
        :param out_size: output size
        :param out_channels: output channels
        :return: a list of tuples for each layer's output shape
        """
        return [
            layer.output_shape for layer in Generator.build_model(
                out_size=out_size, out_channels=out_channels).layers
        ]
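A minimal usage sketch of the same idea, calling Generator.build_model directly; out_size=64 and out_channels=3 are illustrative assumptions, not values from the source:

model = Generator.build_model(out_size=64, out_channels=3)
for i, layer in enumerate(model.layers):
    # Keras layers expose output_shape; e.g. the final layer might be (None, 64, 64, 3)
    print(i, layer.output_shape)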
Example #4
File: train.py Project: yrpang/mindspore
def train():
    """Train function."""
    args = get_args("train")
    if args.need_profiler:
        from mindspore.profiler.profiling import Profiler
        profiler = Profiler(output_path=args.outputs_dir,
                            is_detail=True,
                            is_show_op_path=True)
    ds = create_dataset(args)
    G_A = get_generator(args)
    G_B = get_generator(args)
    D_A = get_discriminator(args)
    D_B = get_discriminator(args)
    load_ckpt(args, G_A, G_B, D_A, D_B)
    image_pool_A = ImagePool(args.pool_size)
    image_pool_B = ImagePool(args.pool_size)
    generator = Generator(G_A, G_B, args.lambda_idt > 0)

    loss_D = DiscriminatorLoss(args, D_A, D_B)
    loss_G = GeneratorLoss(args, generator, D_A, D_B)
    optimizer_G = nn.Adam(generator.trainable_params(),
                          get_lr(args),
                          beta1=args.beta1)
    optimizer_D = nn.Adam(loss_D.trainable_params(),
                          get_lr(args),
                          beta1=args.beta1)

    net_G = TrainOneStepG(loss_G, generator, optimizer_G)
    net_D = TrainOneStepD(loss_D, optimizer_D)

    data_loader = ds.create_dict_iterator()
    reporter = Reporter(args)
    reporter.info('==========start training===============')
    for _ in range(args.max_epoch):
        reporter.epoch_start()
        for data in data_loader:
            img_A = data["image_A"]
            img_B = data["image_B"]
            res_G = net_G(img_A, img_B)
            fake_A = res_G[0]
            fake_B = res_G[1]
            res_D = net_D(img_A, img_B, image_pool_A.query(fake_A),
                          image_pool_B.query(fake_B))
            reporter.step_end(res_G, res_D)
            reporter.visualizer(img_A, img_B, fake_A, fake_B)
        reporter.epoch_end(net_G)
        if args.need_profiler:
            profiler.analyse()
            break

    reporter.info('==========end training===============')
Example #5
    def test_invalid_sizes(self):
        """
        Tests building a generator with an invalid size raises an error
        """
        min_size = list(Generator.get_valid_sizes(limit=20))[0]
        for size in range(min_size, 50):
            if Generator.prev_valid_size(size) == size:
                continue

            with self.assertRaises(GeneratorProjectionError):
                Generator.build_model(out_size=size, out_channels=1)

            with self.assertRaises(GeneratorProjectionError):
                Generator.build_model(out_size=size, out_channels=3)
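For orientation, a hedged sketch of what the size helpers exercised by this test might report; the method names come from the test itself, but the concrete numbers assume a resolution-doubling architecture:

# Hypothetical illustration only; actual valid sizes depend on the Generator design.
valid_sizes = list(Generator.get_valid_sizes(limit=100))
print(valid_sizes)                    # e.g. [4, 8, 16, 32, 64]
print(Generator.prev_valid_size(50))  # largest valid size <= 50, e.g. 32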
Example #6
def test():
    """Test Notebook API"""
    dataset = MelFromDisk(path="data/test")
    dataloader = torch.utils.data.DataLoader(dataset)
    loaders = OrderedDict({"train": dataloader})
    generator = Generator(80)
    discriminator = Discriminator()

    model = torch.nn.ModuleDict({
        "generator": generator,
        "discriminator": discriminator
    })
    optimizer = {
        "opt_g": torch.optim.Adam(generator.parameters()),
        "opt_d": torch.optim.Adam(discriminator.parameters()),
    }
    callbacks = {
        "loss_g": GeneratorLossCallback(),
        "loss_d": DiscriminatorLossCallback(),
        "o_g": dl.OptimizerCallback(metric_key="generator_loss",
                                    optimizer_key="opt_g"),
        "o_d": dl.OptimizerCallback(metric_key="discriminator_loss",
                                    optimizer_key="opt_d"),
    }
    runner = MelGANRunner()

    runner.train(
        model=model,
        loaders=loaders,
        optimizer=optimizer,
        callbacks=callbacks,
        check=True,
        main_metric="discriminator_loss",
    )
Example #7
    def __init__(self, configs):
        self.configs = configs

        wandb.init(project=self.configs['project_name'],
                   name=self.configs['experiment_name'],
                   sync_tensorboard=True)

        self.fake_pool_b2a = ImagePool(self.configs['pool_size'])
        self.fake_pool_a2b = ImagePool(self.configs['pool_size'])

        self.loss_gen_total_metrics = tf.keras.metrics.Mean(
            'loss_gen_total_metrics', dtype=tf.float32)
        self.loss_dis_total_metrics = tf.keras.metrics.Mean(
            'loss_dis_total_metrics', dtype=tf.float32)
        self.loss_cycle_a2b2a_metrics = tf.keras.metrics.Mean(
            'loss_cycle_a2b2a_metrics', dtype=tf.float32)
        self.loss_cycle_b2a2b_metrics = tf.keras.metrics.Mean(
            'loss_cycle_b2a2b_metrics', dtype=tf.float32)
        self.loss_gen_a2b_metrics = tf.keras.metrics.Mean(
            'loss_gen_a2b_metrics', dtype=tf.float32)
        self.loss_gen_b2a_metrics = tf.keras.metrics.Mean(
            'loss_gen_b2a_metrics', dtype=tf.float32)
        self.loss_dis_b_metrics = tf.keras.metrics.Mean('loss_dis_b_metrics',
                                                        dtype=tf.float32)
        self.loss_dis_a_metrics = tf.keras.metrics.Mean('loss_dis_a_metrics',
                                                        dtype=tf.float32)
        self.loss_id_b2a_metrics = tf.keras.metrics.Mean('loss_id_b2a_metrics',
                                                         dtype=tf.float32)
        self.loss_id_a2b_metrics = tf.keras.metrics.Mean('loss_id_a2b_metrics',
                                                         dtype=tf.float32)

        self.mse_loss = tf.keras.losses.MeanSquaredError()
        self.mae_loss = tf.keras.losses.MeanAbsoluteError()

        self.dataset = self.get_dataset()

        self.generator_a2b = Generator(
            input_size=self.configs['input_size'],
            n_res_blocks=self.configs['residual_blocks'])
        self.generator_b2a = Generator(
            input_size=self.configs['input_size'],
            n_res_blocks=self.configs['residual_blocks'])
        self.discriminator_a = Discriminator(
            input_size=self.configs['input_size'])
        self.discriminator_b = Discriminator(
            input_size=self.configs['input_size'])

        total_batches = count_batches(self.dataset)
        self.generator_lr_scheduler = LinearDecay(
            initial_learning_rate=self.configs['lr'],
            total_steps=self.configs['epochs'] * total_batches,
            step_decay=self.configs['decay_epochs'] * total_batches)
        self.discriminator_lr_scheduler = LinearDecay(
            initial_learning_rate=self.configs['lr'],
            total_steps=self.configs['epochs'] * total_batches,
            step_decay=self.configs['decay_epochs'] * total_batches)

        self.generator_optimizer = tf.keras.optimizers.Adam(
            self.generator_lr_scheduler, self.configs['adam_beta_1'])
        self.discriminator_optimizer = tf.keras.optimizers.Adam(
            self.discriminator_lr_scheduler, self.configs['adam_beta_1'])

        self.checkpoint, self.checkpoint_manager = self.make_checkpoints()
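A hedged sketch of the configs dictionary this constructor expects; every key below is read in the snippet above, but the values are illustrative assumptions:

configs = {
    'project_name': 'cyclegan',     # wandb project name
    'experiment_name': 'baseline',  # wandb run name
    'pool_size': 50,                # ImagePool capacity
    'input_size': 256,              # generator/discriminator input resolution
    'residual_blocks': 9,           # residual blocks per generator
    'lr': 2e-4,                     # initial learning rate for LinearDecay
    'epochs': 200,                  # total training epochs
    'decay_epochs': 100,            # epochs used for LinearDecay's step_decay
    'adam_beta_1': 0.5,             # beta_1 for both Adam optimizers
}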
Example #8
def train(
    max_int: int = 128,
    batch_size: int = 16,
    training_steps: int = 500,
    learning_rate: float = 0.001,
    print_output_every_n_steps: int = 10,
):
    """Trains the even GAN

    Args:
        max_int: The maximum integer our dataset goes to.  It is used to set the size of the binary
            lists
        batch_size: The number of examples in a training batch
        training_steps: The number of steps to train on.
        learning_rate: The learning rate for the generator and discriminator
        print_output_every_n_steps: The number of training steps before we print generated output

    Returns:
        generator: The trained generator model
        discriminator: The trained discriminator model
        history: A dict of the recorded generator and discriminator losses
    """
    input_length = int(math.log(max_int, 2))

    # Models
    generator = Generator(input_length)
    discriminator = Discriminator(input_length)

    # Optimizers
    generator_optimizer = torch.optim.Adam(generator.parameters(),
                                           lr=learning_rate)
    discriminator_optimizer = torch.optim.Adam(discriminator.parameters(),
                                               lr=learning_rate)

    # loss
    loss = nn.BCELoss()
    gen_loss = []
    dis_loss = []

    for i in range(training_steps):
        # zero the gradients on each iteration
        generator_optimizer.zero_grad()

        # Create noisy input for generator
        # Need float type instead of int
        noise = torch.randint(0, 2, size=(batch_size, input_length)).float()
        generated_data = generator(noise)

        # Generate examples of even real data
        # true labels: [1,1,1,1,1,1,....] i.e all ones
        # true data: [[0,0,0,0,1,0,0],....] i.e binary code for even numbers
        true_labels, true_data = generate_even_data(max_int,
                                                    batch_size=batch_size)
        true_labels = torch.tensor(true_labels).float()
        true_data = torch.tensor(true_data).float()

        # Train the generator
        # We invert the labels here and don't train the discriminator because we want the generator
        # to make things the discriminator classifies as true.
        # true labels: [1,1,1,1,....]
        discriminator_out_gen_data = discriminator(generated_data)
        generator_loss = loss(discriminator_out_gen_data.squeeze(),
                              true_labels)
        gen_loss.append(generator_loss.item())
        generator_loss.backward()
        generator_optimizer.step()

        # Train the discriminator
        # Teach Discriminator to distinguish true data with true label i.e [1,1,1,1,....]
        discriminator_optimizer.zero_grad()
        discriminator_out_true_data = discriminator(true_data)
        discriminator_loss_true_data = loss(
            discriminator_out_true_data.squeeze(), true_labels)

        # Detach generated_data so the discriminator update does not
        # backpropagate into the generator
        discriminator_out_fake_data = discriminator(generated_data.detach())
        fake_labels = torch.zeros(batch_size)  # [0,0,0,.....]
        discriminator_loss_fake_data = loss(
            discriminator_out_fake_data.squeeze(), fake_labels)
        # total discriminator loss
        discriminator_loss = (discriminator_loss_true_data +
                              discriminator_loss_fake_data) / 2

        dis_loss.append(discriminator_loss.item())

        discriminator_loss.backward()
        discriminator_optimizer.step()
        if i % print_output_every_n_steps == 0:
            output = convert_float_matrix_to_int_list(generated_data)
            even_count = len(list(filter(lambda x: (x % 2 == 0), output)))
            print(
                f"steps: {i}, output: {output}, "
                f"even count: {even_count}/{batch_size}, "
                f"Gen Loss: {np.round(generator_loss.item(), 4)}, "
                f"Dis Loss: {np.round(discriminator_loss.item(), 4)}"
            )

    history = {'gen_loss': gen_loss, 'dis_loss': dis_loss}

    return generator, discriminator, history
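A short usage sketch for this function; the loss plot is an illustrative addition (assuming matplotlib is available), not part of the original script:

import matplotlib.pyplot as plt

generator, discriminator, history = train(max_int=128, batch_size=16,
                                          training_steps=500)
plt.plot(history['gen_loss'], label='generator')      # keys returned by train()
plt.plot(history['dis_loss'], label='discriminator')
plt.legend()
plt.show()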
Example #9
    type=int,
    default=4,
    help='number of cpu threads to use during batch generation')
opt = parser.parse_args()
print(opt)

info = 'test'

if torch.cuda.is_available() and not opt.cuda:
    print(
        "WARNING: You have a CUDA device, so you should probably run with --cuda"
    )

###### Definition of variables ######
# Networks
netG_A2B = Generator(opt.input_nc, opt.output_nc)
netG_B2A = Generator(opt.output_nc, opt.input_nc)
netD_A = Discriminator(opt.input_nc)
netD_B = Discriminator(opt.output_nc)

if opt.cuda:
    netG_A2B.cuda()
    netG_B2A.cuda()
    netD_A.cuda()
    netD_B.cuda()

netG_A2B.apply(weights_init_normal)
netG_B2A.apply(weights_init_normal)
netD_A.apply(weights_init_normal)
netD_B.apply(weights_init_normal)
Example #10
print(len(train_dataset))
x, y = train_dataset[0]
print(x.shape, y.shape)

plt.imshow(ToPILImage()(x))
plt.show()
plt.imshow(ToPILImage()(y))
plt.show()

val_dataset = ValidationDataset(glob('./VOC2012/JPEGImages/*')[16000:17000], 4)
print(len(val_dataset))
x, y, y_res = val_dataset[0]
print(x.shape, y.shape, y_res.shape)

plt.imshow(ToPILImage()(x))
plt.show()
plt.imshow(ToPILImage()(y))
plt.show()
plt.imshow(ToPILImage()(y_res))
plt.show()

generator = Generator(scale=2)
x = torch.ones((1, 3, 44, 44))
y = generator(x)
print(x.shape, y.shape)

discriminator = Discriminator()
x = torch.ones((1, 3, 88, 88))
y = discriminator(x)
print(x.shape, y.shape)
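Note (inferred from the printed shapes, not stated in the source): with scale=2 the generator maps the (1, 3, 44, 44) input to a (1, 3, 88, 88) output, which is why the discriminator is then probed with an 88x88 tensor.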
Example #11
    def get_models(self):
        generator = Generator(self.config['scale']).to(self.device)
        discriminator = Discriminator().to(self.device)
        return generator, discriminator
Example #12
    )
    nc = 3

assert dataset
dataloader = torch.utils.data.DataLoader(
    dataset, batch_size=opt.batchSize, shuffle=True, num_workers=int(opt.workers)
)

device = torch.device("cuda:0" if opt.cuda else "cpu")
ngpu = int(opt.ngpu)
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)


generator = Generator(nz, nc, ngf, opt.imageSize, ngpu).to(device)
generator.apply(weights_init)
if opt.generator != "":
    generator.load_state_dict(torch.load(opt.generator))
print(generator)

discriminator = Discriminator(nc, ndf, opt.imageSize, ngpu).to(device)
discriminator.apply(weights_init)
if opt.discriminator != "":
    discriminator.load_state_dict(torch.load(opt.discriminator))
print(discriminator)

# setup optimizer
optimizerD = optim.Adam(
    discriminator.parameters(), lr=opt.lr_d, betas=(opt.beta1, 0.999)
)