# Example #1
            r_loss = vae_criterion(
                outputs, inputs, size_average=False) / inputs.size(0)
        except RuntimeError as e:
            logger.error('Runtime error. This could possibly be due to using '
                         'the wrong encoder / decoder for this dataset. '
                         'If you are using MNIST, for example, use the '
                         'arguments `--encoder_type mnist --decoder_type '
                         'mnist`')
            raise e

        kl = (0.5 * (vae.std**2 + vae.mu**2 - 2. * torch.log(vae.std) -
                     1.).sum(1).mean())

        msssim = ms_ssim(inputs, outputs)

        self.losses.vae = (r_loss + beta_kld * kl)
        self.results.update(KL_divergence=kl.item(), ms_ssim=msssim.item())

    def visualize(self, inputs, targets, Z):
        """Visualize reconstructions, inputs, latent scatter, and decoder samples."""
        model = self.nets.vae
        reconstructions = model(inputs)

        # Show reconstructions next to the original images.
        self.add_image(reconstructions, name='reconstruction')
        self.add_image(inputs, name='ground truth')
        # Scatter the posterior means, colored by target label.
        self.add_scatter(model.mu.data, labels=targets.data, name='latent values')
        self.decoder.visualize(Z)


# Register the VAE plugin.
register_plugin(VAE)
# Example #2
        """

        for _ in range(discriminator_updates):
            self.data.next()
            inputs, Z = self.inputs('inputs', 'Z')
            generated = self.generator.generate(Z)
            self.discriminator.routine(inputs, generated.detach())
            self.optimizer_step()
            self.penalty.train_step()

        for _ in range(generator_updates):
            self.generator.train_step()

    def eval_step(self):
        """Run one evaluation pass over the discriminator, penalty, and generator routines."""
        self.data.next()

        inputs, Z = self.inputs('inputs', 'Z')
        samples = self.generator.generate(Z)
        self.discriminator.routine(inputs, samples)
        self.penalty.routine(auto_input=True)
        self.generator.routine(auto_input=True)

    def visualize(self, images, Z):
        """Visualize real images alongside generator samples."""
        self.add_image(images, name='ground truth')
        samples = self.generator.generate(Z)
        self.discriminator.visualize(images, samples)
        self.generator.visualize(Z)


# Register the GAN plugin.
register_plugin(GAN)
# Example #3
            inferred = self.bidirectional_model.encoder.encode(inputs)

            self.discriminator.routine(
                inputs, generated.detach(), inferred.detach(), Z)
            self.optimizer_step()

            self.penalty.routine((inputs, inferred))
            self.optimizer_step()

        self.bidirectional_model.train_step()

    def eval_step(self):
        """Evaluate the joint discriminator and the bidirectional model on one batch."""
        self.data.next()
        inputs, Z = self.inputs('inputs', 'Z')

        # Decode prior samples and encode real inputs for the joint discriminator.
        decoded = self.bidirectional_model.decoder.decode(Z)
        encoded = self.bidirectional_model.encoder.encode(inputs)

        self.discriminator.routine(inputs, decoded, encoded, Z)
        self.bidirectional_model.eval_step()

    def visualize(self, inputs, Z, targets):
        """Visualize the bidirectional model and the joint discriminator."""
        decoded = self.bidirectional_model.decoder.decode(Z)
        encoded = self.bidirectional_model.encoder.encode(inputs)

        self.bidirectional_model.visualize()
        self.discriminator.visualize(inputs, decoded, encoded, Z, targets)


# Register the ALI plugin.
register_plugin(ALI)
# Example #4
              classifier_type='convnet',
              classifier_args=dict(dropout=0.2),
              Encoder=None):
        '''Builds a simple image classifier.

        Args:
            classifier_type (str): Network type for the classifier.
            classifier_args: Classifier arguments. Can include dropout,
            batch_norm, layer_norm, etc.

        '''

        classifier_args = classifier_args or {}

        shape = self.get_dims('x', 'y', 'c')
        dim_a = self.get_dims('attributes')

        Encoder_, args = update_encoder_args(shape,
                                             model_type=classifier_type,
                                             encoder_args=classifier_args)
        Encoder = Encoder or Encoder_

        args.update(**classifier_args)

        classifier = Encoder(shape, dim_out=dim_a, **args)
        self.nets.classifier = classifier


# Register both classification plugins.
register_plugin(ImageClassification)
register_plugin(ImageAttributeClassification)
# Example #5
    def build(self, dim_z=64):
        """Build encoder/decoder networks and wire them into a single AE network.

        Args:
            dim_z (int): Dimensionality of the latent code.

        """
        self.encoder.build(dim_out=dim_z)
        self.decoder.build(dim_in=dim_z)

        # Wrap the two freshly-built networks into one autoencoder module.
        self.nets.ae = AENetwork(self.nets.encoder, self.nets.decoder)

    def routine(self, inputs, targets, ae_criterion=F.mse_loss):
        '''Compute the autoencoder reconstruction loss.

        Args:
            ae_criterion: Criterion for the autoencoder. Must follow the
                torch.nn.functional API (accept a ``reduction`` kwarg).

        '''
        ae = self.nets.ae
        outputs = ae(inputs)
        # `size_average=False` is deprecated in torch; `reduction='sum'` is the
        # modern equivalent (sum over all elements, then normalize by batch).
        r_loss = ae_criterion(
            outputs, inputs, reduction='sum') / inputs.size(0)
        self.losses.ae = r_loss

    def visualize(self, inputs, targets):
        """Show autoencoder reconstructions against the ground-truth inputs."""
        reconstructions = self.nets.ae(inputs)
        self.add_image(reconstructions, name='reconstruction')
        self.add_image(inputs, name='ground truth')


# Register the Autoencoder plugin.
register_plugin(Autoencoder)
# Example #6
        """

        for _ in range(discriminator_updates):
            self.data.next()
            inputs, Z = self.inputs('inputs', 'Z')
            generated = self.generator.generate(Z)
            self.discriminator.routine(inputs, generated.detach())
            self.optimizer_step()
            self.penalty.train_step()

        for _ in range(generator_updates):
            self.generator.train_step()

    def eval_step(self):
        """Evaluation pass: score the discriminator, penalty, and generator."""
        self.data.next()

        inputs, Z = self.inputs('inputs', 'Z')
        fake = self.generator.generate(Z)
        self.discriminator.routine(inputs, fake)
        self.penalty.routine(auto_input=True)
        self.generator.routine(auto_input=True)

    def visualize(self, images, Z):
        """Plot ground-truth images plus generator and discriminator visualizations."""
        self.add_image(images, name='ground truth')
        fake = self.generator.generate(Z)
        self.discriminator.visualize(images, fake)
        self.generator.visualize(Z)


# Register the GAN plugin.
register_plugin(GAN)
            try:
                uniques.remove(-1)
            except ValueError:
                pass
            dim_l = len(uniques)
        else:
            if len(train_set.train_data.shape) == 4:
                dim_c, dim_x, dim_y = train_set[0][0].size()
            else:
                dim_x, dim_y = train_set[0][0].size()
                dim_c = 1

            labels = train_set.train_labels
            if not isinstance(labels, list):
                labels = labels.numpy()
            dim_l = len(np.unique(labels))

        dims = dict(x=dim_x, y=dim_y, c=dim_c, labels=dim_l)
        input_names = ['images', 'targets', 'index']

        self.add_dataset('train', train_set)
        self.add_dataset('test', test_set)
        self.set_input_names(input_names)
        self.set_dims(**dims)

        if scale is not None:
            self.set_scale(scale)


# Register the torchvision dataset plugin.
register_plugin(TorchvisionDatasetPlugin)
# Example #8
        train=dict(epochs=200, save_on_best='losses.classifier'))

    def build(self, classifier_type='convnet',
              classifier_args=dict(dropout=0.2), Encoder=None):
        '''Builds a simple image classifier.

        Args:
            classifier_type (str): Network type for the classifier.
            classifier_args: Classifier arguments. Can include dropout,
            batch_norm, layer_norm, etc.

        '''
        # Copy before use: the default is a shared mutable dict (Python
        # mutable-default pitfall), so downstream code must never be able to
        # mutate the default object across calls. Falsy values (None, {})
        # still collapse to {} as before.
        classifier_args = dict(classifier_args) if classifier_args else {}

        shape = self.get_dims('x', 'y', 'c')
        dim_a = self.get_dims('attributes')

        Encoder_, args = update_encoder_args(
            shape, model_type=classifier_type, encoder_args=classifier_args)
        # A caller-supplied Encoder class takes precedence over the inferred one.
        Encoder = Encoder or Encoder_

        args.update(**classifier_args)

        classifier = Encoder(shape, dim_out=dim_a, **args)
        self.nets.classifier = classifier


# Register both classification plugins.
register_plugin(ImageClassification)
register_plugin(ImageAttributeClassification)
                            transform=transform,
                            download=True)
        test_set = Dataset(data_path,
                           train=False,
                           transform=test_transform,
                           download=True)
        return train_set, test_set

    def _handle_ImageFolder(self,
                            Dataset,
                            data_path,
                            transform=None,
                            test_transform=None,
                            **kwargs):
        train_set = Dataset(f'{data_path}/train', transform=transform)
        if 'UNL' in train_set.classes:
            train_set.targets = np.array(train_set.targets)
            train_set.targets[train_set.targets ==
                              train_set.class_to_idx['UNL']] = -1
            train_set.class_to_idx['UNL'] = -1
        val_set = Dataset(f'{data_path}/val', transform=test_transform)
        test_set = Dataset(f'{data_path}/test', transform=test_transform)
        return train_set, val_set, test_set


# Removing all registered DatasetPlugins
# Clear every previously registered dataset plugin so only SSL remains.
_PLUGINS.clear()
register_plugin(SSLDatasetPlugin)
# Patch DataHandler with the iterator helpers defined elsewhere in this module.
DataHandler.make_iterator = make_iterator
DataHandler.__next__ = __next__
# Example #10
        self.add_dataset('test', test_set)
        self.set_input_names(input_names)
        self.set_dims(**dims)

        self.set_scale((-1, 1))

    def make_split(self, data_path, split, Dataset, train_transform,
                   test_transform):
        """Create train/test datasets for a given split index.

        NOTE(review): the test set uses ``split - 1`` and no ``download`` flag —
        presumably the train call has already fetched the data; confirm the
        split offset against the Dataset's split semantics.
        """
        train_set = Dataset(root=data_path, download=True, split=split,
                            transform=train_transform)
        test_set = Dataset(root=data_path, split=split - 1,
                           transform=test_transform)
        return train_set, test_set


# Register the CelebA dataset plugin.
register_plugin(CelebAPlugin)


class CelebA(torchvision.datasets.ImageFolder):

    url = ('https://www.dropbox.com/sh/8oqt9vytwxb3s4r/'
           'AADIKlz8PR9zr6Y20qbkunrba/Img/img_align_celeba.zip?dl=1')
    attr_url = ('https://www.dropbox.com/s/auexdy98c6g7y25/'
                'list_attr_celeba.zip?dl=1')
    filename = 'img_align_celeba.zip'
    attr_filename = 'list_attr_celeba.zip'

    def __init__(
            self,
            root,
            transform=None,
# Example #11
        if source in ('SVHN', 'STL10'):
            dim_c, dim_x, dim_y = train_set[0][0].size()
            uniques = np.unique(train_set.labels).tolist()
            try:
                uniques.remove(-1)
            except ValueError:
                pass
            dim_l = len(uniques)
        else:
            dim_c, dim_x, dim_y = train_set[0][0].size()

            labels = train_set.train_labels
            if not isinstance(labels, list):
                labels = labels.numpy()
            dim_l = len(np.unique(labels))

        dims = dict(x=dim_x, y=dim_y, c=dim_c, labels=dim_l)
        input_names = ['images', 'targets', 'index']

        self.add_dataset('train', train_set)
        self.add_dataset('test', test_set)
        self.set_input_names(input_names)
        self.set_dims(**dims)

        if scale is not None:
            self.set_scale(scale)


# Register the torchvision dataset plugin.
register_plugin(TorchvisionDatasetPlugin)
# Example #12
    def build(self, dim_z=64):
        """Construct the encoder, decoder, and combined autoencoder network.

        Args:
            dim_z (int): Size of the latent representation.

        """
        self.encoder.build(dim_out=dim_z)
        self.decoder.build(dim_in=dim_z)

        # Combine the two built networks into a single AE module.
        ae_net = AENetwork(self.nets.encoder, self.nets.decoder)
        self.nets.ae = ae_net

    def routine(self, inputs, targets, ae_criterion=F.mse_loss):
        '''Compute the autoencoder reconstruction loss.

        Args:
            ae_criterion: Criterion for the autoencoder. Must follow the
                torch.nn.functional API (accept a ``reduction`` kwarg).

        '''
        ae = self.nets.ae
        outputs = ae(inputs)
        # `size_average=False` is deprecated in torch; `reduction='sum'` is the
        # modern equivalent (sum over all elements, then normalize by batch).
        r_loss = ae_criterion(outputs, inputs,
                              reduction='sum') / inputs.size(0)
        self.losses.ae = r_loss

    def visualize(self, inputs, targets):
        """Display reconstructions alongside the original inputs."""
        net = self.nets.ae
        recon = net(inputs)
        self.add_image(recon, name='reconstruction')
        self.add_image(inputs, name='ground truth')


# Register the Autoencoder plugin.
register_plugin(Autoencoder)