Code example #1
    def save_images(self,
                    batch_size,
                    input_var,
                    iteration,
                    loader,
                    path_fake,
                    path_real=None):
        # Check if the dataset contains its own image conversion method (e.g. for Gaussian values)
        if hasattr(loader.dataset, 'save_images'):
            # Save real images once
            if iteration == 0 and path_real:
                loader.dataset.save_images(input_var, path_real)

            z = noise(batch_size, self.network_factory.gen_input_size)
            if self.cc.settings['dataloader'][
                    'dataset_name'] == 'network_traffic':
                sequence_length = input_var.size(1)
                z = z.unsqueeze(1).repeat(1, sequence_length, 1)

            if self.cc.settings['general']['logging'].get(
                    'print_multiple_generators', False):
                generated_output = []
                for i in range(min(len(self.population_gen.individuals), 5)):
                    gen = self.population_gen.individuals[i].genome.net
                    gen.eval()
                    generated_output.append(gen(z))
                    gen.train()
            else:
                gen = self.population_gen.individuals[0].genome.net
                gen.eval()
                generated_output = gen(z)
                gen.train()

            print_discriminator = self.cc.settings['general']['logging'].get(
                'print_discriminator', False)
            discr = self.population_dis.individuals[
                0].genome if print_discriminator else None
            loader.dataset.save_images(generated_output, path_fake, discr)
        else:
            # Some datasets (e.g. ImageFolder) do not need shapes
            shape = loader.dataset.train_data.shape if hasattr(
                loader.dataset, 'train_data') else None

            # Save real images once
            if iteration == 0 and path_real:
                self.dataloader.save_images(input_var, shape, path_real)

            z = noise(batch_size, self.network_factory.gen_input_size)
            gen = self.population_gen.individuals[0].genome.net
            gen.eval()
            generated_output = gen(z)
            self.dataloader.save_images(generated_output, shape, path_fake)
            gen.train()
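
All of the examples on this page call a noise(batch_size, dim) helper that is not shown here. A minimal sketch consistent with how the examples use it (a 2-D standard-normal latent batch, moved to the GPU when available) could look like the following; the project's actual helper may differ:

    import torch

    def noise(batch_size, dim):
        # Draw a batch of latent vectors z ~ N(0, 1) with shape (batch_size, dim)
        z = torch.randn(batch_size, dim)
        # Mirror the to_pytorch_variable helper: move to CUDA when available
        return z.cuda() if torch.cuda.is_available() else z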
Code example #2
    def compute_loss_against(self, opponent, input, training_epoch=None):

        # If HeuristicLoss is applied in the Generator, the Discriminator applies BCELoss
        if self.loss_function.__class__.__name__ == 'MustangsLoss':
            if 'HeuristicLoss' in self.loss_function.get_applied_loss_name():
                self.loss_function.set_applied_loss(torch.nn.BCELoss())

        # Compute loss using real images
        # Second term of the loss is always zero since real_labels == 1
        batch_size = input.size(0)

        real_labels = to_pytorch_variable(torch.ones(batch_size))
        fake_labels = to_pytorch_variable(torch.zeros(batch_size))

        outputs = self.net(input).view(-1)  # flatten to match real_labels, as in the fake branch below
        d_loss_real = self.loss_function(outputs, real_labels)

        # Compute loss using fake images
        # First term of the loss is always zero since fake_labels == 0
        z = noise(batch_size, self.data_size)
        fake_images = opponent.net(z)
        outputs = self.net(fake_images).view(-1)
        d_loss_fake = self.loss_function(outputs, fake_labels)

        return d_loss_real + d_loss_fake, None
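
The two-term sum above is the standard GAN discriminator objective: with real targets of 1 the second BCE term vanishes, and with fake targets of 0 the first term vanishes. A self-contained sketch of the same computation, using hypothetical toy networks in place of the project's genome classes:

    import torch
    import torch.nn as nn

    # Hypothetical stand-ins for the discriminator and generator genomes
    D = nn.Sequential(nn.Linear(2, 16), nn.ReLU(), nn.Linear(16, 1), nn.Sigmoid())
    G = nn.Sequential(nn.Linear(4, 16), nn.ReLU(), nn.Linear(16, 2))
    bce = nn.BCELoss()

    real = torch.randn(8, 2)          # a batch of "real" samples
    real_labels = torch.ones(8)
    fake_labels = torch.zeros(8)

    # -log D(x): the second BCE term is zero because the targets are all ones
    d_loss_real = bce(D(real).view(-1), real_labels)

    # -log(1 - D(G(z))): the first BCE term is zero because the targets are all zeros
    z = torch.randn(8, 4)
    d_loss_fake = bce(D(G(z).detach()).view(-1), fake_labels)

    d_loss = d_loss_real + d_loss_fake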
Code example #3
    def _update_generators(self, population_attacker, population_defender, input_var, defender_weights):

        batch_size = input_var.size(0)

        for generator in population_attacker.individuals:
            weights = np.asarray([self.get_weight(defender, defender_weights) for defender in population_defender.individuals])
            weights = weights / np.sum(weights)  # normalize into a probability vector
            discriminator = np.random.choice(population_defender.individuals, p=weights)
            optimizer = self._get_optimizer(generator)

            # Avoid computing discriminator gradients while the generator trains
            for p in discriminator.genome.net.parameters():
                p.requires_grad = False

            generator.genome.net.zero_grad()
            # in case our last batch was the tail batch of the dataloader,
            # make sure we feed a full batch of noise
            z = noise(batch_size, generator.genome.data_size)
            fake_data = generator.genome.net(z)
            error = discriminator.genome.net(fake_data).mean(0).view(1)
            error.backward(self.real_labels)
            optimizer.step()

            generator.optimizer_state = optimizer.state_dict()

            for p in discriminator.genome.net.parameters():
                p.requires_grad = True

        self.gen_iterations += 1
        return input_var
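
The opponent selection at the top of the loop turns arbitrary non-negative fitness weights into a probability vector before sampling, since np.random.choice requires p to sum to 1. The same pattern in isolation, with made-up values:

    import numpy as np

    defenders = ['dis_0', 'dis_1', 'dis_2']   # stand-ins for population individuals
    fitness = np.asarray([0.5, 1.5, 2.0])     # hypothetical defender weights

    p = fitness / fitness.sum()               # normalize into probabilities
    opponent = np.random.choice(defenders, p=p)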
Code example #4
    def _update_generators(self, population_attacker, population_defender,
                           input_var):

        batch_size = input_var.size(0)
        # Randomly pick a single opponent, following asynchronous_ea_trainer
        discriminator = random.choice(population_defender.individuals)

        for generator in population_attacker.individuals:
            optimizer = self._get_optimizer(generator)

            # Avoid computing discriminator gradients while the generator trains
            for p in discriminator.genome.net.parameters():
                p.requires_grad = False

            generator.genome.net.zero_grad()
            # in case our last batch was the tail batch of the dataloader,
            # make sure we feed a full batch of noise
            z = noise(batch_size, generator.genome.data_size)
            fake_data = generator.genome.net(z)
            error = discriminator.genome.net(fake_data).mean(0).view(1)
            error.backward(self.real_labels)
            optimizer.step()

            generator.optimizer_state = optimizer.state_dict()

            for p in discriminator.genome.net.parameters():
                p.requires_grad = True

        self.gen_iterations += 1
        return input_var
Code example #5
    def compute_loss_against(self, opponent, input):
        # Compute BCE_Loss using real images, where BCE_Loss(x, y) = -y * log(x) - (1 - y) * log(1 - x) with x = D(input)
        # Second term of the loss is always zero since real_labels == 1

        batch_size = input.size(0)
        sequence_length = input.size(1)
        num_inputs = input.size(2)  # feature dimension (unused here)

        real_labels = to_pytorch_variable(torch.ones(batch_size))
        fake_labels = to_pytorch_variable(torch.zeros(batch_size))

        outputs_intermediate = self.net(input)
        sm = Softmax(dim=0)  # explicit dim over the flattened batch outputs

        outputs = sm(outputs_intermediate[:, -1, :].contiguous().view(-1))
        d_loss_real = self.loss_function(outputs, real_labels)

        # Compute BCELoss using fake images
        # First term of the loss is always zero since fake_labels == 0
        z = noise(batch_size, self.data_size)
        new_z = z.unsqueeze(1).repeat(1, sequence_length, 1)

        fake_images = opponent.net(new_z)
        outputs_full = self.net(fake_images)
        outputs = sm(outputs_full[:, -1, :].contiguous().view(-1))
        d_loss_fake = self.loss_function(outputs, fake_labels)

        return d_loss_real + d_loss_fake, None
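
For sequential models the latent vector is tiled along a new time axis so the generator sees the same noise at every step of the sequence. A shape-only sketch, assuming batch size 4, sequence length 10 and latent size 64:

    import torch

    z = torch.randn(4, 64)                   # (batch, latent)
    new_z = z.unsqueeze(1).repeat(1, 10, 1)  # (batch, seq_len, latent)
    assert new_z.shape == (4, 10, 64)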
Code example #6
    def __init__(self, generator_population, weights, n_samples,
                 mixture_generator_samples_mode):
        """
        Creates samples from a mixture of generators, with sample probability defined by a weights vector

        :param generator_population: Population of generators that will be used to create the images
        :param weights: Dictionary that maps generator IDs to weights, e.g. {'127.0.0.1:5000': 0.8, '127.0.0.1:5001': 0.2}
        :param n_samples: Number of samples that will be generated
        :param mixture_generator_samples_mode: Sampling strategy, either 'independent_probability' or 'exact_proportion'
        """
        self.n_samples = n_samples

        def _parse_node(node):
            if isinstance(node, int):
                return node
            elif isinstance(node, float):
                return int(node)
            try:
                return int(node["id"])
            except (TypeError, KeyError, ValueError):
                return node

        self.individuals = sorted(generator_population.individuals,
                                  key=lambda x: _parse_node(x.source))
        for individual in self.individuals:
            individual.genome.net.eval()
        self.data = []

        weights = collections.OrderedDict(sorted(weights.items()))
        weights = {
            k: v
            for k, v in weights.items()
            if any([i for i in self.individuals if i.source == k])
        }
        weights_np = np.asarray(list(weights.values()))
        if np.sum(weights_np) != 1:
            # A bit of patching, but normalize the weights again
            weights_np = weights_np / np.sum(weights_np).astype(float)

        if mixture_generator_samples_mode == 'independent_probability':
            self.gen_indices = np.random.choice(len(self.individuals),
                                                n_samples,
                                                p=weights_np.tolist())
        elif mixture_generator_samples_mode == 'exact_proportion':
            # No check here that weights_np.tolist() sums to one.
            # A small rounding error occurs whenever prob * n_samples is not an integer.
            self.gen_indices = [
                i for gen_idx, prob in enumerate(weights_np.tolist())
                for i in [gen_idx] * math.ceil(n_samples * prob)
            ]
            np.random.shuffle(self.gen_indices)
            self.gen_indices = self.gen_indices[:n_samples]
        else:
            raise NotImplementedError(
                "Invalid argument for mixture_generator_samples_mode: {}".
                format(mixture_generator_samples_mode))

        self.z = noise(n_samples, self.individuals[0].genome.data_size)
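
The 'exact_proportion' branch gives each generator ceil(n_samples * prob) indices, shuffles them, and truncates back to n_samples, so every generator contributes almost exactly its share. The allocation in isolation:

    import math
    import numpy as np

    n_samples = 10
    probs = [0.25, 0.75]   # hypothetical mixture weights for two generators

    gen_indices = [
        i for gen_idx, prob in enumerate(probs)
        for i in [gen_idx] * math.ceil(n_samples * prob)
    ]
    np.random.shuffle(gen_indices)
    gen_indices = gen_indices[:n_samples]   # ceil can overshoot by a few indices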
Code example #7
    def compute_loss_against(self, opponent, input, training_epoch=None):
        batch_size = input.size(0)

        real_labels = to_pytorch_variable(torch.ones(batch_size))

        z = noise(batch_size, self.data_size)

        fake_images = self.net(z)
        outputs = opponent.net(fake_images).view(-1)

        return self.loss_function(outputs, real_labels), fake_images, None
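
Scoring fake images against real labels is the non-saturating generator trick: instead of minimizing log(1 - D(G(z))), the generator maximizes log D(G(z)), which gives stronger gradients early in training. Reduced to its core:

    import torch

    d_of_fake = torch.rand(8, requires_grad=True)        # hypothetical D(G(z)) outputs
    real_labels = torch.ones(8)
    g_loss = torch.nn.BCELoss()(d_of_fake, real_labels)  # equals -mean(log D(G(z)))
    g_loss.backward()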
Code example #8
    def _update_discriminators(self, population_attacker, population_defender, input_var, loaded, data_iterator, defender_weights):

        batch_size = input_var.size(0)

        for discriminator in population_attacker.individuals:
            weights = np.asarray([self.get_weight(defender, defender_weights) for defender in population_defender.individuals])
            weights = weights / np.sum(weights)  # normalize into a probability vector
            generator = np.random.choice(population_defender.individuals, p=weights)
            optimizer = self._get_optimizer(discriminator)

            # Train the discriminator Diters times
            if self.gen_iterations < 25 or self.gen_iterations % 500 == 0:
                discriminator_iterations = 100
            else:
                discriminator_iterations = DISCRIMINATOR_STEPS

            j = 0
            while j < discriminator_iterations and self.batch_number < len(loaded):
                if j > 0:
                    input_var = to_pytorch_variable(self.dataloader.transpose_data(next(data_iterator)[0]))
                j += 1

                # Train with real data
                discriminator.genome.net.zero_grad()
                error_real = discriminator.genome.net(input_var)
                error_real = error_real.mean(0).view(1)
                error_real.backward(self.real_labels)

                # Train with fake data
                z = noise(batch_size, generator.genome.data_size)
                z.volatile = True  # legacy (pre-0.4) PyTorch: no autograd tracking for this inference pass
                fake_data = Variable(generator.genome.net(z).data)
                loss = discriminator.genome.net(fake_data).mean(0).view(1)
                loss.backward(self.fake_labels)
                optimizer.step()

                # Clamp parameters to a cube
                for p in discriminator.genome.net.parameters():
                    p.data.clamp_(CLAMP_LOWER, CLAMP_UPPER)

                self.batch_number += 1

            discriminator.optimizer_state = optimizer.state_dict()

        return input_var
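
This follows the WGAN training recipe: the critic is trained for many extra iterations early on (and periodically thereafter), and its weights are clamped to a small cube to keep it approximately Lipschitz. A minimal sketch of just the schedule and the clamp, with assumed values for the module-level constants:

    import torch.nn as nn

    CLAMP_LOWER, CLAMP_UPPER = -0.01, 0.01   # assumed, matching the WGAN paper
    DISCRIMINATOR_STEPS = 5                  # assumed default

    def critic_iterations(gen_iterations):
        # Burn-in: many critic updates while the generator is young, then periodically
        if gen_iterations < 25 or gen_iterations % 500 == 0:
            return 100
        return DISCRIMINATOR_STEPS

    def clamp_weights(net: nn.Module):
        for p in net.parameters():
            p.data.clamp_(CLAMP_LOWER, CLAMP_UPPER)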
Code example #9
File: competetive_net.py  Project: swalpa/mustang
    def compute_loss_against(self, opponent, input):
        # Compute BCE_Loss using real images, where BCE_Loss(x, y) = -y * log(x) - (1 - y) * log(1 - x) with x = D(input)
        # Second term of the loss is always zero since real_labels == 1
        batch_size = input.size(0)
        real_labels = to_pytorch_variable(torch.ones(batch_size))
        fake_labels = to_pytorch_variable(torch.zeros(batch_size))

        outputs = self.net(input).view(-1)
        # BCE computed directly through the functional API instead of self.loss_function
        d_loss_real = torch.nn.functional.binary_cross_entropy(outputs, real_labels)

        # Compute BCELoss using fake images
        # First term of the loss is always zero since fake_labels == 0
        z = noise(batch_size, self.data_size)
        fake_images = opponent.net(z)
        outputs = self.net(fake_images).view(-1)
        d_loss_fake = torch.nn.functional.binary_cross_entropy(outputs, fake_labels)

        return d_loss_real + d_loss_fake, None
Code example #10
    def __init__(self, generator_population, weights, n_samples):
        """
        Creates samples from a mixture of generators, with sample probability defined by a weights vector

        :param generator_population: Population of generators that will be used to create the images
        :param weights: Dictionary that maps generator IDs to weights, e.g. {'127.0.0.1:5000': 0.8, '127.0.0.1:5001': 0.2}
        :param n_samples: Number of samples that will be generated
        """
        self.n_samples = n_samples
        self.individuals = sorted(generator_population.individuals, key=lambda x: x.source)
        for individual in self.individuals:
            individual.genome.net.eval()
        self.data = []

        weights = collections.OrderedDict(sorted(weights.items()))
        weights = {k: v for k, v in weights.items() if any([i for i in self.individuals if i.source == k])}
        self.gen_indices = np.random.choice(len(self.individuals), n_samples, p=list(weights.values()))

        self.z = noise(n_samples, self.individuals[0].genome.data_size)
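
Unlike the revision in code example #6, this version passes the raw weight values straight to np.random.choice, which raises a ValueError when they do not sum to 1. A sketch of the guard the other revisions add:

    import numpy as np

    weights = {'127.0.0.1:5000': 0.8, '127.0.0.1:5001': 0.3}  # sums to 1.1
    weights_np = np.asarray(list(weights.values()))
    if not np.isclose(weights_np.sum(), 1.0):
        weights_np = weights_np / weights_np.sum()   # renormalize before sampling
    gen_indices = np.random.choice(len(weights_np), 10, p=weights_np)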
Code example #11
    def compute_loss_against(self, opponent, input, labels=None, alpha=None, beta=None, iter=None, log_class_distribution=False):
        FloatTensor = torch.cuda.FloatTensor if is_cuda_enabled() else torch.FloatTensor
        LongTensor = torch.cuda.LongTensor if is_cuda_enabled() else torch.LongTensor
        batch_size = input.size(0)
        real_labels = to_pytorch_variable(torch.ones(batch_size))  # label all generator images 1 (real)

        z = noise(batch_size, self.data_size)  # dims: batch size x data_size

        # Random class ids in [0, num_classes), shape (batch_size,)
        labels = LongTensor(np.random.randint(0, self.num_classes, batch_size))
        labels = labels.view(-1, 1)
        labels_onehot = torch.FloatTensor(batch_size, self.num_classes)
        labels_onehot.zero_()
        labels_onehot.scatter_(1, labels, 1)

        labels = to_pytorch_variable(labels_onehot.type(FloatTensor))

        # Concatenate the one-hot labels and z before passing them into the generator net
        gen_input = torch.cat((labels, z), -1)
        fake_images = self.net(gen_input)

        # Concatenate fake_images and the one-hot labels before passing them into the discriminator net
        dis_input = torch.cat((fake_images, labels), -1)
        outputs = opponent.net(dis_input).view(-1)  # flatten the discriminator output

        # Loss of the discriminator output against 1: the generator tries to make D score its images as real
        return self.loss_function(outputs, real_labels), fake_images
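
The conditional input is built by scattering 1s into a zero matrix at the label positions (one-hot encoding) and concatenating the result with z along the last dimension. A shape-only sketch with a hypothetical 10-class setup:

    import torch

    batch_size, num_classes, latent = 8, 10, 64
    labels = torch.randint(0, num_classes, (batch_size, 1))  # column of class ids
    onehot = torch.zeros(batch_size, num_classes)
    onehot.scatter_(1, labels, 1)                            # one-hot rows

    z = torch.randn(batch_size, latent)
    gen_input = torch.cat((onehot, z), -1)                   # (8, 74)
    assert gen_input.shape == (batch_size, num_classes + latent)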
Code example #12
File: competetive_net.py  Project: swalpa/mustang
    def compute_loss_against(self, opponent, input):
        batch_size = input.size(0)

        real_labels = to_pytorch_variable(torch.ones(batch_size))

        z = noise(batch_size, self.data_size)
        fake_images = self.net(z)
        outputs = opponent.net(fake_images).view(-1)

        # Compute BCELoss using D(G(z))
        if self.loss_function.__class__.__name__ == 'SMuGANLoss':
            prob = np.random.uniform()
            if prob < 0.33:
                loss = self.bceloss(outputs, real_labels)
            elif prob < 0.66:
                loss = self.mseloss(outputs, real_labels)
            else:
                loss = self.heuristicloss(outputs, real_labels)
            return loss, fake_images
        else:
            return self.loss_function(outputs, real_labels), fake_images
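
Each call draws one of three objectives with roughly equal probability, which is the randomized loss selection behind SMuGANLoss. The same pattern in isolation, with standard PyTorch losses standing in for the project's loss objects (the heuristic loss is assumed here to be the usual non-saturating -log D(G(z))):

    import numpy as np
    import torch
    import torch.nn as nn

    outputs = torch.rand(8)        # hypothetical discriminator outputs in (0, 1)
    real_labels = torch.ones(8)

    prob = np.random.uniform()
    if prob < 0.33:
        loss = nn.BCELoss()(outputs, real_labels)      # classic GAN loss
    elif prob < 0.66:
        loss = nn.MSELoss()(outputs, real_labels)      # least-squares GAN loss
    else:
        loss = -torch.log(outputs).mean()              # heuristic (non-saturating) loss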
Code example #13
    def compute_loss_against(self, opponent, input):
        batch_size = input.size(0)
        sequence_length = input.size(1)
        num_inputs = input.size(2)  # feature dimension (unused here)

        # Labels are defined per sequence here; they would differ if individual messages, rather than entire sequences, were scored as real or fake.
        real_labels = to_pytorch_variable(torch.ones(batch_size))

        z = noise(batch_size, self.data_size)

        # Repeats the noise to match the shape
        new_z = z.unsqueeze(1).repeat(1, sequence_length, 1)
        fake_sequences = self.net(new_z)

        outputs_intermediate = opponent.net(fake_sequences)

        # Compute BCELoss using D(G(z))
        sm = Softmax(dim=0)  # explicit dim over the flattened batch outputs
        outputs = sm(outputs_intermediate[:, -1, :].contiguous().view(-1))

        return self.loss_function(outputs, real_labels), fake_sequences
Code example #14
    def compute_loss_against(self, opponent, input, training_epoch=None):

        print('ITERATION: {}'.format(training_epoch))
        # If HeuristicLoss is applied in the Generator, the Discriminator applies BCELoss
        if self.loss_function.__class__.__name__ == 'MustangsLoss':
            if 'HeuristicLoss' in self.loss_function.get_applied_loss_name():
                self.loss_function.set_applied_loss(torch.nn.BCELoss())

        # Compute loss using real images
        # Second term of the loss is always zero since real_labels == 1
        batch_size = input.size(0)

        # Adding noise to prevent Discriminator from getting too strong
        if training_epoch is not None:
            std = max(self.in_std_min,
                      self.in_std - training_epoch * self.in_std_decay_rate)
        else:
            std = self.in_std
        print('Perturbation Std: {}'.format(std))
        input_perturbation = to_pytorch_variable(
            torch.empty(input.shape).normal_(mean=self.in_mean, std=std))
        input = input + input_perturbation

        input = input.view(-1, 1, self.image_length, self.image_width)

        real_labels = to_pytorch_variable(torch.ones(batch_size))
        fake_labels = to_pytorch_variable(torch.zeros(batch_size))

        outputs = self.net(input).view(-1)
        d_loss_real = self.loss_function(outputs, real_labels)

        # Compute loss using fake images
        # First term of the loss is always zero since fake_labels == 0
        z = noise(batch_size, self.data_size)
        fake_images = opponent.net(z)
        outputs = self.net(fake_images).view(-1)
        d_loss_fake = self.loss_function(outputs, fake_labels)

        return d_loss_real + d_loss_fake, None, None
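
The perturbation std decays linearly with the training epoch down to a floor, so the discriminator sees heavily noised inputs early on and nearly clean ones later. The schedule by itself, with assumed hyperparameters standing in for in_std, in_std_decay_rate and in_std_min:

    def perturbation_std(epoch, std0=1.0, decay_rate=0.01, std_min=0.1):
        # Linear decay of the instance-noise std, clipped at std_min
        if epoch is None:
            return std0
        return max(std_min, std0 - epoch * decay_rate)

    assert perturbation_std(0) == 1.0
    assert perturbation_std(200) == 0.1   # clipped at the floor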
Code example #15
    def _update_discriminators(self, population_attacker, population_defender,
                               input_var, loaded, data_iterator):

        batch_size = input_var.size(0)
        # Randomly pick a single opponent, following asynchronous_ea_trainer
        generator = random.choice(population_defender.individuals)

        for i, discriminator in enumerate(population_attacker.individuals):
            if i < len(population_attacker.individuals) - 1:
                # https://stackoverflow.com/a/42132767
                # Perform deep copy first instead of directly updating iterator passed in
                data_iterator, curr_iterator = tee(data_iterator)
            else:
                # Directly update the iterator with the last individual only, so that
                # every individual can learn from the full batch
                curr_iterator = data_iterator

            # Use temporary batch variable for each individual
            # so that every individual can learn from the full batch
            curr_batch_number = self.batch_number
            optimizer = self._get_optimizer(discriminator)

            # Train the discriminator Diters times
            if self.gen_iterations < 25 or self.gen_iterations % 500 == 0:
                discriminator_iterations = 100
            else:
                discriminator_iterations = DISCRIMINATOR_STEPS

            j = 0
            while j < discriminator_iterations and curr_batch_number < len(
                    loaded):
                if j > 0:
                    input_var = to_pytorch_variable(
                        self.dataloader.transpose_data(next(curr_iterator)[0]))
                j += 1

                # Train with real data
                discriminator.genome.net.zero_grad()
                error_real = discriminator.genome.net(input_var)
                error_real = error_real.mean(0).view(1)
                error_real.backward(self.real_labels)

                # Train with fake data
                z = noise(batch_size, generator.genome.data_size)
                z.volatile = True  # legacy (pre-0.4) PyTorch: no autograd tracking for this inference pass
                fake_data = Variable(generator.genome.net(z).data)
                loss = discriminator.genome.net(fake_data).mean(0).view(1)
                loss.backward(self.fake_labels)
                optimizer.step()

                # Clamp parameters to a cube
                for p in discriminator.genome.net.parameters():
                    p.data.clamp_(CLAMP_LOWER, CLAMP_UPPER)

                curr_batch_number += 1

            discriminator.optimizer_state = optimizer.state_dict()
        # Write the final batch number back to the instance after all individuals are updated
        self.batch_number = curr_batch_number

        return input_var
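
itertools.tee is what lets every discriminator except the last consume a private copy of the shared data iterator, so each individual sees the same batches without draining the original. A minimal demonstration:

    from itertools import tee

    data_iterator = iter([1, 2, 3, 4])
    data_iterator, curr_iterator = tee(data_iterator)

    assert [next(curr_iterator) for _ in range(4)] == [1, 2, 3, 4]
    # The teed-off copy is untouched and can be split again for the next individual
    assert next(data_iterator) == 1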
Code example #16
    def compute_loss_against(self, opponent, input, labels=None, alpha=None, beta=None, iter=None, log_class_distribution=False):

        # need to pass in the labels from dataloader too in lipizzaner_gan_trainer.py
        # Compute loss using real images
        # Second term of the loss is always zero since real_labels == 1
        batch_size = input.size(0)

        FloatTensor = torch.cuda.FloatTensor if is_cuda_enabled() else torch.FloatTensor
        LongTensor = torch.cuda.LongTensor if is_cuda_enabled() else torch.LongTensor

        # One-sided label smoothing: real targets are 0.9 instead of 1.0
        real_labels = torch.Tensor(batch_size)
        real_labels.fill_(0.9)
        real_labels = to_pytorch_variable(real_labels)

        fake_labels = to_pytorch_variable(torch.zeros(batch_size))

        # Keep the class-id tensor on CPU: scatter_ needs index and target on the same device;
        # the one-hot result is moved to the right device below via FloatTensor/to_pytorch_variable
        labels = labels.view(-1, 1)
        labels_onehot = torch.FloatTensor(batch_size, self.num_classes)
        labels_onehot.zero_()
        labels_onehot.scatter_(1, labels, 1)

        labels = to_pytorch_variable(labels_onehot.type(FloatTensor))

        instance_noise_std_dev_min = 0.5
        instance_noise_std_dev_max = 5.0
        instance_noise_std_dev = 2.5
        instance_noise_mean = 0

        # Add instance noise to prevent the Discriminator from getting too strong,
        # with a std that decays linearly over iterations down to a floor
        if iter is not None:
            std = max(
                instance_noise_std_dev_min,
                instance_noise_std_dev_max - iter * 0.001,
            )
        else:
            std = instance_noise_std_dev  # fall back to the fixed default

        input_perturbation = to_pytorch_variable(
            torch.empty(input.shape).normal_(mean=instance_noise_mean, std=std)
        )

        input = input + input_perturbation

        dis_input = torch.cat((input, labels), -1)  # discriminator training data input

        outputs = self.net(dis_input).view(-1)  # pass in training data input and respective labels to discriminator
        d_loss_real = self.loss_function(outputs, real_labels)  # get real image loss of discriminator (output vs. 1)

        # Compute loss using fake images
        # First term of the loss is always zero since fake_labels == 0
        gen_labels = LongTensor(np.random.randint(0, self.num_classes, batch_size))  # random labels for generator input

        z = noise(batch_size, self.data_size)  # noise for generator input

        gen_labels = gen_labels.view(-1, 1)
        labels_onehot = torch.FloatTensor(batch_size, self.num_classes)
        labels_onehot.zero_()
        labels_onehot.scatter_(1, gen_labels, 1)

        gen_labels = to_pytorch_variable(labels_onehot.type(FloatTensor))

        gen_input = torch.cat((gen_labels, z), -1)

        fake_images = opponent.net(gen_input)
        dis_input = torch.cat((fake_images, gen_labels), -1)  # discriminator input for the fake images
        outputs = self.net(dis_input).view(-1)
        d_loss_fake = self.loss_function(outputs, fake_labels)  # get fake image loss of discriminator (output vs. 0)

        return (d_loss_real + d_loss_fake), None
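
Note that the real targets above are filled with 0.9 rather than 1.0: one-sided label smoothing, which discourages the discriminator from becoming overconfident on real data. In isolation:

    import torch

    batch_size = 8
    real_labels = torch.full((batch_size,), 0.9)  # smoothed "real" targets
    fake_labels = torch.zeros(batch_size)         # fake targets stay at 0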
Code example #17
    def optimize_generator_mixture_weights(self):
        generators = self.neighbourhood.best_generators
        weights_generators = self.neighbourhood.mixture_weights_generators

        # Not necessary for single-cell grids, as mixture must always be [1]
        if self.neighbourhood.grid_size == 1:
            return

        # Create random vector from latent space
        z_noise = noise(self.score_sample_size,
                        generators.individuals[0].genome.data_size)

        # Include option to start from random weights
        if self.es_random_init:
            aux_weights = np.random.rand(len(weights_generators))
            aux_weights /= np.sum(aux_weights)
            weights_generators = OrderedDict(
                zip(weights_generators.keys(), aux_weights))
            self.neighbourhood.mixture_weights_generators = weights_generators

        dataset = MixedGeneratorDataset(
            generators,
            weights_generators,
            self.score_sample_size,
            self.mixture_generator_samples_mode,
            z_noise,
        )

        self.score = self.score_calc.calculate(dataset)[0]
        init_score = self.score

        self._logger.info(
            "Mixture weight mutation - Starting mixture weights optimization ..."
        )
        self._logger.info("Init score: {}\tInit weights: {}.".format(
            init_score, weights_generators))

        for g in range(self.es_generations):

            # Mutate mixture weights
            z = np.random.normal(loc=0,
                                 scale=self.mixture_sigma,
                                 size=len(weights_generators))
            transformed = np.asarray(
                [value for _, value in weights_generators.items()])
            transformed += z

            # Don't allow negative values, normalize to sum of 1.0
            transformed = np.clip(transformed, 0, None)
            transformed /= np.sum(transformed)
            new_mixture_weights = OrderedDict(
                zip(weights_generators.keys(), transformed))

            # TODO: Testing the idea of not generating the images again
            dataset = MixedGeneratorDataset(
                generators,
                new_mixture_weights,
                self.score_sample_size,
                self.mixture_generator_samples_mode,
                z_noise,
                epoch=g)

            if self.score_calc is not None:
                score_after_mutation = self.score_calc.calculate(dataset)[0]
                self._logger.info(
                    "Mixture weight mutation - Generation: {} \tScore of new weights: {}\tNew weights: {}."
                    .format(g, score_after_mutation, new_mixture_weights))

                # For FID lower is better; for inception_score higher is better
                if (score_after_mutation < self.score
                        and self.score_calc.is_reversed) or (
                            score_after_mutation > self.score and
                            (not self.score_calc.is_reversed)):
                    weights_generators = new_mixture_weights
                    self.score = score_after_mutation
                    self._logger.info(
                        "Mixture weight mutation - Generation: {} \tNew score: {}\tWeights changed to: {}."
                        .format(g, self.score, weights_generators))
        self.neighbourhood.mixture_weights_generators = weights_generators

        self._logger.info(
            "Mixture weight mutation - Score before mixture weight optimization: {}\tScore after mixture weight optimization: {}."
            .format(init_score, self.score))
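
Each generation performs one evolution-strategy step on the mixture weights: add Gaussian noise, clip negative entries to zero, renormalize to sum to 1, and keep the mutant only if the score improves. The mutation operator on its own:

    import numpy as np

    def mutate_weights(weights, sigma=0.01):
        # Gaussian perturbation, kept non-negative and normalized to sum to 1
        mutated = np.asarray(weights, dtype=float)
        mutated += np.random.normal(loc=0, scale=sigma, size=len(mutated))
        mutated = np.clip(mutated, 0, None)
        return mutated / np.sum(mutated)

    new_weights = mutate_weights([0.5, 0.3, 0.2])
    assert np.isclose(np.sum(new_weights), 1.0)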
Code example #18
    def __init__(self,
                 generator_population,
                 weights,
                 n_samples,
                 mixture_generator_samples_mode,
                 z=None):
        """
        Creates samples from a mixture of generators, with sample probability defined by a weights
        vector, given a random noise vector sampled from the latent space

        :param generator_population: Population of generators that will be used to create the images
        :param weights: Dictionary that maps generator IDs to weights, e.g. {'127.0.0.1:5000': 0.8, '127.0.0.1:5001': 0.2}
        :param n_samples: Number of samples that will be generated
        :param mixture_generator_samples_mode: Sampling strategy, either 'independent_probability' or 'exact_proportion'
        :param z: Noise vector from the latent space. If not given, a new one is generated
        """
        self.n_samples = n_samples
        self.individuals = sorted(generator_population.individuals,
                                  key=lambda x: x.source)
        for individual in self.individuals:
            individual.genome.net.eval()
        self.data = []

        weights = collections.OrderedDict(sorted(weights.items()))
        weights = {
            k: v
            for k, v in weights.items()
            if any([i for i in self.individuals if i.source == k])
        }
        weights_np = np.asarray(list(weights.values()))

        if np.sum(weights_np) != 1:
            weights_np = weights_np / np.sum(weights_np).astype(
                float)  # A bit of patching, but normalize it again

        if mixture_generator_samples_mode == 'independent_probability':
            self.gen_indices = np.random.choice(len(self.individuals),
                                                n_samples,
                                                p=weights_np.tolist())
        elif mixture_generator_samples_mode == 'exact_proportion':
            # No check here that weights_np.tolist() sums to one.
            # A small rounding error occurs whenever prob * n_samples is not an integer.
            self.gen_indices = [
                i for gen_idx, prob in enumerate(weights_np.tolist())
                for i in [gen_idx] * math.ceil(n_samples * prob)
            ]
            np.random.shuffle(self.gen_indices)
            self.gen_indices = self.gen_indices[:n_samples]
        else:
            raise NotImplementedError(
                "Invalid argument for mixture_generator_samples_mode: {}".
                format(mixture_generator_samples_mode))
        if z is None:
            self.z = noise(n_samples, self.individuals[0].genome.data_size)
        else:
            self.z = z

        # HACK: If it's a sequential model, add a time dimension to the noise input.
        # A fixed sequence length is currently used for sequence generation; it should
        # eventually be user-configurable.
        if self.individuals[0].genome.name in [
                "DiscriminatorSequential", "GeneratorSequential"
        ]:
            sequence_length = 100
            self.z = self.z.unsqueeze(1).repeat(1, sequence_length, 1)
Code example #19
    def __init__(self, generator_population, weights, n_samples, mixture_generator_samples_mode, z=None):
        """
        Creates samples from a mixture of generators, with sample probability defined by a weights
        vector, given a random noise vector sampled from the latent space

        :param generator_population: Population of generators that will be used to create the images
        :param weights: Dictionary that maps generator IDs to weights, e.g. {'127.0.0.1:5000': 0.8, '127.0.0.1:5001': 0.2}
        :param n_samples: Number of samples that will be generated
        :param mixture_generator_samples_mode: Sampling strategy, either 'independent_probability' or 'exact_proportion'
        :param z: Noise vector from the latent space. If not given, a new one is generated
        """
        self.n_samples = n_samples
        self.individuals = sorted(generator_population.individuals, key=lambda x: x.source)
        for individual in self.individuals:
            individual.genome.net.eval()
        self.data = []

        self.cc = ConfigurationContainer.instance()

        weights = collections.OrderedDict(sorted(weights.items()))
        weights = {k: v for k, v in weights.items() if any([i for i in self.individuals if i.source == k])}
        weights_np = np.asarray(list(weights.values()))

        if np.sum(weights_np) != 1:
            weights_np = weights_np / np.sum(weights_np).astype(float)    # A bit of patching, but normalize it again

        if mixture_generator_samples_mode == 'independent_probability':
            self.gen_indices = np.random.choice(len(self.individuals), n_samples, p=weights_np.tolist())
        elif mixture_generator_samples_mode == 'exact_proportion':
            # No check here that weights_np.tolist() sums to one.
            # A small rounding error occurs whenever prob * n_samples is not an integer.
            self.gen_indices = [
                i for gen_idx, prob in enumerate(weights_np.tolist()) for i in [gen_idx] * math.ceil(n_samples * prob)
            ]
            np.random.shuffle(self.gen_indices)
            self.gen_indices = self.gen_indices[:n_samples]
        else:
            raise NotImplementedError(
                "Invalid argument for mixture_generator_samples_mode: {}".format(mixture_generator_samples_mode)
            )

        num_classes = self.individuals[0].genome.num_classes if hasattr(self.individuals[0].genome, 'num_classes') \
                                                                and self.individuals[0].genome.num_classes != 0 else 0

        if z is None:
            z = noise(n_samples, self.individuals[0].genome.data_size)

            if num_classes != 0 and self.cc.settings["network"]["name"] == 'conditional_four_layer_perceptron':
                FloatTensor = torch.cuda.FloatTensor if is_cuda_enabled() else torch.FloatTensor
                LongTensor = torch.cuda.LongTensor if is_cuda_enabled() else torch.LongTensor
                labels = LongTensor(np.random.randint(0, num_classes, n_samples))  # random class ids in [0, num_classes), shape (n_samples,)

                labels = labels.view(-1, 1)
                labels_onehot = torch.FloatTensor(n_samples, num_classes)
                labels_onehot.zero_()
                labels_onehot.scatter_(1, labels, 1)

                input_labels = to_pytorch_variable(labels_onehot.type(FloatTensor))

                self.z = torch.cat((input_labels, z), -1)
            else:
                self.z = z

        else:
            self.z = z

        # HACK: If it's a sequential model, add a time dimension to the noise input.
        # A fixed sequence length is currently used for sequence generation; it should
        # eventually be user-configurable.
        if self.individuals[0].genome.name in ["DiscriminatorSequential", "GeneratorSequential"]:
            sequence_length = 100
            self.z = self.z.unsqueeze(1).repeat(1, sequence_length, 1)