Code Example #1
    def __init__(self, output_size=1, genome=None, input_shape=None, optimizer_conf=None):
        super().__init__(output_size=output_size, genome=genome, input_shape=input_shape)
        self.output_size = output_size
        self.optimizer_conf = optimizer_conf or config.gan.discriminator.optimizer

        if genome is None:
            if config.gan.discriminator.fixed:
                self.genome = Genome(random=False, add_layer_prob=0, rm_layer_prob=0, gene_mutation_prob=0,
                                     mutate_gan_type_prob=0)
                self.genome.add(Conv2d(256, stride=2, activation_type="LeakyReLU", activation_params={"negative_slope": 0.2}))
                self.genome.add(Conv2d(128, stride=2, activation_type="LeakyReLU", activation_params={"negative_slope": 0.2}))
                self.genome.add(Conv2d(64, stride=2, activation_type="LeakyReLU", activation_params={"negative_slope": 0.2}))
                # self.genome.add(SelfAttention())
            else:
                self.genome = Genome(random=not config.evolution.sequential_layers)
                self.genome.possible_genes = [(getattr(evolution, l), {}) for l in config.gan.discriminator.possible_layers]

            if config.gan.discriminator.fixed:
                self.genome.input_genes = []
                self.genome.output_genes = [Linear(1, activation_type=None, normalize="none", bias=False)]
            else:
                self.genome.input_genes = [Conv2d(stride=1, normalize="spectral")]
                self.genome.output_genes = [Linear(1, activation_type=None, normalize="spectral", bias=False)]
Code Example #2
    def setUp(self):
        self.genome = Genome()
        self.phenotype = Phenotype(1, self.genome)
        config.gan.generator.optimizer.copy_optimizer_state = True
        config.gan.generator.optimizer.type = "Adam"
        config.gan.discriminator.optimizer = config.gan.generator.optimizer
        config.evolution.sequential_layers = True
        self.phenotype.optimizer_conf = config.gan.generator.optimizer
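Note: copy_optimizer_state = True lets breed() carry Adam's per-parameter moment buffers (exp_avg, exp_avg_sq) over to offspring, which the crossover tests below assert on. A sketch of the underlying PyTorch mechanics, assuming a one-to-one parameter pairing (the real code pairs parameters through gene reuse):

import torch
import torch.nn as nn

parent, child = nn.Linear(4, 2), nn.Linear(4, 2)
child.load_state_dict(parent.state_dict())

opt_parent = torch.optim.Adam(parent.parameters())
parent(torch.randn(8, 4)).sum().backward()
opt_parent.step()  # populates exp_avg / exp_avg_sq in the optimizer state

opt_child = torch.optim.Adam(child.parameters())
for p_src, p_dst in zip(parent.parameters(), child.parameters()):
    # clone each buffer so parent and child never share optimizer state
    opt_child.state[p_dst] = {k: v.clone() if torch.is_tensor(v) else v
                              for k, v in opt_parent.state[p_src].items()}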
Code Example #3
File: generator.py Project: vfcosta/qd-coegan
    def __init__(self,
                 output_size=(1, 28, 28),
                 genome=None,
                 input_shape=(1, 1, 10, 10),
                 optimizer_conf=config.gan.generator.optimizer):
        super().__init__(output_size=output_size,
                         genome=genome,
                         input_shape=input_shape)
        self.noise_size = int(np.prod(self.input_shape[1:]))
        self.inception_score_mean = 0
        self.fid_score = None
        self.rmse_score = None
        self.optimizer_conf = optimizer_conf

        if genome is None:
            if config.gan.generator.fixed:
                self.genome = Genome(
                    random=False,
                    add_layer_prob=0,
                    rm_layer_prob=0,
                    gene_mutation_prob=0,
                    simple_layers=config.gan.generator.simple_layers,
                    linear_at_end=False)
                self.genome.add(
                    Linear(4 * int(np.prod(output_size)),
                           activation_type="LeakyReLU",
                           activation_params={"negative_slope": 0.2}))
                if not config.gan.generator.simple_layers:
                    self.genome.add(
                        Deconv2d(128,
                                 activation_type="LeakyReLU",
                                 activation_params={"negative_slope": 0.2}))
                    self.genome.add(
                        Deconv2d(64,
                                 activation_type="LeakyReLU",
                                 activation_params={"negative_slope": 0.2}))
            else:
                self.genome = Genome(
                    random=not config.evolution.sequential_layers,
                    linear_at_end=False)
                self.genome.possible_genes = [
                    (getattr(evolution, l), {})
                    for l in config.gan.generator.possible_layers
                ]
                self.genome.add(Linear(4096))
            if config.gan.generator.simple_layers:
                # self.genome.output_genes = [Deconv2d(output_size[0], activation_type="Tanh")]
                self.genome.output_genes = [
                    Linear(int(np.prod(output_size)),
                           activation_type="Tanh",
                           normalize=False)
                ]
            else:
                self.genome.output_genes = [
                    Deconv2d(output_size[0],
                             activation_type="Tanh",
                             normalize=False)
                ]
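For orientation, the fixed generator above corresponds roughly to a plain nn.Sequential stack. The sketch below is an assumption: the kernel sizes, the intermediate reshape, and the channel count of the unflattened feature map are invented for illustration, since the real Phenotype infers them from tensor shapes:

import numpy as np
import torch
import torch.nn as nn

output_size = (1, 28, 28)
noise_size = 100  # int(np.prod((1, 10, 10)))
hidden = 4 * int(np.prod(output_size))  # 3136

net = nn.Sequential(
    nn.Linear(noise_size, hidden),
    nn.LeakyReLU(0.2),
    nn.Unflatten(1, (16, 14, 14)),  # 16 * 14 * 14 == 3136
    nn.ConvTranspose2d(16, 128, kernel_size=3, padding=1),
    nn.LeakyReLU(0.2),
    nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),  # 14x14 -> 28x28
    nn.LeakyReLU(0.2),
    nn.Conv2d(64, output_size[0], kernel_size=3, padding=1),
    nn.Tanh(),
)
print(net(torch.randn(5, noise_size)).shape)  # torch.Size([5, 1, 28, 28])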
Code Example #4
File: discriminator.py Project: vfcosta/coegan
    def __init__(self, output_size=1, genome=None, input_shape=None):
        super().__init__(output_size=output_size,
                         genome=genome,
                         input_shape=input_shape)
        self.output_size = output_size

        if genome is None:
            if config.gan.discriminator.fixed:
                self.genome = Genome(
                    random=False,
                    add_layer_prob=0,
                    rm_layer_prob=0,
                    gene_mutation_prob=0,
                    simple_layers=config.gan.discriminator.simple_layers)
                if not config.gan.discriminator.simple_layers:
                    self.genome.add(
                        Conv2d(8,
                               stride=2,
                               activation_type="LeakyReLU",
                               activation_params={"negative_slope": 0.2}))
                    self.genome.add(
                        Conv2d(16,
                               stride=2,
                               activation_type="LeakyReLU",
                               activation_params={"negative_slope": 0.2}))
                    self.genome.add(
                        Conv2d(32,
                               stride=2,
                               activation_type="LeakyReLU",
                               activation_params={"negative_slope": 0.2}))
                    self.genome.add(
                        Conv2d(64,
                               stride=2,
                               activation_type="LeakyReLU",
                               activation_params={"negative_slope": 0.2}))
                    self.genome.add(
                        Conv2d(128,
                               stride=2,
                               activation_type="LeakyReLU",
                               activation_params={"negative_slope": 0.2}))
                # self.genome.add(Linear(1024, activation_type="ELU"))
            else:
                self.genome = Genome(
                    random=not config.evolution.sequential_layers)
                self.genome.possible_genes = [
                    g for g in self.genome.possible_genes if g[0] != Deconv2d
                ]
                self.genome.add_random_gene()

            if config.gan.type == "gan":
                self.genome.output_genes = [
                    Linear(1, activation_type="Sigmoid", normalize=False)
                ]
            else:
                self.genome.output_genes = [
                    Linear(1, activation_type=None, normalize=False)
                ]
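Note the branch on config.gan.type: a vanilla GAN head ends in Sigmoid and yields probabilities, while the other GAN types consume raw logits. The split mirrors a standard PyTorch idiom:

import torch
import torch.nn as nn

features = torch.randn(5, 64)
head_gan = nn.Sequential(nn.Linear(64, 1), nn.Sigmoid())  # probabilities in (0, 1)
head_logits = nn.Linear(64, 1)  # raw scores for wgan / rsgan / hinge losses

# binary_cross_entropy_with_logits fuses the sigmoid for numerical stability,
# which is why logit heads pair with it instead of BCELoss:
loss = nn.functional.binary_cross_entropy_with_logits(
    head_logits(features).view(-1), torch.ones(5))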
Code Example #5
File: generator.py Project: vfcosta/coegan
    def __init__(self,
                 output_size=(1, 28, 28),
                 genome=None,
                 input_shape=(1, 1, 10, 10)):
        super().__init__(output_size=output_size,
                         genome=genome,
                         input_shape=input_shape)
        self.noise_size = int(np.prod(self.input_shape[1:]))
        self.inception_score_mean = 0
        self.fid_score = None
        self.rmse_score = None

        if genome is None:
            if config.gan.generator.fixed:
                self.genome = Genome(
                    random=False,
                    add_layer_prob=0,
                    rm_layer_prob=0,
                    gene_mutation_prob=0,
                    simple_layers=config.gan.generator.simple_layers,
                    linear_at_end=False)
                self.genome.add(
                    Linear(4 * int(np.prod(output_size)),
                           activation_type="ReLU"))
                # self.genome.add(Linear(4*int(np.prod(output_size)), activation_type="LeakyReLU"))
                if not config.gan.generator.simple_layers:
                    self.genome.add(Deconv2d(128, activation_type="ReLU"))
                    self.genome.add(Deconv2d(64, activation_type="ReLU"))
                    self.genome.add(Deconv2d(32, activation_type="ReLU"))
                    self.genome.add(Deconv2d(16, activation_type="ReLU"))
                    # self.genome.add(Deconv2d(8, activation_type="ReLU"))
            else:
                self.genome = Genome(
                    random=not config.evolution.sequential_layers,
                    linear_at_end=False)
                self.genome.possible_genes = [
                    g for g in self.genome.possible_genes if g[0] != Conv2d
                ]
                # IMPORTANT: the performance without a linear layer is pretty bad
                self.genome.add(Linear(512))
                # self.genome.add_random_gene()
            if config.gan.generator.simple_layers:
                # self.genome.output_genes = [Deconv2d(output_size[0], activation_type="Tanh")]
                self.genome.output_genes = [
                    Linear(int(np.prod(output_size)),
                           activation_type="Tanh",
                           normalize=False)
                ]
            else:
                self.genome.output_genes = [
                    Deconv2d(output_size[0],
                             activation_type="Tanh",
                             normalize=False)
                ]
Code Example #6
File: generator.py Project: vfcosta/coegan-trained
    def __init__(self,
                 output_size=(1, 28, 28),
                 genome=None,
                 input_shape=(1, config.gan.latent_dim),
                 optimizer_conf=None):
        super().__init__(output_size=output_size,
                         genome=genome,
                         input_shape=input_shape)
        self.noise_size = int(np.prod(self.input_shape[1:]))
        self.inception_score_mean = 0
        self.fid_score = None
        self.rmse_score = None
        self.optimizer_conf = optimizer_conf or config.gan.generator.optimizer
        deconv2d_class = Deconv2dUpsample if config.layer.deconv2d.use_upsample else Deconv2d

        if genome is None:
            if config.gan.generator.fixed:
                self.genome = Genome(random=False,
                                     add_layer_prob=0,
                                     rm_layer_prob=0,
                                     gene_mutation_prob=0,
                                     mutate_gan_type_prob=0,
                                     linear_at_end=False)
                self.genome.add(
                    deconv2d_class(128,
                                   stride=1,
                                   activation_type="LeakyReLU",
                                   activation_params={"negative_slope": 0.2}))
                self.genome.add(
                    deconv2d_class(64,
                                   stride=1,
                                   activation_type="LeakyReLU",
                                   activation_params={"negative_slope": 0.2}))
            else:
                self.genome = Genome(
                    random=not config.evolution.sequential_layers,
                    linear_at_end=False)
                self.genome.possible_genes = [
                    (getattr(evolution, l), {})
                    for l in config.gan.generator.possible_layers
                ]
            self.genome.input_genes = [
                Linear(8 * int(np.prod(output_size)),
                       activation_type=None,
                       normalize=False)
            ]
            deconv_out = Deconv2d if config.gan.generator.fixed else Deconv2dUpsample
            self.genome.output_genes = [
                deconv_out(output_size[0],
                           size=output_size[-2:],
                           activation_type="Tanh",
                           normalize=False)
            ]
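Note: Deconv2dUpsample presumably wraps the common upsample-then-convolve pattern, an alternative to ConvTranspose2d that avoids checkerboard artifacts. A minimal sketch of that pattern (kernel size and padding are assumptions):

import torch
import torch.nn as nn

up_block = nn.Sequential(
    nn.Upsample(scale_factor=2, mode="nearest"),  # double the spatial resolution
    nn.Conv2d(64, 32, kernel_size=3, padding=1),  # then learn the filtering
    nn.LeakyReLU(0.2),
)
print(up_block(torch.randn(5, 64, 10, 10)).shape)  # torch.Size([5, 32, 20, 20])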
Code Example #7
    def test_crossover_phenotype(self):
        # create and train the first genotype
        x = Variable(torch.randn(5, 32 * 32)).view(5, 1, 32, 32)
        self.genome.crossover_rate = 1
        self.genome.add(Conv2d(16))
        self.genome.add(Linear(32))
        self.genome.output_genes.append(Linear(activation_type='Sigmoid'))
        self.phenotype.create_model(x)
        self.train_step(self.phenotype, x)
        # create the mate genotype
        mate = Genome()
        mate.add(Conv2d(8))
        mate.add(Linear(16))
        phenotype_mate = Phenotype(1, mate)
        mate.output_genes.append(Linear(activation_type='Sigmoid'))
        phenotype_mate.create_model(x)
        self.train_step(phenotype_mate, x)
        # breed with crossover
        child = self.phenotype.breed(skip_mutation=True, mate=phenotype_mate)
        # verify that the weights were copied
        self.assertTrue(mate.genes[0].module.weight.equal(
            child.genome.genes[0].module.weight))
        old_state = phenotype_mate.optimizer.state[
            phenotype_mate.optimizer.param_groups[0]['params'][0]]
        new_state = child.optimizer.state[
            child.optimizer.param_groups[0]['params'][0]]
        self.assertTrue(old_state['exp_avg'].equal(new_state['exp_avg']))
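These assertions rely on Tensor.equal, which checks element-wise value equality; the suite pairs it elsewhere with identity checks (assertIsNot) to confirm the child received its own copy rather than a shared buffer. In plain PyTorch:

import torch

a = torch.ones(2, 2)
b = a.clone()
assert a.equal(b)   # same values...
assert a is not b   # ...but distinct tensors, so training one won't move the other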
Code Example #8
File: discriminator.py Project: vfcosta/qd-coegan
    def __init__(self, output_size=1, genome=None, input_shape=None, optimizer_conf=config.gan.discriminator.optimizer):
        super().__init__(output_size=output_size, genome=genome, input_shape=input_shape)
        self.output_size = output_size
        self.optimizer_conf = optimizer_conf

        if genome is None:
            if config.gan.discriminator.fixed:
                self.genome = Genome(random=False, add_layer_prob=0, rm_layer_prob=0, gene_mutation_prob=0,
                                     simple_layers=config.gan.discriminator.simple_layers)
                if not config.gan.discriminator.simple_layers:
                    self.genome.add(Conv2d(64, stride=2, activation_type="LeakyReLU", activation_params={"negative_slope": 0.2}))
                    self.genome.add(Conv2d(128, stride=2, activation_type="LeakyReLU", activation_params={"negative_slope": 0.2}))
                    self.genome.add(Conv2d(256, stride=2, activation_type="LeakyReLU", activation_params={"negative_slope": 0.2}))
            else:
                self.genome = Genome(random=not config.evolution.sequential_layers)
                self.genome.possible_genes = [(getattr(evolution, l), {}) for l in config.gan.discriminator.possible_layers]
                # self.genome.add_random_gene()
                self.genome.add(Conv2d())

            if config.gan.type == "gan":
                self.genome.output_genes = [Linear(1, activation_type="Sigmoid", normalize=False)]
            else:
                self.genome.output_genes = [Linear(1, activation_type=None, normalize=False)]
Code Example #9
    def test_different_genome_setup(self):
        self.genome.add(Conv2d(32, activation_type="LeakyReLU"))
        self.genome.add(Conv2d(32))
        self.genome.add(Linear(16))
        other = Genome()
        other.add(Conv2d(32, activation_type="ReLU"))
        other.add(Conv2d(64))
        other.add(Linear(16))
        x = Variable(torch.randn(5, 32*32)).view(5, 1, 32, 32)
        self.phenotype.create_model(x)
        other_phenotype = Phenotype(1, other)
        other_phenotype.optimizer_conf = self.phenotype.optimizer_conf
        other_phenotype.create_model(x)
        self.assertEqual(0.4, self.genome.distance(other, mode="num_genes"))
Code Example #10
    def test_crossover(self):
        # create and train the first genotype
        x = Variable(torch.randn(5, 32*32)).view(5, 1, 32, 32)
        self.genome.crossover_rate = 1
        self.genome.add(Conv2d(16))
        self.genome.add(Linear(32))
        self.genome.add(Linear(16))
        self.genome.output_genes.append(Linear(activation_type='Sigmoid'))
        # create the mate genotype
        mate = Genome()
        mate.add(Conv2d(4))
        mate.add(Conv2d(8))
        mate.add(Linear(activation_type="ReLU"))
        mate.add(Linear(16))
        # apply crossover
        self.genome.crossover(mate)
        # check that the layers match the expected sequence
        self.assertEqual([Conv2d, Conv2d, Linear, Linear], [gene.__class__ for gene in self.genome.genes])
        # evaluate the created model
        self.evaluate_model([5, 1, 32, 32])
        self.phenotype.create_model(x)
        self.train_step(self.phenotype, x)
Code Example #11
class TestEvolution(unittest.TestCase):

    def setUp(self):
        self.genome = Genome()
        self.phenotype = Phenotype(1, self.genome)
        config.gan.generator.optimizer.copy_optimizer_state = True
        config.gan.generator.optimizer.type = "Adam"
        config.gan.discriminator.optimizer = config.gan.generator.optimizer
        config.evolution.sequential_layers = True
        self.phenotype.optimizer_conf = config.gan.generator.optimizer

    def evaluate_model(self, input_shape):
        x = Variable(torch.randn(input_shape[0], int(np.prod(input_shape[1:]))))  # create some input data
        x = x.view(input_shape)  # convert to 4d (batch_size, channels, w, h)
        model = self.phenotype.transform_genotype(x)
        return model(x)

    def train_step(self, phenotype, x):
        out = phenotype.model(x)
        if out.view(-1).size(0) == out.size(0):
            error = phenotype.criterion(out.view(-1), Variable(torch.ones(out.size(0))))
            error.backward()
        else:
            print("out sizes don't match", out.view(-1).size(0), out.size(0))
        phenotype.optimizer.step()

    def test_valid_genotype(self):
        for i in range(3):
            self.genome.add(Linear(5))
        x = Variable(torch.randn(5, 64))  # create some input data
        model = self.phenotype.transform_genotype(x)
        self.assertEqual(3, len(model))

    def test_valid_phenotype(self):
        self.genome.add(Linear(32))
        self.genome.add(Linear(16))
        self.genome.add(Linear(1))
        out = self.evaluate_model([5, 64])
        self.assertEqual((5, 1), out.shape)

    def test_valid_phenotype_activation(self):
        self.genome.add(Linear(32))
        self.genome.add(Linear())
        self.genome.add(Linear(16))
        x = Variable(torch.randn(5, 64))  # create some input data
        self.phenotype.output_size = 16
        model = self.phenotype.transform_genotype(x)
        out = model(x)  # run pytorch module to check if everything is ok
        self.assertEqual((5, 16), out.shape)

    def test_adjust_last_linear(self):
        self.genome.linear_at_end = False
        self.genome.add(Linear(16))
        x = Variable(torch.randn(5, 64))  # create some input data
        model = self.phenotype.transform_genotype(x)
        out = model(x)  # run pytorch module to check if everything is ok
        self.assertEqual((5, 1), out.shape)
        self.genome.add(Linear(4))
        self.phenotype.transform_genotype(x)
        self.assertEqual(16, self.genome.genes[0].out_features)

    def test_valid_phenotype_activation_leakyrelu(self):
        self.genome.add(Linear(32))
        self.genome.add(Linear(activation_type="LeakyReLU", activation_params={"negative_slope": 0.2, "inplace": True}))
        self.genome.add(Linear(16))
        x = Variable(torch.randn(5, 64))  # create some input data
        self.phenotype.output_size = 16
        model = self.phenotype.transform_genotype(x)
        out = model(x)  # run pytorch module to check if everything is ok
        self.assertEqual((5, 16), out.shape)

    def test_add_random_gene(self):
        for i in range(10):
            self.genome.add_random_gene()
        self.genome.add(Linear(1))
        x = Variable(torch.randn(5, 1*32*32))  # create some input data
        x = x.view(5, 1, 32, 32)
        model = self.phenotype.transform_genotype(x)
        out = model(x)  # run pytorch module to check if everything is ok
        self.assertEqual((5, 1), out.shape)

    def test_reuse_weights(self):
        x = Variable(torch.randn(5, 64))
        self.genome.add(Conv2d(2))
        self.genome.add(Linear(16))
        x = x.view(5, 1, 8, 8)
        self.phenotype.create_model(x)
        self.genome.rm_layer_prob = 0
        self.genome.add_layer_prob = 0
        self.genome.gene_mutation_prob = 0
        phenotype2 = self.phenotype.breed()
        self.assertTrue(self.genome.genes[0].module.weight.equal(phenotype2.genome.genes[0].module.weight))
        self.assertTrue(self.genome.genes[1].module.weight.equal(phenotype2.genome.genes[1].module.weight))

    def test_breed_phenotype_with_new_layer(self):
        x = Variable(torch.randn(5, 64)).view(5, 1, 8, 8)
        self.genome.add(Conv2d(2))
        self.genome.add(Linear(16))
        self.genome.output_genes.append(Linear(activation_type='Sigmoid'))
        self.phenotype.create_model(x)
        self.train_step(self.phenotype, x)
        self.genome.add_layer_prob = 1
        self.genome.rm_layer_prob = 0
        self.genome.gene_mutation_prob = 0
        phenotype2 = self.phenotype.breed()
        old_state = self.phenotype.optimizer.state[self.phenotype.optimizer.param_groups[0]['params'][0]]
        new_state = phenotype2.optimizer.state[phenotype2.optimizer.param_groups[0]['params'][0]]
        self.assertTrue(old_state['exp_avg'].equal(new_state['exp_avg']))
        self.assertIsNot(old_state['exp_avg'], new_state['exp_avg'])
        self.train_step(phenotype2, x)

    def test_not_copy_optimizer_new_module(self):
        x = Variable(torch.randn(5, 64)).view(5, 1, 8, 8)
        self.genome.add(Conv2d(2))
        self.genome.add(Linear(16))
        self.genome.output_genes.append(Linear(activation_type='Sigmoid'))
        self.phenotype.create_model(x)
        self.train_step(self.phenotype, x)
        self.genome.add_layer_prob = 0
        conv2d_module = self.genome.genes[0].module
        self.genome.genes[0].module = None
        phenotype2 = self.phenotype.breed()
        old_state = self.phenotype.optimizer.state[self.phenotype.optimizer.param_groups[0]['params'][0]]
        new_state = phenotype2.optimizer.state[phenotype2.optimizer.param_groups[0]['params'][0]]
        self.assertEqual(0, phenotype2.genome.genes[0].used)
        self.assertFalse(conv2d_module.weight.equal(phenotype2.genome.genes[0].module.weight))
        self.assertIn('exp_avg', old_state)
        self.assertNotIn('exp_avg', new_state)

    def test_not_share_reused_weights(self):
        x = Variable(torch.randn(5, 64)).view(5, 1, 8, 8)
        self.genome.add(Conv2d(2))
        self.genome.add(Linear(16))
        self.genome.output_genes.append(Linear(activation_type='Sigmoid'))
        self.phenotype.create_model(x)
        self.train_step(self.phenotype, x)
        self.genome.add_layer_prob = 0
        self.genome.gene_mutation_prob = 0
        phenotype2 = self.phenotype.breed(skip_mutation=True)
        self.train_step(phenotype2, x)
        self.assertIsNot(self.genome.genes[0].module, phenotype2.genome.genes[0].module)
        self.assertFalse(self.genome.genes[0].module.weight.equal(phenotype2.genome.genes[0].module.weight))
        self.assertFalse(self.genome.genes[1].module.weight.equal(phenotype2.genome.genes[1].module.weight))

    def test_reset_module_when_changed(self):
        x = Variable(torch.randn(5, 64)).view(5, 1, 8, 8)
        self.genome.add(Conv2d(2))
        self.genome.add(Linear(16))
        self.phenotype.transform_genotype(x)
        genome = copy.deepcopy(self.genome)
        genome.genes.insert(0, Conv2d(3))
        genome.add(Linear(32))
        self.phenotype.genome = genome
        self.phenotype.transform_genotype(x)
        self.assertFalse(self.genome.genes[0].module.weight.equal(genome.genes[0].module.weight))
        self.assertFalse(self.genome.genes[1].module.weight.equal(genome.genes[2].module.weight))

    def test_conv2d_phenotype(self):
        self.genome.add(Conv2d(3))
        self.genome.add(Conv2d(6))
        self.genome.add(Linear(1))
        x = Variable(torch.randn(5, 144))  # create some input data
        x = x.view(5, 1, 12, 12)  # convert to 4d (batch_size, channels, w, h)
        model = self.phenotype.transform_genotype(x)
        out = model(x)  # run pytorch module to check if everything is ok
        self.assertEqual((5, 1), out.shape)

    def test_linear_after_conv2d(self):
        self.genome.add(Conv2d(1))
        self.genome.add(Linear(32))
        self.genome.add(Conv2d(3))
        self.assertEqual([Conv2d, Conv2d, Linear], [gene.__class__ for gene in self.genome.genes])
        self.evaluate_model([5, 1, 12, 12])

    def test_first_linear_after_conv2d(self):
        self.genome.add(Linear(32))
        self.genome.add(Conv2d(3))
        self.assertEqual([Conv2d, Linear], [gene.__class__ for gene in self.genome.genes])
        self.evaluate_model([5, 3, 5, 5])

    def test_complex_graph(self):
        self.genome.add(Linear(32))
        self.genome.add(Linear(activation_type="ReLU"))
        self.genome.add(Conv2d(3))
        self.genome.add(Dropout())
        self.assertEqual([Conv2d, Linear, Linear, Dropout], [gene.__class__ for gene in self.genome.genes])
        self.evaluate_model([5, 3, 5, 5])

    def test_complex_graph2(self):
        self.genome.add(Conv2d(1))
        self.genome.add(Linear(128))
        self.evaluate_model([5, 1, 28, 28])

    def test_zero_output(self):
        self.genome.add(Conv2d(1, kernel_size=3))
        self.genome.add(Conv2d(6, kernel_size=3))
        self.genome.add(Conv2d(8, kernel_size=3))
        self.genome.add(Conv2d(5, kernel_size=3))
        self.evaluate_model([5, 1, 5, 5])

    def test_2d_after_linear(self):
        self.phenotype.output_size = (1, 32, 32)
        self.genome.linear_at_end = False
        self.genome.add(Linear(32*32))
        self.genome.add(Deconv2d(1))
        self.genome.add(Linear(32*32*3))
        self.genome.add(Deconv2d(4))
        self.assertEqual([Linear, Linear, Deconv2d, Deconv2d], [gene.__class__ for gene in self.genome.genes])
        self.evaluate_model([8, 1, 32, 32])
        x = Variable(torch.randn(8, 32*32))
        self.phenotype.create_model(x)
        self.train_step(self.phenotype, x)

    def test_linear_at_end_true(self):
        self.genome.linear_at_end = True
        self.genome.random = False
        self.genome.add(Conv2d(1))
        self.genome.add(Conv2d(3))
        self.genome.add(Conv2d(6))
        self.genome.add(Linear(activation_type="Sigmoid"))
        self.assertEqual([Conv2d, Conv2d, Conv2d, Linear], [gene.__class__ for gene in self.genome.genes])
        self.assertEqual([1, 3, 6], [self.genome.genes[i].out_channels for i in range(3)])

    def test_linear_at_end_false(self):
        self.genome.linear_at_end = False
        self.genome.random = False
        self.genome.add(Conv2d(1))
        self.genome.add(Conv2d(3))
        self.genome.add(Linear(activation_type="Sigmoid"))
        self.genome.add(Conv2d(6))
        self.assertEqual([Linear, Conv2d, Conv2d, Conv2d], [gene.__class__ for gene in self.genome.genes])
        self.assertEqual([1, 3, 6], [self.genome.genes[i].out_channels for i in range(1, 4)])

    def test_linear_at_end(self):
        self.genome.linear_at_end = False
        self.genome.add(Linear(10))
        self.genome.add(Linear(activation_type="Sigmoid"))
        self.assertEqual([Linear, Linear], [gene.__class__ for gene in self.genome.genes])
        self.evaluate_model([5, 1, 12, 12])

    def test_random_genome(self):
        self.genome.random = True
        self.genome.add(Linear(16))
        self.genome.add(Linear(32))
        self.genome.add(Linear(1), force_sequence=True)
        self.genome.add(Conv2d(3, kernel_size=3))
        self.genome.add(Conv2d(6, kernel_size=3))
        self.genome.add(Conv2d(9, kernel_size=3), force_sequence=True)
        self.assertEqual([Conv2d, Conv2d, Conv2d, Linear, Linear, Linear], [gene.__class__ for gene in self.genome.genes])
        self.assertSetEqual(set([3, 6, 9]), set([self.genome.genes[i].out_channels for i in range(3)]))
        self.assertSetEqual(set([16, 32, 1]), set([self.genome.genes[i].out_features for i in range(3, 6)]))
        self.evaluate_model([5, 3, 5, 5])

    def test_limit_layers(self):
        self.genome.max_layers = 2
        self.genome.add_layer_prob = 1
        self.genome.rm_layer_prob = 0
        for i in range(3):
            self.genome.mutate()
        self.assertEqual(self.genome.max_layers, len(self.genome.genes))

    def test_equal_genome_distance(self):
        self.genome.add(Linear(32))
        self.genome.add(Linear(16))
        other = copy.deepcopy(self.genome)
        self.assertEqual(0, self.genome.distance(other))

    def test_different_genome_distance(self):
        self.genome.add(Linear(32))
        self.genome.add(Linear(16))
        other = copy.deepcopy(self.genome)
        other.add(Linear(8))
        other.add(Linear(1))
        self.assertEqual(2, self.genome.distance(other, mode="num_genes"))

    def test_different_genome_setup(self):
        self.genome.add(Conv2d(32, activation_type="LeakyReLU"))
        self.genome.add(Conv2d(32))
        self.genome.add(Linear(16))
        other = Genome()
        other.add(Conv2d(32, activation_type="ReLU"))
        other.add(Conv2d(64))
        other.add(Linear(16))
        x = Variable(torch.randn(5, 32*32)).view(5, 1, 32, 32)
        self.phenotype.create_model(x)
        other_phenotype = Phenotype(1, other)
        other_phenotype.optimizer_conf = self.phenotype.optimizer_conf
        other_phenotype.create_model(x)
        self.assertEqual(0.4, self.genome.distance(other, mode="num_genes"))

    def test_equal_genome_distance_after_breed(self):
        self.genome.add(Linear(32))
        self.genome.add(Linear(16))
        self.genome.output_genes.append(Linear(activation_type='Sigmoid'))
        x = Variable(torch.randn(5, 64))
        self.phenotype.create_model(x)
        self.train_step(self.phenotype, x)
        phenotype2 = self.phenotype.breed(skip_mutation=True)
        self.assertEqual(0, self.genome.distance(phenotype2.genome))

    def test_crossover(self):
        # create and train the first genotype
        x = Variable(torch.randn(5, 32*32)).view(5, 1, 32, 32)
        self.genome.crossover_rate = 1
        self.genome.add(Conv2d(16))
        self.genome.add(Linear(32))
        self.genome.add(Linear(16))
        self.genome.output_genes.append(Linear(activation_type='Sigmoid'))
        # create the mate genotype
        mate = Genome()
        mate.add(Conv2d(4))
        mate.add(Conv2d(8))
        mate.add(Linear(activation_type="ReLU"))
        mate.add(Linear(16))
        # apply crossover
        self.genome.crossover(mate)
        # check that the layers match the expected sequence
        self.assertEqual([Conv2d, Conv2d, Linear, Linear], [gene.__class__ for gene in self.genome.genes])
        # evaluate the created model
        self.evaluate_model([5, 1, 32, 32])
        self.phenotype.create_model(x)
        self.train_step(self.phenotype, x)

    def test_crossover_phenotype(self):
        # create and train the first genotype
        x = Variable(torch.randn(5, 32*32)).view(5, 1, 32, 32)
        self.genome.crossover_rate = 1
        self.genome.add(Conv2d(16))
        self.genome.add(Linear(32))
        self.genome.output_genes.append(Linear(activation_type='Sigmoid'))
        self.phenotype.create_model(x)
        self.train_step(self.phenotype, x)
        # create the mate genotype
        mate = Genome()
        mate.add(Conv2d(8))
        mate.add(Linear(16))
        phenotype_mate = Phenotype(1, mate, optimizer_conf=self.phenotype.optimizer_conf)
        mate.output_genes.append(Linear(activation_type='Sigmoid'))
        phenotype_mate.create_model(x)
        self.train_step(phenotype_mate, x)
        # breed with crossover
        child = self.phenotype.breed(skip_mutation=True, mate=phenotype_mate)
        # verify that the weights were copied
        self.assertTrue(mate.genes[0].module.weight.equal(child.genome.genes[0].module.weight))
        old_state = phenotype_mate.optimizer.state[phenotype_mate.optimizer.param_groups[0]['params'][0]]
        new_state = child.optimizer.state[child.optimizer.param_groups[0]['params'][0]]
        self.assertTrue(old_state['exp_avg'].equal(new_state['exp_avg']))

    def test_crossover_empty(self):
        # create and train the first genotype
        x = Variable(torch.randn(5, 32*32)).view(5, 1, 32, 32)
        self.genome.add(Conv2d(8))
        self.genome.output_genes.append(Linear(activation_type='Sigmoid'))
        # create the mate genotype
        mate = Genome(crossover_rate=1)
        mate.add(Linear(16))
        # apply crossover
        mate.crossover(self.genome)
        # check that the layers match the expected sequence
        self.assertEqual([Conv2d, Linear], [gene.__class__ for gene in mate.genes])
        # evaluate the created model
        self.evaluate_model([5, 1, 32, 32])
        self.phenotype.create_model(x)
        self.train_step(self.phenotype, x)

    def test_convert_phenotype_to_json(self):
        self.genome.add(Conv2d(4, activation_type="ReLU"))
        self.genome.add(Linear(32))
        self.genome.add(Linear(16))
        self.evaluate_model([5, 1, 8, 8])
        layers = json.loads(self.phenotype.to_json())
        self.assertEqual("ReLU", layers[0]["activation_type"])
        self.assertEqual(3, len(layers))
        self.assertEqual(["Conv2d", "Linear", "Linear"], [l["type"] for l in layers])

    def test_invalid_graph(self):
        self.phenotype.output_size = (1, 28, 28)
        self.genome.linear_at_end = False
        self.genome.add(Linear(1568))
        self.genome.add(Deconv2d(32))
        self.genome.add(Deconv2d(32))
        self.genome.add(Deconv2d(32))
        self.genome.add(Deconv2d(32))
        self.genome.add(Deconv2d(32))
        self.genome.output_genes.append(Deconv2d(1))
        x = Variable(torch.randn(5, 100)).view(5, 1, 10, 10)
        self.assertRaises(Exception, self.phenotype.create_model, x)

    def test_multiple_deconv2d(self):
        self.phenotype.output_size = (1, 28, 28)
        self.genome.linear_at_end = False
        self.genome.add(Linear(1568))
        self.genome.add(Deconv2d(32, kernel_size=3))
        self.genome.add(Deconv2d(32, kernel_size=3))
        self.genome.add(Deconv2d(32, kernel_size=3))
        self.genome.output_genes.append(Deconv2d(1))
        x = Variable(torch.randn(5, 100)).view(5, 1, 10, 10)
        self.phenotype.create_model(x)
        out = self.phenotype.model(x)
        self.assertEqual([28, 28], list(out.size()[2:]))

    def test_multiple_deconv2d_outchannels(self):
        self.phenotype.output_size = (1, 28, 28)
        self.genome.linear_at_end = False
        self.genome.add(Linear(576))
        self.genome.add(Deconv2d(64, kernel_size=3))
        self.genome.add(Deconv2d(32, kernel_size=3))
        self.genome.output_genes.append(Deconv2d(1))
        x = Variable(torch.randn(5, 100)).view(5, 1, 10, 10)
        self.phenotype.create_model(x)
        self.genome.add(Deconv2d())
        config.layer.conv2d.random_out_channels = False
        model = self.phenotype.transform_genotype(x)
        out = model(x)
        self.assertEqual(self.genome.genes[-2].out_channels//2, self.genome.genes[-1].out_channels)
        self.assertEqual([28, 28], list(out.size()[2:]))

    def test_simple_deconv2d(self):
        self.phenotype.output_size = (1, 28, 28)
        self.genome.linear_at_end = False
        self.genome.add(Deconv2d(32))
        self.genome.output_genes.append(Deconv2d(1))
        x = Variable(torch.randn(5, 100)).view(5, 1, 10, 10)
        self.phenotype.create_model(x)
        out = self.phenotype.model(x)
        self.assertEqual([28, 28], list(out.size()[2:]))

    def test_simple_deconv2d_32(self):
        self.phenotype.output_size = (3, 32, 32)
        self.genome.linear_at_end = False
        self.genome.add(Deconv2d(32))
        self.genome.output_genes.append(Deconv2d(3))
        x = Variable(torch.randn(5, 100)).view(5, 1, 10, 10)
        self.phenotype.create_model(x)
        out = self.phenotype.model(x)
        self.assertEqual([32, 32], list(out.size()[2:]))

    def test_simple_deconv2d_64(self):
        self.phenotype.output_size = (3, 64, 64)
        self.genome.linear_at_end = False
        self.genome.add(Deconv2d(32))
        self.genome.output_genes.append(Deconv2d(3))
        x = Variable(torch.randn(5, 100)).view(5, 1, 10, 10)
        self.phenotype.create_model(x)
        out = self.phenotype.model(x)
        self.assertEqual([64, 64], list(out.size()[2:]))

    def test_freeze_weight(self):
        config.gan.generator.optimizer.type = "RMSprop"
        self.genome.add(Conv2d(16))
        self.genome.add(Linear(32))
        self.genome.add(Linear(16))
        self.genome.output_genes.append(Linear(out_features=1, activation_type='Sigmoid'))
        x = Variable(torch.randn(5, 64)).view(5, 1, 8, 8)
        self.phenotype.create_model(x)
        self.train_step(self.phenotype, x)
        weight = self.genome.genes[0].module.weight.clone()
        self.train_step(self.phenotype, x)
        self.assertFalse(weight.equal(self.genome.genes[0].module.weight))
        config.evolution.freeze_when_change = True
        for gene in self.genome.genes:
            weight, bias = gene.module.weight.clone(), gene.module.bias.clone()
            gene.freeze()
            self.train_step(self.phenotype, x)
            self.assertTrue(weight.equal(gene.module.weight))
            self.assertTrue(bias.equal(gene.module.bias))
            gene.unfreeze()
            self.train_step(self.phenotype, x)
            self.assertFalse(weight.equal(gene.module.weight))
            self.assertFalse(bias.equal(gene.module.bias))

    def test_deconv_output_channels(self):
        self.phenotype.output_size = (1, 28, 28)
        self.genome.linear_at_end = False
        self.genome.add(Linear())
        self.genome.add(Deconv2d(32))
        self.genome.add(Deconv2d(16))
        self.genome.add(Deconv2d(8))
        self.genome.add(Deconv2d(4))
        x = Variable(torch.randn(1, 100))
        model = self.phenotype.transform_genotype(x)
        print(model)
        out = model(x)
        self.assertEqual([1, 1, 28, 28], list(out.size()))

    def test_add_deconv_not_random_out(self):
        config.layer.conv2d.random_out_channels = False
        self.phenotype.output_size = (1, 28, 28)
        self.genome.linear_at_end = False
        self.genome.add(Linear())
        self.genome.add(Deconv2d())
        x = Variable(torch.randn(8, 100))
        model = self.phenotype.transform_genotype(x)
        print(model)
        out = model(x)
        self.assertEqual([8, 1, 28, 28], list(out.size()))
        self.genome.add(Deconv2d())
        model = self.phenotype.transform_genotype(x)
        print(model)
        out = model(x)
        self.assertEqual([8, 1, 28, 28], list(out.size()))

    def test_breed_copy_skill_rating(self):
        x = Variable(torch.randn(5, 64)).view(5, 1, 8, 8)
        self.genome.add(Conv2d(2))
        self.genome.add(Linear(16))
        self.genome.output_genes.append(Linear(activation_type='Sigmoid'))
        self.phenotype.create_model(x)
        self.phenotype.skill_rating.mu = 2000
        phenotype2 = self.phenotype.breed()
        self.assertEqual(self.phenotype.skill_rating.mu, phenotype2.skill_rating.mu)
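For orientation, the train_step helper above is the standard PyTorch update loop. A minimal equivalent, assuming a BCE-with-logits criterion (the suite does not pin phenotype.criterion down):

import torch
import torch.nn as nn

model = nn.Linear(64, 1)
optimizer = torch.optim.Adam(model.parameters())
criterion = nn.BCEWithLogitsLoss()

x = torch.randn(5, 64)
out = model(x)
loss = criterion(out.view(-1), torch.ones(out.size(0)))  # all-ones targets, as in train_step
loss.backward()
optimizer.step()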
Code Example #12
File: generator.py Project: vfcosta/coegan-trained
class Generator(Phenotype):

    fid_noise = None
    real_labels = None
    fake_labels = None
    selected_loss = None
    noise_images = None

    def __init__(self,
                 output_size=(1, 28, 28),
                 genome=None,
                 input_shape=(1, config.gan.latent_dim),
                 optimizer_conf=None):
        super().__init__(output_size=output_size,
                         genome=genome,
                         input_shape=input_shape)
        self.noise_size = int(np.prod(self.input_shape[1:]))
        self.inception_score_mean = 0
        self.fid_score = None
        self.rmse_score = None
        self.optimizer_conf = optimizer_conf or config.gan.generator.optimizer
        deconv2d_class = Deconv2dUpsample if config.layer.deconv2d.use_upsample else Deconv2d

        if genome is None:
            if config.gan.generator.fixed:
                self.genome = Genome(random=False,
                                     add_layer_prob=0,
                                     rm_layer_prob=0,
                                     gene_mutation_prob=0,
                                     mutate_gan_type_prob=0,
                                     linear_at_end=False)
                self.genome.add(
                    deconv2d_class(128,
                                   stride=1,
                                   activation_type="LeakyReLU",
                                   activation_params={"negative_slope": 0.2}))
                self.genome.add(
                    deconv2d_class(64,
                                   stride=1,
                                   activation_type="LeakyReLU",
                                   activation_params={"negative_slope": 0.2}))
            else:
                self.genome = Genome(
                    random=not config.evolution.sequential_layers,
                    linear_at_end=False)
                self.genome.possible_genes = [
                    (getattr(evolution, l), {})
                    for l in config.gan.generator.possible_layers
                ]
            self.genome.input_genes = [
                Linear(8 * int(np.prod(output_size)),
                       activation_type=None,
                       normalize=False)
            ]
            deconv_out = Deconv2d if config.gan.generator.fixed else Deconv2dUpsample
            self.genome.output_genes = [
                deconv_out(output_size[0],
                           size=output_size[-2:],
                           activation_type="Tanh",
                           normalize=False)
            ]

    def forward(self, x):
        out = super().forward(x)
        if out is not None and len(out.size()) == 2:
            out = out.view(out.size(0), *self.output_size)
        return out

    def train_step(self, D, images):
        self.inception_score_mean = 0
        batch_size = images.size(0)
        # 2. Train G on D's response (but DO NOT train D on these labels)
        self.zero_grad()

        if self.real_labels is None:
            self.real_labels = tools.cuda(Tensor(torch.ones(batch_size)))
            self.real_labels = self.real_labels * 0.9 if config.gan.label_smoothing else self.real_labels

        if self.fake_labels is None:
            self.fake_labels = tools.cuda(Tensor(torch.zeros(images.size(0))))
            self.fake_labels = self.fake_labels + 0.1 if config.gan.label_smoothing else self.fake_labels

        error, decision = self.loss(D, images)
        error.backward()
        self.optimizer.step()  # Only optimizes G's parameters
        self.calc_metrics(D, error.item(), decision, images)
        return error.item()

    def loss(self, D, images, gen_input=None):
        if gen_input is None:
            gen_input = self.generate_noise(images.size(0))
        fake_data = self(gen_input)
        fake_decision = D(fake_data)
        loss_function = getattr(self, f"loss_{self.genome.gan_type}")
        error = loss_function(D, fake_decision, images)
        return error, fake_decision

    def loss_wgan(self, D, fake_decision, images):
        return -fake_decision.mean()

    def loss_wgan_gp(self, D, fake_decision, images):
        return self.loss_wgan(D, fake_decision, images)

    def loss_rsgan(self, D, fake_decision, images):
        real_decision = D(images)
        return binary_cross_entropy_with_logits(
            fake_decision.view(-1) - real_decision.view(-1), self.real_labels)

    def loss_rasgan(self, D, fake_decision, images):
        real_decision = D(images)
        error = (binary_cross_entropy_with_logits(
            real_decision.view(-1) - torch.mean(fake_decision.view(-1)),
            self.fake_labels) + binary_cross_entropy_with_logits(
                fake_decision.view(-1) - torch.mean(real_decision.view(-1)),
                self.real_labels)) / 2
        return error

    def loss_lsgan(self, D, fake_decision, images):
        return torch.mean((fake_decision - 1)**2)

    def loss_gan(self, D, fake_decision, images):
        return binary_cross_entropy_with_logits(fake_decision.view(-1),
                                                self.real_labels)

    def loss_hinge(self, D, fake_decision, images):
        return -fake_decision.mean()

    def eval_step(self, D, images):
        error, decision = self.loss(D, images)
        self.calc_metrics(D, error.item(), decision, images)
        return error.item()

    def calc_global_metrics(self, best_discriminators, images):
        if Generator.noise_images is None:
            Generator.noise_images = tools.cuda(
                torch.FloatTensor(len(images),
                                  *images[0].shape).uniform_(-1, 1))
        D = tools.cuda(best_discriminators[0])
        labels = tools.cuda(Tensor(torch.ones(images.size(0))))
        fake_labels = tools.cuda(Tensor(torch.zeros(images.size(0))))
        if config.evolution.fitness.generator.startswith("validation_loss_"):
            loss_function = getattr(self,
                                    config.evolution.fitness.generator[11:])
            fake_data = self(self.generate_noise(images.size(0)))
            fake_decision = D(fake_data)
            error = loss_function(D, fake_decision, images).item()
            self.fitness_values = [error]
        elif config.evolution.fitness.generator == "rel_avg":
            with torch.no_grad():
                real_decision = D(images)
                fake_data = self(self.generate_noise(images.size(0)))
                fake_decision = D(fake_data)

                train_score = torch.mean(torch.sigmoid(real_decision))
                gen_score = torch.mean(torch.sigmoid(fake_decision))
                noise_score = torch.mean(
                    torch.sigmoid(D(Generator.noise_images)))
                d_conf = (1 + train_score - noise_score) / 2
                value = -d_conf * gen_score
                self.fitness_values = [value.item()]
        elif config.evolution.fitness.generator == "rel_avg2":
            with torch.no_grad():
                real_decision = D(images)
                fake_data = self(self.generate_noise(images.size(0)))
                fake_decision = D(fake_data)
                noise_decision = D(Generator.noise_images)
                error = (binary_cross_entropy_with_logits(
                    real_decision.view(-1) -
                    torch.mean(fake_decision.view(-1)), fake_labels) +
                         binary_cross_entropy_with_logits(
                             fake_decision.view(-1) -
                             torch.mean(real_decision.view(-1)), labels) +
                         binary_cross_entropy_with_logits(
                             noise_decision.view(-1) -
                             torch.mean(real_decision.view(-1)), labels)) / 3
                self.fitness_values = [error.item()]
        elif config.evolution.fitness.generator == "rel_avg3":
            with torch.no_grad():
                real_decision = D(images)
                fake_data = self(self.generate_noise(images.size(0)))
                fake_decision = D(fake_data)
                noise_decision = D(Generator.noise_images)
                mean_noise = torch.mean(noise_decision)
                error = (binary_cross_entropy_with_logits(
                    real_decision.view(-1) -
                    (torch.mean(fake_decision.view(-1)) + mean_noise) / 2,
                    fake_labels) + binary_cross_entropy_with_logits(
                        fake_decision.view(-1) -
                        (torch.mean(real_decision.view(-1)) + mean_noise) / 2,
                        labels)) / 2
                self.fitness_values = [error.item()]
        D.cpu()

    def calc_metrics(self, D, error, fake_decision, images):
        if config.evolution.fitness.discriminator == "rel_avg":
            pass
            # real_decision = D(images)
            # with torch.no_grad():
            #     c, w, h = D.input_shape[-3], D.input_shape[-2], D.input_shape[-1]
            #     if Generator.noise_images is None:
            #         Generator.noise_images = tools.cuda(torch.FloatTensor(real_decision.shape[0], c, w, h).uniform_(-1, 1))
            #     noise_score = torch.mean(torch.sigmoid(D(Generator.noise_images)))
            #     train_score = torch.mean(torch.sigmoid(real_decision))
            #     gen_score = torch.mean(torch.sigmoid(fake_decision))
            #     value = -train_score * gen_score * (1-noise_score)
            #     self.fitness_values.append(value.item())
        elif config.evolution.fitness.generator == "loss":
            self.fitness_values.append(error)
        elif config.evolution.fitness.generator.startswith("loss_"):
            loss_function = getattr(self, config.evolution.fitness.generator)
            error = loss_function(D, fake_decision, images).item()
            self.fitness_values.append(error)
        elif config.evolution.fitness.generator == "AUC":
            self.fitness_values.append(1 - accuracy_score(
                np.ones(fake_decision.size(0)),
                torch.sigmoid(fake_decision).detach().cpu() > 0.5))
        elif config.evolution.fitness.generator == "BCE":
            self.fitness_values.append(
                binary_cross_entropy_with_logits(fake_decision.squeeze(),
                                                 self.real_labels).item())
        elif config.evolution.fitness.generator == "random_loss":
            if Generator.selected_loss is None:
                Generator.selected_loss = np.random.choice(
                    config.gan.possible_gan_types)
                logger.info(
                    f"using random loss function as fitness: {Generator.selected_loss}"
                )
            loss_function = getattr(self, f"loss_{Generator.selected_loss}")
            self.fitness_values.append(
                loss_function(D, fake_decision, images).item())

    def calc_win_rate(self, fake_decision):
        if self.skill_rating_enabled():
            self.win_rate += sum(
                (fake_decision >= 0.5).float()).item() / len(fake_decision)

    def generate_noise(self, batch_size, volatile=False, cuda=True):
        with torch.set_grad_enabled(not volatile):
            gen_input = tools.cuda(torch.randn([batch_size] +
                                               list(self.input_shape[1:]),
                                               requires_grad=True),
                                   condition=cuda)
        return gen_input

    def inception_score(self, batch_size=10, splits=10):
        """Computes the inception score of the generated images
        n -- amount of generated images
        batch_size -- batch size for feeding into Inception v3
        splits -- number of splits
        """
        generated_images = self(self.generate_common_noise()).detach()
        self.inception_score_mean, _ = inception_score(generated_images,
                                                       batch_size=batch_size,
                                                       resize=True,
                                                       splits=splits)
        return self.inception_score_mean

    def calc_rmse_score(self):
        generated_images = self(self.generate_common_noise()).detach()
        self.rmse_score = rmse_score.rmse(generated_images)

    def generate_common_noise(self,
                              noise_path='generator_noise.pt',
                              size=None):
        """Generate a noise to be used as base for comparisons"""
        size = size or config.evolution.fitness.fid_sample_size
        if os.path.isfile(noise_path) and Generator.fid_noise is None:
            Generator.fid_noise = torch.load(noise_path)
            logger.info(
                f"generator noise loaded from file with shape {Generator.fid_noise.shape}"
            )
            if Generator.fid_noise.shape[0] != size:
                logger.info(
                    f"discard loaded generator noise because the sample size is different: {size}"
                )
                Generator.fid_noise = None
        if Generator.fid_noise is None:
            Generator.fid_noise = self.generate_noise(size).cpu()
            torch.save(Generator.fid_noise, noise_path)
            logger.info(
                f"generator noise saved to file with shape {Generator.fid_noise.shape}"
            )
        return Generator.fid_noise

    def finish_generation(self, **kwargs):
        if kwargs.get("calc_fid") and (config.evolution.fitness.generator
                                       == "FID"
                                       or config.stats.calc_fid_score):
            self.calc_fid()
        if config.evolution.fitness.generator == "FID":
            self.fitness_values.append(self.fid_score)
        elif config.evolution.fitness.generator == "skill_rating":
            self.fitness_values.append(-self.skill_rating.mu)  # + 2*self.skill_rating.phi
        elif config.evolution.fitness.generator == "random":
            self.fitness_values = [np.random.rand()]

    def calc_fid(self):
        noise = self.generate_common_noise()
        self.fid_score = generative_score.fid_images(GeneratorDataset(
            self, noise=noise),
                                                     size=noise.shape[0])
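The loss method above resolves the concrete objective at runtime through getattr(self, f"loss_{self.genome.gan_type}"). Reduced to its essentials, the dispatch pattern is just (a sketch showing two of the losses):

import torch
from torch.nn.functional import binary_cross_entropy_with_logits

class GeneratorLosses:
    def loss_wgan(self, fake_decision):
        return -fake_decision.mean()

    def loss_gan(self, fake_decision):
        targets = torch.ones(fake_decision.size(0))
        return binary_cross_entropy_with_logits(fake_decision.view(-1), targets)

    def loss(self, gan_type, fake_decision):
        # resolve loss_<gan_type> by name, exactly as the Generator does
        return getattr(self, f"loss_{gan_type}")(fake_decision)

print(GeneratorLosses().loss("wgan", torch.randn(8, 1)).item())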
Code Example #13
    def setUp(self):
        self.genome = Genome()
        self.phenotype = Phenotype(1, self.genome)
        config.optimizer.copy_optimizer_state = True
        config.optimizer.type = "Adam"
        config.evolution.sequential_layers = True
Code Example #14
File: discriminator.py Project: vfcosta/coegan
class Discriminator(Phenotype):
    def __init__(self, output_size=1, genome=None, input_shape=None):
        super().__init__(output_size=output_size,
                         genome=genome,
                         input_shape=input_shape)
        self.output_size = output_size

        if genome is None:
            if config.gan.discriminator.fixed:
                self.genome = Genome(
                    random=False,
                    add_layer_prob=0,
                    rm_layer_prob=0,
                    gene_mutation_prob=0,
                    simple_layers=config.gan.discriminator.simple_layers)
                if not config.gan.discriminator.simple_layers:
                    self.genome.add(
                        Conv2d(8,
                               stride=2,
                               activation_type="LeakyReLU",
                               activation_params={"negative_slope": 0.2}))
                    self.genome.add(
                        Conv2d(16,
                               stride=2,
                               activation_type="LeakyReLU",
                               activation_params={"negative_slope": 0.2}))
                    self.genome.add(
                        Conv2d(32,
                               stride=2,
                               activation_type="LeakyReLU",
                               activation_params={"negative_slope": 0.2}))
                    self.genome.add(
                        Conv2d(64,
                               stride=2,
                               activation_type="LeakyReLU",
                               activation_params={"negative_slope": 0.2}))
                    self.genome.add(
                        Conv2d(128,
                               stride=2,
                               activation_type="LeakyReLU",
                               activation_params={"negative_slope": 0.2}))
                # self.genome.add(Linear(1024, activation_type="ELU"))
            else:
                self.genome = Genome(
                    random=not config.evolution.sequential_layers)
                self.genome.possible_genes = [
                    g for g in self.genome.possible_genes if g[0] != Deconv2d
                ]
                self.genome.add_random_gene()

            if config.gan.type == "gan":
                self.genome.output_genes = [
                    Linear(1, activation_type="Sigmoid", normalize=False)
                ]
            else:
                self.genome.output_genes = [
                    Linear(1, activation_type=None, normalize=False)
                ]

    def forward(self, x):
        out = super().forward(x)
        out = out.view(out.size(0), -1)
        return out

    def train_step(self, G, images):
        """Train the discriminator on real+fake"""
        self.zero_grad()

        #  1A: Train D on real
        real_error, real_decision = self.step_real(images)
        if config.gan.type == "wgan":
            real_error.backward(
                tensor_constants.ONE
            )  # compute/store gradients, but don't change params
        elif config.gan.type not in ["rsgan", "rasgan"]:
            real_error.backward(
            )  # compute/store gradients, but don't change params

        #  1B: Train D on fake
        fake_error, fake_data, fake_decision = self.step_fake(G, batch_size=images.size()[0])
        if config.gan.type == "wgan":
            fake_error.backward(tensor_constants.MONE)
        elif config.gan.type not in ["rsgan", "rasgan"]:
            fake_error.backward()

        if config.gan.type == "rsgan":
            labels = tools.cuda(Variable(torch.ones(images.size(0))))
            real_error = self.criterion(
                real_decision.view(-1) - fake_decision.view(-1), labels)
            real_error.backward()
            fake_error = tools.cuda(torch.FloatTensor([0]))
        elif config.gan.type == "rasgan":
            labels = tools.cuda(Variable(torch.ones(images.size(0))))
            labels_zeros = tools.cuda(Variable(torch.zeros(images.size(0))))
            real_error = (self.criterion(
                real_decision.view(-1) - torch.mean(fake_decision.view(-1)),
                labels) + self.criterion(
                    torch.mean(fake_decision.view(-1)) -
                    real_decision.view(-1), labels_zeros)) / 2
            real_error.backward()
            fake_error = tools.cuda(torch.FloatTensor([0]))

        if config.evolution.fitness.discriminator == "AUC":
            # full_decision = np.concatenate((real_decision.cpu().data.numpy().flatten(), fake_decision.cpu().data.numpy().flatten()))
            # full_labels = np.concatenate((np.ones(real_decision.size()[0]), np.zeros(fake_decision.size()[0])))

            # self.fitness_value -= roc_auc_score(full_labels, full_decision)
            # self.fitness_value -= average_precision_score(full_labels, full_decision)
            # self.fitness_value += 1 - accuracy_score(full_labels, full_decision>0.5)
            # self.fitness_value += np.random.rand()

            self.fitness_value += abs(
                accuracy_score(np.zeros(fake_decision.size()[0]),
                               fake_decision.cpu().data.numpy().flatten() > 0.5) -
                accuracy_score(np.ones(real_decision.size()[0]),
                               real_decision.cpu().data.numpy().flatten() > 0.5))

        if config.gan.discriminator.use_gradient_penalty:
            gradient_penalty = self.gradient_penalty(images.data, fake_data.data)
            gradient_penalty.backward()

        # Only optimizes D's parameters; changes based on stored gradients from backward()
        self.optimizer.step()
        # Wasserstein distance
        if config.gan.type == "wgan":
            return (real_error - fake_error).item()
        return (real_error + fake_error).item()

    def step_real(self, images):
        real_decision = self(images)

        if config.gan.type in ["wgan", "rsgan", "rasgan"]:
            return real_decision.mean(), real_decision
        elif config.gan.type == "lsgan":
            return 0.5 * torch.mean((real_decision - 1)**2), real_decision

        labels = tools.cuda(Variable(torch.ones(images.size(0))))
        labels = labels * 0.9 if config.gan.label_smoothing else labels
        return self.criterion(real_decision.view(-1), labels), real_decision

    def step_fake(self, G, batch_size):
        gen_input = G.generate_noise(batch_size)
        fake_data = G(gen_input).detach()  # detach to avoid training G on these labels
        fake_decision = self(fake_data)

        if config.gan.type in ["wgan", "rsgan", "rasgan"]:
            return fake_decision.mean(), fake_data, fake_decision
        elif config.gan.type == "lsgan":
            return 0.5 * torch.mean(fake_decision ** 2), fake_data, fake_decision

        fake_labels = tools.cuda(Variable(torch.zeros(batch_size)))
        fake_labels = fake_labels + 0.1 if config.gan.label_smoothing else fake_labels
        return self.criterion(fake_decision.view(-1), fake_labels), fake_data, fake_decision

    def eval_step(self, G, images):
        fake_error, _, _ = self.step_fake(G, images.size(0))
        real_error, _ = self.step_real(images)
        return real_error.item() + fake_error.item()

    def gradient_penalty(self, real_data, fake_data):
        batch_size = real_data.size()[0]
        alpha = torch.rand(batch_size, 1, 1, 1)
        alpha = tools.cuda(alpha.expand_as(real_data))

        interpolates = alpha * real_data + ((1 - alpha) * fake_data)
        interpolates = autograd.Variable(interpolates, requires_grad=True)

        disc_interpolates = tools.cuda(self(interpolates))
        gradients = autograd.grad(outputs=disc_interpolates,
                                  inputs=interpolates,
                                  grad_outputs=tools.cuda(
                                      torch.ones(disc_interpolates.size())),
                                  create_graph=True,
                                  retain_graph=True,
                                  only_inputs=True)[0]

        return ((gradients.norm(2, dim=1) - 1) ** 2).mean() * config.gan.discriminator.gradient_penalty_lambda

    def fitness(self):
        if config.evolution.fitness.discriminator == "AUC":
            return self.fitness_value
        return super().fitness()
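
Together with the Generator in the next example, the two train_step methods pair into the usual alternating adversarial loop. A minimal sketch, assuming a standard PyTorch dataloader yielding (images, labels) batches and already-built G and D instances; only train_step comes from the classes shown in these examples.

def train_epoch(G, D, dataloader):
    """One adversarial epoch: D trains on real+fake first, then G trains on D's response."""
    d_losses, g_losses = [], []
    for images, _ in dataloader:
        d_losses.append(D.train_step(G, images))  # updates only D's parameters
        g_losses.append(G.train_step(D, images))  # updates only G's parameters
    return sum(d_losses) / len(d_losses), sum(g_losses) / len(g_losses)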
Code example #15
File: generator.py  Project: vfcosta/qd-coegan
class Generator(Phenotype):

    fid_noise = None
    real_labels = None

    def __init__(self,
                 output_size=(1, 28, 28),
                 genome=None,
                 input_shape=(1, 1, 10, 10),
                 optimizer_conf=config.gan.generator.optimizer):
        super().__init__(output_size=output_size,
                         genome=genome,
                         input_shape=input_shape)
        self.noise_size = int(np.prod(self.input_shape[1:]))
        self.inception_score_mean = 0
        self.fid_score = None
        self.rmse_score = None
        self.optimizer_conf = optimizer_conf

        if genome is None:
            if config.gan.generator.fixed:
                self.genome = Genome(
                    random=False,
                    add_layer_prob=0,
                    rm_layer_prob=0,
                    gene_mutation_prob=0,
                    simple_layers=config.gan.generator.simple_layers,
                    linear_at_end=False)
                self.genome.add(
                    Linear(4 * int(np.prod(output_size)),
                           activation_type="LeakyReLU",
                           activation_params={"negative_slope": 0.2}))
                if not config.gan.generator.simple_layers:
                    self.genome.add(
                        Deconv2d(128,
                                 activation_type="LeakyReLU",
                                 activation_params={"negative_slope": 0.2}))
                    self.genome.add(
                        Deconv2d(64,
                                 activation_type="LeakyReLU",
                                 activation_params={"negative_slope": 0.2}))
            else:
                self.genome = Genome(
                    random=not config.evolution.sequential_layers,
                    linear_at_end=False)
                self.genome.possible_genes = [
                    (getattr(evolution, l), {})
                    for l in config.gan.generator.possible_layers
                ]
                self.genome.add(Linear(4096))
            if config.gan.generator.simple_layers:
                # self.genome.output_genes = [Deconv2d(output_size[0], activation_type="Tanh")]
                self.genome.output_genes = [
                    Linear(int(np.prod(output_size)),
                           activation_type="Tanh",
                           normalize=False)
                ]
            else:
                self.genome.output_genes = [
                    Deconv2d(output_size[0],
                             activation_type="Tanh",
                             normalize=False)
                ]

    def forward(self, x):
        out = super().forward(x)
        if out is not None and len(out.size()) == 2:
            out = out.view(out.size(0), *self.output_size)
        return out

    def train_step(self, D, images):
        self.inception_score_mean = 0
        batch_size = images.size(0)
        # 2. Train G on D's response (but DO NOT train D on these labels)
        self.zero_grad()

        if self.real_labels is None:
            self.real_labels = tools.cuda(Tensor(torch.ones(batch_size)))
            self.real_labels = self.real_labels * 0.9 if config.gan.label_smoothing else self.real_labels

        error, decision = self.step(D, batch_size)
        if config.gan.type == "wgan":
            error.backward(tensor_constants.ONE)
        elif config.gan.type == "rsgan":
            real_decision = D(images)
            error = self.criterion(decision.view(-1) - real_decision.view(-1), self.real_labels)
            error.backward()
        elif config.gan.type == "rasgan":
            real_decision = D(images)
            labels_zeros = tools.cuda(Tensor(torch.zeros(images.size(0))))
            error = (self.criterion(real_decision.view(-1) - torch.mean(decision.view(-1)), labels_zeros) +
                     self.criterion(torch.mean(decision.view(-1)) - real_decision.view(-1), self.real_labels)) / 2
            error.backward()
        else:
            error.backward()

        if config.evolution.fitness.generator == "AUC":
            labels = np.ones(images.size(0))
            self.fitness_value += 1 - accuracy_score(labels, decision.cpu() > 0.5)

        self.optimizer.step()  # Only optimizes G's parameters

        if config.gan.type == "wgan":
            return error.item()
        return error.item()

    def step(self, D, batch_size, gen_input=None):
        if gen_input is None:
            gen_input = self.generate_noise(batch_size)

        fake_data = self(gen_input)
        fake_decision = D(fake_data)

        if config.gan.type in ["wgan", "rsgan", "rasgan"]:
            return fake_decision.mean(), fake_decision
        elif config.gan.type == "lsgan":
            return 0.5 * torch.mean((fake_decision - 1)**2), fake_decision

        return self.criterion(fake_decision.view(-1), self.real_labels), fake_decision

    def eval_step(self, D, images):
        error, decision = self.step(D, images.size(0))
        self.calc_win_rate(decision)
        return error.item()

    def calc_win_rate(self, fake_decision):
        if self.skill_rating_enabled():
            self.win_rate += sum((fake_decision >= 0.5).float()).item() / len(fake_decision)

    def generate_noise(self, batch_size, volatile=False):
        with torch.set_grad_enabled(not volatile):
            gen_input = tools.cuda(torch.randn([batch_size] + list(self.input_shape[1:]), requires_grad=True))
        return gen_input

    def inception_score(self, batch_size=10, splits=10):
        """Compute the inception score of the generated images.

        batch_size -- batch size for feeding into Inception v3
        splits -- number of splits
        """
        generated_images = self(self.generate_common_noise()).detach()
        self.inception_score_mean, _ = inception_score(generated_images,
                                                       batch_size=batch_size,
                                                       resize=True,
                                                       splits=splits)
        return self.inception_score_mean

    def calc_rmse_score(self):
        generated_images = self(self.generate_common_noise()).detach()
        self.rmse_score = rmse_score.rmse(generated_images)

    def generate_common_noise(self, noise_path='generator_noise.pt'):
        """Generate noise to be used as a common base for comparisons."""
        if os.path.isfile(noise_path) and Generator.fid_noise is None:
            Generator.fid_noise = torch.load(noise_path)
            logger.info(f"generator noise loaded from file with shape {Generator.fid_noise.shape}")
            if Generator.fid_noise.shape[0] != config.evolution.fitness.fid_sample_size:
                logger.info("discarding loaded generator noise because the sample size differs: "
                            f"{config.evolution.fitness.fid_sample_size}")
                Generator.fid_noise = None
        if Generator.fid_noise is None:
            Generator.fid_noise = self.generate_noise(config.evolution.fitness.fid_sample_size).cpu()
            torch.save(Generator.fid_noise, noise_path)
            logger.info(f"generator noise saved to file with shape {Generator.fid_noise.shape}")
        return Generator.fid_noise

    def calc_fid(self):
        self.fid_score = generative_score.fid(self, noise=self.generate_common_noise())

    def fitness(self):
        if config.evolution.fitness.generator == "FID":
            return self.fid_score
        if config.evolution.fitness.generator == "AUC":
            return self.fitness_value
        if config.evolution.fitness.generator == "skill_rating":
            return -self.skill_rating.mu  # + 2*self.skill_rating.phi
        return super().fitness()
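
The densest branch above is the relativistic average ("rasgan") objective in train_step. Written out on its own, and assuming self.criterion is binary cross-entropy with logits (consistent with the sigmoid-free decisions used here), the generator's loss is:

import torch
from torch.nn.functional import binary_cross_entropy_with_logits as bce_logits

def rasgan_generator_loss(real_decision, fake_decision, ones, zeros):
    """Sketch of the 'rasgan' branch above: fake scores should exceed the average
    real decision, and real scores should fall below the average fake decision."""
    return (bce_logits(real_decision.view(-1) - fake_decision.view(-1).mean(), zeros) +
            bce_logits(fake_decision.view(-1).mean() - real_decision.view(-1), ones)) / 2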
Code example #16
class Discriminator(Phenotype):

    labels = None
    fake_labels = None
    selected_loss = None

    def __init__(self, output_size=1, genome=None, input_shape=None, optimizer_conf=None):
        super().__init__(output_size=output_size, genome=genome, input_shape=input_shape)
        self.output_size = output_size
        self.optimizer_conf = optimizer_conf or config.gan.discriminator.optimizer

        if genome is None:
            if config.gan.discriminator.fixed:
                self.genome = Genome(random=False, add_layer_prob=0, rm_layer_prob=0, gene_mutation_prob=0,
                                     mutate_gan_type_prob=0)
                self.genome.add(Conv2d(256, stride=2, activation_type="LeakyReLU", activation_params={"negative_slope": 0.2}))
                self.genome.add(Conv2d(128, stride=2, activation_type="LeakyReLU", activation_params={"negative_slope": 0.2}))
                self.genome.add(Conv2d(64, stride=2, activation_type="LeakyReLU", activation_params={"negative_slope": 0.2}))
                # self.genome.add(SelfAttention())
            else:
                self.genome = Genome(random=not config.evolution.sequential_layers)
                self.genome.possible_genes = [(getattr(evolution, l), {}) for l in config.gan.discriminator.possible_layers]

            if not config.gan.discriminator.fixed:
                self.genome.input_genes = [Conv2d(stride=1, normalize="spectral")]
            else:
                self.genome.input_genes = []
            if not config.gan.discriminator.fixed:
                self.genome.output_genes = [Linear(1, activation_type=None, normalize="spectral", bias=False)]
            else:
                self.genome.output_genes = [Linear(1, activation_type=None, normalize="none", bias=False)]

    def forward(self, x):
        out = super().forward(x)
        out = out.view(out.size(0), -1)
        return out

    def train_step(self, G, images):
        """Train the discriminator on real+fake"""
        self.zero_grad()

        if self.labels is None:
            self.labels = tools.cuda(Tensor(torch.ones(images.size(0))))
            self.labels = self.labels * 0.9 if config.gan.label_smoothing else self.labels

        if self.fake_labels is None:
            self.fake_labels = tools.cuda(Tensor(torch.zeros(images.size(0))))
            self.fake_labels = self.fake_labels + 0.1 if config.gan.label_smoothing else self.fake_labels

        error, real_decision, fake_decision, fake_data = self.loss(G, images)

        if self.use_gradient_penalty():
            gradient_penalty = self.gradient_penalty(images.data, fake_data.data)
            error += gradient_penalty

        error.backward()
        self.optimizer.step()

        # clip weights for WGAN
        if self.genome.gan_type == "wgan" and not self.use_gradient_penalty():
            clip_value = 0.01
            for p in self.parameters():
                p.data.clamp_(-clip_value, clip_value)

        self.calc_metrics(G, error.item(), real_decision, fake_decision)
        return error.item()

    def loss(self, G, images):
        real_decision = self(images)
        fake_data = G(G.generate_noise(images.size()[0])).detach()  # detach to avoid training G on these labels
        fake_decision = self(fake_data)
        loss_function = getattr(self, f"loss_{self.genome.gan_type}")
        error = loss_function(real_decision, fake_decision)
        return error, real_decision, fake_decision, fake_data

    def loss_wgan(self, real_decision, fake_decision):
        return -real_decision.mean() + fake_decision.mean()

    def loss_wgan_gp(self, real_decision, fake_decision):
        return self.loss_wgan(real_decision, fake_decision)

    def loss_rsgan(self, real_decision, fake_decision):
        return binary_cross_entropy_with_logits(real_decision.view(-1) - fake_decision.view(-1), self.labels)

    def loss_rasgan(self, real_decision, fake_decision):
        error = (binary_cross_entropy_with_logits(real_decision.view(-1) - torch.mean(fake_decision.view(-1)),
                                                  self.labels) +
                 binary_cross_entropy_with_logits(fake_decision.view(-1) - torch.mean(real_decision.view(-1)),
                                                  self.fake_labels)) / 2
        return error

    def loss_lsgan(self, real_decision, fake_decision):
        return (torch.mean((real_decision - 1) ** 2) + torch.mean(fake_decision ** 2)) / 2

    def loss_gan(self, real_decision, fake_decision):
        real_error = binary_cross_entropy_with_logits(real_decision.view(-1), self.labels)
        fake_error = binary_cross_entropy_with_logits(fake_decision.view(-1), self.fake_labels)
        return (real_error + fake_error) / 2

    def loss_hinge(self, real_decision, fake_decision):
        real_error = torch.mean(torch.nn.ReLU(inplace=True)(1 - real_decision))
        fake_error = torch.mean(torch.nn.ReLU(inplace=True)(1 + fake_decision))
        return real_error + fake_error

    def eval_step(self, G, images):
        error, real_decision, fake_decision, _ = self.loss(G, images)
        self.calc_metrics(G, error.item(), real_decision, fake_decision)
        return error.item()

    def use_gradient_penalty(self):
        return config.gan.discriminator.use_gradient_penalty or self.genome.gan_type == "wgan_gp"

    def calc_global_metrics(self, best_generators, images):
        if Generator.noise_images is None:
            Generator.noise_images = tools.cuda(torch.FloatTensor(len(images), *images[0].shape).uniform_(-1, 1))
        G = tools.cuda(best_generators[0])
        labels = tools.cuda(Tensor(torch.ones(images.size(0))))
        fake_labels = tools.cuda(Tensor(torch.zeros(images.size(0))))
        if config.evolution.fitness.discriminator.startswith("validation_loss_"):
            fake_data = G(G.generate_noise(images.size(0)))
            loss_function = getattr(self, config.evolution.fitness.discriminator[11:])  # strips the "validation_" prefix, leaving e.g. "loss_wgan"
            error = loss_function(self(images), self(fake_data)).item()
            self.fitness_values = [error]
        elif config.evolution.fitness.discriminator == "rel_avg":
            with torch.no_grad():
                real_decision = self(images)
                fake_data = G(G.generate_noise(images.size(0)))
                fake_decision = self(fake_data)

                noise_score = torch.mean(torch.sigmoid(self(Generator.noise_images)))
                train_score = torch.mean(torch.sigmoid(real_decision))
                gen_score = torch.mean(torch.sigmoid(fake_decision))

                d_conf = (1 + train_score - noise_score) / 2
                value = -d_conf * (1 - gen_score)
                self.fitness_values = [value.item()]
        elif config.evolution.fitness.discriminator == "rel_avg2":
            with torch.no_grad():
                real_decision = self(images)
                fake_data = G(G.generate_noise(images.size(0)))
                fake_decision = self(fake_data)
                noise_decision = self(Generator.noise_images)
                error = (binary_cross_entropy_with_logits(real_decision.view(-1) - torch.mean(fake_decision.view(-1)), labels) +
                         binary_cross_entropy_with_logits(fake_decision.view(-1) - torch.mean(real_decision.view(-1)), fake_labels) +
                         binary_cross_entropy_with_logits(noise_decision.view(-1) - torch.mean(real_decision.view(-1)), fake_labels)) / 3
                self.fitness_values = [error.item()]
        if config.evolution.fitness.discriminator == "rel_avg3":
            with torch.no_grad():
                real_decision = self(images)
                fake_data = G(G.generate_noise(images.size(0)))
                fake_decision = self(fake_data)
                noise_decision = self(Generator.noise_images)
                mean_noise = torch.mean(noise_decision)
                error = (binary_cross_entropy_with_logits(real_decision.view(-1) - (torch.mean(fake_decision.view(-1)) + mean_noise)/2, labels) +
                         binary_cross_entropy_with_logits(fake_decision.view(-1) - (torch.mean(real_decision.view(-1)) + mean_noise)/2, fake_labels)) / 2
                self.fitness_values = [error.item()]
        G.cpu()

    def calc_metrics(self, G, error, real_decision, fake_decision):
        self.calc_win_rate(torch.sigmoid(real_decision), torch.sigmoid(fake_decision), G)
        if config.evolution.fitness.discriminator == "rel_avg":
            pass
            # with torch.no_grad():
            #     c, w, h = self.input_shape[-3], self.input_shape[-2], self.input_shape[-1]
            #     if Generator.noise_images is None:
            #         Generator.noise_images = tools.cuda(torch.FloatTensor(real_decision.shape[0], c, w, h).uniform_(-1, 1))
            #     noise_score = torch.mean(torch.sigmoid(self(Generator.noise_images)))
            #     train_score = torch.mean(torch.sigmoid(real_decision))
            #     gen_score = torch.mean(torch.sigmoid(fake_decision))
            #     value = -train_score*(1-noise_score)*(1-gen_score)
            #     self.fitness_values.append(value.item())
        elif config.evolution.fitness.discriminator == "loss":
            self.fitness_values.append(error)
        elif config.evolution.fitness.discriminator.startswith("loss_"):
            loss_function = getattr(self, config.evolution.fitness.discriminator)
            error = loss_function(real_decision, fake_decision).item()
            self.fitness_values.append(error)
        elif config.evolution.fitness.discriminator == "AUC":
            full_decision = np.concatenate((torch.sigmoid(real_decision).detach().cpu().numpy().flatten(),
                                            torch.sigmoid(fake_decision).detach().cpu().numpy().flatten()))
            full_labels = np.concatenate((np.ones(real_decision.size()[0]), np.zeros(fake_decision.size()[0])))
            self.fitness_values.append(1 - roc_auc_score(full_labels, full_decision))
            # self.fitness_value -= average_precision_score(full_labels, full_decision)
            # self.fitness_value += 1 - np.mean(
            #     [accuracy_score(np.zeros(fake_decision.size()[0]), fake_decision.cpu().data.numpy().flatten() > 0.5),
            #      accuracy_score(np.ones(real_decision.size()[0]), real_decision.cpu().data.numpy().flatten() > 0.5)])
        elif config.evolution.fitness.discriminator == "BCE":
            self.fitness_values.append(self.loss_gan(real_decision, fake_decision).item())
        elif config.evolution.fitness.discriminator == "random_loss":
            if Discriminator.selected_loss is None:
                Discriminator.selected_loss = np.random.choice(config.gan.possible_gan_types)
                logger.info(f"using random loss function as fitness: {Discriminator.selected_loss}")
            loss_function = getattr(self, f"loss_{Discriminator.selected_loss}")
            self.fitness_values.append(loss_function(real_decision, fake_decision).item())

    def calc_win_rate(self, real_decision, fake_decision, G):
        if self.skill_rating_enabled():
            self.win_rate += (sum((real_decision > 0.5).float()) + sum((fake_decision < 0.5).float())).item()/(len(real_decision) + len(fake_decision))
            # self.win_rate += (torch.mean(real_decision).item() + (1-torch.mean(fake_decision)).item())/2
            # self.win_rate += (1-torch.mean(fake_decision)).item()

            # for match in [torch.mean((real_decision > 0.5).float()).item(), torch.mean((real_decision < 0.5).float()).item()]:
            # for match in [torch.mean(real_decision) > 0.5, torch.mean(fake_decision).item() < 0.5]:
            #     match = int(match)
            #     self.skill_rating_games.append((match, G.skill_rating))
            #     G.skill_rating_games.append((1 - match, self.skill_rating))

            # matches = [(p > 0.5) for p in real_decision] + [(p < 0.5) for p in fake_decision]
            # for match in matches:
            #     match = match.float().item()
            #     self.skill_rating_games.append((match, G.skill_rating))
            #     G.skill_rating_games.append((1 - match, self.skill_rating))

    def gradient_penalty(self, real_data, fake_data, epsilon=1e-12):
        batch_size = real_data.size()[0]
        alpha = torch.rand(batch_size, 1, 1, 1)
        alpha = tools.cuda(alpha.expand_as(real_data))

        interpolates = alpha * real_data + ((1 - alpha) * fake_data)
        interpolates = interpolates.clone().detach().requires_grad_(True)

        disc_interpolates = tools.cuda(self(interpolates))
        gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                                  grad_outputs=tools.cuda(torch.ones(disc_interpolates.size())),
                                  create_graph=True, retain_graph=True, only_inputs=True)[0]
        gradients_norm = torch.sqrt(torch.sum(gradients ** 2, dim=1) + epsilon)
        return ((gradients_norm - 1) ** 2).mean() * config.gan.discriminator.gradient_penalty_lambda

    def finish_generation(self, **kwargs):
        if config.evolution.fitness.discriminator == "skill_rating":
            self.fitness_values.append(-self.skill_rating.mu)  # + 2*self.skill_rating.phi
        elif config.evolution.fitness.discriminator == "random":
            self.fitness_values = [np.random.rand()]
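
Unlike the earlier coegan Discriminator, which branches on config.gan.type, this version resolves its objective at run time: loss() looks up loss_{self.genome.gan_type} with getattr, so a mutation of the genome's gan_type (see mutate_gan_type_prob in __init__) transparently swaps the loss function. A minimal sketch of that dispatch, with the genome reduced to a plain string:

import torch
from torch.nn.functional import binary_cross_entropy_with_logits

class LossDispatch:
    """Illustrative stand-in for the getattr-based selection in Discriminator.loss."""

    def __init__(self, gan_type="gan"):
        self.gan_type = gan_type  # stands in for self.genome.gan_type

    def loss_gan(self, real_decision, fake_decision):
        real = binary_cross_entropy_with_logits(real_decision, torch.ones_like(real_decision))
        fake = binary_cross_entropy_with_logits(fake_decision, torch.zeros_like(fake_decision))
        return (real + fake) / 2

    def loss_wgan(self, real_decision, fake_decision):
        return -real_decision.mean() + fake_decision.mean()

    def loss(self, real_decision, fake_decision):
        loss_function = getattr(self, f"loss_{self.gan_type}")  # name-based dispatch
        return loss_function(real_decision, fake_decision)

# e.g. LossDispatch("wgan").loss(torch.randn(8), torch.randn(8))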