Example #1
    def train_discriminator_step(self, configuration: Configuration, metadata: Metadata, architecture: Architecture,
                                 batch: Batch) -> float:
        # clean previous gradients
        architecture.discriminator_optimizer.zero_grad()

        # generate a batch of fake features with the same size as the real feature batch
        fake_features = self.sample_fake(architecture, len(batch["features"]), condition=batch.get("labels"))
        fake_features = fake_features.detach()  # detach so gradients do not flow back into the generator

        # calculate loss
        loss = architecture.discriminator_loss(architecture,
                                               batch["features"],
                                               fake_features,
                                               condition=batch.get("labels"))

        # calculate gradients
        loss.backward()

        # update the discriminator weights
        architecture.discriminator_optimizer.step()

        # return the loss
        return to_cpu_if_was_in_gpu(loss).item()
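
The sample_fake and discriminator_loss helpers used in this step are not shown on this page. Below is a minimal sketch of what they might look like for a standard (optionally conditional) GAN with a binary cross-entropy discriminator objective; the names, signatures, the noise_size argument and the assumption that the discriminator outputs probabilities are illustrative, not the project's actual implementation.

import torch
import torch.nn.functional as F


def sample_fake(architecture, size, noise_size, condition=None):
    # draw one standard-normal noise vector per requested sample
    noise = torch.randn(size, noise_size)
    # a conditional generator would also receive the labels
    if condition is not None:
        return architecture.generator(noise, condition=condition)
    return architecture.generator(noise)


def discriminator_loss(architecture, real_features, fake_features, condition=None):
    # classic GAN discriminator objective: push the predictions for real
    # samples towards 1 and the predictions for fake samples towards 0
    # (a conditional discriminator would also receive the condition)
    real_predictions = architecture.discriminator(real_features)
    fake_predictions = architecture.discriminator(fake_features)
    real_loss = F.binary_cross_entropy(real_predictions, torch.ones_like(real_predictions))
    fake_loss = F.binary_cross_entropy(fake_predictions, torch.zeros_like(fake_predictions))
    return real_loss + fake_loss

With helpers along these lines, the step above is the usual discriminator update: detach the fake batch, score both real and fake batches, backpropagate the combined loss, and apply a single optimizer step to the discriminator only.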
Example #2
    def train_discriminator_step(self,
                                 configuration: Configuration,
                                 metadata: Metadata,
                                 architecture: Architecture,
                                 batch: Batch) -> float:
        # clean previous gradients
        architecture.discriminator_optimizer.zero_grad()

        # generate candidate values for every feature from the observed features and the missing mask
        generated = architecture.generator(batch["features"],
                                           missing_mask=batch["missing_mask"])
        # replace the missing features with the generated ones
        imputed = compose_with_mask(
            mask=batch["missing_mask"],
            differentiable=True,  # safe to use now that there are no NaNs in the inputs
            where_one=generated,
            where_zero=batch["raw_features"])
        imputed = imputed.detach()  # detach so gradients do not flow back into the generator
        # generate the hint that reveals part of the missing mask to the discriminator
        hint = generate_hint(batch["missing_mask"],
                             configuration.hint_probability, metadata)

        # calculate loss
        loss = architecture.discriminator_loss(
            architecture=architecture,
            imputed=imputed,
            hint=hint,
            missing_mask=batch["missing_mask"])

        # calculate gradients
        loss.backward()

        # update the discriminator weights
        architecture.discriminator_optimizer.step()

        # return the loss
        return to_cpu_if_was_in_gpu(loss).item()
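
This second example follows the GAIN (Generative Adversarial Imputation Networks) training scheme: the generator proposes values for the missing entries, and the discriminator, helped by a hint, tries to tell imputed components from observed ones. The compose_with_mask and generate_hint helpers are not shown here; the sketch below is one plausible implementation, assuming that missing_mask is 1 where a value is missing and 0 where it is observed, and ignoring the per-variable handling that the metadata argument suggests.

import torch


def compose_with_mask(mask, where_one, where_zero, differentiable=True):
    # take the value from where_one where mask is 1 and from where_zero where mask is 0
    if differentiable:
        # arithmetic mixing; assumes neither input contains NaNs (0 * NaN is still NaN)
        return mask * where_one + (1 - mask) * where_zero
    # plain selection; safe when the side that is masked out contains NaNs
    return torch.where(mask.bool(), where_one, where_zero)


def generate_hint(missing_mask, hint_probability, metadata=None):
    # as described in the GAIN paper: reveal each mask entry to the discriminator
    # with probability hint_probability and set the remaining entries to 0.5 ("unknown");
    # metadata is unused in this simplified sketch
    missing_mask = missing_mask.float()
    revealed = (torch.rand_like(missing_mask) < hint_probability).float()
    return revealed * missing_mask + (1 - revealed) * 0.5

Under those assumptions, the discriminator receives the imputed batch together with the hint and predicts, per component, whether each value was observed or imputed; the discriminator loss would then typically be a binary cross-entropy between those predictions and missing_mask.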