def training_step(self, batch, optimizer_idx):
    """Run one GAN optimization step.

    Args:
        batch: for ``optimizer_idx == 0``, a 1-tuple ``(z,)`` of latent
            noise; for ``optimizer_idx == 1``, a pair ``(z, images)`` of
            noise and real images.
        optimizer_idx: 0 selects the generator step, 1 the discriminator
            step.

    Returns:
        ``(g_loss, g_out)`` for the generator step, or ``d_loss`` for the
        discriminator step.

    Raises:
        ValueError: if ``optimizer_idx`` is neither 0 nor 1.  (The original
            code silently returned ``None`` in that case.)
    """
    if optimizer_idx == 0:
        # Generator step: generator weights trainable, discriminator frozen.
        (z,) = batch
        g_out = self._generator(z, trainable=True, const_init=True)
        g_logits = self._discriminator(g_out, trainable=False, const_init=True)
        # The generator's target is the all-ones ("real") label.
        g_loss = flow.nn.sigmoid_cross_entropy_with_logits(
            flow.ones_like(g_logits),
            g_logits,
            name="Gloss_sigmoid_cross_entropy_with_logits",
        )
        return (g_loss, g_out)
    if optimizer_idx == 1:
        # Discriminator step: generator frozen, discriminator trainable.
        (z, images) = batch
        g_out = self._generator(z, trainable=False, const_init=True)
        g_logits = self._discriminator(g_out, trainable=True, const_init=True)
        # Fake samples are labeled 0.
        d_loss_fake = flow.nn.sigmoid_cross_entropy_with_logits(
            flow.zeros_like(g_logits),
            g_logits,
            name="Dloss_fake_sigmoid_cross_entropy_with_logits",
        )
        # reuse=True shares discriminator variables with the fake-batch call.
        d_logits = self._discriminator(
            images, trainable=True, reuse=True, const_init=True
        )
        # Real samples are labeled 1.
        d_loss_real = flow.nn.sigmoid_cross_entropy_with_logits(
            flow.ones_like(d_logits),
            d_logits,
            name="Dloss_real_sigmoid_cross_entropy_with_logits",
        )
        return d_loss_fake + d_loss_real
    raise ValueError("unexpected optimizer_idx: {!r}".format(optimizer_idx))
def test_discriminator(
    # Latent size was hard-coded to 100 while the sibling test_generator job
    # uses self.z_dim; use self.z_dim for consistency.
    # NOTE(review): assumes self.z_dim == 100 today — confirm against callers.
    z: oft.Numpy.Placeholder((self.batch_size, self.z_dim)),
    images: oft.Numpy.Placeholder((self.batch_size, 1, 28, 28)),
    # label1/label0 are part of the job signature but unused in the body.
    label1: oft.Numpy.Placeholder((self.batch_size, 1)),
    label0: oft.Numpy.Placeholder((self.batch_size, 1)),
):
    """Discriminator test job with const-initialized weights.

    Computes the sigmoid cross-entropy loss on a fake batch (target 0) and
    a real batch (target 1), applies one SGD step with constant learning
    rate ``self.lr``, and returns the summed loss.
    """
    # Frozen generator produces fake images from the noise batch.
    g_out = self.generator(z, trainable=False, const_init=True)
    g_logits = self.discriminator(g_out, trainable=True, const_init=True)
    # Fake samples are labeled 0.
    d_loss_fake = flow.nn.sigmoid_cross_entropy_with_logits(
        flow.zeros_like(g_logits),
        g_logits,
        name="Dloss_fake_sigmoid_cross_entropy_with_logits",
    )
    # reuse=True shares discriminator variables with the fake-batch call above.
    d_logits = self.discriminator(images, trainable=True, reuse=True, const_init=True)
    # Real samples are labeled 1.
    d_loss_real = flow.nn.sigmoid_cross_entropy_with_logits(
        flow.ones_like(d_logits),
        d_logits,
        name="Dloss_real_sigmoid_cross_entropy_with_logits",
    )
    d_loss = d_loss_fake + d_loss_real
    flow.optimizer.SGD(
        flow.optimizer.PiecewiseConstantScheduler([], [self.lr]), momentum=0
    ).minimize(d_loss)
    return d_loss
def train_generator(
    z=flow.FixedTensorDef((self.batch_size, self.z_dim)),
):
    """Generator training job: one step pushing D(G(z)) toward "real".

    Returns the generator loss and the generated images.
    """
    fake_images = self.generator(z, trainable=True)
    fake_logits = self.discriminator(fake_images, trainable=False)
    # The generator's target is the all-ones ("real") label.
    g_loss = flow.nn.sigmoid_cross_entropy_with_logits(
        flow.ones_like(fake_logits),
        fake_logits,
        name="Gloss_sigmoid_cross_entropy_with_logits",
    )
    flow.losses.add_loss(g_loss)
    return g_loss, fake_images
def test_generator(
    z: oft.Numpy.Placeholder((self.batch_size, self.z_dim)),
    label1: oft.Numpy.Placeholder((self.batch_size, 1)),
):
    """Generator test job with const-initialized weights and one SGD update.

    Returns the generator loss after minimizing it with constant learning
    rate ``self.lr``.
    """
    # label1 is accepted for signature compatibility but not used in the body.
    fake = self.generator(z, trainable=True, const_init=True)
    fake_logits = self.discriminator(fake, trainable=False, const_init=True)
    target = flow.ones_like(fake_logits)  # generator wants D to output "real"
    g_loss = flow.nn.sigmoid_cross_entropy_with_logits(
        target,
        fake_logits,
        name="Gloss_sigmoid_cross_entropy_with_logits",
    )
    scheduler = flow.optimizer.PiecewiseConstantScheduler([], [self.lr])
    flow.optimizer.SGD(scheduler, momentum=0).minimize(g_loss)
    return g_loss
def train_discriminator(
    # Latent size was hard-coded to 100 while the sibling train_generator job
    # uses self.z_dim; use self.z_dim for consistency.
    # NOTE(review): assumes self.z_dim == 100 today — confirm against callers.
    z=flow.FixedTensorDef((self.batch_size, self.z_dim)),
    images=flow.FixedTensorDef((self.batch_size, 1, 28, 28)),
):
    """Discriminator training job: one step on a fake batch and a real batch.

    Returns:
        ``(d_loss, d_loss_fake, d_loss_real)`` — total, fake-batch, and
        real-batch sigmoid cross-entropy losses.
    """
    g_out = self.generator(z, trainable=False)  # generator is frozen here
    g_logits = self.discriminator(g_out, trainable=True)
    # Fake samples are labeled 0.
    d_loss_fake = flow.nn.sigmoid_cross_entropy_with_logits(
        flow.zeros_like(g_logits),
        g_logits,
        name="Dloss_fake_sigmoid_cross_entropy_with_logits",
    )
    # reuse=True shares discriminator variables with the fake-batch call above.
    d_logits = self.discriminator(images, trainable=True, reuse=True)
    # Real samples are labeled 1.
    d_loss_real = flow.nn.sigmoid_cross_entropy_with_logits(
        flow.ones_like(d_logits),
        d_logits,
        name="Dloss_real_sigmoid_cross_entropy_with_logits",
    )
    d_loss = d_loss_fake + d_loss_real
    flow.losses.add_loss(d_loss)
    return d_loss, d_loss_fake, d_loss_real