Example #1
    def _train(self):
        for i, (images, _) in logger.enum("Train", self.train_loader):
            # Smoothed labels: real targets in [0.8, 1.0], fake targets in [0.0, 0.2].
            targets_real = torch.empty(images.size(0), 1, device=self.device).uniform_(0.8, 1.0)
            targets_fake = torch.empty(images.size(0), 1, device=self.device).uniform_(0.0, 0.2)

            images = images.to(self.device)

            # Discriminator step: push real images toward the smoothed real
            # labels and detached fakes toward the smoothed fake labels.
            self.optimizer_D.zero_grad()
            logits_real = self.discriminator(images)
            # Use images.size(0) so a smaller final batch still matches the targets;
            # detach so no gradients flow into the generator on this step.
            fake_images = self.generator(
                noise(self.device, images.size(0), self.noise_dim)).detach()
            logits_fake = self.discriminator(fake_images)
            discriminator_loss = DLoss(logits_real, logits_fake, targets_real, targets_fake)
            discriminator_loss.backward()
            self.optimizer_D.step()

            # Generator step: fresh (non-detached) fakes, scored against the real labels.
            self.optimizer_G.zero_grad()
            fake_images = self.generator(noise(self.device, images.size(0), self.noise_dim))
            logits_fake = self.discriminator(fake_images)
            generator_loss = GLoss(logits_fake, targets_real)
            generator_loss.backward()
            self.optimizer_G.step()

            logger.store(G_Loss=generator_loss.item())
            logger.store(D_Loss=discriminator_loss.item())
            logger.add_global_step()

        # Log a few samples from the last generated batch.
        for j in range(1, 10):
            img = fake_images[j].squeeze()
            logger.store('generated', img)
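
The helpers noise, DLoss, and GLoss are not defined in this snippet. A minimal sketch consistent with how they are called here (assumed stand-ins, not the original implementations):

import torch
import torch.nn.functional as F

def noise(device, batch_size, noise_dim):
    # Sample a batch of latent vectors for the generator.
    return torch.randn(batch_size, noise_dim, device=device)

def DLoss(logits_real, logits_fake, targets_real, targets_fake):
    # Binary cross-entropy with logits against the smoothed labels,
    # summed over the real and fake halves of the step.
    return (F.binary_cross_entropy_with_logits(logits_real, targets_real)
            + F.binary_cross_entropy_with_logits(logits_fake, targets_fake))

def GLoss(logits_fake, targets_real):
    # Non-saturating generator loss: fakes should look real to the discriminator.
    return F.binary_cross_entropy_with_logits(logits_fake, targets_real)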
Example #2
import time

def loop():
    logger.info(a=2, b=1)

    # Register a queue indicator: the logger keeps the last 10 values of
    # loss_ma and reports their mean, giving a moving average.
    logger.add_indicator('loss_ma', IndicatorType.queue,
                         IndicatorOptions(queue_size=10))
    for i in range(10):
        logger.add_global_step(1)
        logger.store(loss=100 / (i + 1), loss_ma=100 / (i + 1))
        logger.write()
        # Start a fresh console line every two steps.
        if (i + 1) % 2 == 0:
            logger.new_line()

        time.sleep(2)
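
For intuition, a queue indicator behaves like a fixed-size deque whose reported statistic is the mean of its contents. A plain-Python sketch of that behavior (illustrative only, not the library's implementation):

from collections import deque

class QueueIndicatorSketch:
    # Keep the most recent queue_size values; the reported
    # statistic is their mean, i.e. a moving average.
    def __init__(self, queue_size):
        self.values = deque(maxlen=queue_size)

    def store(self, value):
        self.values.append(value)

    def mean(self):
        return sum(self.values) / len(self.values)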
Example #3
    def _train(self):
        self.model.train()
        for i, (data, target) in logger.enum("Train", self.train_loader):
            data, target = data.to(self.device), target.to(self.device)

            self.optimizer.zero_grad()
            output = self.model(data)
            loss = F.cross_entropy(output, target)
            loss.backward()
            self.optimizer.step()

            # Store the scalar loss; the logger queues values and reports the mean.
            logger.store(train_loss=loss.item())
            logger.add_global_step()

            # Flush the accumulated indicators to the console periodically.
            if i % self.train_log_interval == 0:
                logger.write()
Example #4
    def _train(self):
        self.model.train()
        for i, (data, target) in logger.enumerator("Train", self.train_loader):
            data, target = data.to(self.device), target.to(self.device)
            self.optimizer.zero_grad()
            output = self.model(data)
            loss = F.nll_loss(output, target)
            loss.backward()
            self.optimizer.step()

            # Add training loss to the logger.
            # The logger will queue the values and output the mean
            logger.store(train_loss=loss.item())
            logger.add_global_step()

            # Print output to the console
            if i % self.train_log_interval == 0:
                # Output the indicators
                logger.write()
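
Examples 3 and 4 differ only in the loss call: F.cross_entropy applies log_softmax to raw logits internally, while F.nll_loss expects the model to output log-probabilities already. A quick check of the equivalence:

import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)            # raw model outputs
target = torch.randint(0, 10, (4,))

# cross_entropy(logits) == nll_loss(log_softmax(logits))
a = F.cross_entropy(logits, target)
b = F.nll_loss(F.log_softmax(logits, dim=1), target)
assert torch.allclose(a, b)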
Example #5
    def _train(self):
        for i, (input_tensor,
                target_tensor) in logger.enum("Train", self.train_loader):
            # Fresh hidden state for this sequence, on the device and in double precision.
            encoder_hidden = self.encoder.init_hidden(self.device).double()

            input_tensor = input_tensor.to(self.device).unsqueeze(1)
            target_tensor = target_tensor.to(self.device).double()

            self.optimizer.zero_grad()
            encoder_output, encoder_hidden = self.encoder(
                input_tensor, encoder_hidden)

            train_loss = self.loss(encoder_output, target_tensor)

            train_loss.backward()
            self.optimizer.step()

            logger.store(loss=train_loss.item())
            logger.add_global_step()
            # Write the indicators on every step rather than at an interval.
            logger.write()
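
The snippet assumes an encoder exposing init_hidden and a forward of the form forward(input, hidden) -> (output, hidden). A minimal GRU-based sketch matching those calls (hypothetical names and sizes):

import torch
import torch.nn as nn

class Encoder(nn.Module):
    # Hypothetical encoder; only the interface used above matters.
    def __init__(self, input_size, hidden_size, batch_size):
        super().__init__()
        self.hidden_size = hidden_size
        self.batch_size = batch_size
        self.rnn = nn.GRU(input_size, hidden_size, batch_first=True)

    def forward(self, x, hidden):
        # Returns (sequence outputs, final hidden state).
        return self.rnn(x, hidden)

    def init_hidden(self, device):
        # Shape (num_layers, batch, hidden); zeros on the target device.
        return torch.zeros(1, self.batch_size, self.hidden_size, device=device)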