Example #1
 def training_step(self, batch, batch_idx):
     x, y = batch
     y_hat = self(x)
     loss = F.cross_entropy(y_hat, y)
     result = pl.TrainResult(loss)
     result.log("train_loss", loss, on_epoch=True)
     return result
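All of the snippets below return a pl.TrainResult, the result object from PyTorch Lightning 0.9.x (it was removed in 1.0 in favour of calling self.log directly inside training_step). As a reminder of the surrounding context these methods assume, here is a minimal sketch of a complete module built around the same pattern; the LitClassifier name, the MNIST-style layer sizes, and the Adam optimizer are illustrative assumptions, not taken from the examples.

import torch
from torch import nn
import torch.nn.functional as F
import pytorch_lightning as pl  # assumes Lightning 0.9.x, where pl.TrainResult exists

class LitClassifier(pl.LightningModule):
    def __init__(self, in_features=28 * 28, num_classes=10):
        super().__init__()
        self.layer = nn.Linear(in_features, num_classes)

    def forward(self, x):
        # flatten images to vectors before the linear layer
        return self.layer(x.view(x.size(0), -1))

    def training_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        loss = F.cross_entropy(y_hat, y)
        result = pl.TrainResult(minimize=loss)  # the tensor Lightning backpropagates
        result.log("train_loss", loss, on_epoch=True)
        return result

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3)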
Example #2
 def training_step(self, batch, batch_idx):
     x, y = batch
     x = x.view(x.size(0), -1)
     z = self.encoder(x)
     x_hat = self.decoder(z)
     loss = F.mse_loss(x_hat, x)
     return pl.TrainResult(loss, checkpoint_on=loss)
Example #3
 def training_step(self, batch, batch_idx):
     x, y = batch
     out = self(x)
     loss = torch.nn.functional.binary_cross_entropy_with_logits(out, y)
     result = pl.TrainResult(minimize=loss)
     result.log_dict({"loss/train_loss": loss})
     return result
Example #4
 def training_step(self, batch, batch_idx):
     x, y = batch
     y_hat = self(x)
     loss = F.cross_entropy(y_hat, y)
     result = pl.TrainResult(minimize=loss)
     result.log('train_loss', loss)
     return result
Example #5
 def training_step(self, batch, batch_idx):
     x, y = batch
     y_hat = self(x)
     criterion = nn.MSELoss()
     loss = criterion(y_hat, y)
     result = pl.TrainResult(loss)
     return result
Example #6
 def training_step(self, batch, batch_idx):
     x, m, p, x_aug = batch
     x_hat = self.forward(x_aug)
     loss = self.loss_aae(x, x_hat, bootstrap_ratio=4)
     result = pl.TrainResult(loss)
     # result.log('loss', loss, prog_bar=True)
     return result
Example #7
    def training_step(self, batch, batch_idx):
        x, y = batch

        y_hat, _ = self(x, batch_idx)
        loss = F.cross_entropy(y_hat, y)

        return pl.TrainResult(loss)
Example #8
 def training_step(self, batch, batch_idx):
     x, m, p = batch
     x_hat = self.forward(x)
     loss = self.loss_aae(x, x_hat, bootstrap_ratio=4)
     result = pl.TrainResult(loss)
     result.log('AE reconstruction loss', loss, prog_bar=True)
     return result
Example #9
    def training_step(self, batch, batch_idx):
        """
        step == batch
        Lightning calls this inside the training loop with the data from the training dataloader
        passed in as `batch`.
        """
        # forward pass
        batch_imgs, heatmaps_gt = batch  #[batch_size, channel(3), size, size] [batch_size, n_joints, size, size]

        # visualization
        #cv2.imshow('img',batch_imgs[0].cpu().numpy())
        #cv2.waitKey(0)

        #get prediction
        combined_heatmap_preds = self(
            batch_imgs)  #[batch_size, nstack, n_joints, size, size]
        #debug pl.core.LightningModule is not correct
        #print(combined_heatmap_preds.shape)
        #print(torch.sum(combined_heatmap_preds))
        #print(self.state_dict()['pre.0.conv.weight'])
        #print(self.state_dict()['pre.0.conv.weight'].grad)
        #calculate loss
        train_loss = self.calc_loss(combined_heatmap_preds, heatmaps_gt)
        train_result = pl.TrainResult(
            minimize=train_loss)  # minimize: the metric to backpropagate on
        train_result.log('train_loss',
                         train_loss,
                         on_step=True,
                         on_epoch=True,
                         prog_bar=True)
        return train_result
Example #10
    def training_step(self, batch, batch_idx):
        images, target = batch
        output = self(images)
        loss = F.cross_entropy(output, target)
        acc1, acc5 = self.__accuracy(output, target, topk=(1, 5))

        result = pl.TrainResult(minimize=loss)
        result.log('train_loss',
                   loss,
                   on_step=True,
                   on_epoch=True,
                   prog_bar=True,
                   logger=True,
                   sync_dist=True)
        result.log('train_acc1',
                   acc1,
                   on_step=True,
                   on_epoch=True,
                   prog_bar=True,
                   logger=True,
                   sync_dist=True)
        result.log('train_acc5',
                   acc5,
                   on_step=True,
                   on_epoch=True,
                   prog_bar=True,
                   logger=True,
                   sync_dist=True)

        return result
Example #11
    def training_step(self, batch, batch_idx):

        emb = (None if (self.hparams["emb_channels"] == 0) else batch.embedding
               )  # Does this work??

        with torch.no_grad():
            sections = 8
            cut_list = []
            for j in range(sections):
                subset_ind = torch.chunk(torch.arange(batch.e_radius.shape[1]),
                                         sections)[j]
                output = self(torch.cat([
                    batch.cell_data, batch.x
                ], axis=-1), batch.e_radius[:, subset_ind], emb).squeeze() if (
                    'ci' in self.hparams["regime"]) else self(
                        batch.x, batch.e_radius[:, subset_ind], emb).squeeze()
                cut = torch.sigmoid(output) > self.hparams["filter_cut"]
                cut_list.append(cut)

            cut_list = torch.cat(cut_list)

            num_true, num_false = batch.y.bool().sum(), (~batch.y.bool()).sum()
            true_indices = torch.where(batch.y.bool())[0]
            hard_negatives = cut_list & ~batch.y.bool()
            hard_indices = torch.where(hard_negatives)[0]
            hard_indices = hard_indices[torch.randperm(
                len(hard_indices
                    ))][:int(len(true_indices) * self.hparams["ratio"] / 2)]
            easy_indices = torch.where(~batch.y.bool())[0][torch.randint(
                num_false,
                (int(num_true.item() * self.hparams['ratio'] / 2), ))]

            combined_indices = torch.cat(
                [true_indices, hard_indices, easy_indices])

            # Shuffle indices:
            combined_indices = combined_indices[torch.randperm(
                len(combined_indices))]
            weight = torch.tensor(self.hparams["weight"])

        output = (self(torch.cat([batch.cell_data, batch.x], axis=-1),
                       batch.e_radius[:, combined_indices], emb).squeeze() if
                  ('ci' in self.hparams["regime"]) else self(
                      batch.x, batch.e_radius[:, combined_indices],
                      emb).squeeze())

        if ('pid' in self.hparams["regime"]):
            y_pid = batch.pid[batch.e_radius[
                0, combined_indices]] == batch.pid[batch.e_radius[
                    1, combined_indices]]
            loss = F.binary_cross_entropy_with_logits(output,
                                                      y_pid.float(),
                                                      pos_weight=weight)
        else:
            loss = F.binary_cross_entropy_with_logits(
                output, batch.y[combined_indices], pos_weight=weight)

        result = pl.TrainResult(minimize=loss)
        result.log('train_loss', loss, prog_bar=True)

        return result
Example #12
	def training_step(self, batch, batch_idx):
		x, y = batch
		y_hat = self(x)
		loss = criterion(y_hat, y)  # criterion is assumed to be defined elsewhere, e.g. an nn.CrossEntropyLoss() instance
		result = pl.TrainResult(loss)
		result.log('train_loss', loss, on_epoch=True)
		return result
Example #13
 def training_step(self, batch, batch_idx):
     x, y = batch
     y_hat = self(x)
     loss = F.mse_loss(y_hat, y)
     result = pl.TrainResult(loss)
     result.log('train_loss', torch.sqrt(loss))  # log RMSE instead of raw MSE
     return result
Example #14
    def training_step(self, batch, batch_nb):
        inf_input, _ = batch
        _inf_input = inf_input.to(dtype=torch.float) / 255
        training_result = self(_inf_input, inf_input)
        loss = training_result['loss']

        result = pl.TrainResult(minimize=loss)
        elbo = \
            - training_result['log']['loss'] + \
            training_result['log']['encoder_entropy'] + \
            self.hparams.latent_size * torch.log(torch.tensor(0.5))
        result.log('-train_elbo', -elbo, prog_bar=True, logger=True)

        if 'support' in training_result['log'].keys():
            result.log('train_support_median',
                       training_result['log']['support'],
                       reduce_fx=torch.median,
                       on_epoch=True,
                       on_step=False)
            result.log('train_support_mean',
                       torch.mean(training_result['log']['support']),
                       prog_bar=True,
                       reduce_fx=torch.mean,
                       on_epoch=True,
                       on_step=False)

        # Update temperature if Gumbel
        if self.hparams.mode == 'gs':
            self.lvm_method.encoder.update_temperature(
                self.global_step, self.hparams.temperature_update_freq,
                self.hparams.temperature_decay)
            result.log('temperature', self.lvm_method.encoder.temperature)

        return result
Example #15
 def training_step(self, batch: TensorDict, _) -> pl.TrainResult:
     loss, info = self.train_loss(batch)
     info = self.stat_to_tensor_dict(info)
     result = pl.TrainResult(loss, early_stop_on=loss)
     result.log("train/loss", loss)
     result.log_dict({"train/" + k: v for k, v in info.items()})
     return result
Example #16
 def training_step(self, batch, batch_nb):
     loss = batch.num_graphs * self.loss_op(
         self.forward(batch.x, batch.edge_index), batch.y
     )
     result = pl.TrainResult(loss)
     result.log("train_loss", loss, prog_bar=True)
     return result
Example #17
    def training_step(self, batch, batch_idx):

        weight = (torch.tensor(self.hparams["weight"]) if
                  ("weight" in self.hparams) else torch.tensor(
                      (~batch.y_pid.bool()).sum() / batch.y_pid.sum()))

        output = (self(torch.cat([batch.cell_data, batch.x], axis=-1),
                       batch.edge_index) if
                  ('ci' in self.hparams["regime"]) else self(
                      batch.x, batch.edge_index))

        if ('pid' in self.hparams["regime"]):
            y_pid = (batch.pid[batch.edge_index[0]] == batch.pid[
                batch.edge_index[1]]).float()
            y_pid = y_pid.repeat((self.hparams["n_graph_iters"]))
            loss = F.binary_cross_entropy_with_logits(torch.cat(output),
                                                      y_pid.float(),
                                                      pos_weight=weight)
        else:
            y = batch.y.repeat((self.hparams["n_graph_iters"]))
            loss = F.binary_cross_entropy_with_logits(torch.cat(output),
                                                      y,
                                                      pos_weight=weight)

        result = pl.TrainResult(minimize=loss)
        result.log('train_loss', loss, prog_bar=True)

        return result
Example #18
    def training_step(self, batch, batch_idx):
        loss, ppl, acc = self._step(batch)
        cur_lr = self.lr_scheduler.get_last_lr()[0]

        if self.hparams.n_gpus > 1:
            loss = loss.unsqueeze(0)
            cur_lr = torch.Tensor([cur_lr]).to(loss.device)

        result = pl.TrainResult(minimize=loss)
        result.log("lr", torch.Tensor([cur_lr]), on_step=True, on_epoch=True)
        result.log("train_loss",
                   loss,
                   on_step=False,
                   on_epoch=True,
                   prog_bar=True)
        result.log("train_ppl",
                   ppl,
                   on_step=False,
                   on_epoch=True,
                   prog_bar=True)
        result.log("train_acc",
                   acc,
                   on_step=False,
                   on_epoch=True,
                   prog_bar=True)

        return result
Example #19
    def training_epoch_end(self, training_step_outputs):
        z_appr = torch.normal(mean=0,
                              std=1,
                              size=(16, self.hparams.z_dim),
                              device=training_step_outputs[0].minimize.device)

        # Generate images from latent vector
        sample_imgs = self.decoder(z_appr)
        grid = torchvision.utils.make_grid(sample_imgs,
                                           normalize=True,
                                           range=(-1, 1))

        # where to save the image
        path = os.path.join(self.hparams.generated_images_folder,
                            f"generated_images_{self.current_epoch}.png")
        torchvision.utils.save_image(sample_imgs,
                                     path,
                                     normalize=True,
                                     range=(-1, 1))

        # Log images in tensorboard
        self.logger.experiment.add_image(f'generated_images', grid,
                                         self.current_epoch)

        # Epoch level metrics
        epoch_loss = torch.mean(
            torch.stack([x['minimize'] for x in training_step_outputs]))
        results = pl.TrainResult()
        results.log("epoch_loss", epoch_loss, prog_bar=False)

        return results
Example #20
 def training_step(self, batch, batch_idx):
     y_true = batch.y.float()
     y_pred = self.forward(batch)
     loss = self.loss(y_pred, y_true)
     result = pl.TrainResult(loss)
     result.log('loss/train', loss)
     return result
Example #21
 def training_step(self, batch, batch_idx):
     scans, true_masks = batch
     predicted_masks = self(scans)
     loss = self.criterion(predicted_masks, true_masks)
     result = pl.TrainResult(loss, early_stop_on=loss, checkpoint_on=loss)
     result.log('train_loss', loss)
     return result
Example #22
 def training_step(self, batch, batch_idx):
     x, y = batch
     y_hat = self(x)
     loss = F.binary_cross_entropy_with_logits(y_hat, y)
     result = pl.TrainResult(loss)
     result.log('train_loss', loss, on_epoch=True)
     return result
Example #23
    def training_step(self, batch, batch_idx):
        batch = from_batch_get_model_input(batch,
                                           self.d_model,
                                           use_pointer=self.use_pointer,
                                           use_coverage=self.use_coverage)
        batch[0] = patch_src(batch[0])
        batch[6], batch[8] = patch_trg(batch[6])

        inputs = {
            'encoder_input': batch[0],
            'encoder_mask': batch[1],
            'encoder_with_oov': batch[2],
            'oovs_zero': batch[3],
            'context_vec': batch[4],
            'coverage': batch[5],
            'decoder_input': batch[6],
            'decoder_mask': batch[7],
            'decoder_target': batch[8],
            'mode': 'train'
        }

        loss = self(**inputs)
        result = pl.TrainResult(loss)
        result.log('train_loss', loss, prog_bar=True)
        return result
Example #24
 def training_step(self, batch, batch_idx):
     loss, logs = self.step(batch, batch_idx)
     result = pl.TrainResult(minimize=loss)
     result.log_dict(
         {f"train_{k}": v for k, v in logs.items()}, on_step=True, on_epoch=False
     )
     return result
Example #25
 def training_step(self, batch, batch_idx):
     outputs = self.shared_step(batch)
     losses = self.loss(outputs)
     # TODO: DEBUG
     # assert isinstance(losses["total"], torch.Tensor), f'{losses["total"]}, {type(losses["total"])} <- torch.Tensor'
     # logging
     result = pl.TrainResult(minimize=losses["total"])
     result.log_dict(
         {
             "train_loss": losses["total"],
             "train_loss_rank": losses["rank"],
         },
         on_step=True,
         on_epoch=False,
     )
     if self._use_disc():
         result.log_dict(
             {
                 "train_loss_disc": losses["disc"],
                 "train_acc_disc": losses["acc_disc"],
             },
             on_step=True,
             on_epoch=False,
         )
     return result
Example #26
    def training_step(self, batch, batch_idx):
        """
        step == batch
        Lightning calls this inside the training loop with the data from the training dataloader
        passed in as `batch`.
        """
        # forward pass
        batch_imgs, heatmaps_gt, last_heatmap_preds_tch = batch  # [batch_size, channel(3), size, size] [batch_size, n_joints, size, size]
        #batch_imgs, heatmaps_gt = batch

        # get prediction
        combined_heatmap_preds = self(batch_imgs)  # [batch_size, nstack, n_joints, size, size]

        #get prediction from teacher network
        #combined_heatmap_preds_tch = self.net_tch(batch_imgs)
        #last_heatmap_preds_tch = combined_heatmap_preds_tch[:,self.nstack_tch-1]
        #print('gt ', torch.sum(heatmaps_gt), ' tch ', torch.sum(last_heatmap_preds_tch))
        #print('pre ',torch.sum(combined_heatmap_preds))

        # calculate loss
        gt_loss = self.calc_loss(combined_heatmap_preds, heatmaps_gt)
        tch_loss = self.calc_loss(combined_heatmap_preds, last_heatmap_preds_tch)

        #get total loss
        train_loss = self.this_config['lambda'] * gt_loss + (1 - self.this_config['lambda']) * tch_loss
        train_result = pl.TrainResult(minimize=train_loss)  # minimize: the metric to backpropagate on
        train_result.log('train_loss', train_loss, on_step=True, on_epoch=True, prog_bar=True)
        return train_result
Example #27
    def training_step(self, batch, batch_idx):

        weight = (torch.tensor(self.hparams["weight"]) if
                  ("weight" in self.hparams) else torch.tensor(
                      (~batch.y_pid.bool()).sum() / batch.y_pid.sum()))

        output = (self(torch.cat([batch.cell_data, batch.x], axis=-1),
                       batch.edge_index).squeeze() if
                  ("ci" in self.hparams["regime"]) else self(
                      batch.x, batch.edge_index).squeeze())

        if "pid" in self.hparams["regime"]:
            y_pid = (batch.pid[batch.edge_index[0, batch.nested_ind[0]]] ==
                     batch.pid[batch.edge_index[1,
                                                batch.nested_ind[0]]]).float()
            loss = F.binary_cross_entropy_with_logits(
                output[batch.nested_ind[0]], y_pid.float(), pos_weight=weight)
        else:
            loss = F.binary_cross_entropy_with_logits(
                output[batch.nested_ind[0]],
                batch.y[batch.nested_ind[0]],
                pos_weight=weight,
            )

        result = pl.TrainResult(minimize=loss)
        result.log("train_loss", loss, prog_bar=True)

        return result
Example #28
 def training_step(self, batch, batch_idx):
     x, y = batch
     y_hat = self(x).squeeze(1)
     loss = F.binary_cross_entropy(y_hat, y.type(torch.cuda.FloatTensor))
     result = pl.TrainResult(loss)
     result.log('train_loss', loss, on_epoch=True)
     return result
Example #29
    def training_forward(self, batched_inputs):
        images, labels = batched_inputs
        images = self.preprocessing(images)
        out_dict = self.net(images)
        logits = out_dict['outputs']

        BS = logits.shape[0]
        inout_mask = labels < 2
        label_mask = labels >= 2
        inout_logits = logits[inout_mask][:, :2]
        label_logits = logits[label_mask][:, 2:]
        gt_inout = labels[inout_mask]
        gt_labels = labels[label_mask] - 2
        inout_loss = F.cross_entropy(inout_logits, gt_inout,
                                     reduction='sum') / BS
        label_loss = F.cross_entropy(label_logits, gt_labels,
                                     reduction='sum') / BS
        loss = inout_loss + label_loss

        inout_acc = modules.accuracy(inout_logits, gt_inout, topk=(1, ))[0]
        label_top1, label_top5 = modules.accuracy(label_logits,
                                                  gt_labels,
                                                  topk=(1, 5))

        train_result = pl.TrainResult(minimize=loss)
        train_result.log_dict(
            {
                'label_top1': label_top1,
                'label_top5': label_top5,
                'inout_acc': inout_acc
            },
            prog_bar=True,
            logger=True,
            on_step=True)
        return train_result
Example #30
    def discriminator_step(self, x):
        # Measure discriminator's ability to classify real from generated samples
        d_loss = self.discriminator_loss(x)

        # log to prog bar on each step AND for the full epoch
        result = pl.TrainResult(minimize=d_loss)
        result.log('d_loss', d_loss, on_epoch=True, prog_bar=True)
        return result
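Example #30 returns its TrainResult from a helper rather than from training_step itself. In a GAN-style module with two optimizers, Lightning 0.9.x passes an optimizer_idx argument to training_step, so a common way to use such a helper is to dispatch on that index. This is only a sketch under that assumption; generator_step, self.generator, and self.discriminator are hypothetical counterparts to the discriminator_step shown above.

    def training_step(self, batch, batch_idx, optimizer_idx):
        x, _ = batch
        # optimizer 0 updates the generator, optimizer 1 the discriminator
        if optimizer_idx == 0:
            return self.generator_step(x)
        return self.discriminator_step(x)

    def configure_optimizers(self):
        opt_g = torch.optim.Adam(self.generator.parameters(), lr=2e-4)
        opt_d = torch.optim.Adam(self.discriminator.parameters(), lr=2e-4)
        return [opt_g, opt_d], []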