Example 1
 def iterate(self, epoch, phase):
     """Run one full pass over the dataloader for *phase*.

     Scales each batch loss by ``self.accumulation_steps`` and steps the
     optimizer once per accumulation window (train phase only), clipping
     gradient norms to 1. Tracks running loss and segmentation metrics,
     appends per-epoch dice/iou/f2 to the history lists, and clears the
     CUDA cache before returning.

     Args:
         epoch: current epoch index (forwarded to Meter / epoch_log).
         phase: key into ``self.dataloaders`` ("train" enables backprop
             and optional CutMix).

     Returns:
         Tuple ``(epoch_loss, dice, iou)``. Note f2 is recorded in
         ``self.f2_scores`` but intentionally not returned, to keep the
         existing caller contract.
     """
     meter = Meter(phase, epoch)
     start = time.strftime("%H:%M:%S")
     print(f"Starting epoch: {epoch} | phase: {phase} | ⏰: {start}")
     dataloader = self.dataloaders[phase]
     running_loss = 0.0
     total_batches = len(dataloader)
     tk0 = tqdm(dataloader, total=total_batches)
     self.optimizer.zero_grad()
     for itr, batch in enumerate(tk0):
         # Optionally apply CutMix augmentation during training.
         if phase == "train" and self.do_cutmix:
             images, targets = self.cutmix(batch, 0.5)
         else:
             images, targets = batch
         seg_loss, outputs, preds = self.forward(images, targets)
         # Scale so accumulated gradients average over the window.
         loss = seg_loss / self.accumulation_steps
         if phase == "train":
             loss.backward()
             torch.nn.utils.clip_grad_norm_(self.net.parameters(), 1)
             if (itr + 1) % self.accumulation_steps == 0:
                 self.optimizer.step()
                 self.optimizer.zero_grad()
         running_loss += loss.item()
         meter.update(outputs, preds)
         tk0.set_postfix(loss=(running_loss / (itr + 1)))
     # Undo the accumulation scaling to recover the mean per-batch loss.
     epoch_loss = (running_loss * self.accumulation_steps) / total_batches
     dice, iou, f2 = epoch_log(phase, epoch, epoch_loss, meter, start)
     self.losses[phase].append(epoch_loss)
     self.dice_scores[phase].append(dice)
     self.iou_scores[phase].append(iou)
     self.f2_scores[phase].append(f2)
     torch.cuda.empty_cache()
     return epoch_loss, dice, iou
Example 2
    def validation(self, test_loader):
        """Evaluate the checkpointed model on *test_loader*.

        Reloads weights from ``self.weight_path`` (CPU map_location) on
        every call, runs a no-grad forward pass over the loader through
        ``self.solver``, accumulates the criterion loss, and collects
        per-class accuracy statistics in a Meter.

        Args:
            test_loader: iterable of ``(inputs, labels)`` batches.

        Returns:
            ``(class_neg_accuracy, class_pos_accuracy, class_accuracy,
            neg_accuracy, pos_accuracy, accuracy, loss_mean)``.
        """
        # eval() already sets training=False; the redundant
        # model.train(False) call has been removed.
        self.model.eval()
        checkpoint = torch.load(self.weight_path, map_location=torch.device('cpu'))
        self.model.load_state_dict(checkpoint['state_dict'])
        meter = Meter()
        tbar = tqdm.tqdm(test_loader, ncols=80)
        loss_sum = 0

        with torch.no_grad():
            for i, (x, labels) in enumerate(tbar):
                labels_predict = self.solver.forward(x)
                # Map logits to probabilities before loss / metric update.
                labels_predict = torch.sigmoid(labels_predict)
                loss = self.solver.cal_loss(labels, labels_predict, self.criterion)
                loss_sum += loss.item()

                meter.update(labels, labels_predict.cpu())

                descript = "Val Loss: {:.7f}".format(loss.item())
                tbar.set_description(desc=descript)
        loss_mean = loss_sum / len(tbar)

        class_neg_accuracy, class_pos_accuracy, class_accuracy, neg_accuracy, pos_accuracy, accuracy = meter.get_metrics()
        print(
            "Class_0_accuracy: %0.4f | Class_1_accuracy: %0.4f | Negative accuracy: %0.4f | positive accuracy: %0.4f | accuracy: %0.4f" %
            (class_accuracy[0], class_accuracy[1], neg_accuracy, pos_accuracy, accuracy))
        return class_neg_accuracy, class_pos_accuracy, class_accuracy, neg_accuracy, pos_accuracy, accuracy, loss_mean
Example 3
    def validation(self, valid_loader):
        """Run a no-grad validation pass over *valid_loader*.

        Forwards each batch through ``self.solver``, accumulates the
        criterion loss, and collects two-class positive/negative accuracy
        statistics in a Meter. Unlike raw-logit variants, no sigmoid is
        applied here — ``cal_loss`` / Meter receive the network output
        as-is.

        Args:
            valid_loader: iterable of ``(images, labels)`` batches.

        Returns:
            ``(class_neg_accuracy, class_pos_accuracy, class_accuracy,
            neg_accuracy, pos_accuracy, accuracy, loss_mean)``.
        """
        self.model.eval()
        meter = Meter()
        tbar = tqdm(valid_loader)
        loss_sum = 0

        with torch.no_grad():
            for i, (images, labels) in enumerate(tbar):
                labels_predict = self.solver.forward(images)
                loss = self.solver.cal_loss(labels, labels_predict,
                                            self.criterion)
                loss_sum += loss.item()

                meter.update(labels, labels_predict.cpu())

                descript = "Val Loss: {:.7f}".format(loss.item())
                tbar.set_description(desc=descript)
        loss_mean = loss_sum / len(tbar)

        class_neg_accuracy, class_pos_accuracy, class_accuracy, neg_accuracy, pos_accuracy, accuracy = meter.get_metrics(
        )
        print(
            "Class_0_accuracy: %0.4f | Positive accuracy: %0.4f | Negative accuracy: %0.4f | \n"
            "Class_1_accuracy: %0.4f | Positive accuracy: %0.4f | Negative accuracy: %0.4f |"
            %
            (class_accuracy[0], class_pos_accuracy[0], class_neg_accuracy[0],
             class_accuracy[1], class_pos_accuracy[1], class_neg_accuracy[1]))
        return class_neg_accuracy, class_pos_accuracy, class_accuracy, neg_accuracy, pos_accuracy, accuracy, loss_mean
Example 4
    def iterate(self,
                epoch,
                phase,
                max_samples=1000000,
                flip=0,
                transpose=False,
                use_neptune=True):
        """Run one epoch over ``self.dataloaders[phase]``.

        Optionally flips/transposes each batch, combines segmentation,
        auxiliary-dice and classification losses, and (train phase only)
        backprops and steps the optimizer every batch. Every 5th train
        iteration a batch from ``self.dataloader2`` contributes a
        KL-divergence term. Learning rates are logged to neptune every
        100 iterations and a mid-epoch checkpoint is saved every 5000.

        Args:
            epoch: epoch index, used for logging offsets.
            phase: "train" enables backprop; any key of self.dataloaders.
            max_samples: hard cap on the number of batches processed.
            flip: 0 = no flip; values > 10 flip both spatial dims
                ([2, 3]); any other non-zero value is passed directly to
                ``Tensor.flip`` as the dim spec.
            transpose: if True, swap the last two dims (H <-> W).
            use_neptune: forwarded to Meter for metric logging.

        Returns:
            The combined loss tensor of the last processed batch.
        """
        start = time.strftime("%H:%M:%S")
        print(f"Starting epoch: {epoch} | phase: {phase} | ⏰: {start}")
        self.phase = phase
        self.net.train(phase == "train")
        self.net.freeze_bn()  # BatchNorm stays frozen even in train mode
        dataloader = self.dataloaders[phase]
        total_batches = len(dataloader)
        meter = Meter(phase,
                      epoch,
                      use_neptune=use_neptune,
                      total_batches=total_batches)
        if self.use_tqdm:
            tk0 = tqdm(dataloader)
        else:
            tk0 = dataloader
        self.optimizer.zero_grad()
        if self.dataloader2 is not None:
            dli = iter(self.dataloader2)
        for itr, batch in enumerate(tk0):
            images, masks, idxs, scales = batch

            if flip > 10:
                images = images.flip([2, 3])
                masks = masks.flip([2, 3])
            elif flip != 0:
                images = images.flip(flip)
                masks = masks.flip(flip)

            if transpose:
                images = images.permute(0, 1, 3, 2)
                masks = masks.permute(0, 1, 3, 2)

            loss, dice_loss, dice_loss2, cls_loss = self.forward(
                images, masks, scales, meter, idxs)
            # Combined objective: seg loss + weighted aux dice + cls loss.
            lo = loss + 0.1 * dice_loss2 + cls_loss

            if phase == "train":
                lo.backward()
                if self.scheduler is not None:
                    try:
                        self.scheduler.step()
                    except Exception:
                        # Some schedulers raise once stepped past their
                        # horizon; keep the final LR rather than abort.
                        # TODO: catch the scheduler's specific exception.
                        pass

                # Every 5th step, train on a batch from the aux loader.
                if self.dataloader2 is not None and itr % 5 == 0:
                    try:
                        images, masks, idxs, scales = next(dli)
                    except StopIteration:
                        # Aux loader is shorter than the main one:
                        # restart it instead of crashing mid-epoch.
                        dli = iter(self.dataloader2)
                        images, masks, idxs, scales = next(dli)
                    lo = self.forward2(images, masks, scales, meter, idxs)
                    lo.backward()
                    meter.update(KLDiv=lo.item(), itr=-1)

                self.optimizer.step()
                self.optimizer.zero_grad()

            if itr > 0 and itr % 100 == 0:
                if phase == 'train':
                    for i, o in enumerate(self.optimizer.param_groups):
                        if self.use_neptune:
                            neptune.log_metric("lr/group_{}".format(i),
                                               itr + epoch * total_batches,
                                               o["lr"])

                # Periodic mid-epoch checkpoint.
                if itr % 5000 == 0:
                    state = {
                        "state_dict": self.net.state_dict(),
                        "optimizer": self.optimizer.state_dict(),
                    }
                    torch.save(state, f'm{itr}.pth')

            if itr > max_samples:
                break

            meter.update(loss=loss.item(),
                         lossdice=dice_loss.item(),
                         lossdice2=dice_loss2.item(),
                         itr=itr)
        meter.get_metrics()
        torch.cuda.empty_cache()

        # NOTE(review): if the dataloader is empty, `loss` is unbound and
        # this raises NameError — confirm loaders are never empty.
        return loss