def training_step(self, train_loader):
    """Run one training epoch over *train_loader*.

    Expects batches of ``(index, im, mask, z)``; moves tensors to the GPU,
    computes loss / dice-IoU / mAP per batch, backpropagates, and appends
    the epoch means to ``self.train_log``.

    Returns:
        Tuple ``(train_loss, train_iou, train_mAP)`` of per-batch losses
        and per-sample IoU / mAP values for the epoch.
    """
    self.train()
    train_loss = []
    train_iou = []
    train_mAP = []
    for i, (index, im, mask, z) in enumerate(train_loader):
        self.optimizer.zero_grad()
        im = im.cuda()
        mask = mask.cuda()
        z = z.cuda()
        logit = self.forward(im, z)
        pred = torch.sigmoid(logit)
        loss = self.criterion(logit, mask)
        # is_average=False keeps per-sample values so we can extend the lists.
        iou = dice_accuracy(pred, mask, is_average=False)
        mAP = do_mAP(pred.data.cpu().numpy(), mask.cpu().numpy(),
                     is_average=False)
        train_loss.append(loss.item())
        train_iou.extend(iou)
        train_mAP.extend(mAP)
        loss.backward()
        self.optimizer.step()
    # Record epoch means with an explicit mapping instead of eval() on
    # dynamically-built variable names.
    epoch_metrics = {'loss': train_loss, 'iou': train_iou, 'mAP': train_mAP}
    for metric, values in epoch_metrics.items():
        self.train_log[metric].append(np.mean(values))
    return train_loss, train_iou, train_mAP
def perform_validation(self, val_loader):
    """Evaluate ``self.model`` on *val_loader* for one epoch.

    Runs inference under ``torch.no_grad()`` (no autograd graph is built),
    computes loss / dice-IoU / mAP per batch, and appends the epoch means
    to ``self.val_log``.

    Returns:
        Tuple ``(val_loss, val_iou, val_mAP)`` of per-batch losses and
        per-sample IoU / mAP values for the epoch.
    """
    # self.set_mode('valid')
    self.model.eval()
    val_loss = []
    val_iou = []
    val_mAP = []
    for images, targets in val_loader:
        images = images.cuda()
        targets = targets.long().cuda()
        with torch.no_grad():
            logit = self.model(images)
            pred = torch.sigmoid(logit)
            # pred = torch.softmax(logit, dim=1)
            # Collapse the channel dimension to per-pixel max probability.
            pred, pred_ind = pred.data.max(1)
            loss = self.criterion(logit, targets)
            iou = dice_accuracy(pred, targets,
                                is_average=False).cpu().numpy()
            mAP = do_mAP(pred.cpu().numpy(), targets.cpu().numpy(),
                         is_average=False)
        val_loss.append(loss.item())
        val_iou.extend(iou)
        val_mAP.extend(mAP)
    # Append epoch means to the metrics dict with an explicit mapping
    # instead of eval() on dynamically-built variable names.
    epoch_metrics = {'loss': val_loss, 'iou': val_iou, 'mAP': val_mAP}
    for metric, values in epoch_metrics.items():
        self.val_log[metric].append(np.mean(values))
    return val_loss, val_iou, val_mAP
def perform_validation(self, val_loader):
    """Evaluate the model on *val_loader* for one epoch (im/mask/z batches).

    Expects batches of ``(index, im, mask, z)``; runs inference under
    ``torch.no_grad()``, computes loss / dice-IoU / mAP per batch, and
    appends the epoch means to ``self.val_log``.

    Returns:
        Tuple ``(val_loss, val_iou, val_mAP)`` of per-batch losses and
        per-sample IoU / mAP values for the epoch.
    """
    self.set_mode('valid')
    val_loss = []
    val_iou = []
    val_mAP = []
    for index, im, mask, z in val_loader:
        im = im.cuda()
        mask = mask.cuda()
        z = z.cuda()
        with torch.no_grad():
            logit = self.forward(im, z)
            pred = torch.sigmoid(logit)
            loss = self.criterion(logit, mask)
            # is_average=False keeps per-sample values so we can extend.
            iou = dice_accuracy(pred, mask, is_average=False)
            mAP = do_mAP(pred.cpu().numpy(), mask.cpu().numpy(),
                         is_average=False)
        val_loss.append(loss.item())
        val_iou.extend(iou)
        val_mAP.extend(mAP)
    # Append epoch means to the metrics dict with an explicit mapping
    # instead of eval() on dynamically-built variable names.
    epoch_metrics = {'loss': val_loss, 'iou': val_iou, 'mAP': val_mAP}
    for metric, values in epoch_metrics.items():
        self.val_log[metric].append(np.mean(values))
    return val_loss, val_iou, val_mAP
def training_step(self, train_loader):
    """Run one training epoch of ``self.model`` over *train_loader*.

    Expects batches of ``(images, targets)``; moves tensors to the GPU,
    computes loss / dice-IoU / mAP per batch, backpropagates, optionally
    visualizes predictions when ``self.debug`` is set, and appends the
    epoch means to ``self.train_log``.

    Returns:
        Tuple ``(train_loss, train_iou, train_mAP)`` of per-batch losses
        and per-sample IoU / mAP values for the epoch.
    """
    # self.set_mode('train')
    self.model.train()
    train_loss = []
    train_iou = []
    train_mAP = []
    for i, (images, targets) in enumerate(train_loader):
        self.optimizer.zero_grad()
        images = images.cuda()
        targets = targets.long().cuda()
        # logit = self.forward(im, z)
        logit = self.model(images)
        pred = torch.sigmoid(logit)
        # pred = torch.softmax(logit, dim=1)
        # Collapse the channel dimension to per-pixel max probability.
        pred, pred_ind = pred.data.max(1)
        loss = self.criterion(logit, targets)
        iou = dice_accuracy(pred, targets, is_average=False).cpu().numpy()
        mAP = do_mAP(pred.data.cpu().numpy(), targets.cpu().numpy(),
                     is_average=False)
        train_loss.append(loss.item())
        train_iou.extend(iou)
        train_mAP.extend(mAP)
        loss.backward()
        self.optimizer.step()
        # Visualize every 30th batch of every 5th epoch when debugging.
        if self.debug and not self.epoch % 5 and not i % 30:
            # if self.debug and self.epoch == 1 and i == 2:
            show_image_mask_pred(images.cpu().data.numpy(),
                                 targets.cpu().data.numpy(),
                                 logit.cpu().data.numpy())
    # Append epoch means to the metrics dict with an explicit mapping
    # instead of eval() on dynamically-built variable names.
    epoch_metrics = {'loss': train_loss, 'iou': train_iou, 'mAP': train_mAP}
    for metric, values in epoch_metrics.items():
        self.train_log[metric].append(np.mean(values))
    return train_loss, train_iou, train_mAP