def set_log_data(self, cfg):
    super().set_log_data(cfg)
    self.log_keys = [
        'TRAIN_CROSS_LOSS',
        'TRAIN_HOMO_LOSS',
        'TRAIN_LOCAL_LOSS',
        'TRAIN_PRIOR_LOSS',
        'INTERSECTION_MLP',
        'LABEL_MLP',
        'INTERSECTION_LIN',
        'LABEL_LIN',
        'VAL_CLS_ACC_LIN',
        'VAL_CLS_MEAN_ACC_LIN',
        'VAL_CLS_ACC_MLP',
        'VAL_CLS_MEAN_ACC_MLP'
    ]
    for item in self.log_keys:
        self.loss_meters[item] = AverageMeter()
def set_log_data(self, cfg):
    super().set_log_data(cfg)
    self.log_keys = [
        'TRAIN_SEMANTIC_LOSS_2DEPTH',
        'TRAIN_SEMANTIC_LOSS_2SEG',
        'TRAIN_PIX2PIX_LOSS_2DEPTH',
        'TRAIN_PIX2PIX_LOSS_2SEG',
    ]
    for item in self.log_keys:
        self.loss_meters[item] = AverageMeter()
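# Note: AverageMeter is used throughout these methods but is not defined in this
# section. The sketch below is an assumption about its usual interface (as in common
# semantic-segmentation training utilities): it keeps the latest value plus a running
# sum, count, and average, and works with scalars or numpy arrays.
class AverageMeter(object):
    """Track the latest value and a running sum / count / average."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        # `val` may be a scalar (e.g. a loss) or a numpy array
        # (e.g. per-class intersection counts); `sum` and `avg` follow its type.
        self.val = val
        self.sum = self.sum + val * n
        self.count += n
        self.avg = self.sum / self.count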
def validate(self):
    self.phase = 'test'

    # switch to evaluate mode
    self.net.eval()

    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    with torch.no_grad():
        for i, data in enumerate(self.val_loader):
            self.set_input(data)
            self._forward(if_cls=True, if_trans=False)

            cls_loss = self.result['loss_cls'].mean() * self.cfg.ALPHA_CLS
            # store a python float rather than a tensor
            self.loss_meters['VAL_CLS_LOSS'].update(cls_loss.item())

            self.pred = self.cls.data.max(1)[1]
            intersection, union, label = util.intersectionAndUnionGPU(
                self.pred.cuda(), self.label.cuda(), self.cfg.NUM_CLASSES)
            if self.cfg.multiprocessing_distributed:
                dist.all_reduce(intersection)
                dist.all_reduce(union)
                dist.all_reduce(label)
            intersection, union, label = (intersection.cpu().numpy(),
                                          union.cpu().numpy(),
                                          label.cpu().numpy())
            intersection_meter.update(intersection)
            union_meter.update(union)
            target_meter.update(label)

    # aggregate per-class IoU / accuracy over the whole validation set
    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    self.loss_meters['VAL_CLS_ACC'].update(allAcc)
    self.loss_meters['VAL_CLS_MEAN_ACC'].update(mAcc)
    self.loss_meters['VAL_CLS_MEAN_IOU'].update(mIoU)
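# util.intersectionAndUnionGPU is not shown in this section. A minimal sketch of the
# standard GPU implementation is given below as an assumption: it returns per-class
# intersection, union, and target pixel counts, so accumulating them over the
# validation set yields iou_class = intersection / union and
# accuracy_class = intersection / target, exactly as used in validate() above.
import torch

def intersectionAndUnionGPU(output, target, K, ignore_index=255):
    # output, target: integer class-id tensors of identical shape (e.g. B x H x W)
    assert output.dim() in (1, 2, 3) and output.shape == target.shape
    output = output.view(-1)
    target = target.view(-1)
    # exclude ignored pixels from the prediction histogram as well
    output[target == ignore_index] = ignore_index
    intersection = output[output == target]
    area_intersection = torch.histc(intersection.float(), bins=K, min=0, max=K - 1)
    area_output = torch.histc(output.float(), bins=K, min=0, max=K - 1)
    area_target = torch.histc(target.float(), bins=K, min=0, max=K - 1)
    area_union = area_output + area_target - area_intersection
    return area_intersection, area_union, area_target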
def set_log_data(self, cfg):
    self.loss_meters = defaultdict()
    self.log_keys = [
        'TRAIN_CLS_ACC', 'VAL_CLS_ACC',  # classification
        'TRAIN_CLS_LOSS', 'VAL_CLS_LOSS',
        'TRAIN_CLS_MEAN_ACC', 'VAL_CLS_MEAN_ACC'
    ]
    for item in self.log_keys:
        self.loss_meters[item] = AverageMeter()
def set_log_data(self, cfg):
    self.loss_meters = defaultdict()
    self.log_keys = [
        'TRAIN_G_LOSS',
        'TRAIN_SEMANTIC_LOSS',  # semantic
        'TRAIN_PIX2PIX_LOSS',
        'TRAIN_CLS_ACC', 'VAL_CLS_ACC',  # classification
        'TRAIN_CLS_LOSS', 'TRAIN_CLS_MEAN_IOU',
        'VAL_CLS_LOSS', 'VAL_CLS_MEAN_IOU', 'VAL_CLS_MEAN_ACC'
    ]
    for item in self.log_keys:
        self.loss_meters[item] = AverageMeter()
def set_log_data(self, cfg):
    self.loss_meters = defaultdict()
    self.log_keys = [
        'TRAIN_G_LOSS', 'TRAIN_D_REAL',  # GAN
        'TRAIN_D_FAKE',
        'TRAIN_PIXEL_LOSS',  # pixel-wise
        'VAL_PIXEL_LOSS',
        'TRAIN_SEMANTIC_LOSS',  # semantic
        'TRAIN_CLS_ACC', 'VAL_CLS_ACC',  # classification
        'TRAIN_CLS_LOSS', 'VAL_CLS_LOSS',
        'TRAIN_CLS_MEAN_ACC', 'VAL_CLS_MEAN_ACC'
    ]
    for item in self.log_keys:
        self.loss_meters[item] = AverageMeter()
def set_log_data(self, cfg):
    self.loss_meters = defaultdict()
    self.log_keys = [
        'TRAIN_GAN_G_LOSS', 'TRAIN_GAN_D_LOSS',
        'TRAIN_SEMANTIC_LOSS',  # semantic
        'TRAIN_PIX2PIX_LOSS',
        'TRAIN_CONTRAST_LOSS',
        'TRAIN_CLS_ACC', 'TRAIN_CLS_LOSS', 'TRAIN_CLS_MEAN_IOU',
        'VAL_CLS_ACC',  # classification
        'VAL_CLS_LOSS', 'VAL_CLS_MEAN_IOU', 'VAL_CLS_MEAN_ACC',
        'INTERSECTION', 'UNION', 'LABEL',
        'TRAIN_CLS_LOSS_COMPL', 'TRAIN_CLS_LOSS_FUSE'
    ]
    for item in self.log_keys:
        self.loss_meters[item] = AverageMeter()
def test_slide(self):
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    self.net.eval()
    self.phase = 'test'
    print('testing sliding windows...')

    for i, data in enumerate(self.val_loader):
        self.set_input(data)
        prediction = util.slide_cal(model=self.net, image=self.source_modal,
                                    classes=self.cfg.NUM_CLASSES,
                                    crop_size=self.cfg.FINE_SIZE)
        self.pred = prediction.max(1)[1]

        intersection, union, label = util.intersectionAndUnionGPU(
            self.pred.cuda(), self.label.cuda(), self.cfg.NUM_CLASSES)
        if self.cfg.multiprocessing_distributed:
            dist.all_reduce(intersection)
            dist.all_reduce(union)
            dist.all_reduce(label)
        intersection, union, label = (intersection.cpu().numpy(),
                                      union.cpu().numpy(),
                                      label.cpu().numpy())
        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(label)
        # overall pixel accuracy of the current batch
        print(sum(intersection_meter.val) / sum(target_meter.val))

    # aggregate per-class IoU / accuracy over the whole test set
    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    self.loss_meters['VAL_CLS_ACC'].update(allAcc)
    self.loss_meters['VAL_CLS_MEAN_ACC'].update(mAcc)
    self.loss_meters['VAL_CLS_MEAN_IOU'].update(mIoU)
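# util.slide_cal is not shown in this section either. The function below is a
# hypothetical sketch of what sliding-window inference typically does: the image is
# scored in overlapping crop_size x crop_size windows and the logits are averaged
# with a count map. Edge padding and multi-scale handling are omitted, and the
# signature and stride are assumptions, not the repository's actual implementation.
import torch

def slide_cal(model, image, classes, crop_size, stride_rate=2.0 / 3.0):
    b, _, h, w = image.shape
    stride = max(int(crop_size * stride_rate), 1)
    # window origins, with an extra window flush against the far border so that
    # every pixel is covered at least once
    ys = sorted(set(list(range(0, max(h - crop_size, 0) + 1, stride)) + [max(h - crop_size, 0)]))
    xs = sorted(set(list(range(0, max(w - crop_size, 0) + 1, stride)) + [max(w - crop_size, 0)]))
    scores = torch.zeros(b, classes, h, w, device=image.device)
    counts = torch.zeros(b, 1, h, w, device=image.device)
    with torch.no_grad():
        for y in ys:
            for x in xs:
                crop = image[:, :, y:y + crop_size, x:x + crop_size]
                logits = model(crop)  # assumed to return B x classes x crop x crop logits
                scores[:, :, y:y + crop_size, x:x + crop_size] += logits
                counts[:, :, y:y + crop_size, x:x + crop_size] += 1
    # average overlapping predictions; clamp avoids division by zero
    return scores / counts.clamp(min=1)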