def mask2rws(mask_path, rws_path, dcm_path=None):
    """Convert a binary mask (image file or in-memory array) to an RWS JSON annotation."""
    if isinstance(mask_path, str):
        mask = mv.imread(mask_path, mv.ImreadMode.GRAY)
        dcm_path = mv.basename(mask_path).replace('.png', '.dcm')
    else:
        mask = mask_path
        assert dcm_path is not None

    # extract object contours from the mask and convert each one to an RWS shape
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    shapes = []
    for contour in contours:
        shape = _gen_rws_shape('auto', contour)
        shapes.append(shape)

    data = dict(
        version='0.1.0',
        flags={},
        shapes=shapes,
        lineColor=None,
        fillColor=None,
        imagePath=dcm_path,
        imageData=None,
        imageHeight=mask.shape[0],
        imageWidth=mask.shape[1],
    )

    with open(rws_path, 'w') as f:
        json.dump(data, f, ensure_ascii=False, indent=2)
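# Example usage (a minimal sketch; the file names below are hypothetical and
# only illustrate the expected arguments):
#
#   mask2rws('patient001_mask.png', 'patient001.json')
#   # or with an in-memory mask array, in which case dcm_path must be given:
#   mask2rws(mask_array, 'patient001.json', dcm_path='patient001.dcm')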
def test_imread_imwrite(img):
    dst_dir = mv.joinpath(DATA_DIR, 'temporary_subdir')
    dst_path = mv.joinpath(dst_dir, mv.basename(PNG_IMG_PATH))
    mv.mkdirs(dst_dir)

    ret_val = mv.imwrite(img, dst_path)
    assert ret_val
    img_reloaded = mv.imread(dst_path, mv.ImreadMode.UNCHANGED)
    assert_image_equal(img, img_reloaded)

    mv.rmtree(dst_dir)
def bdc2rws_contour(dcm_path, bdc_path, rws_path):
    """Convert BDC contour annotations of a DICOM image to an RWS JSON file."""
    ds = mv.dcminfo(dcm_path)
    contours = load_bdc_dr_contour(bdc_path)

    shapes = []
    for label, contour in contours:
        shape = _gen_rws_shape(label, contour)
        shapes.append(shape)

    data = dict(
        version='0.1.0',
        flags={},
        shapes=shapes,
        lineColor=None,
        fillColor=None,
        imagePath=mv.basename(dcm_path),
        imageData=None,
        imageHeight=ds.Rows,
        imageWidth=ds.Columns,
    )

    with open(rws_path, 'w') as f:
        json.dump(data, f, ensure_ascii=False, indent=2)
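# Example usage (a minimal sketch; the file names are hypothetical and assume
# the BDC contour file was annotated on the given DICOM image):
#
#   bdc2rws_contour('chest.dcm', 'chest.bdc', 'chest.json')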
def __init__(self, mode, model, batch_processor, train_dataloader=None,
             val_dataloader=None, optimizer=None, work_dir=None,
             max_epochs=10000):
    """A training helper for PyTorch.

    Args:
        mode (`ModeKey`): Running mode.
        model (`torch.nn.Module`): The model to be run.
        batch_processor (callable): A callable method that processes a data
            batch. The interface of this method should be
            `batch_processor(model, data, train_mode) -> dict`.
        train_dataloader (`DataLoader`): Training data loader.
        val_dataloader (`DataLoader`): Validation data loader.
        optimizer (str or `Optimizer`, optional): If it is a str, the runner
            will construct an optimizer according to it.
        work_dir (str, optional): The working directory to save checkpoints,
            logs and other outputs.
        max_epochs (int): Total training epochs.
    """
    assert isinstance(mode, mv.ModeKey)
    assert isinstance(model, torch.nn.Module)
    assert callable(batch_processor)
    assert isinstance(optimizer, (str, torch.optim.Optimizer)) or optimizer is None
    assert isinstance(work_dir, str) or work_dir is None
    assert isinstance(max_epochs, int)

    self.mode = mode
    self.epoch_runner = getattr(self, mode.value)
    self.model = model
    self.batch_processor = batch_processor
    self.train_dataloader = train_dataloader
    self.val_dataloader = val_dataloader
    self.optimizer = self.build_optimizer(optimizer)

    # create work_dir
    self.work_dir = mv.abspath(work_dir if work_dir is not None else '.')
    mv.mkdirs(self.work_dir)

    # init TensorboardX visualizer and dataloader
    if mode == mv.ModeKey.TRAIN:
        experiment = mv.basename(self.work_dir)
        self.visualizer = mv.TensorboardVisualizer(experiment)
        self.dataloader = self.train_dataloader
    else:
        self.visualizer = None
        self.dataloader = self.val_dataloader

    # init hooks and average meter
    self._hooks = []
    self.average_meter = AverageMeter()

    # init loop parameters
    self._epoch = 0
    self._max_epochs = max_epochs if mode == mv.ModeKey.TRAIN else 1
    self._inner_iter = 0
    self._iter = 0
    self._max_iters = 0

    # get model name from model class
    if hasattr(self.model, 'module'):
        self._model_name = self.model.module.__class__.__name__
    else:
        self._model_name = self.model.__class__.__name__
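# Example construction (a minimal sketch; `Runner` is assumed to be the
# enclosing class, and `MyModel`, `my_batch_processor` and the dataloaders
# are placeholders, not part of this module):
#
#   model = MyModel()
#   runner = Runner(
#       mode=mv.ModeKey.TRAIN,
#       model=model,
#       batch_processor=my_batch_processor,
#       train_dataloader=train_loader,
#       val_dataloader=val_loader,
#       optimizer=torch.optim.Adam(model.parameters(), lr=1e-3),
#       work_dir='./work_dirs/my_experiment',
#       max_epochs=50,
#   )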