def create_dataloader():
    """Build the ACDC training DataLoader and, optionally, an eval DataLoader.

    Returns:
        (train_loader, test_loader): ``test_loader`` is ``None`` unless
        ``args.train_with_eval`` is set.

    Raises:
        ValueError: if ``cfg.DATASET.NAME`` names an unsupported dataset.
    """
    # Spatial augmentations applied jointly to image and label so they stay aligned.
    train_joint_transform = joint_augment.Compose([
        joint_augment.To_PIL_Image(),
        joint_augment.RandomAffine(0, translate=(0.125, 0.125)),
        joint_augment.RandomRotate((-180, 180)),
        joint_augment.FixResize(256)
    ])
    # Image-only transforms: tensor conversion + per-image mean/std normalization.
    transform = standard_augment.Compose([
        standard_augment.to_Tensor(),
        standard_augment.normalize_meanstd()
    ])
    target_transform = standard_augment.Compose([standard_augment.to_Tensor()])

    if cfg.DATASET.NAME == 'acdc':
        train_set = AcdcDataset(data_list=cfg.DATASET.TRAIN_LIST,
                                joint_augment=train_joint_transform,
                                augment=transform,
                                target_augment=target_transform)
    else:
        # Previously an unsupported name fell through to a NameError on
        # train_set; fail fast with a clear message instead.
        raise ValueError(f"Unsupported dataset: {cfg.DATASET.NAME!r}")

    # NOTE(review): shuffle=False on the *training* loader looks like a leftover
    # from a removed DistributedSampler setup — confirm whether shuffling is wanted.
    train_loader = DataLoader(
        train_set,
        batch_size=args.batch_size,
        pin_memory=True,
        num_workers=args.workers,
        shuffle=False,
    )

    if args.train_with_eval:
        eval_transform = joint_augment.Compose([
            joint_augment.To_PIL_Image(),
            joint_augment.FixResize(256),  # divided by 32
            joint_augment.To_Tensor()
        ])
        evalImg_transform = standard_augment.Compose([
            standard_augment.normalize_meanstd()
        ])
        if cfg.DATASET.NAME == 'acdc':
            test_set = AcdcDataset(data_list=cfg.DATASET.TEST_LIST,
                                   joint_augment=eval_transform,
                                   augment=evalImg_transform)
        else:
            raise ValueError(f"Unsupported dataset: {cfg.DATASET.NAME!r}")
        test_loader = DataLoader(
            test_set,
            batch_size=args.batch_size,
            pin_memory=True,
            num_workers=args.workers,
            shuffle=False,
        )
    else:
        test_loader = None
    return train_loader, test_loader
def joint_transform(imgs, gts, cfg):
    """Resize/convert paired image and label volumes, then pad each frame batch.

    Args:
        imgs: image volume indexed as ``imgs[slice, ..., time]``
            # assumes same (S, H, W, C, T) layout as ``gts`` — TODO confirm
        gts: label volume of shape (S, H, W, C, T).
        cfg: config object forwarded to ``transform``.

    Returns:
        (aligned_imgs, aligned_gts): one ``to_image_list`` batch per time
        step, padded so H and W are divisible by 32.
    """
    trans = joint_augment.Compose([
        joint_augment.To_PIL_Image(),
        joint_augment.FixResize(256),
        joint_augment.To_Tensor()
    ])

    S, H, W, C, T = gts.shape
    # One list of transformed slices per time step (the original pre-filled
    # these with [None] * T only to overwrite them immediately).
    trans_imgs = [[] for _ in range(T)]
    trans_gts = [[] for _ in range(T)]
    for i in range(T):
        for j in range(S):
            t0, t1 = trans(imgs[j, ..., i], gts[j, ..., i])
            trans_imgs[i].append(transform(t0, cfg))
            trans_gts[i].append(t1)

    # Pad every per-frame batch to a size divisible by 32 (network stride).
    aligned_imgs = [to_image_list(batch, size_divisible=32) for batch in trans_imgs]
    aligned_gts = [to_image_list(batch, size_divisible=32) for batch in trans_gts]
    return aligned_imgs, aligned_gts
def create_dataloader():
    """Build the evaluation DataLoader (batch_size=1) for the ACDC test split.

    Returns:
        (test_loader, test_set)

    Raises:
        ValueError: if ``cfg.DATASET.NAME`` names an unsupported dataset.
    """
    # Joint transforms keep image and label geometrically aligned.
    eval_transform = joint_augment.Compose([
        joint_augment.To_PIL_Image(),
        joint_augment.FixResize(256),
        joint_augment.To_Tensor()
    ])
    # Image-only normalization with the dataset-wide mean/std.
    evalImg_transform = standard_augment.Compose(
        [standard_augment.normalize([cfg.DATASET.MEAN], [cfg.DATASET.STD])])

    if cfg.DATASET.NAME != "acdc":
        # Previously an unsupported name fell through to a NameError on
        # test_set at the return; fail fast with a clear message instead.
        raise ValueError(f"Unsupported dataset: {cfg.DATASET.NAME!r}")

    test_set = AcdcDataset(cfg.DATASET.TEST_LIST,
                           df_used=True,
                           joint_augment=eval_transform,
                           augment=evalImg_transform)
    test_loader = DataLoader(test_set,
                             batch_size=1,
                             pin_memory=True,
                             num_workers=args.workers,
                             shuffle=False,
                             collate_fn=BatchCollator(size_divisible=32,
                                                      df_used=True))
    return test_loader, test_set
def create_dataloader_Insensee(do_elastic_transform=True,
                               alpha=(100., 350.),
                               sigma=(14., 17.),
                               do_rotation=True,
                               a_x=(0., 2 * np.pi),
                               a_y=(-0.000001, 0.00001),
                               a_z=(-0.000001, 0.00001),
                               do_scale=True,
                               scale_range=(0.7, 1.3)):
    """Build a training DataLoader using Isensee-style SpatialTransform augmentation.

    Args:
        do_elastic_transform: enable elastic deformation.
        alpha, sigma: elastic-deformation magnitude/smoothness ranges.
        do_rotation: enable random rotation.
        a_x, a_y, a_z: per-axis rotation-angle ranges (radians); y/z are
            effectively zero so rotation is in-plane only.
        do_scale, scale_range: enable/bound random scaling.

    Returns:
        (train_loader_Isensee, test_loader): ``test_loader`` is ``None``
        unless ``args.train_with_eval`` is set.

    Raises:
        ValueError: if eval is requested for an unsupported dataset.
    """
    # 352x352 crop centered at half the patch size; 'constant' border fill.
    transform = SpatialTransform((352, 352),
                                 list(np.array((352, 352)) // 2),
                                 do_elastic_transform, alpha, sigma,
                                 do_rotation, a_x, a_y, a_z,
                                 do_scale, scale_range,
                                 'constant', 0, 3, 'constant', 0, 0,
                                 random_crop=False)

    train_set_Isensee = AcdcDataset_Isensee(data_list=cfg.DATASET.TRAIN_LIST,
                                            Isensee_augment=transform)
    train_loader_Isensee = DataLoader(train_set_Isensee,
                                      batch_size=args.batch_size,
                                      pin_memory=True,
                                      num_workers=1,
                                      shuffle=False)

    if args.train_with_eval:
        eval_transform = joint_augment.Compose([
            joint_augment.To_PIL_Image(),
            joint_augment.FixResize(352),  # divided by 32
            joint_augment.To_Tensor()
        ])
        evalImg_transform = standard_augment.Compose(
            [standard_augment.normalize_meanstd()])
        if cfg.DATASET.NAME == 'acdc':
            test_set = AcdcDataset(data_list=cfg.DATASET.TEST_LIST,
                                   joint_augment=eval_transform,
                                   augment=evalImg_transform)
        else:
            # Previously fell through to a NameError on test_set; fail fast.
            raise ValueError(f"Unsupported dataset: {cfg.DATASET.NAME!r}")
        test_loader = DataLoader(
            test_set,
            batch_size=args.batch_size,
            pin_memory=True,
            num_workers=args.workers,
            shuffle=False,
        )
    else:
        test_loader = None
    return train_loader_Isensee, test_loader
# basically look for connected components and choose the largest one, delete everything else mask = seg != 0 # change label to {0,1} 0:background 1:mask(many be not one kind) lbls = label(mask, 8) # calculate number of connected region lbls_sizes = [np.sum(lbls == i) for i in np.unique(lbls)] # calculate every region's number largest_region = np.argmax( lbls_sizes[1:]) + 1 # from 1 because need excluding the background print('labls:', np.unique(lbls), 'largest_region:', largest_region) seg[lbls != largest_region] = 0 # only allow one pred region,set others to zero return seg eval_transform = joint_augment.Compose([ joint_augment.To_PIL_Image(), joint_augment.FixResize(256), joint_augment.To_Tensor() ]) evalImg_transform = standard_augment.Compose( [standard_augment.normalize_meanstd()]) if cfg.DATASET.NAME == 'acdc': test_set = AcdcDataset_Upload(data_list=cfg.DATASET.TEST_UPLOAD, joint_augment=eval_transform, augment=evalImg_transform) test_loader = DataLoader(test_set, batch_size=1, pin_memory=True, num_workers=args.workers,
def postprocess_prediction(seg):
    """Keep only the largest connected foreground component of ``seg``.

    All other predicted regions are zeroed out in place.
    NOTE(review): mutates and returns the same ``seg`` array — callers
    relying on the original prediction should pass a copy.
    """
    # basically look for connected components and choose the largest one, delete everything else
    mask = seg != 0  # change label to {0,1} 0:background 1:mask(many be not one kind)
    # 4-connected component labelling — presumably skimage/cv2-style `label`;
    # verify against the module's actual import.
    lbls = label(mask, 4)  # calculate number of connected region
    lbls_sizes = [np.sum(lbls == i) for i in np.unique(lbls)]  # calculate every region's number
    # NOTE(review): assumes label 0 (background) is present and first in
    # np.unique(lbls); an all-foreground or all-background input would
    # misbehave here — TODO confirm inputs always contain background.
    largest_region = np.argmax(
        lbls_sizes[1:]) + 1  # from 1 because need excluding the background
    seg[lbls != largest_region] = 0  # only allow one pred region,set others to zero
    return seg


# NOTE(review): the remainder of this chunk is module-level dataloader setup
# that is truncated mid-statement in the source; reproduced verbatim below.
# Joint image+label augmentations for training.
train_joint_transform = joint_augment.Compose([
    joint_augment.To_PIL_Image(),
    joint_augment.RandomAffine(0, translate=(0.125, 0.125)),
    joint_augment.RandomRotate((-180, 180)),
    joint_augment.FixResize(256)
])
# Image-only transforms: tensor conversion + mean/std normalization.
transform = standard_augment.Compose(
    [standard_augment.to_Tensor(),
     standard_augment.normalize_meanstd()])
target_transform = standard_augment.Compose([standard_augment.to_Tensor()])
if cfg.DATASET.NAME == 'acdc':
    train_set = AcdcDataset(data_list=cfg.DATASET.TRAIN_LIST,
                            joint_augment=train_joint_transform,
                            augment=transform,
                            target_augment=target_transform)
    train_loader = DataLoader(train_set, batch_size=args.batch_size,
def create_dataloader(logger):
    """Build distributed train/eval DataLoaders for the ACDC dataset.

    Uses a ``DistributedSampler`` sized from the current process group, so
    ``dist`` must already be initialized.

    Args:
        logger: kept for interface compatibility.
            # NOTE(review): currently unused inside this function.

    Returns:
        (train_loader, test_loader): ``test_loader`` is ``None`` unless
        ``args.train_with_eval`` is set.

    Raises:
        ValueError: if ``cfg.DATASET.NAME`` names an unsupported dataset.
    """
    # Joint spatial augmentations keep image and label aligned.
    train_joint_transform = joint_augment.Compose([
        joint_augment.To_PIL_Image(),
        joint_augment.RandomAffine(0, translate=(0.125, 0.125)),
        joint_augment.RandomRotate((-180, 180)),
        joint_augment.FixResize(256)
    ])
    # Image-only transforms with dataset-wide mean/std normalization.
    transform = standard_augment.Compose([
        standard_augment.to_Tensor(),
        standard_augment.normalize([cfg.DATASET.MEAN], [cfg.DATASET.STD])
    ])
    target_transform = standard_augment.Compose([standard_augment.to_Tensor()])

    if cfg.DATASET.NAME == 'acdc':
        train_set = AcdcDataset(data_list=cfg.DATASET.TRAIN_LIST,
                                df_used=cfg.DATASET.DF_USED,
                                df_norm=cfg.DATASET.DF_NORM,
                                boundary=cfg.DATASET.BOUNDARY,
                                joint_augment=train_joint_transform,
                                augment=transform,
                                target_augment=target_transform)
    else:
        # Previously fell through to a NameError on train_set; fail fast.
        raise ValueError(f"Unsupported dataset: {cfg.DATASET.NAME!r}")

    # shuffle=False is required here: the DistributedSampler shuffles.
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_set, num_replicas=dist.get_world_size(), rank=dist.get_rank())
    train_loader = DataLoader(train_set,
                              batch_size=args.batch_size,
                              pin_memory=True,
                              num_workers=args.workers,
                              shuffle=False,
                              sampler=train_sampler,
                              collate_fn=BatchCollator(
                                  size_divisible=32,
                                  df_used=cfg.DATASET.DF_USED,
                                  boundary=cfg.DATASET.BOUNDARY))

    if args.train_with_eval:
        eval_transform = joint_augment.Compose([
            joint_augment.To_PIL_Image(),
            joint_augment.FixResize(256),
            joint_augment.To_Tensor()
        ])
        evalImg_transform = standard_augment.Compose([
            standard_augment.normalize([cfg.DATASET.MEAN], [cfg.DATASET.STD])
        ])
        if cfg.DATASET.NAME == 'acdc':
            test_set = AcdcDataset(data_list=cfg.DATASET.TEST_LIST,
                                   df_used=cfg.DATASET.DF_USED,
                                   df_norm=cfg.DATASET.DF_NORM,
                                   boundary=cfg.DATASET.BOUNDARY,
                                   joint_augment=eval_transform,
                                   augment=evalImg_transform)
        else:
            raise ValueError(f"Unsupported dataset: {cfg.DATASET.NAME!r}")
        test_sampler = torch.utils.data.distributed.DistributedSampler(
            test_set, num_replicas=dist.get_world_size(), rank=dist.get_rank())
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 pin_memory=True,
                                 num_workers=args.workers,
                                 shuffle=False,
                                 sampler=test_sampler,
                                 collate_fn=BatchCollator(
                                     size_divisible=32,
                                     df_used=cfg.DATASET.DF_USED,
                                     boundary=cfg.DATASET.BOUNDARY))
    else:
        test_loader = None
    return train_loader, test_loader