def create_dataloader():
    train_joint_transform = joint_augment.Compose([
        joint_augment.To_PIL_Image(),
        joint_augment.RandomAffine(0, translate=(0.125, 0.125)),
        joint_augment.RandomRotate((-180, 180)),
        joint_augment.FixResize(256)
    ])
    transform = standard_augment.Compose([
        standard_augment.to_Tensor(),
        # standard_augment.normalize([cfg.DATASET.MEAN], [cfg.DATASET.STD]),
        standard_augment.normalize_meanstd()
    ])
    target_transform = standard_augment.Compose([standard_augment.to_Tensor()])

    if cfg.DATASET.NAME == 'acdc':
        train_set = AcdcDataset(data_list=cfg.DATASET.TRAIN_LIST,
                                joint_augment=train_joint_transform,
                                augment=transform,
                                target_augment=target_transform)

    # train_sampler = torch.utils.data.distributed.DistributedSampler(
    #     train_set, num_replicas=dist.get_world_size(), rank=dist.get_rank())
    train_loader = DataLoader(train_set,
                              batch_size=args.batch_size,
                              pin_memory=True,
                              num_workers=args.workers,
                              shuffle=False)

    if args.train_with_eval:
        eval_transform = joint_augment.Compose([
            joint_augment.To_PIL_Image(),
            joint_augment.FixResize(256),  # size divisible by 32
            joint_augment.To_Tensor()
        ])
        evalImg_transform = standard_augment.Compose([
            # standard_augment.normalize([cfg.DATASET.MEAN], [cfg.DATASET.STD]),
            standard_augment.normalize_meanstd()
        ])
        if cfg.DATASET.NAME == 'acdc':
            test_set = AcdcDataset(data_list=cfg.DATASET.TEST_LIST,
                                   joint_augment=eval_transform,
                                   augment=evalImg_transform)
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 pin_memory=True,
                                 num_workers=args.workers,
                                 shuffle=False)
    else:
        test_loader = None

    return train_loader, test_loader
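# Minimal usage sketch (not from the source): create_dataloader() reads the
# module-level `cfg` and `args` objects, so both must be populated before the
# call. The dataset/loader attributes used below are standard PyTorch APIs.
train_loader, test_loader = create_dataloader()
print(len(train_loader.dataset), 'training samples')
if test_loader is not None:
    print(len(test_loader.dataset), 'evaluation samples')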
def transform(imgs):
    mean = 63.19523533061758
    std = 70.74166957523165
    trans = standard_augment.Compose([
        standard_augment.To_PIL_Image(),
        # joint_augment.RandomAffine(0, translate=(0.125, 0.125)),
        # joint_augment.RandomRotate((-180, 180)),
        # joint_augment.FixResize(224),
        standard_augment.to_Tensor(),
        standard_augment.normalize([mean], [std]),
    ])
    return trans(imgs)
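# Sketch (assumption, not part of the repo): how dataset-wide intensity
# statistics such as the hard-coded mean/std above could be recomputed from
# the raw images, given any iterable of 2D numpy arrays.
import numpy as np

def compute_mean_std(images):
    """Return the global mean and std over all pixels of `images`."""
    pixels = np.concatenate([np.asarray(img, dtype=np.float64).ravel() for img in images])
    return float(pixels.mean()), float(pixels.std())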
def create_dataloader():
    eval_transform = joint_augment.Compose([
        joint_augment.To_PIL_Image(),
        joint_augment.FixResize(256),
        joint_augment.To_Tensor()
    ])
    evalImg_transform = standard_augment.Compose(
        [standard_augment.normalize([cfg.DATASET.MEAN], [cfg.DATASET.STD])])

    if cfg.DATASET.NAME == "acdc":
        test_set = AcdcDataset(cfg.DATASET.TEST_LIST,
                               df_used=True,
                               joint_augment=eval_transform,
                               augment=evalImg_transform)

    test_loader = DataLoader(test_set,
                             batch_size=1,
                             pin_memory=True,
                             num_workers=args.workers,
                             shuffle=False,
                             collate_fn=BatchCollator(size_divisible=32, df_used=True))
    return test_loader, test_set
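# Hedged inference sketch: iterating the single-slice test loader built above.
# The exact batch structure produced by BatchCollator is repo-specific, so the
# loop body is left as a placeholder rather than guessed.
import torch

test_loader, test_set = create_dataloader()
with torch.no_grad():
    for batch in test_loader:
        # model forward pass on `batch` would go here (batch_size=1, one slice at a time)
        pass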
def create_dataloader_Insensee(do_elastic_transform=True, alpha=(100., 350.),
                               sigma=(14., 17.), do_rotation=True,
                               a_x=(0., 2 * np.pi), a_y=(-0.000001, 0.00001),
                               a_z=(-0.000001, 0.00001), do_scale=True,
                               scale_range=(0.7, 1.3)):
    transform = SpatialTransform((352, 352), list(np.array((352, 352)) // 2),
                                 do_elastic_transform, alpha, sigma,
                                 do_rotation, a_x, a_y, a_z,
                                 do_scale, scale_range,
                                 'constant', 0, 3, 'constant', 0, 0,
                                 random_crop=False)
    train_set_Isensee = AcdcDataset_Isensee(data_list=cfg.DATASET.TRAIN_LIST,
                                            Isensee_augment=transform)
    train_loader_Isensee = DataLoader(train_set_Isensee,
                                      batch_size=args.batch_size,
                                      pin_memory=True,
                                      num_workers=1,
                                      shuffle=False)

    if args.train_with_eval:
        eval_transform = joint_augment.Compose([
            joint_augment.To_PIL_Image(),
            joint_augment.FixResize(352),  # size divisible by 32
            joint_augment.To_Tensor()
        ])
        evalImg_transform = standard_augment.Compose(
            [standard_augment.normalize_meanstd()])
        if cfg.DATASET.NAME == 'acdc':
            test_set = AcdcDataset(data_list=cfg.DATASET.TEST_LIST,
                                   joint_augment=eval_transform,
                                   augment=evalImg_transform)
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 pin_memory=True,
                                 num_workers=args.workers,
                                 shuffle=False)
    else:
        test_loader = None

    return train_loader_Isensee, test_loader
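# Usage sketch (assumption): the Isensee-style loader with its default heavy
# spatial augmentation, plus a lighter variant that disables the elastic
# deformation while keeping rotation and scaling.
train_loader_aug, val_loader = create_dataloader_Insensee()
train_loader_rigid, _ = create_dataloader_Insensee(do_elastic_transform=False)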
        for i in np.unique(lbls)]  # pixel count of every labelled region
    largest_region = np.argmax(lbls_sizes[1:]) + 1  # offset by 1 to exclude the background label
    print('labels:', np.unique(lbls), 'largest_region:', largest_region)
    seg[lbls != largest_region] = 0  # keep only the largest predicted region, zero out the rest
    return seg


eval_transform = joint_augment.Compose([
    joint_augment.To_PIL_Image(),
    joint_augment.FixResize(256),
    joint_augment.To_Tensor()
])
evalImg_transform = standard_augment.Compose(
    [standard_augment.normalize_meanstd()])

if cfg.DATASET.NAME == 'acdc':
    test_set = AcdcDataset_Upload(data_list=cfg.DATASET.TEST_UPLOAD,
                                  joint_augment=eval_transform,
                                  augment=evalImg_transform)
test_loader = DataLoader(test_set,
                         batch_size=1,
                         pin_memory=True,
                         num_workers=args.workers,
                         shuffle=False)

model = CleanU_Net()
nii_numpy_data = []
nii_numpy_lab = []
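# Hedged sketch: the post-processing fragment above is missing its function
# header and the step that computes `lbls` and `lbls_sizes`. One plausible
# completion uses scipy's connected-component labelling; the name
# keep_largest_region and the use of ndimage.label are assumptions.
import numpy as np
from scipy import ndimage

def keep_largest_region(seg):
    lbls, _ = ndimage.label(seg > 0)                            # connected components of the prediction
    lbls_sizes = [np.sum(lbls == i) for i in np.unique(lbls)]   # pixel count of every labelled region
    largest_region = np.argmax(lbls_sizes[1:]) + 1              # skip index 0 (background)
    seg[lbls != largest_region] = 0                             # keep only the largest region
    return seg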
def transform(imgs, cfg):
    trans = standard_augment.Compose([
        standard_augment.normalize([cfg.DATASET.MEAN], [cfg.DATASET.STD]),
    ])
    return trans(imgs)
        for i in np.unique(lbls)]  # pixel count of every labelled region
    largest_region = np.argmax(lbls_sizes[1:]) + 1  # offset by 1 to exclude the background label
    seg[lbls != largest_region] = 0  # keep only the largest predicted region, zero out the rest
    return seg


train_joint_transform = joint_augment.Compose([
    joint_augment.To_PIL_Image(),
    joint_augment.RandomAffine(0, translate=(0.125, 0.125)),
    joint_augment.RandomRotate((-180, 180)),
    joint_augment.FixResize(256)
])
transform = standard_augment.Compose(
    [standard_augment.to_Tensor(), standard_augment.normalize_meanstd()])
target_transform = standard_augment.Compose([standard_augment.to_Tensor()])

if cfg.DATASET.NAME == 'acdc':
    train_set = AcdcDataset(data_list=cfg.DATASET.TRAIN_LIST,
                            joint_augment=train_joint_transform,
                            augment=transform,
                            target_augment=target_transform)
train_loader = DataLoader(train_set,
                          batch_size=args.batch_size,
                          pin_memory=True,
                          num_workers=1,
                          shuffle=False)

eval_transform = joint_augment.Compose([
def create_dataloader(logger):
    train_joint_transform = joint_augment.Compose([
        joint_augment.To_PIL_Image(),
        joint_augment.RandomAffine(0, translate=(0.125, 0.125)),
        joint_augment.RandomRotate((-180, 180)),
        joint_augment.FixResize(256)
    ])
    transform = standard_augment.Compose([
        standard_augment.to_Tensor(),
        standard_augment.normalize([cfg.DATASET.MEAN], [cfg.DATASET.STD])
    ])
    target_transform = standard_augment.Compose([standard_augment.to_Tensor()])

    if cfg.DATASET.NAME == 'acdc':
        train_set = AcdcDataset(data_list=cfg.DATASET.TRAIN_LIST,
                                df_used=cfg.DATASET.DF_USED,
                                df_norm=cfg.DATASET.DF_NORM,
                                boundary=cfg.DATASET.BOUNDARY,
                                joint_augment=train_joint_transform,
                                augment=transform,
                                target_augment=target_transform)

    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_set, num_replicas=dist.get_world_size(), rank=dist.get_rank())
    train_loader = DataLoader(train_set,
                              batch_size=args.batch_size,
                              pin_memory=True,
                              num_workers=args.workers,
                              shuffle=False,
                              sampler=train_sampler,
                              collate_fn=BatchCollator(size_divisible=32,
                                                       df_used=cfg.DATASET.DF_USED,
                                                       boundary=cfg.DATASET.BOUNDARY))

    if args.train_with_eval:
        eval_transform = joint_augment.Compose([
            joint_augment.To_PIL_Image(),
            joint_augment.FixResize(256),
            joint_augment.To_Tensor()
        ])
        evalImg_transform = standard_augment.Compose([
            standard_augment.normalize([cfg.DATASET.MEAN], [cfg.DATASET.STD])
        ])
        if cfg.DATASET.NAME == 'acdc':
            test_set = AcdcDataset(data_list=cfg.DATASET.TEST_LIST,
                                   df_used=cfg.DATASET.DF_USED,
                                   df_norm=cfg.DATASET.DF_NORM,
                                   boundary=cfg.DATASET.BOUNDARY,
                                   joint_augment=eval_transform,
                                   augment=evalImg_transform)
        test_sampler = torch.utils.data.distributed.DistributedSampler(
            test_set, num_replicas=dist.get_world_size(), rank=dist.get_rank())
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 pin_memory=True,
                                 num_workers=args.workers,
                                 shuffle=False,
                                 sampler=test_sampler,
                                 collate_fn=BatchCollator(size_divisible=32,
                                                          df_used=cfg.DATASET.DF_USED,
                                                          boundary=cfg.DATASET.BOUNDARY))
    else:
        test_loader = None

    return train_loader, test_loader
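# Hedged setup sketch: this distributed variant assumes one process per GPU
# with the default process group already initialised (e.g. launched through
# torchrun); otherwise dist.get_world_size()/dist.get_rank() will raise.
# `logger` is whatever logger object the training script constructs.
import torch.distributed as dist

if not dist.is_initialized():
    dist.init_process_group(backend='nccl')  # assumes the env:// rendezvous variables are set
train_loader, test_loader = create_dataloader(logger)
train_loader.sampler.set_epoch(0)            # re-seed DistributedSampler shuffling each epoch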