def __init__(self, opt):
    """Configure the microtubule dataset: type ID, clip ranges, transforms and data paths."""
    self.typeID = DatasetTypeIDs['microtubule']
    # Derived crop sizes (all based on opt.fineSize):
    # a 2x pre-crop, a +200px padded random crop (presumably headroom for
    # rotation/elastic warping — TODO confirm), and the final network size.
    full_crop = opt.fineSize * 2
    padded_crop = opt.fineSize + 200
    final_crop = opt.fineSize
    test_crop = opt.fineSize
    # Intensity clipping ranges applied to input/output images.
    self.input_clip = (0, 5)
    self.output_clip = (2, 100)
    # Transform pipeline components.
    self.iMerge = Merge()
    self.iElastic = ElasticTransform(alpha=1000, sigma=40)
    self.iSplit = Split([0, 1], [1, 2])
    self.iRot = RandomRotate()
    self.iRCropTrain = RandomCropNumpy(size=(padded_crop, padded_crop))
    self.iCropFTrain = CenterCropNumpy(size=(full_crop, full_crop))
    self.iCropTrain = CenterCropNumpy(size=(final_crop, final_crop))
    self.iCropTest = CenterCropNumpy(size=(test_crop, test_crop))
    # Dataset locations on disk.
    self.ptrain = './datasets/wei-tubulin-ctrl-20170520-images/train'
    self.ptest = './datasets/wei-tubulin-ctrl-20170520-images/test'
    self.dim_ordering = opt.dim_ordering
    self.opt = opt
    # Number of times each sample is repeated per epoch — TODO confirm semantics.
    self.repeat = 30
def __init__(self, opt):
    """Configure the microtubule (TMR-IF variant) dataset: transforms, paths and options."""
    self.typeID = DatasetTypeIDs['microtubule']
    # All crop sizes derive from opt.fineSize: a 2x pre-crop, a +200px
    # padded random crop (presumably margin for rotation/elastic warping —
    # TODO confirm), and the final crop at network size.
    full_crop = opt.fineSize * 2
    padded_crop = opt.fineSize + 200
    final_crop = opt.fineSize
    test_crop = opt.fineSize
    # Intensity clipping ranges for input/output images.
    self.input_clip = (0, 5)
    self.output_clip = (2, 100)
    # Transform pipeline components.
    self.iMerge = Merge()
    self.iElastic = ElasticTransform(alpha=1000, sigma=40)
    self.iSplit = Split([0, 1], [1, 2])
    self.iRot = RandomRotate()
    self.iRCropTrain = RandomCropNumpy(size=(padded_crop, padded_crop))
    self.iCropFTrain = CenterCropNumpy(size=(full_crop, full_crop))
    self.iCropTrain = CenterCropNumpy(size=(final_crop, final_crop))
    self.iCropTest = CenterCropNumpy(size=(test_crop, test_crop))
    # Dataset locations on disk.
    self.ptrain = '../anet-lite/src/datasets/Christian-TMR-IF-v0.1/train'
    self.ptest = '../anet-lite/src/datasets/Christian-TMR-IF-v0.1/test'
    self.dim_ordering = opt.dim_ordering
    self.opt = opt
    # Per-epoch sample repetition count — TODO confirm semantics.
    self.repeat = 30
    # Glob used to select sample folders; drift correction and LR scaling flags.
    self.folder_filter = '*'
    self.drift_correction = False
    self.scale_LR = True
def __init__(self, opt):
    """Configure the simulated tubulin dataset: crops, noise/degradation transforms.

    Unlike the other dataset variants in this file, this one synthesizes the
    low-resolution input via blur + Poisson subsampling + Gaussian/Poisson noise.
    """
    self.typeID = DatasetTypeIDs['tubulin']
    # Geometric transforms.
    self.iRot = RandomRotate()
    self.iMerge = Merge()
    self.iSplit = Split([0, 1], [1, 2])
    # +100px random pre-crop, then a center crop to the network size
    # (presumably so the rotation does not introduce border artifacts — TODO confirm).
    self.irCropTrain = RandomCropNumpy(size=(opt.fineSize + 100, opt.fineSize + 100))
    # Consistency fix: pass size as a tuple, matching every other *CropNumpy
    # call in this file (was a list; the crop size itself is unchanged).
    self.ioCropTrain = CenterCropNumpy(size=(opt.fineSize, opt.fineSize))
    self.iCropTest = CenterCropNumpy(size=(1024, 1024))
    self.iElastic = ElasticTransform(alpha=1000, sigma=40)
    # Degradation pipeline simulating the low-quality input channel.
    self.iBlur = GaussianBlurring(sigma=1.5)
    self.iPoisson = PoissonSubsampling(peak=['lognormal', -0.5, 0.001])
    self.iBG = AddGaussianPoissonNoise(sigma=25, peak=0.06)
    # Counters for generated samples.
    self.train_count = 0
    self.test_count = 0
    self.dim_ordering = opt.dim_ordering
    self.repeat = 1
    self.opt = opt
def __init__(self, opt):
    """Configure a dataset whose input/output channels are declared via opt strings.

    ``opt.input_channels`` / ``opt.output_channels`` are comma-separated
    ``name=glob`` specs, e.g. ``"dapi=*dapi*.png,tmr=*tmr*.png"``.
    """
    # ~1.45x pre-crop before rotation, then center-crop back to fineSize
    # (presumably to avoid rotation border artifacts — TODO confirm).
    pre_crop = int(opt.fineSize * 1.45)
    train_crop = opt.fineSize
    test_crop = opt.fineSize
    # Dataset split directories rooted at opt.workdir.
    self.ptrain = os.path.join(opt.workdir, 'train')
    self.pvalid = os.path.join(opt.workdir, 'valid')
    self.ptest = os.path.join(opt.workdir, 'test')

    def _parse_channels(spec):
        # Parse "name=glob,name=glob" into [(name, {'filter': glob, 'loader': ImageLoader()}), ...].
        # Renamed the loop variable from `filter` to `filt` to avoid shadowing the builtin.
        channels = []
        for ch in spec.split(','):
            name, filt = ch.split('=')
            channels.append((name, {'filter': filt, 'loader': ImageLoader()}))
        return channels

    self.input_channels = _parse_channels(opt.input_channels)
    self.output_channels = _parse_channels(opt.output_channels)
    # Transform pipeline: merge all channels, augment, then split back into
    # input and output channel groups by index.
    self.iMerge = Merge()
    self.iElastic = ElasticTransform(alpha=1000, sigma=40)
    self.iSplit = Split([0, len(self.input_channels)],
                        [len(self.input_channels), len(self.input_channels) + len(self.output_channels)])
    self.iRCropTrain1 = RandomCropNumpy(size=(pre_crop, pre_crop))
    self.iRot = RandomRotate()
    self.iCropTrain2 = CenterCropNumpy(size=(train_crop, train_crop))
    self.iCropTest = CenterCropNumpy(size=(test_crop, test_crop))
    self.dim_ordering = opt.dim_ordering
    self.opt = opt
    # Per-epoch sample repetition count — TODO confirm semantics.
    self.repeat = 30
    self.input_channel_names = [n for n, _ in self.input_channels]
    self.output_channel_names = [n for n, _ in self.output_channels]