def __init__(self, opt, force_generate=False):
    super(TransformedCSVImages, self).__init__(opt)
    self.ptrain = os.path.join(opt.workdir, '__images__', 'train')
    self.ptest = os.path.join(opt.workdir, '__images__', 'test')
    self.iSplit = Split([0, 2], [2, 3])
    self.test_count = 0
    # Generate the training image pairs from the CSV tables if they are missing (or regeneration is forced).
    if not os.path.exists(self.ptrain) or force_generate:
        generate_image_pairs_from_csv(os.path.join(opt.workdir, 'train'),
                                      self.ptrain,
                                      A_frame=['uniform', 200, 500],
                                      B_frame=0.95,
                                      A_frame_limit=(0, 0.5),
                                      B_frame_limit=(2000, 1.0),
                                      image_per_file=30,
                                      target_size=(2560, 2560))
    if not os.path.exists(self.ptest) or force_generate:
        if not os.path.exists(os.path.join(opt.workdir, 'test')):
            return
        # np.log(1.0) == 0, so this spans 10**-3 .. 10**0, scaled to 60000 frames, plus a zero-frame entry.
        aframes = list(np.logspace(-3, np.log(1.0), 32) * 60000) + [0]
        generate_image_pairs_from_csv(os.path.join(opt.workdir, 'test'),
                                      self.ptest,
                                      A_frame=aframes,
                                      B_frame=1.0,
                                      A_frame_limit=(0, 1.0),
                                      B_frame_limit=(0, 1.0),
                                      image_per_file=len(aframes),
                                      target_size=(2560, 2560),
                                      zero_offset=True)
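# For reference, the A_frame schedule used for the test set above can be inspected on its own.
# A minimal, self-contained sketch (numpy only) that reproduces the same list of frame counts,
# using nothing beyond the expression already present in __init__:
#
#     import numpy as np
#
#     # 32 log-spaced fractions between 10**-3 and 10**0 (np.log(1.0) == 0),
#     # scaled to a 60000-frame acquisition, plus an explicit zero-frame entry.
#     aframes = list(np.logspace(-3, np.log(1.0), 32) * 60000) + [0]
#     print(len(aframes))   # 33
#     print(aframes[0])     # 60.0   (60000 * 1e-3)
#     print(aframes[-2])    # 60000.0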
def __init__(self, opt):
    self.typeID = DatasetTypeIDs['microtubule']
    train_crop_size1 = opt.fineSize * 2
    train_crop_size2 = opt.fineSize + 200
    train_crop_size3 = opt.fineSize
    test_size = opt.fineSize
    self.input_clip = (0, 5)
    self.output_clip = (2, 100)
    # prepare the transforms
    self.iMerge = Merge()
    self.iElastic = ElasticTransform(alpha=1000, sigma=40)
    self.iSplit = Split([0, 1], [1, 2])
    self.iRot = RandomRotate()
    self.iRCropTrain = RandomCropNumpy(size=(train_crop_size2, train_crop_size2))
    self.iCropFTrain = CenterCropNumpy(size=(train_crop_size1, train_crop_size1))
    self.iCropTrain = CenterCropNumpy(size=(train_crop_size3, train_crop_size3))
    self.iCropTest = CenterCropNumpy(size=(test_size, test_size))
    self.ptrain = './datasets/wei-tubulin-ctrl-20170520-images/train'
    self.ptest = './datasets/wei-tubulin-ctrl-20170520-images/test'
    self.dim_ordering = opt.dim_ordering
    self.opt = opt
    self.repeat = 30
def __init__(self, opt):
    self.typeID = DatasetTypeIDs['microtubule']
    train_crop_size1 = opt.fineSize * 2
    train_crop_size2 = opt.fineSize + 200
    train_crop_size3 = opt.fineSize
    test_size = opt.fineSize
    self.input_clip = (0, 5)
    self.output_clip = (2, 100)
    # prepare the transforms
    self.iMerge = Merge()
    self.iElastic = ElasticTransform(alpha=1000, sigma=40)
    self.iSplit = Split([0, 1], [1, 2])
    self.iRot = RandomRotate()
    self.iRCropTrain = RandomCropNumpy(size=(train_crop_size2, train_crop_size2))
    self.iCropFTrain = CenterCropNumpy(size=(train_crop_size1, train_crop_size1))
    self.iCropTrain = CenterCropNumpy(size=(train_crop_size3, train_crop_size3))
    self.iCropTest = CenterCropNumpy(size=(test_size, test_size))
    self.ptrain = '../anet-lite/src/datasets/Christian-TMR-IF-v0.1/train'
    self.ptest = '../anet-lite/src/datasets/Christian-TMR-IF-v0.1/test'
    self.dim_ordering = opt.dim_ordering
    self.opt = opt
    self.repeat = 30
    self.folder_filter = '*'
    self.drift_correction = False
    self.scale_LR = True
def __init__(self, opt, force_generate=False):
    super(TransformedCSVImages, self).__init__(opt)
    self.ptrain = os.path.join(opt.workdir, 'train')
    self.ptest = os.path.join(opt.workdir, 'test')
    self.iSplit = Split([0, 2], [2, 3])
    self.test_count = 0
    self.folder_filter = '*'
    self.file_extension = '.png'
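# folder_filter and file_extension typically drive file discovery elsewhere in the dataset class.
# The helper below is a hypothetical sketch (not part of this codebase) of how such settings
# could be combined with glob to collect image files:
#
#     import glob
#     import os
#
#     def list_images(root, folder_filter='*', file_extension='.png'):
#         """Hypothetical helper: collect image files matching the folder filter and extension."""
#         pattern = os.path.join(root, folder_filter, '*' + file_extension)
#         return sorted(glob.glob(pattern))
#
#     # e.g. list_images('/path/to/workdir/train')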
def __init__(self, opt):
    self.typeID = DatasetTypeIDs['tubulin']
    self.iRot = RandomRotate()
    self.iMerge = Merge()
    self.iSplit = Split([0, 1], [1, 2])
    self.irCropTrain = RandomCropNumpy(size=(opt.fineSize + 100, opt.fineSize + 100))
    self.ioCropTrain = CenterCropNumpy(size=(opt.fineSize, opt.fineSize))
    self.iCropTest = CenterCropNumpy(size=(1024, 1024))
    self.iElastic = ElasticTransform(alpha=1000, sigma=40)
    # Noise / sampling simulation transforms: Gaussian blur, Poisson subsampling with a
    # log-normal peak, and mixed Gaussian/Poisson background noise.
    self.iBlur = GaussianBlurring(sigma=1.5)
    self.iPoisson = PoissonSubsampling(peak=['lognormal', -0.5, 0.001])
    self.iBG = AddGaussianPoissonNoise(sigma=25, peak=0.06)
    self.train_count = 0
    self.test_count = 0
    self.dim_ordering = opt.dim_ordering
    self.repeat = 1
    self.opt = opt
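# Illustrative only: a self-contained numpy re-implementation of what "Poisson subsampling with a
# log-normal peak" is assumed to mean here (rescale the image to a randomly drawn peak photon
# count, then draw Poisson-distributed counts). This is NOT the library's PoissonSubsampling
# implementation; parameter semantics are an assumption.
#
#     import numpy as np
#
#     def poisson_subsample(img, peak_mean=-0.5, peak_sigma=0.001, rng=np.random):
#         """Hypothetical sketch: rescale so the image maximum equals a log-normal peak,
#         then sample per-pixel Poisson counts."""
#         peak = rng.lognormal(mean=peak_mean, sigma=peak_sigma)
#         scaled = img / (img.max() + 1e-12) * peak
#         return rng.poisson(scaled).astype(np.float32)
#
#     noisy = poisson_subsample(np.random.rand(64, 64))  # usage on a toy image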
def __init__(self, opt):
    train_crop_size1 = int(opt.fineSize * 1.45)  # pre-crop
    train_crop_size2 = opt.fineSize
    train_crop_size3 = opt.fineSize
    test_size = opt.fineSize
    self.ptrain = os.path.join(opt.workdir, 'train')  # e.g. './datasets/Christian-TMR-IF-v0.1/train'
    self.pvalid = os.path.join(opt.workdir, 'valid')
    self.ptest = os.path.join(opt.workdir, 'test')    # e.g. './datasets/Christian-TMR-IF-v0.1/test'
    # Parse the channel specifications: comma-separated 'name=filter' pairs.
    self.input_channels = []
    for ch in opt.input_channels.split(','):
        name, filt = ch.split('=')
        self.input_channels.append((name, {'filter': filt, 'loader': ImageLoader()}, ))
    self.output_channels = []
    for ch in opt.output_channels.split(','):
        name, filt = ch.split('=')
        self.output_channels.append((name, {'filter': filt, 'loader': ImageLoader()}, ))
    # prepare the transforms
    self.iMerge = Merge()
    self.iElastic = ElasticTransform(alpha=1000, sigma=40)
    self.iSplit = Split([0, len(self.input_channels)],
                        [len(self.input_channels), len(self.input_channels) + len(self.output_channels)])
    self.iRCropTrain1 = RandomCropNumpy(size=(train_crop_size1, train_crop_size1))
    self.iRot = RandomRotate()
    self.iCropTrain2 = CenterCropNumpy(size=(train_crop_size2, train_crop_size2))
    self.iCropTest = CenterCropNumpy(size=(test_size, test_size))
    self.dim_ordering = opt.dim_ordering
    self.opt = opt
    self.repeat = 30
    self.input_channel_names = [n for n, _ in self.input_channels]
    self.output_channel_names = [n for n, _ in self.output_channels]
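# The opt.input_channels / opt.output_channels strings are expected in comma-separated
# 'name=filter' form. A minimal standalone sketch of that parsing; the channel names and
# glob patterns in the example spec are illustrative, not taken from any actual dataset:
#
#     def parse_channels(spec):
#         """Parse comma-separated 'name=filter' pairs into (name, config) tuples."""
#         channels = []
#         for ch in spec.split(','):
#             name, filt = ch.split('=')
#             channels.append((name, {'filter': filt}))
#         return channels
#
#     print(parse_channels('TMR=*_TMR.png,IF=*_IF.png'))
#     # [('TMR', {'filter': '*_TMR.png'}), ('IF', {'filter': '*_IF.png'})]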
def __init__(self, opt, force_generate=False):
    super(TransformedCSVImages, self).__init__(opt)
    self.ptrain = os.path.join(opt.workdir, 'train')
    self.ptest = os.path.join(opt.workdir, 'test')
    self.iSplit = Split([0, 2], [2, 3])
    self.test_count = 0