Example No. 1
    def __init__(self, opt):
        self.typeID = DatasetTypeIDs['microtubule']
        train_crop_size1 = opt.fineSize * 2
        train_crop_size2 = opt.fineSize + 200
        train_crop_size3 = opt.fineSize
        test_size = opt.fineSize

        self.input_clip = (0, 5)
        self.output_clip = (2, 100)

        # prepare the transforms
        self.iMerge = Merge()
        self.iElastic = ElasticTransform(alpha=1000, sigma=40)
        self.iSplit = Split([0, 1], [1, 2])
        self.iRot = RandomRotate()
        self.iRCropTrain = RandomCropNumpy(size=(train_crop_size2,
                                                 train_crop_size2))
        self.iCropFTrain = CenterCropNumpy(size=(train_crop_size1,
                                                 train_crop_size1))
        self.iCropTrain = CenterCropNumpy(size=(train_crop_size3,
                                                train_crop_size3))
        self.iCropTest = CenterCropNumpy(size=(test_size, test_size))
        self.ptrain = './datasets/wei-tubulin-ctrl-20170520-images/train'
        self.ptest = './datasets/wei-tubulin-ctrl-20170520-images/test'
        self.dim_ordering = opt.dim_ordering
        self.opt = opt
        self.repeat = 30
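
For reference, the only option attributes this constructor reads are `opt.fineSize` and `opt.dim_ordering`. A minimal sketch of how the derived crop sizes work out, assuming a hypothetical `fineSize` of 256 (the values below are placeholders, not taken from the original code):

from types import SimpleNamespace

# Hypothetical options object; only the attributes the __init__ above reads are set,
# and both values are placeholders for illustration.
opt = SimpleNamespace(fineSize=256, dim_ordering='channels_first')

train_crop_size1 = opt.fineSize * 2    # 512 -> iCropFTrain (CenterCropNumpy)
train_crop_size2 = opt.fineSize + 200  # 456 -> iRCropTrain (RandomCropNumpy)
train_crop_size3 = opt.fineSize        # 256 -> iCropTrain (CenterCropNumpy)
print(train_crop_size1, train_crop_size2, train_crop_size3)  # 512 456 256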
Example No. 2
    def __init__(self, opt):
        self.typeID = DatasetTypeIDs['microtubule']
        train_crop_size1 = opt.fineSize * 2
        train_crop_size2 = opt.fineSize + 200
        train_crop_size3 = opt.fineSize
        test_size = opt.fineSize

        self.input_clip = (0, 5)
        self.output_clip = (2, 100)

        # prepare the transforms
        self.iMerge = Merge()
        self.iElastic = ElasticTransform(alpha=1000, sigma=40)
        self.iSplit = Split([0, 1], [1, 2])
        self.iRot = RandomRotate()
        self.iRCropTrain = RandomCropNumpy(size=(train_crop_size2, train_crop_size2))
        self.iCropFTrain = CenterCropNumpy(size=(train_crop_size1, train_crop_size1))
        self.iCropTrain = CenterCropNumpy(size=(train_crop_size3, train_crop_size3))
        self.iCropTest = CenterCropNumpy(size=(test_size, test_size))
        self.ptrain = '../anet-lite/src/datasets/Christian-TMR-IF-v0.1/train'
        self.ptest = '../anet-lite/src/datasets/Christian-TMR-IF-v0.1/test'
        self.dim_ordering = opt.dim_ordering
        self.opt = opt
        self.repeat = 30
        self.folder_filter = '*'
        self.drift_correction = False
        self.scale_LR = True
Example No. 3
import os
from shutil import copyfile

import numpy as np
from PIL import Image

# LocalizationCrop, LocalizationFrameSampler, HistogramRendering, CenterCropNumpy,
# ThunderstormCSVLoader and FolderDataset are assumed to come from the project's
# own transform/dataset modules.


def generate_image_pairs_from_csv(csv_folder, output_dir, image_per_file=10, A_frame=150, B_frame=0.85, file_filter='*.csv',
                                  top_left=(0, 0), input_size_nm=512*106, pixel_size=20, A_frame_limit=[0, 1.0], B_frame_limit=[0, 1.0],
                                  output_clip=(0, 255), input_clip=(0, 20), target_size=(2560, 2560), center_crop=None, zero_offset=False):
    lCropTrain = LocalizationCrop(fit_data=True, top_left=top_left)
    fSamplerInTest = LocalizationFrameSampler(frame_num=A_frame, frame_limit=A_frame_limit, zero_offset=zero_offset)
    fSamplerOutTest = LocalizationFrameSampler(frame_num=B_frame, frame_limit=B_frame_limit)
    hRender = HistogramRendering(pixel_size=pixel_size, value_range=(0, 255), target_size=target_size)
    if center_crop:
        cropTest = CenterCropNumpy(size=center_crop)
    def transform_train(imgDict):
        table = imgDict['table']
        repeat = imgDict['table.repeat']
        table = lCropTrain(table)
        tableout = fSamplerOutTest(table, index=repeat)
        tablein = fSamplerInTest(table, index=repeat)
        histout = hRender(tableout)
        histin = hRender(tablein)
        if center_crop:
            histin = cropTest(histin)
            histout = cropTest(histout)
        histin = np.clip(histin, 0, 255)
        histout = np.clip(histout, 0, 255)
        return histin, histout, imgDict['table.path'], tablein.f_range, tableout.f_range

    csvLoader = ThunderstormCSVLoader([0, input_size_nm, 0, input_size_nm])
    source_train = FolderDataset(csv_folder,
                                 channels={'table': {'filter': file_filter, 'loader': csvLoader}},
                                 transform=transform_train,
                                 recursive=False,
                                 repeat=image_per_file)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    print('generating images...')
    for i in range(len(source_train)):
        (aa, bb, p, inf_range, outf_range) = source_train[i]
        print(i, inf_range, outf_range)
        name = os.path.split(p)[1]
        fpath = os.path.join(output_dir, name)
        if not os.path.exists(fpath):
            os.makedirs(fpath)

        ha = aa[:,:,0].astype('uint8')
        imA = Image.fromarray(ha)
        imA.save(os.path.join(fpath, 'A_{}_{}.png'.format(i%image_per_file, str(inf_range))))

        hb = bb[:,:,0].astype('uint8')
        imB = Image.fromarray(hb)
        if B_frame == 1.0:
            imB.save(os.path.join(fpath, 'B_{}_{}.png'.format('all', str(outf_range))))
        else:
            imB.save(os.path.join(fpath, 'B_{}_{}.png'.format(i%image_per_file, str(outf_range))))

        lr_path = p.replace('.csv', '.png')
        lr_target_path = os.path.join(fpath, 'LR_'+ name.replace('.csv', '.png'))
        if os.path.exists(lr_path) and not os.path.exists(lr_target_path):
            copyfile(lr_path, lr_target_path)


    print('done')
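
Assuming the function above is importable from the surrounding module, a possible call could look like the following sketch; all paths and values are placeholders, only the keyword names come from the signature above:

generate_image_pairs_from_csv(
    csv_folder='./datasets/thunderstorm-csv',   # hypothetical folder of ThunderSTORM CSV tables
    output_dir='./datasets/generated-pairs',    # hypothetical output folder for the A_/B_ PNGs
    image_per_file=10,          # sample each CSV table 10 times
    A_frame=150,                # frame budget for the input (A) rendering
    B_frame=1.0,                # 1.0 makes the target (B) image be saved as 'B_all_...'
    pixel_size=20,
    target_size=(2560, 2560),
    center_crop=(1024, 1024),
)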
Example No. 4
    def __init__(self, opt):
        self.typeID = DatasetTypeIDs['tubulin']
        self.iRot = RandomRotate()
        self.iMerge = Merge()
        self.iSplit = Split([0, 1], [1, 2])
        self.irCropTrain = RandomCropNumpy(size=(opt.fineSize+100, opt.fineSize+100))
        self.ioCropTrain = CenterCropNumpy(size=[opt.fineSize, opt.fineSize])
        self.iCropTest = CenterCropNumpy(size=(1024, 1024))
        self.iElastic = ElasticTransform(alpha=1000, sigma=40)
        self.iBlur = GaussianBlurring(sigma=1.5)
        self.iPoisson = PoissonSubsampling(peak=['lognormal', -0.5, 0.001])
        self.iBG = AddGaussianPoissonNoise(sigma=25, peak=0.06)
        self.train_count = 0
        self.test_count = 0
        self.dim_ordering = opt.dim_ordering
        self.repeat = 1
        self.opt = opt
Example No. 5
    def __init__(self, opt):
        train_crop_size1 = int(opt.fineSize * 1.45) #pre-crop
        train_crop_size2 = opt.fineSize
        train_crop_size3 = opt.fineSize
        test_size = opt.fineSize

        self.ptrain = os.path.join(opt.workdir, 'train') #'./datasets/Christian-TMR-IF-v0.1/train'
        self.pvalid = os.path.join(opt.workdir, 'valid')
        self.ptest = os.path.join(opt.workdir, 'test') #'./datasets/Christian-TMR-IF-v0.1/test'

        self.input_channels = []
        for ch in opt.input_channels.split(','):
            name, filt = ch.split('=')  # 'filt' avoids shadowing the built-in filter()
            self.input_channels.append((name, {'filter': filt, 'loader': ImageLoader()}))

        self.output_channels = []
        for ch in opt.output_channels.split(','):
            name, filt = ch.split('=')
            self.output_channels.append((name, {'filter': filt, 'loader': ImageLoader()}))

        # prepare the transforms
        self.iMerge = Merge()
        self.iElastic = ElasticTransform(alpha=1000, sigma=40)
        self.iSplit = Split([0, len(self.input_channels)], [len(self.input_channels), len(self.input_channels)+len(self.output_channels)])

        self.iRCropTrain1 = RandomCropNumpy(size=(train_crop_size1, train_crop_size1))
        self.iRot = RandomRotate()
        self.iCropTrain2 = CenterCropNumpy(size=(train_crop_size2, train_crop_size2))

        self.iCropTest = CenterCropNumpy(size=(test_size, test_size))

        self.dim_ordering = opt.dim_ordering
        self.opt = opt
        self.repeat = 30
        self.input_channel_names = [n for n, _ in self.input_channels]
        self.output_channel_names = [n for n, _ in self.output_channels]
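
The channel parsing above expects `opt.input_channels` and `opt.output_channels` to be comma-separated `name=glob` strings. A minimal sketch with hypothetical values (none of the names or patterns below come from the original code):

from types import SimpleNamespace

# Hypothetical options; only the attribute names match what the __init__ above reads.
opt = SimpleNamespace(
    workdir='./datasets/my-experiment',     # expected to contain train/, valid/ and test/
    fineSize=256,
    dim_ordering='channels_first',
    input_channels='widefield=*WF*.png',    # becomes [('widefield', {'filter': '*WF*.png', ...})]
    output_channels='storm=*SR*.png',
)

for ch in opt.input_channels.split(','):
    name, filt = ch.split('=')
    print(name, filt)   # widefield *WF*.png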
Example No. 6
import os

import numpy as np
from PIL import Image

# Project-specific helpers (LocalizationCrop, LocalizationFrameSampler, HistogramRendering,
# CenterCropNumpy, ThunderstormCSVLoader, FolderDataset) are assumed to be imported as in
# Example No. 3.


def generate_images_from_csv(csv_folder, output_dir, frame=1.0, image_per_file=1, file_filter='*.csv', zero_offset=False,
                             top_left=(0, 0), input_size_nm=512*106, pixel_size=20, frame_limit=[0, 1.0],
                             output_clip=(0, 255), input_clip=(0, 20), target_size=(2560, 2560), center_crop=None):
    lCropTrain = LocalizationCrop(fit_data=True, top_left=top_left)
    fSamplerOutTest = LocalizationFrameSampler(frame_num=frame, frame_limit=frame_limit, zero_offset=zero_offset)
    hRender = HistogramRendering(pixel_size=pixel_size, value_range=(0, 255), target_size=target_size)
    if center_crop:
        cropTest = CenterCropNumpy(size=center_crop)
    def transform_train(imgDict):
        table = imgDict['table']
        table = lCropTrain(table)
        repeat = imgDict['table.repeat']
        path = imgDict['table.path']
        tableout = fSamplerOutTest(table, index=repeat)
        path = path + '_' + str(repeat)+ '_' + str(tableout.f_range)
        histout = hRender(tableout)
        if center_crop:
            histout = cropTest(histout)
        histout = np.clip(histout, 0, 255)
        return histout, path

    csvLoader = ThunderstormCSVLoader([0, input_size_nm, 0, input_size_nm])
    source_train = FolderDataset(csv_folder,
                                 channels={'table': {'filter': file_filter, 'loader': csvLoader}},
                                 transform=transform_train,
                                 repeat=image_per_file)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    print('generating images...')
    for i in range(len(source_train)):
        (bb, p) = source_train[i]
        print(i, p)
        name = os.path.split(p)[1]
        fpath = os.path.join(output_dir, name + '_{}.png'.format(frame))
        hb = bb[:,:,0].astype('uint8')
        imB = Image.fromarray(hb)
        imB.save(fpath)
    print('done')
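
As with the pair-generation function, a possible call might look like this sketch; all values are placeholders and only the keyword names come from the signature of generate_images_from_csv above:

generate_images_from_csv(
    csv_folder='./datasets/thunderstorm-csv',   # hypothetical folder of ThunderSTORM CSV tables
    output_dir='./datasets/rendered-images',    # hypothetical output folder for the rendered PNGs
    frame=1.0,                 # appended to each output file name as '_1.0.png'
    image_per_file=1,          # one rendering per CSV file
    pixel_size=20,
    target_size=(2560, 2560),
    center_crop=(1024, 1024),
)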